% (dataset-extraction artifact removed: "text / stringlengths / 56 / 7.94M")
\begin{document} \title{Generating Simple Near-Bipartite Bricks} \author{Nishad Kothari\footnote{Partially supported by {\sc NSERC} grant (RGPIN-2014-04351, J. Cheriyan).} \and Marcelo H. de Carvalho\footnote{Supported by {\sc Fundect-MS} and {\sc CNP}q.}} \UKvardate \date{14 January, 2020} \maketitle \thispagestyle{empty} \begin{abstract} A {\it brick} is a $3$-connected graph such that the graph obtained from it by deleting any two distinct vertices has a perfect matching. A brick $G$ is {\it near-bipartite} if it has a pair of edges $\alpha$ and $\beta$ such that $G-\{\alpha,\beta\}$ is bipartite and matching covered; examples are $K_4$ and the triangular prism~$\overline{C_6}$. The significance of near-bipartite bricks arises from the theory of ear decompositions of {\mcg}s. The object of this paper is to establish a generation procedure which is specific to the class of simple near-bipartite bricks. In particular, we prove that every simple near-bipartite brick $G$ has an edge~$e$ such that the graph obtained from $G-e$ by contracting each edge that is incident with a vertex of degree two is also a simple near-bipartite brick, unless $G$ belongs to any of eight well-defined infinite families. This is a refinement of the brick generation theorem of Norine and Thomas \cite{noth07} which is appropriate for the restricted class of near-bipartite bricks. Earlier, the first author proved a similar generation theorem for (not necessarily simple) near-bipartite bricks \cite{koth16,koth19}; we deduce our main result from this theorem. Our proof is based on the strategy of Carvalho, Lucchesi and Murty \cite{clm08} and uses several of their techniques and results. The results presented here also appear in the Ph.D. thesis of the first author \cite{koth16}. \end{abstract} \tableofcontents This paper is a sequel to a recent paper of the first author \cite{koth19}. 
In the following section, we recall the most important definitions and results from \cite{koth19}, and we explain the contributions of this paper. Please see \cite{koth19} for any missing definitions. The reader who is interested in gaining a deeper understanding should perhaps read the first two sections of \cite{koth19}. \section{Brick Generation} \label{sec:brick-generation} A \mcg\ free of nontrivial tight cuts is called a {\it brace} if it is bipartite; otherwise, it is called a {\it brick}. Lov{\'a}sz~\cite{lova87} proved the remarkable result that any two tight cut decompositions of a \mcg~$G$ yield the same list of bricks and braces (except possibly for multiplicities of edges). In particular, any two tight cut decompositions of~$G$ yield the same number of bricks; this number is denoted by $b(G)$. Edmonds, Lov{\'a}sz and Pulleyblank~\cite{elp82} established the following important equivalence. \begin{thm} A graph~$G$ is a brick if and only if it is $3$-connected and bicritical. \end{thm} An edge~$e$ of a \mcg~$G$ is {\it removable} if $G-e$ is also matching covered; furthermore, it is {\it \binv} if $b(G-e)=b(G)$. A \binv\ edge~$e$ of a brick~$G$ is {\it thin} if the retract of $G-e$ is also a brick. (The retract is the graph obtained by contracting each edge that is incident with a vertex of degree two.) Carvalho, Lucchesi and Murty~\cite{clm06} established the following. \begin{thm} {\sc [Thin Edge Theorem]} \label{thm:clm-thin-bricks} Every brick distinct from $K_4$, $\overline{C_6}$ and the Petersen graph has a thin edge. \end{thm} As a consequence, every brick may be constructed from one of $K_4$, $\overline{C_6}$ and the Petersen graph using four elementary ``expansion operations'' that are described in their paper~\cite{clm06}. In order to establish a recursive procedure for generating simple bricks, one needs the notion of a strictly thin edge. A thin edge~$e$ of a simple brick~$G$ is {\it strictly thin} if the retract of~$G-e$ is simple.
For each brick shown in Figure~\ref{fig:NT-bricks}, its thin edges are indicated by bold lines; it is easily seen that none of these edges is strictly thin. In contrast, consider the Tricorn, shown in Figure~\ref{fig:Tricorn}, which has precisely three thin edges indicated by bold lines; deleting one of them, say~$e$, and taking the retract yields the simple odd wheel~$W_5$. Thus each thin edge of the Tricorn is strictly thin. \begin{figure} \caption{Thin edges of the Tricorn} \label{fig:Tricorn} \end{figure} Next, we describe infinite families of bricks that do not contain any strictly thin edges. Norine and Thomas \cite{noth07} proved that these families, together with the Petersen graph, include all the bricks that are free of strictly thin edges; for this reason, we refer to these families as {\it Norine-Thomas families}, and we refer to their members as {\it Norine-Thomas bricks}. \subsection{Norine-Thomas families} \label{sec:NT-bricks} \noindent {\sc Odd Wheels.} The {\it odd wheel} $W_{2k+1}$, for $k \ge 1$, is defined to be the join of an odd cycle $C_{2k+1}$ and $K_1$. See Figure~\ref{fig:NT-bricks}a. The smallest odd wheel is $K_4$. If $k \ge 2$, then~$W_{2k+1}$ has exactly one vertex of degree $2k+1$, called its {\it hub}, and the edges incident at the hub are called its {\it spokes}. The remaining $2k+1$ vertices lie on a cycle, called the {\it rim}, and they are referred to as {\it rim vertices}. Each member of the remaining four families contains a bipartite matching covered subgraph which is either a `ladder' or a `partial biwheel'. These bipartite graphs are also the main building blocks of additional families of bricks which are of interest in Section~\ref{sec:new-families}. For this reason, we start with a description of these two families of bipartite graphs. \noindent {\sc Ladders.} Let $x_0x_1 \dots x_j$ and $y_0y_1 \dots y_j$ be two vertex-disjoint paths, where $j \geq 2$.
The graph~$K$ obtained by the union of these two paths, and by adding edges $x_iy_i$ for $0 \leq i \leq j$, is called a {\it ladder}, and its edges joining $x_i$ and $y_i$ are referred to as its {\it rungs}. See Figure~\ref{fig:biwheels-ladders}. The two rungs $x_0y_0$ and $x_jy_j$ are {\it external}, and the remaining rungs are {\it internal}. We say that $K$ is {\it odd} ({\it even}) if it has an odd (even) number of rungs. \noindent {\sc Partial Biwheels.} Let $x_0x_1 \dots x_{2j+1}$ be an odd path, where $j \geq 1$. The graph~$K$ obtained by adding two new vertices $u$ and $w$, joining $u$ to vertices in $\{x_0, x_2, \dots, x_{2j}\}$, and joining $w$ to vertices in $\{x_1, x_3, \dots, x_{2j+1}\}$, is called a {\it partial biwheel}; the vertices $x_0$~and~$x_{2j+1}$ are referred to as its {\it ends}, whereas $u$~and~$w$ are referred to as its {\it hubs}; and an edge incident with a hub is called a {\it spoke}. See Figure~\ref{fig:biwheels-ladders}. The two spokes $ux_0$~and~$wx_{2j+1}$ are {\it external}, and the remaining spokes are {\it internal}. \begin{figure} \caption{Partial biwheels (top) and Ladders (bottom)} \label{fig:biwheels-ladders} \end{figure} When referring to a ladder or to a partial biwheel, say~$K[A,B]$, with external rungs/spokes $au$ and $bw$, we adopt the convention that $a,w \in A$ and $b, u \in B$; furthermore, when $K$ is a partial biwheel, $u$ and $w$ shall denote its hubs; as shown in Figure~\ref{fig:biwheels-ladders}. (Sometimes, we may also use subscript notation, such as $A_i$, $B_i$, $a_iu_i$ and $b_iw_i$ where $i$ is an integer, and this convention extends naturally.) It should be noted that a partial biwheel of order six is also a ladder. However, a partial biwheel of order eight or more has only two vertices of degree two, namely, its ends; whereas every ladder has four such vertices. 
We remark that a {\it biwheel}, as defined by McCuaig \cite{mccu01}, has order at least eight and contains an additional edge joining its ends; and these constitute an important class of braces. We now proceed to describe the remaining four Norine-Thomas families using ladders and partial biwheels. \noindent {\sc Prisms, M{\"o}bius Ladders and Truncated Biwheels.} Let $H[A,B]$ denote either a ladder or a partial biwheel of order~$n$, with external rungs/spokes $au$ and $bw$, and let $G$ be the graph obtained from~$H$ by adding two edges, namely, $aw$ and $bu$. If $H$ is an odd ladder then $G$ is a {\it prism} and it is denoted by~$P_n$, see Figure~\ref{fig:NT-bricks}b. If $H$ is an even ladder then $G$ is a {\it M{\"o}bius ladder} and it is denoted by~$M_n$, see Figure~\ref{fig:NT-bricks}f. Finally, if $H$ is a partial biwheel then $G$ is a {\it truncated biwheel} and it is denoted by~$T_n$, see Figure~\ref{fig:NT-bricks}c. Note that $\overline{C_6}$ is the smallest prism as well as the smallest truncated biwheel. For convenience, we shall consider $K_4$ to be the smallest M{\"o}bius ladder. \begin{figure} \caption{(a) Odd wheel $W_7$; (b) Prism $P_{10}$; (c) Truncated biwheel $T_8$; (d) Staircase $St_8$; (e) Staircase $St_{10}$; (f) M{\"o}bius ladder $M_8$} \label{fig:NT-bricks} \end{figure} \noindent {\sc Staircases.} Let $K[A_1,B_1]$ denote a ladder of order $n$, with external rungs $a_1u_1$ and $b_1w_1$. Then the graph~$G$ obtained from~$K$, by adding two new vertices $a_2$ and $b_2$, and by adding five new edges $a_1a_2, u_1a_2, b_1b_2, w_1b_2$ and $a_2b_2$, is called a {\it staircase}, and it is denoted by $St_{n+2}$. See Figures~\ref{fig:NT-bricks}d and \ref{fig:NT-bricks}e. Using this terminology, the theorem of Norine and Thomas \cite{noth07} may be stated as follows. \begin{thm} {\sc [Strictly Thin Edge Theorem]} \label{thm:nt-strictly-thin-bricks} Let $G$ be a simple brick. If $G$ is free of strictly thin edges then $G$ is either the Petersen graph, or it is an odd wheel, a prism, a M{\"o}bius ladder, a truncated biwheel or a staircase.
\end{thm} It should be noted that Norine and Thomas did not state their results in terms of strictly thin edges. Subsequently, Carvalho, Lucchesi and Murty \cite{clm08} used their Thin Edge Theorem (\ref{thm:clm-thin-bricks}) to deduce the Strictly Thin Edge Theorem (\ref{thm:nt-strictly-thin-bricks}). The following result of Norine and Thomas \cite{noth07} is an immediate consequence of Theorem~\ref{thm:nt-strictly-thin-bricks}. \begin{thm} \label{thm:nt-simple-brick-reduction} Given any simple brick~$G$, there exists a sequence $G_1, G_2, \dots, G_k$ of simple bricks such that: \begin{enumerate}[(i)] \item $G_1$ is a Norine-Thomas brick, \item $G_k := G$, and \item for $2 \leq i \leq k$, there exists a strictly thin edge~$e_i$ of~$G_i$ such that $G_{i-1}$ is the retract of~$G_i - e_i$. \end{enumerate} \end{thm} The above theorem implies that every simple brick can be generated from one of the Norine-Thomas bricks by means of four expansion operations as described by Carvalho, Lucchesi and Murty (see \cite{clm06}). These expansion operations are simply the inverse of the operation of deleting a strictly thin edge and then taking the retract. We remark that Norine and Thomas proved a generalization of Theorem~\ref{thm:nt-simple-brick-reduction}, which they refer to as the `splitter theorem for bricks', since it is motivated by the splitter theorem for $3$-connected graphs due to Seymour \cite{seym80}. The notions of thin and strictly thin edges are easily generalized to braces (see \cite{clm08}). A `splitter theorem for braces' was established by McCuaig~\cite{mccu01}. \subsection{Near-Bipartite Bricks} A nonbipartite \mcg~$G$ is {\it \nb} if it has a pair $R:=\{\alpha,\beta\}$ of edges such that $H:=G-R$ is matching covered and bipartite. Such a pair $R$ is called a {\it removable doubleton}. Furthermore, if $G$ happens to be a brick, we say that $G$ is a {\it \nb\ brick}. 
For instance, $K_4$ and $\overline{C_6}$ are \nb\ bricks, and each of them has three distinct removable doubletons. On the other hand, the Petersen graph is not \nb. (A result of Carvalho, Lucchesi and Murty \cite{clm02b} implies that if $G$ is a \nb\ \mcg\ then $b(G)=1$.) Observe that the edge $\alpha$ joins two vertices in one color class of~$H$, and that $\beta$ joins two vertices in the other color class. Consequently, if $M$ is any perfect matching of~$G$ then $\alpha \in M$ if and only~if $\beta \in M$. (In particular, neither $\alpha$ nor $\beta$ is a removable edge of~$G$.) It is easily verified that every Norine-Thomas brick, except for the odd wheels and for the Petersen graph, is near-bipartite. The difficulty in using Theorem~\ref{thm:nt-simple-brick-reduction} as an induction tool for studying \nb\ bricks is that even if $G_k := G$ is a \nb\ brick, there is no guarantee that all of the intermediate bricks $G_1, G_2, \dots, G_{k-1}$ are also \nb. For instance, the brick shown in Figure~\ref{fig:double-biwheel-of-typeI}a is \nb\ with a (unique) removable doubleton~\mbox{$R:=\{\alpha,\beta\}$}. Although the edge~$e$ is strictly thin, the retract of~$G-e$, as shown in Figure~\ref{fig:double-biwheel-of-typeI}b, is not \nb\ since it has three edge-disjoint triangles. \begin{figure} \caption{(a) A \nb\ brick~$G$ with a thin edge~$e$; (b) The retract of~$G-e$ is not \nb} \label{fig:double-biwheel-of-typeI} \end{figure} In other words, deleting an arbitrary thin edge may not preserve the property of being \nb. In this sense, the Thin Edge Theorem~(\ref{thm:clm-thin-bricks}) and the Strictly Thin Edge Theorem~(\ref{thm:nt-strictly-thin-bricks}) are inadequate for obtaining inductive proofs of results that pertain only to the class of \nb\ bricks. To fix this problem, the first author started this line of investigation and decided to look for thin edges whose deletion preserves the property of being \nb.
Kothari \cite{koth16,koth19} proved a `thin edge theorem' for near-bipartite bricks; in particular, he showed that every near-bipartite brick~$G$ distinct from $K_4$ and $\overline{C_6}$ has a thin edge $e$ such that the retract of $G-e$ is also near-bipartite (see Theorem~\ref{thm:Rthin-nb-bricks}). In the present paper, we use this to deduce a `strictly thin edge theorem' for near-bipartite bricks. This is similar to the approach of Carvalho, Lucchesi and Murty \cite{clm08} --- they use their Thin Edge Theorem (\ref{thm:clm-thin-bricks}) to deduce the Strictly Thin Edge Theorem (\ref{thm:nt-strictly-thin-bricks}) of Norine and Thomas. As in \cite{koth16,koth19}, we find it convenient to fix a removable doubleton~$R$ (of the brick under consideration), and then look for a strictly thin edge whose deletion preserves this removable doubleton. To make this precise, we will first define a special type of removable edge which we call `\Rcomp'. \subsubsection{\Rcomp\ Edges} \label{sec:Rcompatible-edges} We use the abbreviation {\it \Rgraph} for a \nb\ graph~$G$ with (fixed) removable doubleton~$R$, and we shall refer to $H:=G-R$ as its {\it underlying bipartite graph}. In the same spirit, an {\it \Rbrick} is a brick with a removable doubleton~$R$. A removable edge~$e$ of an \Rgraph~$G$ is {\it \Rcomp} if it is removable in~$H$ as well. Equivalently, an edge~$e$ is \Rcomp\ if $G-e$ and $H-e$ are both matching covered. For instance, the graph~$St_8$ shown in Figure~\ref{fig:NT-bricks}d has two removable doubletons \mbox{$R:=\{\alpha,\beta\}$} and $R' := \{\alpha', \beta'\}$, and its unique removable edge~$e$ is \Rcomp\ as well as \comp{R'}. Now, let $G$ denote the \Rbrick\ shown in Figure~\ref{fig:double-biwheel-of-typeI}a, where $R:=\{\alpha,\beta\}$. The thin edge~$e$ is incident with an edge of~$R$ at a cubic vertex; consequently, $H-e$ has a vertex whose degree is only one, and so it is not matching covered. In particular, $e$ is not \Rcomp. 
The brick shown in Figure~\ref{fig:pseudo-biwheel} has two distinct removable doubletons $R:=\{\alpha,\beta\}$ and $R':=\{\alpha',\beta'\}$. Its edges $e$~and~$f$ are both \comp{R'}, but neither of them is \Rcomp. \begin{figure} \caption{$e$ and $f$ are \comp{R'}, but neither is \Rcomp} \label{fig:pseudo-biwheel} \end{figure} Observe that, if $e$ is an \Rcomp\ edge of an \Rgraph~$G$, then $R$ is a removable doubleton of~$G-e$, whence $G-e$ is also \nb\ and thus $b(G-e)=1$. Consequently, every \Rcomp\ edge is \binv. Furthermore, as shown in \cite{koth16,koth19}, if $e$ is an \Rcomp\ edge of an \Rbrick~$G$ then the unique brick~$J$ of~$G-e$ is also an \Rbrick; in particular, $J$ is \nb. The following is a special case of a theorem of Carvalho, Lucchesi and Murty \cite{clm99}. \begin{thm} {\sc [\Rcomp\ Edge Theorem]} \label{thm:clm-Rcompatible-nb-bricks} Every \Rbrick\ distinct from $K_4$ and $\overline{C_6}$ has an \Rcomp\ edge. \end{thm} In \cite{clm99}, they proved a stronger result. In particular, they showed the existence of an \Rcomp\ edge in {\Rgraph}s with minimum degree at least three. (They did not use the term `\Rcomp'.) Using the notion of \mbox{$R$-compatibility}, we now define a type of thin edge whose deletion preserves the property of being \nb. \subsubsection{\Rthin\ and Strictly \Rthin\ Edges} \label{sec:Rthin-edges} A thin edge~$e$ of an \Rbrick~$G$ is {\it \Rthin} if it is \Rcomp. Equivalently, an edge~$e$ is \Rthin\ if it is \Rcomp\ as well as thin, and in this case, the retract of~$G-e$ is also an \Rbrick. As noted earlier, the graph $St_8$, shown in Figure~\ref{fig:NT-bricks}d, has two removable doubletons $R$~and~$R'$. Its unique removable edge~$e$ is \Rthin\ as well as \thin{R'}. Using the \Rcomp\ Edge Theorem (\ref{thm:clm-Rcompatible-nb-bricks}) of Carvalho, Lucchesi and Murty, the following `thin edge theorem' was proved by Kothari \cite{koth16,koth19}.
\begin{thm} {\sc [\Rthin\ Edge Theorem]} \label{thm:Rthin-nb-bricks} Every \Rbrick\ distinct from $K_4$ and $\overline{C_6}$ has an \Rthin\ edge. \end{thm} Each \Rcomp\ edge of an \Rbrick\ may be associated with two integer parameters --- {\it rank} and {\it index} --- as defined in \cite{koth19}. In fact, Kothari proved the following stronger result that immediately implies the \Rthin\ Edge Theorem (\ref{thm:Rthin-nb-bricks}) since the rank and index are bounded quantities. \begin{thm}\label{thm:rank-plus-index} Let $G$ be an \Rbrick\ which is distinct from $K_4$~and~$\overline{C_6}$, and let $e$ denote an \Rcomp\ edge of~$G$. Then one of the following alternatives holds: \begin{itemize} \item either $e$ is \Rthin, \item or there exists another \Rcomp\ edge~$f$ such that: \begin{enumerate}[(i)] \item $f$ has an end each of whose neighbours in~$G-e$ lies in a barrier of~$G-e$, and \item ${\sf rank}(f) + {\sf index}(f) > {\sf rank}(e) + {\sf index}(e)$. \end{enumerate} \end{itemize} \end{thm} An \Rthin\ edge~$e$ of a simple \Rbrick~$G$ is {\it \sRthin} if it is strictly thin. In other words, a \sRthin\ edge~$e$ is one which is \Rcomp\ as well as strictly thin; and in this case, the retract of~$G-e$ is also a simple \Rbrick. For instance, let $G$ denote the \Rbrick\ shown in Figure~\ref{fig:strictly-Rthin}(a), where \mbox{$R:=\{\alpha,\beta\}$}. The retract of~$G-e$ is the truncated biwheel~$T_8$ shown in Figure~\ref{fig:strictly-Rthin}(b); consequently, $e$ is \sRthin. \begin{figure} \caption{Edge $e$ is \sRthin} \label{fig:strictly-Rthin} \end{figure} Recall that the Norine-Thomas bricks are precisely those simple bricks which are free of strictly thin edges. In particular, every $R$-brick, which is a member of the Norine-Thomas families, is free of \sRthin\ edges. A natural question arises as to whether there are any simple {\Rbrick}s, different from the Norine-Thomas bricks, which are also free of \sRthin\ edges.
It turns out that there indeed are such bricks; we have already encountered two examples in Figures~\ref{fig:double-biwheel-of-typeI}a and \ref{fig:pseudo-biwheel}, as explained below. Let $G$ denote the \Rbrick, shown in Figure~\ref{fig:double-biwheel-of-typeI}a, where $R:=\{\alpha, \beta\}$ is its unique removable doubleton. It can be checked that $G$ has precisely four strictly thin edges, depicted by bold lines; these are similar under the automorphisms of the graph. As noted earlier, if $e$ is any of these edges, then $e$ is not \Rcomp; furthermore, the retract of~$G-e$ is isomorphic to the graph shown in Figure~\ref{fig:double-biwheel-of-typeI}b, which is not even near-bipartite as it has three edge-disjoint triangles. Thus, the generation of~$G$ using the Norine-Thomas procedure cannot be achieved within the class of \nb\ bricks. Now, let $G$ denote the brick shown in Figure~\ref{fig:pseudo-biwheel}; it has two removable doubletons $R:=\{\alpha,\beta\}$ and $R':=\{\alpha',\beta'\}$. It may be verified that $G$ has precisely two strictly thin edges, namely $e$ and $f$, each of which is \comp{R'} but neither is \Rcomp. In particular, $G$ is free of \sRthin\ edges; in this sense it is similar to the graph in Figure~\ref{fig:double-biwheel-of-typeI}a. On the other hand, $G$ has strictly \thin{R'}\ edges; if $e$ is any such edge then the retract of~$G-e$ is a simple \nb\ brick with removable doubleton~$R'$. In this sense, $G$ is different from the graph in Figure~\ref{fig:double-biwheel-of-typeI}. \subsection{Families of {\Rbrick}s free of Strictly \Rthin\ Edges} \label{sec:new-families} We will introduce seven infinite families of simple {\Rbrick}s which are free of \sRthin\ edges, and are different from the Norine-Thomas families. The members of these will be described using their specific bipartite subgraphs, each of which is either a ladder or a partial biwheel; see Figure~\ref{fig:biwheels-ladders}. 
The occurrence of these subgraphs may be justified as follows. Let $G$ be a simple \Rbrick\ which is free of \sRthin\ edges. If $e$ is any \Rthin\ edge of~$G$, then at least one end of~$e$ is cubic and the retract of~$G-e$ has multiple edges. These strictures can be used to deduce that $G$ contains either a ladder or a partial biwheel, or both, as subgraphs. In our descriptions of these families, we use $\alpha$~and~$\beta$ to denote the edges of the (fixed) removable doubleton~$R$. Apart from~$R$, a member may have at most one removable doubleton which will be denoted as~$R':=\{\alpha',\beta'\}$. We adopt the notational conventions stated in Section~\ref{sec:NT-bricks}. (Recall that a partial biwheel of order six is also a ladder; for this reason, some of our families overlap.) \noindent {\sc Pseudo-Biwheels.} Let $K[A_1,B_1]$ denote a partial biwheel, of order at least eight, and with external spokes $a_1u_1$ and $b_1w_1$. Then the graph~$G$ obtained from~$K$, by adding two new vertices $a_2$ and $b_2$, and by adding five new edges $\alpha:=a_1a_2, \alpha':=u_1a_2, \beta:=b_1b_2, \beta':=w_1b_2$ and $a_2b_2$, is called a {\it pseudo-biwheel}. Figure~\ref{fig:pseudo-biwheel} shows the smallest pseudo-biwheel. It is worth comparing the above with our description of staircases in Section~\ref{sec:NT-bricks}. Although a pseudo-biwheel $G$ is free of \sRthin\ edges, the two external spokes of~$K$, namely $a_1u_1$ and $b_1w_1$, are both strictly \thin{R'}. In order to describe the members of the remaining six families, we need two (sub)graphs. For \mbox{$i \in \{1,2\}$}, let $K_i[A_i,B_i]$ denote either a ladder or a partial biwheel with external rungs/spokes $a_iu_i$ and $b_iw_i$, such that $K_1$ and $K_2$ are disjoint.
\begin{figure} \caption{(a) A double ladder of type~I ; (b) A laddered biwheel of type~I is obtained by identifying $u_1$ with $u_2$ and likewise $w_1$ with $w_2$} \label{fig:typeI-families} \end{figure} \noindent {\sc Double Biwheels, Double Ladders and Laddered Biwheels of Type~I.} Let the graph~$G$ be obtained from $K_1 \cup K_2$, by adding edges $\alpha:=a_1a_2$ and $\beta:=b_1b_2$, by identifying vertices $u_1$~and~$u_2$, and by identifying vertices $w_1$~and~$w_2$. There are three possibilities depending on the graphs $K_1$ and $K_2$. In the case in which $K_1$ and $K_2$ are both partial biwheels, $G$ is a {\it double biwheel of type~I}. Likewise, in the case in which $K_1$ and $K_2$ are both ladders, $G$ is a {\it double ladder of type~I}. Finally, when one of $K_1$ and $K_2$ is a partial biwheel and the other one is a ladder, $G$ is a {\it laddered biwheel of type~I}. A member of any of these families has a unique removable doubleton~$R$, and is free of \sRthin\ edges. The graph in Figure~\ref{fig:double-biwheel-of-typeI}a is the smallest member of each of these families, although its drawing is suggestive of a double biwheel. Figure~\ref{fig:typeI-families}a shows a double ladder. A laddered biwheel is obtained from the graph in Figure~\ref{fig:typeI-families}b by identifying $u_1$~with~$u_2$, and likewise, $w_1$~with~$w_2$. \noindent {\sc Double Biwheels, Double Ladders and Laddered Biwheels of type~II.} Let the graph~$G$ be obtained from $K_1 \cup K_2$, by adding four edges, namely, $\alpha:=a_1a_2$, \mbox{$\beta:=b_1b_2$}, $\alpha':=u_1w_2$ and $\beta':=w_1u_2$. As before, we have three possibilities. In the case in which $K_1$~and~$K_2$ are both partial biwheels of order at least eight, $G$ is a {\it double biwheel of type~II}. Likewise, in the case in which $K_1$~and~$K_2$ are both ladders, $G$ is a {\it double ladder of type~II}. 
Finally, when one of $K_1$~and~$K_2$ is a partial biwheel of order at least eight, and the other one is a ladder, $G$ is a {\it laddered biwheel of type~II}. A member of any of these families has two removable doubletons $R$ and $R'$, and it is free of \sRthin\ edges. However, a double biwheel or a laddered biwheel as shown in Figure~\ref{fig:typeII-families} has \sthin{R'}\ edges; these are the external spokes of a partial biwheel of order at least eight as depicted by the bold lines in the figure. \begin{figure} \caption{(a) A laddered biwheel of type II ; (b) A double biwheel of type II} \label{fig:typeII-families} \end{figure} \begin{figure} \caption{A double ladder of type II} \label{fig:double-ladder-of-typeII} \end{figure} On the other hand, a double ladder, as shown in Figure~\ref{fig:double-ladder-of-typeII}, is free of \sthin{R'}\ edges as well. This may be explained as follows. Every double ladder is cubic, and it has precisely four strictly thin edges; these are the external rungs of the two ladders, depicted by bold lines in the figure. One end of any such edge, say~$e$, is incident with an edge of~$R$ and the other end is incident with an edge of~$R'$; since each end of~$e$ is cubic, it is neither \Rcomp\ nor \comp{R'}. Using a strengthening (see Theorem~\ref{thm:rank-plus-index}) of the \Rthin\ Edge Theorem~(\ref{thm:Rthin-nb-bricks}), we will prove that the seven families described above and four of the Norine-Thomas families are the only simple {\Rbrick}s which are free of \sRthin\ edges. \begin{thm} {\sc [Strictly \Rthin\ Edge Theorem]} \label{thm:strictly-Rthin-nb-bricks} Let $G$ be a simple \Rbrick. 
If $G$ is free of \sRthin\ edges then $G$ belongs to one of the following infinite families: \begin{multicols}{2} \begin{enumerate}[(i)] \item Truncated biwheels \item Prisms \item M{\"o}bius ladders \item Staircases \item Pseudo-biwheels \item Double biwheels of type I \item Double ladders of type I \item Laddered biwheels of type I \item Double biwheels of type II \item Double ladders of type II \item Laddered biwheels of type II \end{enumerate} \end{multicols} \end{thm} We present a proof of the above theorem in Section \ref{sec:proof-of-strictly-Rthin-edge-theorem}. As mentioned earlier, our proof is inspired by the proof of the Strictly Thin Edge Theorem (\ref{thm:nt-strictly-thin-bricks}) given by Carvalho et al. \cite{clm08}, and uses several of their results and techniques. We shall denote by $\mathcal{N}$ the union of all of the eleven families which appear in the statement of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. The following is an immediate consequence. \begin{thm} \label{thm:simple-nb-brick-reduction} Given any simple \Rbrick~$G$, there exists a sequence $G_1, G_2, \dots, G_k$ of simple {\Rbrick}s such that: \begin{enumerate}[(i)] \item $G_1 \in \mathcal{N}$, \item $G_k:=G$, and \item for $2 \leq i \leq k$, there exists an \Rthin\ edge~$e_i$ of~$G_i$ such that $G_{i-1}$ is the retract of~$G_i - e_i$. \end{enumerate} \end{thm} In other words, every simple \Rbrick\ can be generated from some member of~$\mathcal{N}$ by repeated application of the expansion operations such that at each step we have a simple \Rbrick. Finally, recall that members of three of the aforementioned families do have \sthin{R'}\ edges, where $R':=\{\alpha',\beta'\}$ in our description of these families; these are pseudo-biwheels, double biwheels of type~II and laddered biwheels of type~II. In view of this, we say that a strictly thin edge~$e$ of a simple \nb\ brick~$G$ is {\it compatible} if it is \Rcomp\ for some removable doubleton~$R$. 
We thus have the following theorem (with eight infinite families) alluded to in the abstract. \begin{thm} \label{thm:compatible-strictly-thin-nb-bricks} Let $G$ be a simple near-bipartite brick. If $G$ is free of compatible strictly thin edges then $G$ belongs to one of the following infinite families: \begin{multicols}{2} \begin{enumerate}[(i)] \item Truncated biwheels \item Prisms \item M{\"o}bius ladders \item Staircases \item Double biwheels of type I \item Double ladders of type I \item Laddered biwheels of type I \item Double ladders of type II \end{enumerate} \end{multicols} \end{thm} Four of the families in the above theorem are Norine-Thomas families; these are free of strictly thin edges. As we did in Figure~\ref{fig:double-biwheel-of-typeI}, it may be verified that if $G$ is a member of any of the remaining four families and $e$ is any strictly thin edge of~$G$ then the retract~$J$ of~$G-e$ is not near-bipartite. (For example, consider the graph~$G$ and edge~$e$ shown in Figure~\ref{fig:double-ladder-of-typeII}, and let $J$ be the retract of~$G-e$. It can be checked that $J$ has four odd cycles, $C_0, C_1, C_2$ and $C_3$, such that $C_1, C_2$ and $C_3$ are edge-disjoint with $C_0$, and furthermore, there is no single edge which belongs to all three of them.) For the rest of this paper, our goal is to present a complete proof of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. This result and its proof also appear in the Ph.D. thesis of the first author \cite[Chapter 6]{koth16}. \noindent {\bf Organization of this paper:} \noindent In Section~\ref{sec:R-configurations}, we define two special types of subgraphs, namely, an `\Rbiwheel' and an `\Rladder', we state a few theorems related to these configurations without proofs, and we conclude with a proof sketch of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. 
In Sections~\ref{sec:R-thin} and \ref{sec:properties-of-Rconfigurations}, we prove the technical results and theorems that are stated in Section~\ref{sec:R-configurations}. Finally, in Section~\ref{sec:proof-of-strictly-Rthin-edge-theorem}, we provide a complete proof of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. \section{{\Rconf}s}\label{sec:R-configurations} Recall the definitions of ladders and partial biwheels from Section~\ref{sec:NT-bricks}. In our descriptions of the eleven families that appear in the statement of Theorem~\ref{thm:strictly-Rthin-nb-bricks}, we constructed their members using either one or two disjoint bipartite matching covered graphs, each of which is either a ladder or a partial biwheel, and thereafter, adding a few vertices and/or edges and possibly identifying two pairs of vertices. As we will see, these constructions are indicative of how these graphs appear in our proof of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. In this section, we will define two special types of subgraphs, namely, an `\Rbiwheel' and an `\Rladder'; we will conclude with a proof sketch of Theorem~\ref{thm:strictly-Rthin-nb-bricks}. For the rest of this paper, we adopt the following notational and figure conventions. \begin{Not} \label{Not:Rbrick-doubleton} For a simple \Rbrick~$G$, we shall denote by $H[A,B]$ the underlying bipartite graph $G-R$. We let $\alpha$ and $\beta$ denote the constituent edges of~$R$, and we adopt the convention that $\alpha:=a_1a_2$ has both ends in~$A$, whereas $\beta:=b_1b_2$ has both ends in~$B$. We denote by~$V(R)$ the set~$\{a_1,a_2,b_1,b_2\}$. Furthermore, in all of the figures, the hollow vertices are in~$A$, and the solid vertices are in~$B$. \end{Not} We will also adopt the following notational conventions for a subgraph which is either a ladder or a partial biwheel.
\begin{Not} \label{Not:biwheel-ladder-convention} When referring to a subgraph~$K$ of~$H$, such that $K$ is either a ladder or a partial biwheel with \external\ $au$ and $bw$, we adopt the convention that $a,w \in A$ and $b, u \in B$; furthermore, when $K$ is a partial biwheel, $u$~and~$w$ shall denote its hubs; as shown in Figures~\ref{fig:Rbiwheel-configuration}~and~\ref{fig:Rladder-configuration}. (We may also use subscript notation, such as $a_iu_i$ and $b_iw_i$ where $i$ is an integer, and this convention extends naturally.) \end{Not} \subsection{{\Rbiwheel}s} \label{sec:Rbiwheel-configurations} Let $K$ be a subgraph of $H$ such that~$K$ is a partial biwheel with external spokes $au$~and~$bw$; see Figure~\ref{fig:Rbiwheel-configuration}. We say that $K$ is an {\it \Rbiwheel} of~$G$ if it satisfies the following conditions: \begin{enumerate}[{\it (i)}] \item in~$G$, the hubs $u$ and $w$ are both noncubic, and every other vertex of~$K$ is cubic, \item the ends of~$K$, namely $a$~and~$b$, both lie in~$V(R)$, and, \item in~$G$, every internal spoke of~$K$ is an \Rthin\ edge whose index is one. \end{enumerate} \begin{figure} \caption{An \Rbiwheel; in~$G$, the free corners (hubs) $u$ and $w$ are noncubic, and every other vertex is cubic} \label{fig:Rbiwheel-configuration} \end{figure} A pseudo-biwheel, as shown in Figure~\ref{fig:pseudo-biwheel-Rconfiguration}, has two removable doubletons~$R:=\{\alpha,\beta\}$ and $R':=\{\alpha',\beta'\}$. The subgraph~$K$, depicted by solid lines, is an $R$-biwheel configuration. (To see this, note that every internal spoke of~$K$ is an \Rthin\ edge of index one.) However, $K$ is not an $R'$-biwheel configuration because its ends $a$ and $b$ are not incident with edges of~$R'$. 
\begin{figure} \caption{A pseudo-biwheel has only one $R$-biwheel configuration} \label{fig:pseudo-biwheel-Rconfiguration} \end{figure} \subsection{{\Rladder}s} \label{sec:Rladder-configurations} Let $K$ be a subgraph of~$H$ such that $K$ is a ladder with external rungs $au$ and $bw$; see Figure~\ref{fig:Rladder-configuration}. We say that $K$ is an {\it \Rladder} of~$G$ if it satisfies the following conditions: \begin{enumerate}[{\it (i)}] \item in~$G$, every vertex of~$K$, except possibly for~$u$ and $w$, is cubic, \item the vertices $a$ and $b$ both lie in~$V(R)$, and, \item in~$G$, every internal rung of~$K$ is an \Rthin\ edge whose index is two. \end{enumerate} \begin{figure} \caption{Two {\Rladder}s} \label{fig:Rladder-configuration} \end{figure} A prism of order~$n$ has $\frac{n}{2}$ removable doubletons. If $R:=\{\alpha,\beta\}$ is a fixed removable doubleton of a prism~$G$ of order ten or more, then the graph~$H=G-R$ is itself an \Rladder, as shown in Figure~\ref{fig:prism-Rconfiguration}. (An analogous statement holds for M{\"o}bius ladders of order eight or more.) \begin{figure} \caption{A prism has only one $R$-ladder configuration} \label{fig:prism-Rconfiguration} \end{figure} \subsection{Corners, Rungs and Spokes} We shall often need the flexibility of referring to a subgraph~$K$ which is either an \Rladder\ or an \Rbiwheel, and in this case, we simply write that $K$ is an {\it \Rconf}. Additionally, we may also state that $K$ has \external\ $au$~and~$bw$ (possibly with subscript notation); in this case, we implicitly adopt the conventions stated in Notation~\ref{Not:biwheel-ladder-convention}, and we refer to $a, u, b$ and $w$ as the {\it corners} of~$K$. Furthermore, as shown in Figures~\ref{fig:Rbiwheel-configuration} and \ref{fig:Rladder-configuration}, we will assume that $a, b \in V(R)$. 
We refer to $u$ and $w$ as the {\it free corners} of~$K$; these may lie in~$V(R)$ as in Figure~\ref{fig:prism-Rconfiguration}, or they may not lie in~$V(R)$ as in Figure~\ref{fig:pseudo-biwheel-Rconfiguration}. Observe that any vertex of~$K$, which is not a corner, does not lie in~$V(R)$. For any two distinct rungs/spokes of an \Rconf~$K$, say $e$ and $f$, we say that $e$~and~$f$ are {\it consecutive}, or equivalently, that $e$ is {\it consecutive with}~$f$, whenever an end of~$e$ which is not a free corner is adjacent with an end of~$f$ which is also not a free corner. Clearly, each internal rung (spoke) is consecutive with two rungs (spokes); whereas each external rung (spoke) is consecutive with only one rung (spoke) and the latter is internal. Now, let $e$ denote an internal rung (spoke) of~$K$, and let $f$~and~$g$ denote the two rungs (spokes) with which $e$ is consecutive. By definition, $e$ is an \Rthin\ edge of~$G$. Observe that $f$~and~$g$ are multiple edges in the retract of~$G-e$; consequently, $e$ is not strictly thin. \subsection{Two distinct $R$-configurations} \begin{figure} \caption{A laddered biwheel of type~II has two vertex-disjoint {\Rconf}s} \label{fig:laddered-biwheel-typeII-Rconfigurations} \end{figure} A laddered biwheel of type~II, as shown in Figure~\ref{fig:laddered-biwheel-typeII-Rconfigurations}, has two removable doubletons \mbox{$R:=\{\alpha,\beta\}$} and \mbox{$R':=\{\alpha',\beta'\}$}. Observe that the graph obtained by removing the edge set~$R \cup R'$ has two connected components, of which one is an \Rladder\ with external rungs $a_1u_1$ and $b_1w_1$, and the other is an \Rbiwheel\ with external spokes $a_2u_2$ and $b_2w_2$. In this case, the two $R$-configurations are vertex-disjoint. 
On the other hand, a double ladder of type~I, as shown in Figure~\ref{fig:double-ladder-typeI-Rconfigurations}, has only one removable doubleton~$R:=\{\alpha,\beta\}$ and it has two {\Rladder}s which share their free corners $u_1$~and~$w_1$, but are otherwise vertex-disjoint. One of these is depicted by dashed lines, and it has external rungs $a_1u_1$~and~$b_1w_1$, whereas the other one has external rungs $a_2u_1$~and~$b_2w_1$. \begin{figure} \caption{A double ladder of type~I has two {\Rconf}s} \label{fig:double-ladder-typeI-Rconfigurations} \end{figure} The reader is advised to check that members of all of the eleven families that appear in Theorem~\ref{thm:strictly-Rthin-nb-bricks}, except for $K_4$ and $\overline{C_6}$, have either one or two $R$-configurations for an appropriately chosen removable doubleton~$R$. (The choice of~$R$ matters only in the case of three families, namely, pseudo-biwheels, double biwheels of type~II and laddered biwheels of type~II. Figure~\ref{fig:pseudo-biwheel-Rconfiguration} shows a pseudo-biwheel and its two removable doubletons.) In order to sketch a proof of Theorem~\ref{thm:strictly-Rthin-nb-bricks}, we will require a few results which are stated next; their proofs will appear in later sections. In particular, the following proposition states that two distinct {\Rconf}s are either vertex-disjoint, or they have the same free corners but are otherwise vertex-disjoint; its proof appears in Section~\ref{sec:proof-Rconfigurations-almost-disjoint}. \begin{prop} {\sc [$R$-configurations are Almost Disjoint]} \label{prop:Rconfigurations-almost-disjoint} Let $G$ be a simple \Rbrick, and let $K_1$ denote an \Rconf\ with free corners $u_1$ and $w_1$. If $K_2$ is any \Rconf\ distinct from~$K_1$, then precisely one of the following statements holds: \begin{enumerate}[(i)] \item $K_1$ and $K_2$ are vertex-disjoint, or, \item $u_1$ and $w_1$ are the free corners of~$K_2$, and $K_2$ is otherwise vertex-disjoint with~$K_1$. 
\end{enumerate} \end{prop} By the above proposition, the only vertices that can be possibly shared between two distinct {\Rconf}s are their respective free corners. The remaining two corners of each $R$-configuration lie in~$V(R)$. Since $|V(R)|=4$, we immediately have the following consequence. \begin{cor} \label{cor:at-most-two-Rconfigurations} A simple $R$-brick has at most two distinct $R$-configurations. \qed \end{cor} For instance, if $G$ is a Norine-Thomas brick or if it is a pseudo-biwheel then it has only one \Rconf. On the other hand, if $G$ is a double biwheel or a double ladder or a laddered biwheel, then it has two {\Rconf}s, say $K_1$~and~$K_2$. Furthermore, if $G$ is of type~II then $K_1$ and $K_2$ are vertex-disjoint as in Proposition~\ref{prop:Rconfigurations-almost-disjoint}{\it (i)}; whereas, if~$G$ is of type~I then $K_1$ and $K_2$ have the same free corners but they do not have any other vertices in common as in Proposition~\ref{prop:Rconfigurations-almost-disjoint}{\it (ii)}. \subsection{The $R$-biwheel and $R$-ladder Theorems} It is easily verified that if~$G$ is any \Rbrick\ in~$\mathcal{N}$, then every \Rthin\ edge of~$G$ lies in an \Rconf. Here, we state two theorems which show that this is not a coincidence. Now, let $G$ be a simple $R$-brick which is free of strictly \Rthin\ edges. Given any \Rthin\ edge~$e$ of~$G$, we may invoke one of these theorems (depending on the index of~$e$) to find an \Rconf~$K$ containing the edge~$e$. In particular, if the index of~$e$ is one, we apply Theorem~\ref{thm:Rbiwheel-configuration} and in this case $K$ is an \Rbiwheel; whereas, if the index of~$e$ is two, we apply Theorem~\ref{thm:Rladder-configuration} and in this case $K$ is an \Rladder. \begin{thm} {\sc [$R$-biwheel Theorem]} \label{thm:Rbiwheel-configuration} Let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges, and let $e$ denote an \Rthin\ edge whose index is one. 
Then $G$ contains an \Rbiwheel, say~$K$, such that $e$ is an internal spoke of~$K$. \end{thm} The proof of the above theorem appears in Section~\ref{sec:proof-Rbiwheel-theorem}, and it is along the same lines as the proof of \cite[Theorem 4.6]{clm08}. Given the statement of Theorem~\ref{thm:Rbiwheel-configuration}, one would expect that, likewise, if $e$ is an $R$-thin edge whose index is two then $G$ contains an \Rladder, say~$K$, such that $e$ is an internal rung of~$K$. Unfortunately, this is not true, in general. Consider the double ladder of type~I, shown in Figure~\ref{fig:double-ladder-typeI-Rconfigurations}; $e$ is an $R$-thin edge of index two, and although it is part of an \Rladder, it is not a rung of that ladder. We instead prove the following slightly weaker statement concerning \Rthin\ edges of index two. \begin{thm} {\sc [$R$-ladder Theorem]} \label{thm:Rladder-configuration} Let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges, and let $e$ denote an \Rthin\ edge whose index is two. Then $G$ contains an \Rladder, say~$K$, such that $e \in E(K)$. \end{thm} The proof of the above theorem appears in Section~\ref{sec:proof-Rladder-theorem} and it is significantly longer than that of the \mbox{$R$-biwheel} Theorem~(\ref{thm:Rbiwheel-configuration}). These two theorems (\ref{thm:Rbiwheel-configuration} and \ref{thm:Rladder-configuration}) are central to our proof of the Strictly $R$-thin Edge Theorem (\ref{thm:strictly-Rthin-nb-bricks}). \subsection{Proof Sketch of Theorem~\ref{thm:strictly-Rthin-nb-bricks}} \label{sec:proof-sketch} As in the statement of the theorem, let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges. Our goal is to show that $G$ is a member of one of the eleven infinite families which appear in the statement of the theorem, that is, to show that $G \in \mathcal{N}$. We adopt Notation~\ref{Not:Rbrick-doubleton}. 
We may assume that $G$ is different from $K_4$ and $\overline{C_6}$, and thus, by the $R$-thin Edge Theorem (\ref{thm:Rthin-nb-bricks}), $G$ has an $R$-thin edge, say~$e_1$. Depending on the index of~$e_1$, we invoke either the \mbox{$R$-biwheel} Theorem (\ref{thm:Rbiwheel-configuration}) or the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}) to deduce that $G$ has an \Rconf, say~$K_1$, such that $e_1 \in E(K_1)$. We shall let $a_1u_1$ and $b_1w_1$ denote the \external\ of~$K_1$, and adjust notation so that $u_1$ and $w_1$ are its free corners. We will show that either $u_1$ and $w_1$ both lie in~$V(R)$, or otherwise neither of them lies in~$V(R)$. In the former case, we will conclude that $G$ is either a prism or a M{\"o}bius ladder or a truncated biwheel, and we are done. Now suppose that $u_1, w_1 \notin V(R)$. In this case, we will show that either $G$ is a staircase or a pseudo-biwheel, and we are done; or otherwise, $G$ has an $R$-compatible edge which is not in~$E(K_1)$. In the latter case, we will apply Theorem~\ref{thm:rank-plus-index} to deduce that $G$ has an $R$-thin edge, say~$e_2$, which is not in~$E(K_1)$. Depending on the index of~$e_2$, we may once again use either the \mbox{$R$-biwheel} Theorem (\ref{thm:Rbiwheel-configuration}) or the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}) to conclude that $G$ has an \Rconf, say~$K_2$, such that $e_2 \in E(K_2)$. By Proposition~\ref{prop:Rconfigurations-almost-disjoint}, either $K_1$ and $K_2$ are vertex-disjoint, or otherwise $K_2$ has the same free corners as~$K_1$ but is otherwise vertex-disjoint with~$K_1$. In the latter case, we will conclude that $G$ is either a double biwheel or a double ladder or a laddered biwheel, each of type~I, and we are done. Now suppose that $K_1$ and $K_2$ are vertex-disjoint. 
We will argue that either $G$ is a double biwheel or a double ladder or a laddered biwheel, each of type~II, and we are done; or otherwise, $G$ has an $R$-compatible edge which is not in~$E(K_1 \cup K_2)$. In the latter case, we will once again apply Theorem~\ref{thm:rank-plus-index} to conclude that $G$ has an $R$-thin edge, say~$e_3$, which is not in~$E(K_1 \cup K_2)$. As usual, depending on the index of~$e_3$, we invoke either the \mbox{$R$-biwheel} Theorem (\ref{thm:Rbiwheel-configuration}) or the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}) to deduce that $G$ has an \Rconf, say~$K_3$, such that $e_3 \in E(K_3)$. We have thus located three distinct {\Rconf}s in the brick~$G$, namely, $K_1, K_2$ and $K_3$. However, this contradicts Corollary~\ref{cor:at-most-two-Rconfigurations}, and completes the proof sketch of the Strictly \Rthin\ Edge Theorem~(\ref{thm:strictly-Rthin-nb-bricks}). \section{$R$-thin edges} \label{sec:R-thin} Here, we will prove the \mbox{$R$-biwheel} Theorem (\ref{thm:Rbiwheel-configuration}) and the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}). Our proofs are inspired by the work of Carvalho et al. \cite{clm08}. In the next section, we will review conditions under which an $R$-thin edge is not strictly thin, and we will state a few key lemmas (\ref{lem:index-one-non-removable}, \ref{lem:index-two-non-removable} and \ref{lem:removable-not-thin}) from \cite{clm08} which are used in our proofs. Before that, we state a few preliminary facts; their proofs may be found in \cite[Chapter 4]{koth16}. The removable edges of a bipartite graph satisfy the following `exchange property'. \begin{prop} \label{prop:exchange-property-removable-bipmcg} Let $H$ denote a bipartite matching covered graph, and let $e$ denote a \mbox{removable} edge of~$H$. If $f$ is a removable edge of~$H-e$, then: \begin{enumerate}[(i)] \item $f$ is removable in~$H$, and \item $e$ is removable in~$H-f$. 
\qed \end{enumerate} \end{prop} A matching covered subgraph $K$ of a matching covered graph $H$ is {\it conformal} if the graph $H-V(K)$ has a perfect matching; equivalently, $K$ is conformal if each perfect matching of $K$ extends to a perfect matching of~$H$. The following is a generalization of Proposition~\ref{prop:exchange-property-removable-bipmcg} that is easily proved using the theory of ear decompositions (see \cite{koth16}). \begin{prop} \label{prop:conformal-exchange-property-removable-bipmcg} Let $K$ be a conformal matching covered subgraph of a bipartite matching covered graph~$H$. Let $e$ denote a removable edge of~$K$. Then $e$ is removable in~$H$ as well. \qed \end{prop} The following is a useful characterization of non-removable edges in bipartite graphs. \begin{prop} \label{prop:characterization-non-removable-bipartite} Let $H[A,B]$ denote a bipartite matching covered graph on four or more vertices. An edge~$e$ of~$H$ is non-removable if and only if there exist partitions $(A_0,A_1)$ of~$A$ and $(B_0,B_1)$ of~$B$ such that $|A_0| = |B_0|$ and $e$ is the only edge joining a vertex in~$B_0$ to a vertex in~$A_1$. \qed \end{prop} This fact yields the following corollary. \begin{cor} \label{cor:quadrilateral-admissible-removable} Suppose that $Q$ is a $4$-cycle of a bipartite matching covered graph~$H$, and let $e$ and $f$ denote two nonadjacent edges of~$Q$. If $f$ is admissible in~$H-e$ then $e$ is removable in~$H$. \qed \end{cor} \subsection{Multiple Edges in Retracts} \label{sec:multiple-edges-retracts} Throughout this section, $G$ is a simple \Rbrick, and we adopt Notation~\ref{Not:Rbrick-doubleton}. Furthermore, we shall let $e$ denote an \Rthin\ edge which is not strictly thin, and $J$ the retract of~$G-e$. Since $e$ is not strictly thin, $J$ is not simple, and we shall let $f$ and $g$ denote two multiple (parallel) edges of~$J$. It should be noted that since $J$ is also an \Rbrick, neither edge of~$R$ is a multiple edge of~$J$. 
In particular, $f$ and $g$ do not lie in~$R$. We denote the ends of~$e$ by letters $y$~and~$z$ with subscripts~$1$; that is, $e:=y_1z_1$. Adjust notation so that $y_1 \in A$ and $z_1 \in B$. If either end of~$e$ is cubic, then we denote its two neighbours in~$G-e$ by subscripts $0$~and~$2$. For example, if $y_1$ is cubic then $N(y_1) = \{z_1,y_0,y_2\}$. As $G$ is simple, it follows that $J$ has a contraction vertex which is incident with both $f$~and~$g$. We infer that one end of~$e$, say~$y_1$, is cubic, and that $f$ is incident with~$y_0$, and $g$ is incident with~$y_2$. See Figure~\ref{fig:Rthin-not-strictly-thin}. As noted earlier, $f \notin R$; consequently, $e$ and $f$ are nonadjacent. Likewise, $e$ and $g$ are nonadjacent. \begin{figure} \caption{$f$ and $g$ are multiple edges in the retract~$J$ of~$G-e$; the vertex~$y_1$ is cubic} \label{fig:Rthin-not-strictly-thin} \end{figure} We will consider two separate cases depending on whether the edges $f$~and~$g$ are adjacent (in~$G$) or not. In the case in which they are adjacent, we shall denote their common end by~$w$, as shown in Figure~\ref{fig:f-and-g-adjacent-or-not}a. Now suppose that $f$ and $g$ are nonadjacent. Since they are multiple (parallel) edges of~$J$, we infer that both ends of~$e$ are cubic, and that $f$ and $g$ join the two contraction vertices of~$J$. This proves the following proposition; see Figure~\ref{fig:f-and-g-adjacent-or-not}b. \begin{prop} \label{prop:f-g-nonadjacent} Suppose that $f$ and $g$ are nonadjacent in~$G$. Then the following hold: \begin{enumerate}[(i)] \item each end of~$e$ is cubic, \item consequently, the index of~$e$ is two, and \item one of $f$ and $g$ is incident with $z_0$ whereas the other one is incident with~$z_2$. \qed \end{enumerate} \end{prop} In view of statement {\it (iii)}, whenever $f$ and $g$ are nonadjacent, we shall assume without loss of generality that $f:=y_0z_0$ and $g:=y_2z_2$, as shown in Figure~\ref{fig:f-and-g-adjacent-or-not}b. 
\begin{figure} \caption{(a) when $f$ and $g$ are adjacent; (b) when $f$ and $g$ are nonadjacent} \label{fig:f-and-g-adjacent-or-not} \end{figure} Let us now focus on the case in which $f$ and $g$ are adjacent, as shown in Figure~\ref{fig:f-and-g-adjacent-or-not}a. We remark that, in this case, the index of~$e$ is not determined; that is, its index could be either one or two depending on the degree of its end~$z_1$. Instead, we are able to say something about the degree of~$w$. \begin{prop} \label{prop:f-g-adjacent} Suppose that $f$ and $g$ are adjacent in~$G$, and let $w$ be their common end. Then $w$ has degree four or more. \end{prop} \begin{proof} First suppose that $w$ is not a neighbour of~$z_1$. In this case, $w$ is not affected by the bicontractions in~$G-e$. Consequently, $w$ is a vertex of the brick~$J$, whence it has at least three distinct neighbours. Since $f$ and $g$ are multiple edges, $w$ has degree four or more. Now suppose that $w$ is a neighbour of~$z_1$. Observe that the neighbours of~$y_1$ are precisely $y_0, y_2$ and $z_1$; each of which is adjacent with~$w$. See Figure~\ref{fig:f-and-g-adjacent-or-not}a. Note that, if $w$ is cubic, then its neighbourhood is the same as that of~$y_1$; and in this case, $\{y_0,y_2,z_1\}$ is a barrier of the brick~$G$; this is absurd. Thus $w$ has degree four or more. \end{proof} Note that $f$ and $g$, being multiple edges of~$J$, are both \Rthin\ in~$J$. We shall now examine conditions under which one of them, say~$f$, fails to be \Rthin\ in~$G$. This may be the case for three different reasons; firstly, $f$ is non-removable in the bipartite graph~$H=G-R$; secondly, $f$ is non-removable in~$G$; and thirdly, $f$ is removable in~$G$ but it is not thin. We begin with the situation in which $f$ is non-removable in~$H$. Note that, if an end of~$f$ is cubic (in~$G$) and if it also lies in~$V(R)$, then it has degree two in~$H$, rendering $f$ non-removable. We will now argue that the converse also holds. 
\begin{lem}\label{lem:non-removable-in-H} The edge $f$ is non-removable in~$H$ if and only if it has a cubic end which lies in~$V(R)$. \end{lem} \begin{proof} Suppose that $f$ has no cubic end which lies in~$V(R)$. Consequently, each end of~$f$ has degree two or more in~$H-f$. Furthermore, since $e$ and $f$ are nonadjacent, each end of~$f$ has degree two or more in~$H-e-f$ as well. We will argue $H-e-f$ is matching covered, that is, $f$ is removable in~$H-e$. The exchange property (Proposition~\ref{prop:exchange-property-removable-bipmcg}) then implies that $f$ is also removable in~$H$. Note that $f$ is a multiple edge of~$J-R$, whence $J-R-f$ is matching covered. Note that any graph obtained from a matching covered graph by means of bi-splitting a vertex is also matching covered; see \cite[Section~1.5.2]{koth16}. We will argue that~$H-e-f$ may be obtained from~$J-R-f$ by means of bi-splitting one or two vertices. Note that $J$ is obtained from $G-e$ by means of bicontracting one or two vertices (of degree two); likewise, $J-R$ may be obtained from $H-e$ by means of bicontractions. Conversely, $H-e$ may be obtained from~$J-R$ by means of bi-splitting one or two vertices; these are the contraction vertices of~$J$. As noted earlier, since each end of~$f$ has degree two or more in~$H-e-f$, we may similarly obtain~$H-e-f$ from~$J-R-f$ by means of bi-splitting the same vertices. As discussed above, $H-e-f$ is matching covered; consequently, $f$ is removable in~$H$. \end{proof} We now turn to the situation in which $f$ is non-removable in~$G$. For convenience, we will state two lemmas (\ref{lem:index-one-non-removable} and \ref{lem:index-two-non-removable}), depending on the index of~$e$. These appear in the work of Carvalho et al. \cite[Lemma~4.2]{clm08} as a single lemma. (In their work, they deal with the more general context in which $e$ is a thin edge of a brick~$G$, which need not be near-bipartite.) 
The first lemma (\ref{lem:index-one-non-removable}) considers the scenario in which the index of~$e$ is one. By Proposition~\ref{prop:f-g-nonadjacent}{\it (ii)}, $f$ and $g$ are adjacent; and by Proposition~\ref{prop:f-g-adjacent}, their common end~$w$ is non-cubic. \begin{lem} {\rm \cite{clm08}} \label{lem:index-one-non-removable} Suppose that the index of~$e$ is one. If $f$ is non-removable in~$G$ then~$f$ has a cubic end which is adjacent with both ends of~$e$. {\rm (}In particular, the cubic end of~$f$ lies in~$V(R)$.{\rm )} \qed \end{lem} As $w$ is non-cubic, $y_0$ is the cubic end of~$f$, and it is adjacent with~$z_1$, as shown in Figure~\ref{fig:index-one-non-removable}a. Clearly, the edge joining $y_0$ and $z_1$ is none other than $\beta$. \begin{figure} \caption{Illustration for Lemma~\ref{lem:index-one-non-removable}} \label{fig:index-one-non-removable} \end{figure} The situation in Lemma~\ref{lem:index-one-non-removable} arises in truncated biwheels, as shown in Figure~\ref{fig:index-one-non-removable}. Note that every perfect matching which contains $e$ also contains~$f$, rendering $f$ non-removable. The second lemma (\ref{lem:index-two-non-removable}) deals with the scenario in which the index of~$e$ is two, that is, each end of~$e$ is cubic. \begin{lem} \label{lem:index-two-non-removable} {\rm \cite{clm08}} Suppose that the index of~$e$ is two. If $f$ is non-removable in~$G$ then the following hold: \begin{enumerate}[(i)] \item each end of~$f$ is cubic, \item consequently, $f$ and $g$ are nonadjacent, and \item the ends of~$f$ have a common neighbour. \end{enumerate} {\rm (}In particular, one of the ends of~$f$ is cubic and it also lies in~$V(R)$.{\rm )} \qed \end{lem} By statement {\it (i)}, each end of~$f$ is cubic; thus $f$ and $g$ are nonadjacent (see Proposition~\ref{prop:f-g-adjacent}). By Proposition~\ref{prop:f-g-nonadjacent}, and as per our notation, $f=y_0z_0$ and $g=y_2z_2$, as shown in Figure~\ref{fig:index-two-non-removable}a. 
By statement {\it (iii)}, $y_0$~and~$z_0$ have a common neighbour, say~$x$. Clearly, one of $xy_0$ and $xz_0$ is an edge of~$R$, depending on whether $x$ lies in~$A$ or in~$B$; however, these cases are symmetric. Adjust notation so that $x \in B$; thus $xy_0$ is the edge~$\beta$. Using the fact that $G$ is free of nontrivial barriers, it is easily verified that $x$ is not an end of~$g$. \begin{figure} \caption{Illustration for Lemma~\ref{lem:index-two-non-removable}} \label{fig:index-two-non-removable} \end{figure} The situation in Lemma~\ref{lem:index-two-non-removable} is observed in staircases, as shown in Figure~\ref{fig:index-two-non-removable}b. The edge~$f$ is non-removable since every perfect matching which contains~$e$ also contains~$f$. Finally, we turn to the case in which $f$ is removable in~$G$ but it is not thin. This is handled by Lemma~\ref{lem:removable-not-thin} which appears in the work of Carvalho et al. \cite[Lemma~4.3]{clm08}. \begin{lem} \label{lem:removable-not-thin} {\rm \cite{clm08}} If $f$ is removable in~$G$ but it is not thin then the following hold: \begin{enumerate}[(i)] \item the index of~$e$ is two, \item $f$ and $g$ are adjacent and their common end $w$ is not adjacent with any end of~$e$, \item $g$ is a thin edge, and \item $N(y_0) \subseteq N(z_1) \cup \{w\}$; recall that $y_0$ is the other end of~$f$, and $z_1$ is the end of~$e$ not adjacent with~$y_0$. \qed \end{enumerate} \end{lem} The lemma concludes that the index of~$e$ is two; that is, its end~$z_1$ is cubic, and as per our notation, the neighbours of~$z_1$ are precisely $y_1, z_0$ and $z_2$. Furthermore, it concludes that $f$ and $g$ are adjacent and that their common end~$w$ is distinct from each of $z_0$~and~$z_2$, as shown in Figure~\ref{fig:removable-not-thin}a. Another consequence which may be inferred from their proof is that all of the neighbours of~$y_0$ lie in the set~$N(z_1) \cup \{w\} = \{w,y_1,z_0,z_2\}$. 
(This is not stated explicitly in the statement of \cite[Lemma 4.3]{clm08}.) Since $y_0$ has degree at least three, we may adjust notation so that $y_0$ is adjacent with~$z_0$, and it may or may not be adjacent with~$z_2$. \begin{figure} \caption{Illustration for Lemma~\ref{lem:removable-not-thin}} \label{fig:removable-not-thin} \end{figure} The situation in Lemma~\ref{lem:removable-not-thin} is best illustrated by a double ladder of type~I in which at least one of the two {\Rladder}s is of order eight, as shown in Figure~\ref{fig:removable-not-thin}b. The edge $e$ is $R$-thin; deleting it and taking the retract yields the staircase~$St_{10}$ with multiple edges, two of which are $f$ and $g$. It may be verified that both $f$ and $g$ are removable, but of them only $g$ is thin. \subsection{Proof of the $R$-biwheel Theorem} \label{sec:proof-Rbiwheel-theorem} In this section, we prove the $R$-biwheel Theorem (\ref{thm:Rbiwheel-configuration}); our proof is along the same lines as that of \cite[Theorem~4.6]{clm08}. Before that, we need one more lemma pertaining to the structure of \Rthin\ edges of index one (in an \Rbrick\ which is free of \sRthin\ edges). \begin{lem} \label{lem:index-one-Rthin-edge} Let $G$ be a simple $R$-brick which is free of strictly $R$-thin edges, $e$ an \Rthin\ edge whose index is one, and $y_1$ the cubic end of~$e$. Let $y_0$ and $y_2$ denote the neighbours of~$y_1$ in~$G-e$. Then $y_0$ and $y_2$ are both cubic, and they have a common neighbour~$w$ which is non-cubic. Let $f:=wy_0$ and $g:=wy_2$. Furthermore, the following statements hold: \begin{enumerate}[(i)] \item if $f$ is not $R$-compatible then $y_0 \in V(R)$, and \item if $f$ is $R$-compatible then it is $R$-thin and its index is one. \end{enumerate} (Similar statements also apply to~$g$.) \end{lem} \begin{proof} Let $J$ denote the retract of~$G-e$, that is, $J$ is obtained from~$G-e$ by bicontracting the vertex~$y_1$. 
By hypothesis, $e$ is not strictly thin, whence $J$ has multiple edges. This implies that $G$ has a vertex~$w$, distinct from~$y_1$, that is adjacent to both $y_0$ and $y_2$, as shown in Figure~\ref{fig:f-and-g-adjacent-or-not}a. As in the statement of the lemma, let $f:=wy_0$ and $g:=wy_2$. By Proposition~\ref{prop:f-g-adjacent}, $w$ has degree four or more. First consider the case in which $f$ is not \Rcomp. That is, either $f$ is not removable in~$H$ or it is not removable in~$G$, and it follows from Lemma~\ref{lem:non-removable-in-H} or from Lemma~\ref{lem:index-one-non-removable}, respectively, that the end~$y_0$ of $f$ is cubic and it lies in~$V(R)$. Now consider the case in which $f$ is \Rcomp. Since the index of~$e$ is one, Lemma~\ref{lem:removable-not-thin} implies that $f$ is thin, whence it is \Rthin. By hypothesis, $f$ is not strictly \Rthin. Consequently, the end~$y_0$ of $f$ is cubic, and the index of~$f$ is one. Applying a similar argument to the edge~$g$, we may conclude that $y_2$ is also cubic. \end{proof} \begin{proofOf}{the $R$-biwheel Theorem~(\ref{thm:Rbiwheel-configuration})} As in the statement of the theorem, let $G$ be a simple $R$-brick which is free of strictly \Rthin\ edges, and let $e$ denote an \Rthin\ edge whose index is one. Our goal is to show that $G$ has an \Rbiwheel\ of which $e$ is an internal spoke. As in the statement of Lemma~\ref{lem:index-one-Rthin-edge}, we let $y_1$ denote the cubic end of~$e$, and $y_0$~and~$y_2$ the neighbours of~$y_1$ in~$G-e$. By the lemma, $y_0$~and~$y_2$ are both cubic, and they have a common neighbour~$w$ which is non-cubic. We denote by~$u$ the non-cubic end of~$e$, as shown in Figure~\ref{fig:index-one-Rthin-edge}. Observe that $y_0y_1y_2$ is a path in~$H-\{u,w\}$. 
\begin{figure} \caption{$e$ is an $R$-thin edge of index one; $y_0, y_1$ and $y_2$ are cubic; $u$ and $w$ are non-cubic} \label{fig:index-one-Rthin-edge} \end{figure} We let $P:=v_1v_2 \dots v_j$, where $j \geq 3$, be a path of maximum length in the graph \mbox{$H-\{u,w\}$} that has the following properties (see Figure~\ref{fig:illustration-for-Rbiwheel-theorem}): \begin{enumerate}[(i)] \item $y_1$ is an internal vertex of~$P$, \item every vertex of~$P$ is cubic in~$G$; furthermore, if it lies in~$A$ then it is adjacent with~$u$, and if it lies in~$B$ then it is adjacent with~$w$, and \item for every internal vertex~$v_i$ of~$P$, the edge that joins $v_i$ to one of $u$ and $w$ is \Rthin\ of index one. \end{enumerate} (Note that the path $y_0y_1y_2$ shown in Figure~\ref{fig:index-one-Rthin-edge} satisfies all of the above properties; thus such a path~$P$ exists.) \begin{figure} \caption{Illustration for the $R$-biwheel Theorem} \label{fig:illustration-for-Rbiwheel-theorem} \end{figure} We adjust notation so that $v_1$ lies in~$B$ as shown in Figure~\ref{fig:illustration-for-Rbiwheel-theorem}. It should be noted that the other end of~$P$, namely $v_j$, may lie in~$A$ or in~$B$, depending on whether $P$ is an odd path or even. We shall let $K$ denote the subgraph of~$H$, which has vertex set $V(P) \cup \{u,w\}$ and edge set $E(P) \cup \{v_iw : 1 \leq i \leq j{\rm ~and~}i {\rm ~odd}\} \cup \{v_iu : 1 \leq i \leq j{\rm ~and~}i {\rm ~even}\}$. Our goal is to show that $K$ is an \Rbiwheel. To this end, we need to establish two additional properties of the path~$P$: first, that it is an odd path; and second, that both its ends $v_1$ and $v_j$ lie in~$V(R)$. We begin by arguing that the two ends of~$P$ are nonadjacent (in~$G$). Suppose not, that is, say~$v_1v_j$ is an edge of~$G$. Since each vertex of~$P$ is cubic, it follows that $V(G) = V(K)$; since otherwise $\{u,w\}$ is a $2$-vertex-cut of~$G$, and we have a contradiction. 
Since $G$ has an even number of vertices, $P$ is of odd length. Furthermore, either $G$ is the same as~$K$, or otherwise, $G$ has an additional edge joining $u$ and $w$. In both cases, the graph~$G$ is bipartite; this is absurd. Thus $v_1$ and $v_j$ are nonadjacent. Now, let $f$ denote the edge~$v_1w$. We will argue that $f$ is not \Rcomp, and then use this fact to deduce that $v_1 \in V(R)$. Suppose instead that $f$ is \Rcomp. Applying Lemma~\ref{lem:index-one-Rthin-edge}{\it (ii)}, with~$v_2u$ playing the role of~$e$, we conclude that $f$ is \Rthin\ and its index is one. Let $v_0$ denote the neighbour of~$v_1$ which is distinct from~$v_2$ and $w$; note that $v_0 \in A$. By the preceding paragraph, $v_0$ is distinct from~$v_j$, and since each vertex of~$P$ is cubic, $v_0$ is not in~$V(P)$. Applying Lemma~\ref{lem:index-one-Rthin-edge} again, this time with~$f$ playing the role of~$e$, we deduce that $v_0$ is cubic. Furthermore, $v_0$ and $v_2$ have a common neighbour whose degree is four or more; thus $v_0$ is adjacent with~$u$. Observe that the path~$v_0v_1P$ contradicts the maximality of~$P$. We conclude that $f=v_1w$ is not \Rcomp. By Lemma~\ref{lem:index-one-Rthin-edge}{\it (i)}, the cubic end~$v_1$ of~$f$ lies in~$V(R)$. A similar argument shows that $v_j$ lies in~$V(R)$. Since $v_1$ and $v_j$ are nonadjacent, one of them lies in~$A$ and the other one lies in~$B$. (As per our notation, $v_1 \in B$ and $v_j \in A$.) In particular, $P$ is an odd path, and thus $K$ is an \Rbiwheel. Observe that by property (i) of the path~$P$, the end~$y_1$ of $e$ is an internal vertex of~$P$, whence $e$ is an internal spoke of~$K$, as desired. This completes the proof of Theorem~\ref{thm:Rbiwheel-configuration}. \end{proofOf} \subsection{Proof of the $R$-ladder Theorem} \label{sec:proof-Rladder-theorem} Here, we prove the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}); its proof is significantly longer than that of the \mbox{$R$-biwheel} Theorem. 
In its proof, we will need two lemmas (\ref{lem:index-two-Rthin-edge-no-common-neighbour} and \ref{lem:index-two-Rthin-edge-common-neighbour}), each of which pertains to the structure of \Rthin\ edges of index two (in an \Rbrick\ which is free of \sRthin\ edges); these lemmas correspond to two cases that appear in the proof of Theorem~\ref{thm:Rladder-configuration}. \begin{lem} \label{lem:index-two-Rthin-edge-no-common-neighbour} Let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges and $e:=y_1z_1$ an \Rthin\ edge whose index is two. Let $y_0$~and~$y_2$ denote the neighbours of~$y_1$ which are distinct from~$z_1$, and let $z_0$~and~$z_2$ denote the neighbours of~$z_1$ which are distinct from~$y_1$. Suppose that $y_1$ is the only common neighbour of $y_0$~and~$y_2$, and that $z_1$ is the only common neighbour of $z_0$~and~$z_2$. Then there are precisely two (nonadjacent) edges, say $f$~and~$g$, between $\{y_0,y_2\}$ and $\{z_0,z_2\}$. Adjust notation so that $f:=y_0z_0$ and $g:=y_2z_2$. Furthermore, the following statements hold: \begin{enumerate}[(i)] \item if $f$ is not \Rcomp\ then an end of~$f$ is cubic and it lies in~$V(R)$, and \item if $f$ is \Rcomp\ then it is \Rthin\ and its index is two. \end{enumerate} (Similar statements also apply to~$g$.) \end{lem} \begin{proof} Let $J$ denote the retract of~$G-e$, that is, $J$ is obtained from~$G-e$ by bicontracting vertices $y_1$~and~$z_1$. By hypothesis, $e$ is not strictly thin, whence $J$ has multiple edges. Also, as stated in the assumptions, $y_1$ is the only common neighbour of $y_0$~and~$y_2$, and likewise, $z_1$ is the only common neighbour of $z_0$~and~$z_2$. It follows that there are precisely two nonadjacent edges between $\{y_0,y_2\}$ and $\{z_0,z_2\}$, as shown in Figure~\ref{fig:f-and-g-adjacent-or-not}b. As in the statement, adjust notation so that $f:=y_0z_0$ and $g:=y_2z_2$. First consider the case in which $f$ is not \Rcomp. 
That is, either $f$ is not removable in~$H$ or it is not removable in~$G$, and it follows from Lemma~\ref{lem:non-removable-in-H} or from Lemma~\ref{lem:index-two-non-removable}, respectively, that an end of~$f$ is cubic and it lies in~$V(R)$. Now consider the case in which $f$ is \Rcomp. Since $f$ and $g$ are nonadjacent, Lemma~\ref{lem:removable-not-thin} implies that $f$ is thin, whence it is \Rthin. It remains to argue that the index of~$f$ is two. Suppose to the contrary that an end of~$f$, say~$z_0$, is non-cubic. By hypothesis, $f$ is not strictly \Rthin, whence its other end~$y_0$ is cubic. Using the fact that $y_1$ is the only common neighbour of $y_0$~and~$y_2$, it is easily verified that the retract of~$G-f$ has no multiple edges, that is, $f$ is strictly \Rthin; this contradicts the hypothesis. Thus, each end of~$f$ is cubic, whence the index of~$f$ is two. \end{proof} \begin{lem} \label{lem:index-two-Rthin-edge-common-neighbour} Let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges and $e:=y_1z_1$ an \Rthin\ edge whose index is two. Let $y_0$~and~$y_2$ denote the neighbours of~$y_1$ which are distinct from~$z_1$, and let $z_0$~and~$z_2$ denote the neighbours of~$z_1$ which are distinct from~$y_1$. Suppose that $y_0$~and~$y_2$ have a common neighbour~$w$ which is distinct from $y_1$. Let $f:=y_0w$ and $g:=y_2w$. Then $w$ is non-cubic and is distinct from each of $z_0$~and~$z_2$. Furthermore, $f$~and~$g$ are both removable, $y_0$~and~$y_2$ are both cubic, and the following statements hold: \begin{enumerate}[(i)] \item one of $f$~and~$g$ is \Rcomp; adjust notation so that $f$ is \Rcomp; \item $f$ is not thin, and its cubic end~$y_0$ is adjacent with (exactly) one of $z_0$~and~$z_2$; and, \item $g$ is thin but it is not \Rcomp, and its cubic end~$y_2$ lies in~$V(R)$. \end{enumerate} \end{lem} \begin{proof} Note that $f$~and~$g$ are multiple edges in the retract~$J$ of~$G-e$. 
Since $f$ and $g$ are adjacent, by Proposition~\ref{prop:f-g-adjacent}, their common end~$w$ is non-cubic. Consequently, by Lemma~\ref{lem:index-two-non-removable}, $f$~and~$g$ are both removable. Note that $y_0$~and~$y_2$ are nonadjacent, since otherwise $e$ is non-removable. In particular, at least one of $y_0$~and~$y_2$ does not lie in~$V(R)$. By Lemma~\ref{lem:non-removable-in-H}, at least one of $f$~and~$g$ is \Rcomp. We now argue that $w$ is distinct from each of $z_0$~and~$z_2$. Suppose not, and assume without loss of generality that $w=z_0$. By Lemma~\ref{lem:removable-not-thin}{\it (ii)}, $f$~and~$g$ are both thin; in particular, at least one of them is \Rthin. Adjust notation so that $f$ is \Rthin. By hypothesis, $f$ is not \sRthin, whence the retract of~$G-f$ has multiple edges; consequently, the end~$y_0$ of~$f$ is cubic. Let $v$ denote the neighbour of~$y_0$ which is distinct from $y_1$~and~$z_0$. Furthermore, as $f$ is not \sRthin, we infer that $v$~and~$y_1$ have a common neighbour which is distinct from~$y_0$; by Proposition~\ref{prop:f-g-adjacent}, such a common neighbour is non-cubic. Since $z_1$ is cubic, we infer that $y_2$ is non-cubic. By Lemma~\ref{lem:non-removable-in-H}, $g$ is \Rcomp. As noted earlier, $g$ is thin; whence $g$ is \Rthin. Since each end of~$g$ is non-cubic, $g$ is \sRthin, contrary to the hypothesis. Thus $w$ is distinct from each of $z_0$~and~$z_2$; see Figure~\ref{fig:index-two-Rthin-edge-common-neighbour}. \begin{figure} \caption{Illustration for Lemma~\ref{lem:index-two-Rthin-edge-common-neighbour}} \label{fig:index-two-Rthin-edge-common-neighbour} \end{figure} Let us review what we have proved so far. We have shown that $y_0$~and~$y_2$ are not both adjacent with~$z_0$. An analogous argument shows that $y_0$~and~$y_2$ are not both adjacent with~$z_2$. By symmetry, $z_0$~and~$z_2$ are not both adjacent with~$y_0$; likewise, $z_0$~and~$z_2$ are not both adjacent with~$y_2$. 
In summary, there are at most two edges between $\{y_0,y_2\}$~and~$\{z_0,z_2\}$; and if there are precisely two such edges then they are nonadjacent. Now we argue that $y_0$ and $y_2$ are both cubic. Suppose instead that $y_0$ is non-cubic; then, by Lemma~\ref{lem:non-removable-in-H}, $f$ is \Rcomp. Note that since each end of~$f$ is non-cubic, if $f$ is thin then it is \sRthin, contrary to the hypothesis. So it must be the case that $f$ is not thin. By Lemma~\ref{lem:removable-not-thin}{\it (iv)}, $N(y_0) \subseteq N(z_1) \cup \{w\} = \{z_0,z_2,y_1,w\}$. As $y_0$ is non-cubic, it must be adjacent with each of $z_0$~and~$z_2$; however, this contradicts what we have already established in the preceding paragraph. We conclude that $y_0$~and~$y_2$ are both cubic. As noted earlier, at least one of $f$~and~$g$ is \Rcomp. As in statement {\it (i)} of the lemma, adjust notation so that $f$ is \Rcomp. We will now argue that $f$ is not thin. Suppose instead that $f$ is thin. Let $v$ denote the neighbour of~$y_0$ which is distinct from $y_1$~and~$w$. By hypothesis, $f$ is not \sRthin, whence $v$~and~$y_1$ have a common neighbour which is distinct from~$y_0$; by Proposition~\ref{prop:f-g-adjacent}, such a common neighbour is non-cubic. However, this is not possible as each neighbour of~$y_1$ is cubic. Thus, $f$ is not thin. An analogous argument shows that if $g$ is \Rcomp\ then $g$ is not thin. Since $f$ is removable but it is not thin, by Lemma~\ref{lem:removable-not-thin}{\it (iv)}, $N(y_0) \subseteq N(z_1) \cup \{w\} = \{z_0,z_2,y_1,w\}$. It follows from our previous observation that $y_0$ is adjacent with exactly one of $z_0$~and~$z_2$; adjust notation so that $y_0$ is adjacent with~$z_0$. This proves statement {\it (ii)}. Also, by Lemma~\ref{lem:removable-not-thin}, one of $f$~and~$g$ is thin; as per our notation, $g$ is thin. Consequently, $g$ is not \Rcomp. By Lemma~\ref{lem:non-removable-in-H}, the cubic end~$y_2$ of~$g$ lies in~$V(R)$. 
This proves statement {\it (iii)}, and we are done. \end{proof} \begin{proofOf}{the $R$-ladder Theorem~(\ref{thm:Rladder-configuration})} As in the statement of the theorem, let $G$ be a simple \Rbrick\ which is free of strictly \Rthin\ edges, and let $e$ denote an \Rthin\ edge whose index is two. We shall let $y_1$~and~$z_1$ denote the ends of~$e$, where $y_1 \in A$ and $z_1 \in B$. Furthermore, we let $y_0$~and~$y_2$ denote the neighbours of~$y_1$ which are distinct from~$z_1$, and likewise, we let $z_0$~and~$z_2$ denote the neighbours of~$z_1$ which are distinct from~$y_1$. Our goal is to show that $G$ has an \Rladder\ which contains the edge~$e$. As mentioned earlier, we will consider two separate cases which correspond to the situations in Lemmas~\ref{lem:index-two-Rthin-edge-no-common-neighbour} and \ref{lem:index-two-Rthin-edge-common-neighbour}, respectively. \noindent \underline{Case 1}: $y_1$ is the only common neighbour of $y_0$~and~$y_2$, and likewise, $z_1$ is the only common neighbour of $z_0$~and~$z_2$. \noindent By Lemma~\ref{lem:index-two-Rthin-edge-no-common-neighbour}, there are precisely two nonadjacent edges between $\{y_0,y_2\}$ and $\{z_0,z_2\}$. Adjust notation so that $y_0z_0$ and $y_2z_2$ are edges of~$G$, as shown in Figure~\ref{fig:no-common-neighbour}. Observe that the graph in the figure is a ladder of which $e$ is an internal rung; furthermore, it is a subgraph of~$H$. \begin{figure} \caption{The situation in Case 1} \label{fig:no-common-neighbour} \end{figure} We let $K$ be a subgraph of~$H$ of maximum order that has the following properties: \begin{enumerate}[(i)] \item $K$ is a ladder and $e$ is an internal rung of~$K$, and \item every internal rung of~$K$ is an \Rthin\ edge whose index is two. \end{enumerate} Note that the subgraph~$K$ is either an odd ladder or an even ladder; see Figure~\ref{fig:illustration-for-Rladder-theorem-Case1}. 
We shall denote by $au$ and $bw$ the external rungs of~$K$ such that $a, w \in A$ and $b, u \in B$, as shown in the figure. It follows from property (ii) of~$K$ that each of its vertices, except possibly $a, u, b$~and~$w$, is cubic in~$G$. \begin{rem} \label{rem:ladder-of-order-six} Note that, if~$|V(K)|=6$ then $K$ is the same as the subgraph of~$H$ shown in Figure~\ref{fig:no-common-neighbour}; in particular, $\{u,b\}=\{y_0,y_2\}$, and likewise, $\{w,a\}=\{z_0,z_2\}$; consequently, by our hypothesis, $y_1$ is the only common neighbour of $u$~and~$b$, and likewise, $z_1$ is the only common neighbour of $w$~and~$a$. \end{rem} \begin{figure} \caption{Illustration for Case 1 of the $R$-ladder Theorem} \label{fig:illustration-for-Rladder-theorem-Case1} \end{figure} Our goal is to show that $K$ is an \Rladder. To this end, we need to establish that $a$~and~$b$ (or likewise, $u$ and $w$) are both cubic in~$G$ and they lie in~$V(R)$. Now, let $f$ denote the edge~$au$. We will argue that $f$ is not \Rcomp, and then use this fact to deduce that one of the ends of~$f$ is cubic and it lies in~$V(R)$. As shown in Figure~\ref{fig:illustration-for-Rladder-theorem-Case1}, let $s_2$ denote the neighbour of~$u$ in~$K$ which is distinct from~$a$, and likewise, let $t_2$ denote the neighbour of~$a$ in~$K$ which is distinct from~$u$. Suppose instead that $f$ is \Rcomp. By Lemma~\ref{lem:index-two-Rthin-edge-no-common-neighbour}{\it (ii)}, with $s_2t_2$ playing the role of~$e$, we conclude that $f$ is \Rthin\ and its index is two. We shall let $s_0$ denote the neighbour of~$u$ which is distinct from $s_2$~and~$a$, and likewise, let~$t_0$ denote the neighbour of~$a$ which is distinct from $t_2$~and~$u$. Note that $s_0 \in A$ and $t_0 \in B$. It is easily seen that if $s_0$ is the same as~$w$ then $V(K) \cap A$ is a (nontrivial) barrier of~$G$; this is absurd as~$G$ is a brick. Thus $s_0 \neq w$, and likewise, $t_0 \neq b$. It follows that $s_0, t_0 \notin V(K)$. 
We will use the fact that $f$ is not strictly \Rthin\ to deduce that $s_0$~and~$t_0$ are adjacent; this will help us contradict the maximality of~$K$. First suppose that $s_0$~and~$s_2$ have a common neighbour~$x$ which is distinct from~$u$. By Proposition~\ref{prop:f-g-adjacent}, $x$ is non-cubic. Observe that, if $|V(K)| \geq 8$ then every neighbour of~$s_2$ is cubic; and if $|V(K)| = 6$ then $b$ is the only neighbour of~$s_2$ which is possibly non-cubic. We conclude that $|V(K)| = 6$ and that $x = b$. Now, $s_0$ is a common neighbour of $u$ and $b$; this contradicts the hypothesis (see Remark~\ref{rem:ladder-of-order-six}). We conclude that $u$ is the only common neighbour of $s_0$~and~$s_2$. An analogous argument shows that $a$ is the only common neighbour of $t_0$~and~$t_2$. It follows that $s_0$ and $t_0$ are adjacent, as $f$ is not strictly \Rthin. Now, let $K'$ denote the subgraph of~$H$ obtained from~$K$ by adding the vertices $s_0$~and~$t_0$, and the edges $us_0, s_0t_0$ and $t_0a$; then $K'$ contradicts the maximality of~$K$. We thus conclude that~$f=au$ is not \Rcomp. Consequently, by Lemma~\ref{lem:index-two-Rthin-edge-no-common-neighbour}{\it (i)}, with $s_2t_2$ playing the role of~$e$, at least one of $a$ and $u$ is cubic and it also lies in~$V(R)$. Adjust notation so that $a$ is cubic and it lies in~$V(R)$. An analogous argument shows that at least one of $b$ and $w$ is cubic and it lies in~$V(R)$; we claim that $b$ must satisfy both of these properties. Suppose not; then $w$ is cubic and it lies in~$V(R)$; this means that the edge~$\alpha$ of~$R$ joins the vertices $a$~and~$w$. Observe that $\{b,u\}$ is a $2$-vertex cut of~$G$; this is absurd as $G$ is a brick. We have shown that $a$ and $b$ are both cubic and they lie in~$V(R)$. Thus $K$ is an \Rladder. Observe that, by property (i) of~$K$, the edge~$e$ is an internal rung of~$K$. In particular, $e$ is an edge of~$K$, as desired. 
\noindent \underline{Case 2}: $y_0$~and~$y_2$ have a common neighbour which is distinct from~$y_1$, or likewise, $z_0$~and~$z_2$ have a common neighbour which is distinct from~$z_1$. \noindent As shown in Figure~\ref{fig:common-neighbour}, assume without loss of generality that $y_0$~and~$y_2$ have a common neighbour, say~$w$, which is distinct from~$y_1$. We let $f:=y_0w$ and $g:=y_2w$. We invoke Lemma~\ref{lem:index-two-Rthin-edge-common-neighbour} to infer the following: $w$ is non-cubic and it is distinct from each of $z_0$~and~$z_2$; whereas $y_0$ and $y_2$ are both cubic; $f$~and~$g$ are both removable edges. Furthermore, adjusting notation as in the lemma, $f$ is \Rcomp\ but it is not thin and its cubic end~$y_0$ is adjacent with one of $z_0$ and $z_2$. Assume without loss of generality that $y_0$ is adjacent with~$z_0$. The edge~$g$ is thin but it is not \Rcomp\ and its cubic end~$y_2$ lies in~$V(R)$. As per our notation, $y_2$ is an end of $\beta$; we shall let $x$ denote the other end of~$\beta$. \begin{figure} \caption{The situation in Case 2} \label{fig:common-neighbour} \end{figure} We will consider two subcases. In the first one, we assume that $z_0$ is cubic and it lies in~$V(R)$; and in the second case, we assume that either $z_0$ is non-cubic or it is not in~$V(R)$. \noindent \underline{Case 2.1}: $z_0$ is cubic and it lies in~$V(R)$. \noindent In this case, we shall denote by $K$ the subgraph whose vertex set is~$\{z_0,z_1,y_0,y_1,w,y_2\}$ and edge set is~$\{e,y_1y_2,g,f,y_0z_0,z_0z_1,y_0y_1\}$. Observe that $K$ is a ladder of order six and it is a subgraph of~$H$; furthermore, two of its corners, namely $y_2$ and $z_0$, are cubic and they both lie in~$V(R)$. To complete the proof in this case, we will show that $K$ is an \Rladder; for this, we only need to prove that the internal rung~$y_0y_1$ is \Rthin\ and its index is two. We begin by showing that $y_0y_1$ is \Rcomp, that is, $y_0y_1$ is removable in~$H$ as well as in~$G$. 
Here, we will not require the hypothesis that $z_0$ is cubic and it lies in~$V(R)$. \begin{Claim} \label{claim:y0y1-Rcompatible} The edge~$y_0y_1$ is \Rcomp. \end{Claim} \begin{proof} Note that $y_0y_1$ is removable in the subgraph~$K$. We will argue that $K$ is a conformal subgraph of~$H$, and then use Proposition~\ref{prop:conformal-exchange-property-removable-bipmcg} to deduce that $y_0y_1$ is removable in~$H$. Let $M$ be any perfect matching of~$H$ which contains the edge~$z_0z_1$. Since $M$ does not contain $\alpha$ or $\beta$, it is easily verified that~$M \cap E(K)$ is a perfect matching of~$K$, whence $K$ is a conformal subgraph of~$H$; consequently, $y_0y_1$ is removable in~$H$. To conclude that $y_0y_1$ is removable in~$G$, we will show that $G-y_0y_1$ has a perfect matching~$M$ which contains both $\alpha$ and $\beta$. Let $N$ be a perfect matching of~$G-\{z_1,x\}$; such a perfect matching exists as $G$ is a brick; note that $\alpha \in N$ and $\beta \notin N$. Clearly, either $y_1y_2 \in N$ or $g \in N$. If $y_1y_2 \in N$, we let $M:= (N-y_1y_2) + e + \beta$. On the other hand, if $g \in N$ then $y_0y_1 \in N$, and we let $M:=(N-g-y_0y_1) + e + f + \beta$. In either case, $M$ is the desired perfect matching, and this completes the proof. \end{proof} We now proceed to show that $y_0y_1$ is an \Rthin\ edge. To this end, we will use the characterization of \Rthin\ edges in terms of barriers given by \cite[Proposition~2.9]{koth19}. \begin{Claim} \label{claim:y0y1-Rthin} The edge~$y_0y_1$ is \Rthin, and its index is two. \end{Claim} \begin{proof} Observe that, since $y_0$ and $y_1$ are both cubic, $G-y_0y_1$ has two maximal nontrivial barriers; one of them, say~$S_A$, is a subset of~$A$ and it contains $z_0$~and~$w$; the other one, say~$S_B$, is a subset of~$B$ and it contains $z_1$~and~$y_2$. In particular, the index of~$y_0y_1$ is two. 
We will argue that $S_A = \{z_0,w\}$; our argument does not use the fact that $w$ is non-cubic, and it may be mimicked to show that $S_B = \{z_1,y_2\}$; thereafter, we apply \cite[Proposition~2.9]{koth19} to infer that $y_0y_1$ is \Rthin. Note that $w$ is in the barrier~$S_A$. Now, let $v$ be any vertex in~$A-\{z_0,w\}$. We will show that $(G-y_0y_1) - \{w,v\}$ has a perfect matching~$M$; this would imply that $v$ is not in the barrier~$S_A$. Let $N$ be a perfect matching of~$G-\{w,v\}$; note that $\beta \in N$ and $\alpha \notin N$. If $y_0y_1 \notin N$ then let $M:=N$, and we are done. Now suppose that $y_0y_1 \in N$. By our hypothesis, $z_0$ is cubic and it lies in~$V(R)$; this means that the three edges incident at $z_0$ are $z_0y_0, z_0z_1$ and $\alpha$. Since $y_0y_1 \in N$, $\alpha \notin N$ and $v \neq z_0$, we conclude that $z_0z_1 \in N$. Now, $M:= (N-y_0y_1-z_0z_1) + y_0z_0 + e$ is the desired perfect matching. We conclude that $S_A = \{z_0,w\}$. As discussed in the preceding paragraph, this completes the proof. \end{proof} We have shown that the only internal rung of~$K$, namely $y_0y_1$, is an \Rthin\ edge whose index is two. As discussed earlier, $K$ is indeed an \Rladder, and since it contains~$e$, this completes the proof in this case (2.1). \noindent \underline{Case 2.2}: Either $z_0$ is non-cubic or it does not lie in~$V(R)$, possibly both. \noindent As per our notation, $z_0 \in A$; it follows from the hypothesis of this case that $z_0$ has at least one neighbour which lies in~$B-\{z_1,y_0\}$; we shall let $u$ denote such a neighbour of~$z_0$, as shown in Figure~\ref{fig:ladder-of-order-eight}. Observe that $u$ is distinct from~$y_2$; however, it is possible that $u=x$. 
\begin{figure} \caption{The situation in Case 2.2 (all labelled vertices are pairwise distinct, except possibly $u$ and $x$)} \label{fig:ladder-of-order-eight} \end{figure} In this case, we will prove that $z_0z_1$ is an \Rthin\ edge whose index is two; in particular, $z_0$ is cubic and $z_0 \notin V(R)$. (If not, we will find a \sRthin\ edge contrary to the hypothesis.) Thereafter, we argue that $u$ is adjacent with~$z_2$; this establishes a certain symmetry between $y_0,y_1,y_2,w$ and $z_0,z_1,z_2,u$, respectively; see Figure~\ref{fig:establish-symmetry}. We shall exploit this to deduce that $y_0y_1$ is an \Rthin\ edge (whose index is two), and that $z_2$ is cubic and it lies in~$V(R)$. In the end, we will find an \Rladder\ of order eight whose internal rungs are $y_0y_1$~and~$z_0z_1$. Our first step is to show that $z_0z_1$ is \Rcomp, that is, $z_0z_1$ is removable in~$H$ as well as in~$G$. \begin{Claim} \label{claim:z0z1-Rcompatible} The edge~$z_0z_1$ is \Rcomp. \end{Claim} \begin{proof} Note that $y_0y_1z_1z_0y_0$ is a $4$-cycle containing the edges $y_0y_1$~and~$z_0z_1$. We will show that $y_0y_1$ is admissible in~$H-z_0z_1$, and then invoke Corollary~\ref{cor:quadrilateral-admissible-removable} to deduce that $z_0z_1$ is removable in~$H$. We need to show that $H-z_0z_1$ has a perfect matching~$M$ which contains~$y_0y_1$. Let $N$ be any perfect matching of~$H-\{u,y_1\}$; such a perfect matching exists by \cite[Proposition~2.1]{koth19}. Observe that $g \in N$; consequently, $y_0z_0 \in N$. Now, $M:=(N-y_0z_0) + uz_0 + y_0y_1$ is the desired perfect matching. As discussed above, $z_0z_1$ is removable in~$H$. To conclude that $z_0z_1$ is removable in~$G$, we will show that $G-z_0z_1$ has a perfect matching~$M$ which contains both $\alpha$~and~$\beta$. Let $N$ be any perfect matching of~$G$ which contains $\alpha$ and $\beta$. If $z_0z_1 \notin N$ then let $M:=N$, and we are done. Now suppose that $z_0z_1 \in N$. 
Observe that $y_0y_1 \in N$; furthermore, $M:= (N-y_0y_1-z_0z_1) + e + y_0z_0$ is the desired perfect matching. This completes the proof. \end{proof} We proceed to prove that $z_0z_1$ is an \Rthin\ edge whose index is two. As we did in Claim~\ref{claim:y0y1-Rthin}, we will use the characterization of \Rthin\ edges given by \cite[Proposition~2.9]{koth19}. However, here we need more general arguments since we do not know the degree of~$z_0$. \begin{Claim} \label{claim:z0z1-Rthin} The edge~$z_0z_1$ is \Rthin, and its index is two. \end{Claim} \begin{proof} Observe that, since $z_1$ is cubic, $G-z_0z_1$ has a maximal nontrivial barrier, say~$S_A$, which is a subset of~$A$ and contains $y_1$~and~$z_2$. We will first prove that $S_A=\{y_1,z_2\}$. Let $v$ be any vertex in~$A-\{y_1,z_2\}$. We will show that $(G-z_0z_1) - \{z_2,v\}$ has a perfect matching~$M$; this would imply that $v$ is not in the barrier~$S_A$. Let $N$ be a perfect matching of~$G-\{z_2,v\}$; note that $\beta \in N$ and $\alpha \notin N$. If $z_0z_1 \notin N$ then let $M:=N$, and we are done. Now suppose that $z_0z_1 \in N$, and observe that $y_0y_1 \in N$; consequently, $M:=(N-z_0z_1-y_0y_1)+e+y_0z_0$ is the desired perfect matching. Thus, $S_A=\{y_1,z_2\}$. Since $z_0z_1$ is \Rcomp, by \cite[Lemma~2.8]{koth19}, either~$S_A$ is the only maximal nontrivial barrier of~$G-z_0z_1$, or $G-z_0z_1$ has another maximal nontrivial barrier, say~$S_B$, which is a subset of~$B$. We now argue that, in the former case, $z_0z_1$ is \sRthin, contrary to the hypothesis. Suppose that $S_A$ is the only maximal nontrivial barrier of~$G-z_0z_1$; in this case, the index of~$z_0z_1$ is one. By \cite[Proposition~2.9]{koth19}, $z_0z_1$ is \Rthin. Also, $z_0$ is non-cubic, since otherwise its two neighbours distinct from~$z_1$ would lie in a barrier. Observe that, since $z_1$ is the only common neighbour of $y_1$~and~$z_2$, the retract of~$G-z_0z_1$ is simple, and thus $z_0z_1$ is \sRthin; this is a contradiction. 
It follows that $G-z_0z_1$ has a maximal nontrivial barrier, say~$S_B$, which is a subset of~$B$; in particular, the index of~$z_0z_1$ is two. By \cite[Lemma~2.8]{koth19}, $z_0$ is isolated in~$(G-z_0z_1)-S_B$; that is, in~$G-z_0z_1$, every neighbour of~$z_0$ lies in the barrier~$S_B$. In particular, $u, y_0 \in S_B$. We will prove that $S_B=\{u,y_0\}$. Let $v$ be any vertex in~$B-\{u,y_0\}$. We will show that $(G-z_0z_1) - \{u, v\}$ has a perfect matching~$M$; this would imply that $v$ is not in the barrier~$S_B$. Let $N$ be a perfect matching of~$G-\{u,v\}$; note that $\alpha \in N$ and $\beta \notin N$. If $z_0z_1 \notin N$ then let $M:=N$, and we are done. Now suppose that $z_0z_1 \in N$. If $y_0y_1 \in N$ then $M:= (N-z_0z_1-y_0y_1) + e + y_0z_0$ is the desired perfect matching. Now suppose that $y_0y_1 \notin N$; then $f, y_1y_2 \in N$, and $M:=(N-z_0z_1-f-y_1y_2) + y_0z_0 + g + e$ is the desired perfect matching. Thus, as discussed above, $v \notin S_B$; consequently, $S_B = \{u,y_0\}$. In particular, $z_0$ is cubic. Furthermore, by \cite[Proposition~2.9]{koth19}, $z_0z_1$ is \Rthin. \end{proof} We have shown that $z_0z_1$ is an \Rthin\ edge and its index is two; in particular, both its ends are cubic. The three neighbours of $z_0$ are $y_0, z_1$ and $u$; see Figure~\ref{fig:ladder-of-order-eight}. By hypothesis, $z_0z_1$ is not \sRthin; whence the retract of~$G-z_0z_1$ has multiple edges. Observe that $z_1$ is the only common neighbour of $y_1$~and~$z_2$. Consequently, at least one of the following must hold: either $u$~and~$y_0$ have a common neighbour which is distinct from~$z_0$, or $u$ and $z_2$ are adjacent. We shall rule out the former case by arriving at a contradiction. \begin{figure} \caption{When $u$ is adjacent with~$w$} \label{fig:u-adjacent-with-w} \end{figure} Suppose that $u$~and~$y_0$ have a common neighbour which is distinct from~$z_0$; this is true if and only if $u$ is adjacent with~$w$. 
We now invoke Lemma~\ref{lem:index-two-Rthin-edge-common-neighbour}, with $z_0z_1$ playing the role of~$e$, with $u$ playing the role of~$y_2$, and with $uw$ playing the role of~$g$; see Figure~\ref{fig:u-adjacent-with-w}a, and compare with Figure~\ref{fig:ladder-of-order-eight}. The lemma implies that $u$ is a cubic vertex, and since $f$ is \Rcomp, $uw$ is thin but it is not \Rcomp; furthermore, $u \in V(R)$. In particular, $u$ is an end of~$\beta$, which implies that $u = x$; see Figures~\ref{fig:ladder-of-order-eight} and \ref{fig:u-adjacent-with-w}b. Note that all of the labelled vertices in Figure~\ref{fig:u-adjacent-with-w}b are pairwise distinct; furthermore, each of them, except $w$ and possibly~$z_2$, is cubic. Since $z_2$ has at least one neighbour in~$B$ which is distinct from~$z_1$, the graph has more vertices; consequently, $\{w,z_2\}$ is a $2$-vertex cut of~$G$; this is a contradiction. We have shown that $z_0$ is the only common neighbour of $u$~and~$y_0$; as discussed earlier, this implies that $u$~and~$z_2$ are adjacent; see Figure~\ref{fig:establish-symmetry}. Note that $u$ is now a common neighbour of $z_0$ and $z_2$, and it is distinct from~$z_1$; this establishes a symmetry between $y_0,y_1,y_2,w$, and $z_0,z_1,z_2,u$, respectively. We invoke Lemma~\ref{lem:index-two-Rthin-edge-common-neighbour} to conclude that $u$ is non-cubic, whereas $z_2$ is cubic and it lies in~$V(R)$. Using arguments analogous to those in the proofs of Claims~\ref{claim:z0z1-Rcompatible}~and~\ref{claim:z0z1-Rthin}, we conclude that $y_0y_1$ is an \Rthin\ edge (whose index is two). \begin{figure} \caption{Illustration for Case 2.2 of the $R$-ladder Theorem; $u$ is a common neighbour of $z_0$~and~$z_2$ which is distinct from~$z_1$} \label{fig:establish-symmetry} \end{figure} Now, let $K$ denote the subgraph which consists of all of the labelled vertices shown in Figure~\ref{fig:establish-symmetry}, and all of the edges between those vertices which are shown in the figure. 
Note that $K$ is an \Rladder, and since it contains~$e$, this completes the proof of the \mbox{$R$-ladder} Theorem (\ref{thm:Rladder-configuration}). \end{proofOf} \section{Properties of {\Rconf}s} \label{sec:properties-of-Rconfigurations} In this section, we prove a few results pertaining to {\Rconf}s. These are used in our proof of the Strictly \Rthin\ Edge Theorem (\ref{thm:strictly-Rthin-nb-bricks}), which appears in the next section. We will find the following consequence of \cite[Lemma~2.3]{koth19} useful; its proof may be found in \cite{koth16}. \begin{cor} \label{cor:application-of-LV} Let $G$ be an $R$-brick, and let $H:=G-R$. Then for any vertex~$b$, at most two edges of $\partial_H(b)$ are non-removable in~$H$. \end{cor} For the rest of this section, $G$ is a simple \Rbrick, and we adopt Notation~\ref{Not:Rbrick-doubleton}; furthermore, $K_1$ is an \Rconf\ with \external\ $a_1u_1$~and~$b_1w_1$. As usual, $u_1$~and~$w_1$ are the free corners of~$K_1$; see Figure~\ref{fig:first-Rconfiguration}. \begin{figure} \caption{The \Rconf~$K_1$} \label{fig:first-Rconfiguration} \end{figure} Note that $K_1$ is either a ladder or a partial biwheel. In either case, it is easily verified that the graph obtained from~$K_1$ by adding two edges, one joining $a_1$~and~$b_1$, and another joining $u_1$~and~$w_1$, is a brace. This fact, in conjunction with the characterization of braces provided by \cite[Proposition~4.12]{koth16}, yields the following easy observation. \begin{prop} \label{prop:bracelike-property-of-Rconfigurations} The following statements hold: \begin{enumerate}[(i)] \item for every pair of distinct vertices \mbox{$v_1,v_2 \in A \cap V(K_1)$}, the graph $K_1 - \{b_1,u_1,v_1,v_2\}$ has a perfect matching; and likewise, \item for every pair of distinct vertices \mbox{$v_1,v_2 \in B \cap V(K_1)$}, the graph $K_1 - \{a_1,w_1,v_1,v_2\}$ has a perfect matching. 
\qed \end{enumerate} \end{prop} In the following lemma, we prove some conformality properties of {\Rconf}s; these are useful in subsequent lemmas to show that a certain edge is \Rcomp. \begin{lem} \label{lem:conformality-of-Rconfigurations} The following statements hold: \begin{enumerate}[(i)] \item $u_1$ lies in~$V(R)$ if and only if $w_1$ lies in $V(R)$, \item $K_1$ is a conformal matching covered subgraph, and \item the subgraph induced by $E(K_1) \cup R$ is conformal. \end{enumerate} \end{lem} \begin{proof} First, we prove {\it (i)}. Suppose instead that $u_1 \in V(R)$ and $w_1 \notin V(R)$; that is, $u_1=b_2$, whereas $w_1$~and~$a_2$ are distinct; see Figure~\ref{fig:only-one-free-corner-in-VR}. For $X:=V(K_1)-w_1$, note that every edge in~$\partial(X)$, except for~$\alpha$, is either incident with~$u_1$ or with~$w_1$. Recall that if $M$ is any perfect matching, then $\alpha \in M$ if and only if $\beta \in M$. Using these facts, it is easy to see that $\partial(X)$ is a tight cut; this is a contradiction. \begin{figure} \caption{$\partial(X)$ is a nontrivial tight cut, where $X:=V(K_1)-w_1$} \label{fig:only-one-free-corner-in-VR} \end{figure} Now, we prove~{\it (ii)}. Since $K_1$ is either a ladder or a partial biwheel, it is matching covered. To show that $K_1$ is conformal, we will display a perfect matching~$M$ of~$G-V(K_1)$. Let $N$ be a perfect matching of~$H$ which contains~$a_1u_1$; observe that $M:=N-E(K_1)$ is the desired perfect matching. Note that, if $u_1,w_1 \in V(R)$, then {\it (iii)} follows immediately from~{\it (ii)}. Now suppose that $u_1,w_1 \notin V(R)$, and let $N$ be a perfect matching of~$G-\{a_2,w_1\}$; note that $\beta \in N$. A simple counting argument shows that $M:=N - E(K_1) - R$ is a perfect matching of~$G-V(K_1)-V(R)$; and this proves {\it (iii)}. \end{proof} In the following two lemmas, apart from other things, we show that under certain circumstances there exists an \Rcomp\ edge which is not in~$K_1$. 
\begin{lem} \label{lem:find-Rcompatible-edge-at-high-degree-free-corner} Suppose that $u_1,w_1 \notin V(R)$. Then at most one edge of~$\partial(u_1)-E(K_1)$ is not \Rcomp. {\rm (}An analogous statement holds for~$w_1$.{\rm )} \end{lem} \begin{proof} Note that, by Corollary~\ref{cor:application-of-LV}, at most two edges of~$\partial(u_1)$ are non-removable in~$H$; one of these is $a_1u_1$. Consequently, at most one edge of~$\partial(u_1)-E(K_1)$ is non-removable in~$H$. To complete the proof we will show that if $e$ is any removable edge of~$H$ such that $e \in \partial(u_1)-E(K_1)$, then $e$ is removable in~$G$ as well; for this, it suffices to show a perfect matching~$M$ which contains $\alpha$~and~$\beta$ but does not contain~$e$. Let $M_1$ be a perfect matching of~$G-V(K_1) - V(R)$; such a perfect matching exists by Lemma~\ref{lem:conformality-of-Rconfigurations}{\it (iii)}. Let $M_2$ be a perfect matching of~$K_1-\{a_1,b_1\}$; since $K_1$ is bipartite matching covered, such a perfect matching exists by \cite[Proposition~2.1]{koth19}. Now, \mbox{$M:=M_1 \cup M_2 \cup R$} is the desired perfect matching alluded to above, and this completes the proof. \end{proof} \begin{figure} \caption{When $|\partial(u_1)-E(K_1)| = |\partial(w_1)-E(K_1)| = 1$} \label{fig:only-one-edge-at-each-free-corner} \end{figure} \begin{lem} \label{lem:find-Rcompatible-when-free-corners-low-degree} Suppose that $u_1,w_1 \notin V(R)$. 
If {$|\partial(u_1) - E(K_1)| \leq 1$} and {$|\partial(w_1) - E(K_1)| \leq 1$} then the following statements hold: \begin{enumerate}[(i)] \item $u_1$ and $w_1$ are nonadjacent, \item $\partial(u_1) - E(K_1)$ has exactly one member, say~$\alpha'$, and likewise, $\partial(w_1)-E(K_1)$ has exactly one member, say~$\beta'$, \item $\alpha$~and~$\alpha'$ are adjacent if and only if $\beta$~and~$\beta'$ are adjacent, \item if $\alpha$~and~$\alpha'$ are nonadjacent then at most one edge of~$\partial(v)-\alpha'$ is not \Rcomp, where $v$ denotes the end of~$\alpha'$ which is distinct from~$u_1$; an analogous statement holds for $\beta$~and~$\beta'$. \end{enumerate} \end{lem} \begin{proof} We first verify {\it (i)} and {\it (ii)}. Observe that, if $u_1$~and~$w_1$ are adjacent, or, if the sets \mbox{$\partial(u_1)-E(K_1)$} and \mbox{$\partial(w_1)-E(K_1)$} are both empty, then $\{a_1,b_1\}$ is a $2$-vertex cut of~$G$; this is absurd. This proves {\it (i)}. Note that, if only one of \mbox{$\partial(u_1)-E(K_1)$} and \mbox{$\partial(w_1)-E(K_1)$} is nonempty then~$H$ has a cut-edge; this is a contradiction. This proves {\it (ii)}. As in the statement, let $\alpha'$ denote the only member of~$\partial(u_1)-E(K_1)$; and likewise, let $\beta'$ denote the only member of $\partial(w_1)-E(K_1)$. See Figure~\ref{fig:only-one-edge-at-each-free-corner}. We now show that {\it (iii)} holds. Suppose instead that $\beta$~and~$\beta'$ are adjacent, whereas $\alpha$~and~$\alpha'$ are nonadjacent. In particular, $\beta'$ has ends $w_1$~and~$b_2$. We let \mbox{$T:=B - V(K_1) - b_2$}, and note that $T$ is nonempty. Furthermore, all of the neighbours of~$T$ lie in the set \mbox{$S:=A-V(K_1)$}; consequently, $S$ is a nontrivial barrier of~$G$; this is absurd. We now proceed to prove {\it (iv)}. Suppose that $\alpha$~and~$\alpha'$ are nonadjacent; and as in the statement of the lemma, let $v$ denote the end of~$\alpha'$ which is distinct from~$u_1$.
By {\it (iii)}, $\beta$~and~$\beta'$ are also nonadjacent. We will first argue that at most one edge of~$\partial(v)-\alpha'$ is non-removable in~$H$. Observe that $\{\alpha',\beta'\}$ is a $2$-cut of~$H$; thus, neither~$\alpha'$ nor~$\beta'$ is removable in~$H$. By Corollary~\ref{cor:application-of-LV}, at most two edges of~$\partial(v)$ are non-removable in~$H$; one of these is~$\alpha'$. Consequently, at most one edge of $\partial(v)-\alpha'$ is non-removable in~$H$. To complete the proof we will show that if $e$ is any removable edge of~$H$ such that $e \in \partial(v)-\alpha'$, then $e$ is removable in~$G$ as well; for this, it suffices to show a perfect matching~$M$ which contains $\alpha$~and~$\beta$ but does not contain~$e$. Let $M_1$ be any perfect matching of~$G-\{a_2,v\}$; note that $\beta \in M_1$. A simple counting argument shows that $\beta'$ lies in~$M_1$ as well. Now, let $M_2$ be a perfect matching of \mbox{$K_1- \{a_1,u_1,b_1,w_1\}$}; such a perfect matching exists due to Proposition~\ref{prop:bracelike-property-of-Rconfigurations}. Observe that $M:=(M_1 - E(K_1)) \cup M_2 \cup \{\alpha,\alpha'\}$ is the desired perfect matching alluded to above. As discussed, this completes the proof. \end{proof} In the previous two lemmas, we have shown that under certain circumstances there exists an \Rcomp\ edge which is not in~$K_1$. However, in the proof of the Strictly \Rthin\ Edge Theorem (\ref{thm:strictly-Rthin-nb-bricks}), we will be interested in finding an \Rthin\ edge which is not in~$K_1$. To do so, we will choose an \Rcomp\ edge appropriately, and use Theorem~\ref{thm:rank-plus-index}, in conjunction with the following lemma, to argue that the chosen edge is indeed \Rthin. \begin{lem} \label{lem:structure-of-outside-Rcompatible-edge} Suppose that $u_1,w_1 \notin V(R)$. Let $e$ denote an \Rcomp\ edge which does not lie in $E(K_1)$, let $S$ denote a nontrivial barrier of~$G-e$, and $I$ the set of isolated vertices of~$(G-e)-S$. 
Then the following statements hold: \begin{enumerate}[(i)] \item $S \cap V(K_1)$ contains at most one vertex, and \item $I \cap V(K_1)$ is empty. \end{enumerate} \end{lem} \begin{proof} Since $e$ is \Rcomp, $S$ is a subset of one of the two color classes of~$H$; assume without loss of generality that $S \subset A$. To establish {\it (i)}, we will show that if $v_1$~and~$v_2$ are any two distinct vertices in~$V(K_1) \cap A$, then $(G-e)-\{v_1,v_2\}$ has a perfect matching~$M$. Let $M_1$ be a perfect matching of~$(H-e)-\{v_1,b_2\}$ where $b_2$ is the end of~$\beta$ which is not in~$V(K_1)$; such a perfect matching exists by \cite[Proposition~2.1]{koth19} as $H-e$ is matching covered. A simple counting argument shows that $M_1 \cap \partial(V(K_1))$ contains only one edge, and this edge is incident with the free corner~$u_1$. Let $M_2$ be a perfect matching of~$K_1 - \{b_1,u_1,v_1,v_2\}$; such a perfect matching exists due to Proposition~\ref{prop:bracelike-property-of-Rconfigurations}. Observe that $M:= (M_1 - E(K_1)) + M_2 + \beta$ is the desired perfect matching of~$(G-e)-\{v_1,v_2\}$, and this proves {\it (i)}. We now deduce {\it (ii)} from {\it (i)}. Suppose to the contrary that $I \cap V(K_1)$ is nonempty, and let $x$ denote any of its members. Observe that $x$ is adjacent with at least two vertices in~$V(K_1)$, and each of these must lie in~$S$; this contradicts {\it (i)}, and completes the proof. \end{proof} \subsection{Proof of Proposition~\ref{prop:Rconfigurations-almost-disjoint}} \label{sec:proof-Rconfigurations-almost-disjoint} As in the statement of the proposition, let $G$ be a simple \Rbrick, and let $K_1$ be an \Rconf\ with \external\ $a_1u_1$~and~$b_1w_1$, where $u_1$~and~$w_1$ denote the free corners of~$K_1$; see Figure~\ref{fig:first-Rconfiguration}. Suppose that $G$ has an \Rconf~$K_2$ which is distinct from~$K_1$; that is, $K_1$ and $K_2$ are not identical subgraphs of~$G$. We assume that $K_1$~and~$K_2$ are not vertex-disjoint.
Our goal is to deduce that $u_1$~and~$w_1$ are the free corners of~$K_2$, and that $K_2$ is otherwise vertex-disjoint with~$K_1$. We first argue that $u_1, w_1 \notin V(R)$. Note that every vertex of~$K_1$, except possibly $u_1$~and~$w_1$, is cubic in~$G$. Consequently, if $u_1,w_1 \in V(R)$ then $V(G) = V(K_1)$, since otherwise $\{u_1,w_1\}$ is a $2$-vertex cut of~$G$; furthermore, either $G$ is precisely the graph induced by $E(K_1) \cup R$, or otherwise, $G$ has one additional edge joining $u_1$~and~$w_1$; in either case, it is easily seen that $K_1$ is the only subgraph with all the properties of an \Rconf; this contradicts the hypothesis. By Lemma~\ref{lem:conformality-of-Rconfigurations}{\it (i)}, $u_1,w_1 \notin V(R)$. \begin{Claim} \label{claim:not-a-free-corner} Let $z_1$ be any vertex of~$K_1$ which is distinct from $u_1$~and~$w_1$. If $z_1 \in V(K_2)$ then every edge of~$K_1$ which is incident with~$z_1$ lies in~$E(K_2)$. \end{Claim} \begin{proof} Assume that $z_1 \in V(K_2)$. First consider the case in which $z_1 \in \{a_1,b_1\}$. Note that the degree of~$z_1$ in~$H$ is two; consequently, both edges of~$H$ incident with~$z_1$ lie in~$E(K_2)$. Now consider the case in which $z_1 \notin \{a_1,b_1\}$. Note that $z_1$ is cubic. Observe that, for an \Rconf~$K$, any vertex of~$K$, which is not one of its corners, is cubic in~$K$ as well as in~$G$. Thus, it suffices to show that $z_1$ is not a corner of~$K_2$. Suppose instead that $z_1$ is a corner of~$K_2$. As $z_1 \notin V(R)$, it is a free corner. Since $z_1$ is cubic, $K_2$ is an \Rladder. Also, $z_1$ must be adjacent with a corner of~$K_2$ which lies in~$V(R)$; such a corner is either $a_1$~or~$b_1$. Adjust notation so that $z_1$ is adjacent with~$a_1$; thus, both edges of~$H$ incident with~$a_1$ lie in~$E(K_2)$. Note that $a_1z_1$ is an external rung of~$K_2$. Also, since $u_1$ is not a corner of~$K_2$, it is cubic in~$K_2$ and in~$G$. 
We infer that $K_1$ is also an \Rladder; see Figure~\ref{fig:not-a-free-corner}. \begin{figure} \caption{Illustration for Claim~\ref{claim:not-a-free-corner}} \label{fig:not-a-free-corner} \end{figure} Let $y_1$ denote the neighbour of~$u_1$ in~$K_1$ which is distinct from~$a_1$, and let $v$ denote the third neighbour of~$u_1$. Note that $y_1, v \in V(K_2)$. Since $|\partial(u_1)-E(K_1)| =1$, Lemma~\ref{lem:find-Rcompatible-when-free-corners-low-degree}{\it (i)} implies that $v$ is distinct from~$w_1$. Since $K_2$ is a ladder, $a_1z_1$ lies in a $4$-cycle of~$K_2$; this implies that $y_1z_1 \in E(K_2)$. Note that $u_1y_1$ is an internal rung of~$K_2$. Let $y_2$ denote the neighbour of~$y_1$ which is distinct from $u_1$~and~$z_1$. Note that \mbox{$y_2 \in V(K_2)$}. Since $a_1z_1$ and $u_1y_1$ are rungs of~$K_2$, it must be the case that $v$ and $y_2$ are adjacent and the edge joining them is a rung of~$K_2$; however, it is easily seen that $v$ and $y_2$ are nonadjacent. We thus have a contradiction. This completes the proof of Claim~\ref{claim:not-a-free-corner}. \end{proof} We will now use Claim~\ref{claim:not-a-free-corner} to deduce that, since $K_1$~and~$K_2$ are distinct {\Rconf}s, the only vertices of~$K_1$ which may lie in~$K_2$ are its free corners (that is, $u_1$~and~$w_1$). Suppose instead that \mbox{$(V(K_1)- \{u_1,w_1\}) \cap V(K_2)$} is nonempty. Since \mbox{$K_1 - \{u_1,w_1\}$} is connected, Claim~\ref{claim:not-a-free-corner} implies that \mbox{$V(K_1) \subseteq V(K_2)$} and {$E(K_1) \subseteq E(K_2)$}. Furthermore, since \mbox{$|V(K_1) \cap V(K_2)| \geq 6$}, the set {$(V(K_2) - \{u_2,w_2\}) \cap V(K_1)$} is also nonempty, where $u_2$~and~$w_2$ denote the free corners of~$K_2$. By symmetry, \mbox{$V(K_2) \subseteq V(K_1)$} and \mbox{$E(K_2) \subseteq E(K_1)$}. We conclude that $K_1$~and~$K_2$ are identical subgraphs of~$G$; contrary to our hypothesis.
\begin{figure} \caption{When $K_1$~and~$K_2$ share only one free corner} \label{fig:K1-K2-share-only-one-free-corner} \end{figure} Thus, each member of~$V(K_1) \cap V(K_2)$ is a free corner of~$K_1$, and by symmetry, it is a free corner of~$K_2$ as well. By our hypothesis, $V(K_1) \cap V(K_2)$ is nonempty; thus, at least one of $u_1$~and~$w_1$ is a free corner of~$K_2$. Adjust notation so that $u_1$ is a free corner of~$K_2$. To complete the proof, we will show that $w_1$ is also a free corner of~$K_2$. Suppose not, that is, say $V(K_1) \cap V(K_2) = \{u_1\}$, and let $w_2$ denote the free corner of~$K_2$ distinct from~$u_1$. Observe that the ends~$a_2$ of $\alpha$ and $b_2$ of $\beta$ both lie in~$V(K_2)$; see Figure~\ref{fig:K1-K2-share-only-one-free-corner}. Furthermore, $|B - V(K_1 \cup K_2)| = |A - V(K_1 \cup K_2)| + 1$. We shall let \mbox{$T:=B -V(K_1 \cup K_2)$}. Since every vertex of~$K_1 \cup K_2$, except possibly $u_1,w_1$~and~$w_2$, is cubic, all neighbours of~$T$ lie in the set $S:= (A - V(K_1 \cup K_2)) \cup \{w_1,w_2\}$. Consequently, $S$ is a nontrivial barrier of~$G$; this is absurd. Thus, $u_1$~and~$w_1$ are the free corners of~$K_2$, and $K_2$ is otherwise vertex-disjoint with~$K_1$. This completes the proof of Proposition~\ref{prop:Rconfigurations-almost-disjoint}. \qed \section{Strictly \Rthin\ Edge Theorem} \label{sec:proof-of-strictly-Rthin-edge-theorem} As in the statement of the theorem (\ref{thm:strictly-Rthin-nb-bricks}), let $G$ be a simple \Rbrick\ which is free of \sRthin\ edges. Our goal is to show that $G$ is a member of one of the eleven infinite families which appear in the statement of the theorem, that is, to show that $G \in \mathcal{N}$. We adopt Notation~\ref{Not:Rbrick-doubleton}. We may assume that $G$ is different from $K_4$~and~$\overline{C_6}$, and thus, by the \Rthin\ Edge Theorem (\ref{thm:Rthin-nb-bricks}), $G$ has an \Rthin\ edge, say~$e_1$. 
Depending on the index of~$e_1$, we invoke either the \mbox{$R$-biwheel} Theorem~(\ref{thm:Rbiwheel-configuration}) or the \mbox{$R$-ladder} Theorem~(\ref{thm:Rladder-configuration}) to deduce that $G$ has an \Rconf, say~$K_1$, such that $e_1 \in E(K_1)$. We shall let $a_1u_1$~and~$b_1w_1$ denote the \external\ of~$K_1$, and adjust notation so that $u_1$~and~$w_1$ are its free corners. See Notation~\ref{Not:biwheel-ladder-convention} and Figure~\ref{fig:first-Rconfiguration}. Note that $a_1$ is an end of~$\alpha$ and $b_1$ is an end of~$\beta$. By Lemma~\ref{lem:conformality-of-Rconfigurations}, either both free corners $u_1$~and~$w_1$ lie in~$V(R)$, or otherwise, neither of them lies in~$V(R)$; let us first deal with the former case. \begin{Claim} \label{claim:prisms-Mobius-ladders-truncated-biwheels} If $u_1,w_1 \in V(R)$ then $G$ is either a prism, or a M{\"o}bius ladder or a truncated biwheel. \end{Claim} \begin{proof} Suppose that $u_1,w_1 \in V(R)$; that is, $\alpha = a_1w_1$ and $\beta = b_1u_1$. Since every vertex of~$K_1$ is cubic in~$G$, except possibly $u_1$~and~$w_1$, we conclude that $V(G) = V(K_1)$ as otherwise $\{u_1,w_1\}$ is a $2$-vertex cut of~$G$. Furthermore, either $G$ is precisely the graph induced by $E(K_1) \cup R$, or otherwise, $G$ has one additional edge joining $u_1$~and~$w_1$. In the latter case, $u_1w_1$ is a \sRthin\ edge, contrary to the hypothesis. In the former case, observe that: if $K_1$ is an \Rbiwheel, as shown in Figure~\ref{fig:first-Rconfiguration}a, then $G$ is a truncated biwheel; if $K_1$ is an \Rladder\ of odd parity, as shown in Figure~\ref{fig:first-Rconfiguration}b, then $G$ is a prism; and if $K_1$ is an \Rladder\ of even parity, as shown in Figure~\ref{fig:first-Rconfiguration}c, then $G$ is a M{\"o}bius ladder. \end{proof} We may thus assume that neither $u_1$ nor $w_1$ lies in~$V(R)$. Consequently, the end~$a_2$ of~$\alpha$ and the end~$b_2$ of~$\beta$ are both in~$V(G)-V(K_1)$. 
\begin{Claim} \label{claim:staircases-pseudo-biwheels} Either $G$ is a staircase or a pseudo-biwheel, or otherwise, $G$ has an \Rcomp\ edge which is not in~$E(K_1)$. \end{Claim} \begin{proof} We begin by noting that, if $|\partial(u_1) - E(K_1)| \geq 2$, then by Lemma~\ref{lem:find-Rcompatible-edge-at-high-degree-free-corner}, some edge of~$\partial(u_1)-E(K_1)$ is \Rcomp, and we are done; an analogous argument applies when $|\partial(w_1) - E(K_1)| \geq 2$. Now suppose that $|\partial(u_1)-E(K_1)| \leq 1$ and that $|\partial(w_1)-E(K_1)| \leq 1$. By Lemma~\ref{lem:find-Rcompatible-when-free-corners-low-degree} {\it (i)}~and~{\it (ii)}, $u_1$ and $w_1$ are nonadjacent; furthermore, $\partial(u_1)-E(K_1)$ has a single element, say~$\alpha'$; likewise, $\partial(w_1)-E(K_1)$ has a single element, say~$\beta'$; see Figure~\ref{fig:only-one-edge-at-each-free-corner}. We let $R':=\{\alpha',\beta'\}$. By {\it (iii)} of the same lemma, $\alpha$~and~$\alpha'$ are adjacent if and only if $\beta$~and~$\beta'$ are adjacent. First consider the case in which $\alpha$~and~$\alpha'$ are nonadjacent, and as in the statement of Lemma~\ref{lem:find-Rcompatible-when-free-corners-low-degree}{\it (iv)}, let $v$ denote the end of~$\alpha'$ which is distinct from~$u_1$; note that $v \notin V(K_1)$. By the lemma, $\partial(v)-\alpha'$ contains an \Rcomp\ edge, and we are done. Now suppose that $\alpha$~and~$\alpha'$ are adjacent; whence $\beta$~and~$\beta'$ are also adjacent. Note that $\alpha' = u_1a_2$ and $\beta' = w_1b_2$. Every vertex of~$K_1$, except possibly $u_1$~and~$w_1$, is cubic in~$G$; furthermore, $\partial(u_1)-E(K_1) = \{\alpha'\}$, and likewise, $\partial(w_1)-E(K_1) = \{\beta'\}$. We infer that $V(G) = V(K_1) \cup \{a_2,b_2\}$ as otherwise $\{a_2,b_2\}$ is a $2$-vertex cut of~$G$. Furthermore, since each of $a_2$~and~$b_2$ has degree at least three, there is an edge joining them; and $G$ is precisely the graph induced by~$E(K_1) \cup R \cup R' \cup \{a_2b_2\}$. 
Observe that if $K_1$ is an \Rbiwheel\ of order at least eight then $G$ is a pseudo-biwheel, and otherwise, $G$ is a staircase. \end{proof} We may thus assume that $G$ has an \Rcomp\ edge which is not in~$E(K_1)$. We will now use Theorem~\ref{thm:rank-plus-index} and Lemma~\ref{lem:structure-of-outside-Rcompatible-edge} to deduce that $G$ has an \Rthin\ edge which is not in~$E(K_1)$. \begin{Claim} \label{claim:second-Rthin-edge} $G$ has an \Rthin\ edge, say~$e_2$, which is not in~$E(K_1)$. \end{Claim} \begin{proof} Among all \Rcomp\ edges which are not in~$E(K_1)$, we choose one, say~$e_2$, such that ${\sf rank}(e_2) + {\sf index}(e_2)$ is maximum; we intend to show that $e_2$ is \Rthin. Suppose not; then, by Theorem~\ref{thm:rank-plus-index}, with~$e_2$ playing the role of~$e$, there exists another \Rcomp\ edge~$f$ such that (i) $f$ has an end each of whose neighbours in~$G-e_2$ lies in a (nontrivial) barrier~$S$ of~$G-e_2$, and (ii) ${\sf rank}(f) + {\sf index}(f) > {\sf rank}(e_2) + {\sf index}(e_2)$. Let $I$ denote the set of isolated vertices of~$(G-e_2)-S$. Condition (i) above implies that $f$ has one end in~$I$ and another end in~$S$. By Lemma~\ref{lem:structure-of-outside-Rcompatible-edge}, with $e_2$ playing the role of~$e$, the set $I \cap V(K_1)$ is empty. Since $f$ has one end in~$I$, we infer that $f$ is not in~$E(K_1)$; this, combined with condition (ii) above, contradicts our choice of~$e_2$. We thus conclude that $e_2$ is \Rthin. \end{proof} Now, depending on the index of~$e_2$, we invoke either the $R$-biwheel Theorem~(\ref{thm:Rbiwheel-configuration}) or the $R$-ladder Theorem~(\ref{thm:Rladder-configuration}) to deduce that $G$ has an \Rconf, say~$K_2$, such that $e_2 \in E(K_2)$. As $e_2$ is not in~$E(K_1)$ but it is in~$E(K_2)$, the {\Rconf}s $K_1$~and~$K_2$ are clearly distinct. 
By Proposition~\ref{prop:Rconfigurations-almost-disjoint}: either $K_1$~and~$K_2$ are vertex-disjoint; or otherwise, $u_1$~and~$w_1$ are the free corners of~$K_2$, and $K_2$ is otherwise vertex-disjoint with~$K_1$. In either case, the end~$a_2$ of~$\alpha$ and the end~$b_2$ of~$\beta$ are the two corners of~$K_2$ which are distinct from its free corners. Let us first deal with the case in which $K_1$~and~$K_2$ are not vertex-disjoint; Figure~\ref{fig:Rconfigurations-not-disjoint} shows an example in which $K_1$~and~$K_2$ are both {\Rbiwheel}s. \begin{figure} \caption{When the two {\Rconf}s are not vertex-disjoint} \label{fig:Rconfigurations-not-disjoint} \end{figure} The proof of the following claim closely resembles that of Claim~\ref{claim:prisms-Mobius-ladders-truncated-biwheels}. \begin{Claim} \label{claim:type-I-families} If $K_1$~and~$K_2$ are not vertex-disjoint then $G$ is either a double biwheel or a double ladder or a laddered biwheel, each of type~I. \end{Claim} \begin{proof} As noted above, $u_1$~and~$w_1$ are the free corners of~$K_2$, and $K_2$ is otherwise vertex-disjoint with~$K_1$. Consequently, the \external\ of~$K_2$ are $a_2u_1$~and~$b_2w_1$; see Figure~\ref{fig:Rconfigurations-not-disjoint}. Since every vertex of~$K_1 \cup K_2$ is cubic in~$G$, except $u_1$~and~$w_1$, we infer that $V(G) = V(K_1) \cup V(K_2)$, as otherwise $\{u_1,w_1\}$ is a $2$-vertex cut of~$G$. Furthermore, either $G$ is precisely the graph induced by~$E(K_1 \cup K_2) \cup R$, or otherwise, $G$ has one additional edge joining $u_1$~and~$w_1$. In the latter case, $u_1w_1$ is a \sRthin\ edge, contrary to the hypothesis. In the former case, observe that if $K_1$~and~$K_2$ are both {\Rbiwheel}s then $G$ is a double biwheel of type~I; likewise, if $K_1$~and~$K_2$ are both {\Rladder}s then $G$ is a double ladder of type~I; finally, if one of $K_1$~and~$K_2$ is an \Rladder\ and the other one is an \Rbiwheel\ then $G$ is a laddered biwheel of type~I.
\end{proof} We may thus assume that $K_1$ and $K_2$ are vertex-disjoint; and we shall let $a_2u_2$ and $b_2w_2$ denote the \external\ of~$K_2$; in particular, $u_2$~and~$w_2$ denote the free corners of~$K_2$. Figure~\ref{fig:Rconfigurations-disjoint} shows an example in which $K_1$ is an \Rladder\ and $K_2$ is an \Rbiwheel. \begin{figure} \caption{When the two {\Rconf}s are vertex-disjoint} \label{fig:Rconfigurations-disjoint} \end{figure} We now find the remaining three families, or show the existence of an \Rcomp\ edge which is not in~$E(K_1 \cup K_2)$; the proof is similar to that of Claim~\ref{claim:staircases-pseudo-biwheels}. \begin{Claim} \label{claim:type-II-families} Either $G$ is a double biwheel or a double ladder or a laddered biwheel, each of type~II, or otherwise, $G$ has an \Rcomp\ edge which is not in~$E(K_1 \cup K_2)$. \end{Claim} \begin{proof} We begin by noting that, if $|\partial(u_1) - E(K_1)| \geq 2$, then by Lemma~\ref{lem:find-Rcompatible-edge-at-high-degree-free-corner}, some edge of~$\partial(u_1)-E(K_1)$ is \Rcomp, and since $u_1 \notin V(K_2)$, such an edge is not in~$E(K_2)$, and we are done; an analogous argument applies when $|\partial(w_1) - E(K_1)| \geq 2$, or when $|\partial(u_2)-E(K_2)| \geq 2$ or when $|\partial(w_2)-E(K_2)| \geq 2$. Now suppose that, for $i \in \{1,2\}$, $|\partial(u_i) - E(K_i)| \leq 1$ and $|\partial(w_i) - E(K_i)| \leq 1$; by Lemma~\ref{lem:find-Rcompatible-when-free-corners-low-degree} {\it (i)}~and~{\it (ii)}, $u_i$~and~$w_i$ are nonadjacent; furthermore, each of these inequalities holds with equality. Let $\alpha'$ denote the only member of~$\partial(u_1)-E(K_1)$, and let $\beta'$ denote the only member of~$\partial(w_1)-E(K_1)$. First consider the case in which either $w_2$ is not an end of~$\alpha'$ or $u_2$ is not an end of~$\beta'$. Assume without loss of generality that $w_2$ is not an end of~$\alpha'$; thus the end of~$\alpha'$ distinct from~$u_1$, say~$v$, is not in~$V(K_1 \cup K_2)$.
By Lemma~\ref{lem:find-Rcompatible-when-free-corners-low-degree}{\it (iv)}, $\partial(v)-\alpha'$ contains an \Rcomp\ edge; such an edge is not in~$E(K_1 \cup K_2)$, and we are done. Now suppose that $w_2$ is an end of~$\alpha'$ and $u_2$ is an end of~$\beta'$. Note that every vertex of~$K_1 \cup K_2$, except possibly $u_1, w_1, u_2$ and $w_2$, is cubic in~$G$; furthermore, $\partial(u_1)-E(K_1) = \partial(w_2)-E(K_2) = \{\alpha'\}$, and likewise, $\partial(w_1)-E(K_1) = \partial(u_2)-E(K_2) = \{\beta'\}$. We conclude that $V(G) = V(K_1 \cup K_2)$ and $E(G) = E(K_1 \cup K_2) \cup R \cup R'$. Observe that: if $K_1$~and~$K_2$ are both {\Rbiwheel}s then $G$ is a double biwheel of type~II; likewise, if $K_1$~and~$K_2$ are both {\Rladder}s then $G$ is a double ladder of type~II; finally, if one of $K_1$~and~$K_2$ is an \Rladder\ and the other one is an \Rbiwheel\ then $G$ is a laddered biwheel of type~II. \end{proof} We may thus assume that $G$ has an \Rcomp\ edge which is not in~$E(K_1 \cup K_2)$. We will now use Theorem~\ref{thm:rank-plus-index} and Lemma~\ref{lem:structure-of-outside-Rcompatible-edge} to deduce that $G$ has an \Rthin\ edge which is not in~$E(K_1 \cup K_2)$. The proof is almost identical to that of Claim~\ref{claim:second-Rthin-edge}, except that now we have to deal with two {\Rconf}s instead of just one. \begin{Claim} \label{claim:third-Rthin-edge} $G$ has an \Rthin\ edge, say~$e_3$, which is not in~$E(K_1 \cup K_2)$. \end{Claim} \begin{proof} Among all \Rcomp\ edges which are not in~$E(K_1 \cup K_2)$, we choose one, say~$e_3$, such that ${\sf rank}(e_3) + {\sf index}(e_3)$ is maximum; we intend to show that $e_3$ is \Rthin. Suppose not; then, by Theorem~\ref{thm:rank-plus-index}, with~$e_3$ playing the role of~$e$, there exists another \Rcomp\ edge~$f$ such that (i) $f$ has an end each of whose neighbours in~$G-e_3$ lies in a (nontrivial) barrier~$S$ of~$G-e_3$, and (ii) ${\sf rank}(f) + {\sf index}(f) > {\sf rank}(e_3) + {\sf index}(e_3)$. 
Let $I$ denote the set of isolated vertices of~$(G-e_3)-S$. Condition (i) above implies that $f$ has one end in~$I$ and another end in~$S$. By Lemma~\ref{lem:structure-of-outside-Rcompatible-edge}, with $e_3$ playing the role of~$e$, the set $I \cap V(K_1)$ is empty; likewise, the set~$I \cap V(K_2)$ is empty. Since $f$ has one end in~$I$, we infer that $f$ is not in~$E(K_1 \cup K_2)$; this, combined with condition (ii) above, contradicts our choice of~$e_3$. We thus conclude that $e_3$ is \Rthin. \end{proof} Now, depending on the index of~$e_3$, we invoke either the $R$-biwheel Theorem~(\ref{thm:Rbiwheel-configuration}) or the $R$-ladder Theorem~(\ref{thm:Rladder-configuration}) to deduce that $G$ has an \Rconf, say~$K_3$, such that $e_3 \in E(K_3)$. As $e_3$ is not in~$E(K_1 \cup K_2)$ but it is in~$E(K_3)$, the \Rconf~$K_3$ is distinct from each of $K_1$~and~$K_2$. We have thus located three distinct {\Rconf}s in the brick~$G$; namely, $K_1, K_2$~and~$K_3$. However, this contradicts Corollary~\ref{cor:at-most-two-Rconfigurations}, and completes the proof of the Strictly \Rthin\ Edge Theorem (\ref{thm:strictly-Rthin-nb-bricks}). \qed \noindent {\bf Acknowledgments}: This work commenced in April 2015 when the first author was a Ph.D. candidate, and the second author was visiting the University of Waterloo for a fortnight. We are thankful to Joseph Cheriyan who helped facilitate this research visit, and participated in several of our discussions. We are indebted to both Cl{\'a}udio L. Lucchesi and U. S. R. Murty for their constant guidance and support, and for sharing their invaluable insights. \end{document}
\begin{document} \title{Spectra of Wishart Matrices \ with size-dependent entries.} \begin{abstract} We prove the convergence of the empirical spectral measure of Wishart matrices with size-dependent entries and characterize the limiting law by its moments. We apply our result to the cases where the entries are Bernoulli variables with parameter $c/n$ or truncated heavy-tailed random variables. In both cases, when $c$ goes to infinity or when the truncation is small, the limiting spectrum is a perturbation of the Marchenko-Pastur distribution and we compute its leading term. \end{abstract} \noindent {\bf MSC 2010 Classification:} 05C80; 60B20.\\ {\bf Keywords:} Wishart matrices; Marchenko-Pastur distribution; Erdős--Rényi bipartite random graphs; heavy tailed random variables. \section{Introduction} Let $X_n$ be a real random matrix of size $n \times m$ with i.i.d. entries. We define the Wishart matrix $W_n = \frac{1}{n} X_nX_n^T$, where $X_n^T$ is the transpose of $X_n$. The spectral measure of $W_n$ is the random probability law: \[ \mu_{W_n} = \frac{1}{n} \sum\limits_{ \lambda \in \text{Spec}(W_n)} \delta_{\lambda}, \] where $\text{Spec}(W_n)$ is the spectrum of $W_n$ and $\delta_{ \lambda}$ the Dirac mass at $\lambda$. Since $W_n$ is a positive symmetric matrix, its eigenvalues are nonnegative reals. The work of Marchenko and Pastur \cite{marchenko1967distribution} implies the following: when the entries have variance equal to $1$ and finite moments of all orders, then, almost surely, $\mu_{W_n}$ weakly converges to a probability law $\mu_{ \alpha}$ as $n,m \rightarrow + \infty$ and $m/n \rightarrow \alpha >0$. The law $\mu_{ \alpha}$ is given by: \[ \mu_{ \alpha}( \mathrm{d}x) = \frac{\sqrt{(b-x)(x-a)}}{ 2 \pi x} \mathrm{d}x + \mathbf{1}_{ \alpha <1} \left(1- \alpha \right) \delta_0( \mathrm{d}x), \] where $a = ( 1 - \sqrt{\alpha})^2$ and $b = (1 + \sqrt{ \alpha } )^2$. The main issue of this paper is to let the law of the entries of $X_n$ depend on $n$.
Informally, our first result (Theorem \ref{Theorem A.S. Convergence}) states that in that case, under some moment conditions, the measures $\mu_{W_n}$ converge weakly to a probability law which is characterized by its moments, for which we provide a formula. This is an analog for Wishart matrices of a result obtained by Zakharevich for Wigner matrices in \cite{zakharevich2006generalization}. The method here is based on a proof of the convergence of all the moments of the spectral measures $\mu_{W_n}$. The $k$-th limiting moment can be written as: \[ \sum\limits_{a=1}^{k} \sum\limits_{l=1}^{a} \alpha^l \sum\limits_{ \substack{ \mathbf{b}=(b_1, \ldots, b_a) \\ \substack{ b_1 \geq b_2 \geq \ldots \geq b_a \geq 2 \\ b_1 + b_2 + \cdots + b_a = 2k } } } | \mathcal{W}_k(a,a+1,l,\mathbf{b})| \prod\limits_{i=1}^{a} A_{b_i}. \] The set $\mathcal{W}_k(a,a+1,l,\mathbf{b})$ is a combinatorial object linked with closed words on planar rooted trees and encodes the combinatorics of moments. We give a precise definition in Section \ref{Section Zakharevich}. Interestingly, the coefficients $A_i$, given by the formula \eqref{Assumption Limiting Moment}, are the only remnants of the laws of the entries of the matrices. The convergence of the spectral measure was already proved in \cite{benaych2012marchenko} by Benaych-Georges and Cabanal-Duvillard, using different arguments. See also Male in \cite{male2017limiting} for related work. However, the main advantage of our approach is the explicit formula we obtain for the moments, which is more amenable to analysis, as we will see in Sections \ref{Section Zakharevich} and \ref{Section the Bernoulli case}. In \cite{vengerovsky2014eigenvalue}, Vengerovsky treated the particular case of diluted matrices $X_n(i,j) = a(i,j) d_n(i,j)$ where the $a(i,j)$'s are i.i.d. centered random variables and the $d_n(i,j)$'s are i.i.d. with Bernoulli law of parameter $c/n$.
He derived a formula for the limiting moments, in terms of combinatorial quantities that admit a recursive formula. In the second part of the paper, we will focus on this particular case and let the entries of $X_n$ be i.i.d. Bernoulli laws with parameter $c/n$. In this setting, the Wishart matrices can be easily linked with the adjacency matrix of a bipartite random graph which admits a limit for the local weak topology. This convergence can be used to prove the convergence of the resolvent of the bipartite graph and therefore of $\mu_{W_n}$ itself, as explained in \cite{bordenave2010resolvent} by Bordenave and Lelarge. This is the content of Theorem \ref{Theorem Bernoulli case}. The limiting spectral measure $\mu_{ \alpha,c}$ depends only on $\alpha$ and $c$ and converges to the law $\mu_{\alpha}$ as $c \rightarrow \infty$. In Theorem \ref{Theorem Asympt Dvpt Bernoulli case}, we describe how $\mu_{ \alpha,c}$ differs from its limit $\mu_{ \alpha}$ by giving an asymptotic expansion in $1/c$ of its moments. More precisely, we will obtain that, in the sense of moments convergence: \[ c \big( \mu_{ \alpha ,c} - \mu_{ \alpha } \big) \underset{ c \rightarrow + \infty}{ \longrightarrow } \mu_{ \alpha}^{(1)}, \] where $\mu_{ \alpha}^{(1)}$ is a signed measure of total mass zero, see Theorem \ref{Theorem Asympt Dvpt Bernoulli case}. The proof, based on a more careful analysis of the moment formula obtained in Theorem \ref{Theorem A.S. Convergence}, is inspired by the computations made in \cite{enriquez2015spectra} by Enriquez and Ménard. A natural extension would be to prove that the convergence holds in the sense of weak convergence, but it should involve new techniques since the moments of a signed measure of total mass zero do not characterize it. See Figure \ref{fig6} for numerical simulations. \begin{figure}\label{fig6} \end{figure} In the last part of this paper, we apply our results to heavy tailed random matrices.
In that case, the entries of $X_n$ do not have finite moments of all orders so that our main result does not apply. Instead, we truncate the entries at a constant $B>0$ times the largest $n$-th quantile of the corresponding law. By Theorem \ref{Theorem A.S. Convergence}, the spectral measures associated to the truncated random matrices converge to a deterministic probability law. The moments of this limiting law admit an asymptotic expansion involving the measures $\mu_{ \alpha}$ and $\mu_{ \alpha}^{(1)}$, as $B \rightarrow 0$. See Theorem \ref{Theorem Heavy Tailed}. \section{A generalized Marchenko-Pastur theorem}\label{Section Zakharevich} Let $ \mathcal{P} = \{P_n \}_{ n \geq 1}$ be a family of probability laws on $\R$ which have zero mean. For all $n \geq 1$, let $X_n = (X_n(i,j))_{1 \leq i \leq n, \, 1 \leq j \leq m}$ be a random $n \times m$ matrix with i.i.d. entries with law $P_n$. We will make the hypothesis that the ratio $m/n$ converges to a real number $\alpha >0$ and that for all $k \geq 1$, the following limit exists and is finite: \begin{equation} A_k := \lim\limits_{n \rightarrow + \infty} \frac{M_k(P_n)}{n^{k/2-1} M_2(P_n)^{k/2}}, \label{Assumption Limiting Moment} \end{equation} where $M_k(P_n)$ is the $k$-th moment of $P_n$. Denote by $\mathcal{A}$ the sequence formed by the $A_k$'s. We are interested in the behavior of the spectral measures of the sequence of random matrices \begin{equation*} W_n := \frac{1}{n M_2(P_n)} X_n X_n^T. \end{equation*} In order to properly state our first result, we need to introduce the notion of word on a labeled graph. A labeled graph is a graph $\G = (\V, \mathrm{E})$ together with a labeling of the vertices, that is a one-to-one map from $\V$ to $\{1, \ldots, |\V| \}$. A relabeling of a labeled graph is a new choice of bijection between $\V$ and $\{1, \ldots, |\V|\}$. Note that there are $|\V|!$ choices of labelings for a given graph $\G = (\V, \mathrm{E})$.
A word of length $k \geq 1$ on a labeled graph $\G$ is a sequence of labels $i_1, i_2, \ldots, i_k$ such that $\{i_j, i_{j+1}\}$ is a pair of adjacent labels (that is the associated vertices are neighbours in $\G$) for all $1 \leq j \leq k-1$. A word of length $k$ is said to be closed if $i_1 = i_k$. Let $\ii = i_1, \ldots, i_k$ and $\ii' = i_1', \ldots, i_k'$ be two words of length $k$ on two labeled graphs $\G$ and $\G'$ having the same number of vertices. Then, $\ii$ and $\ii'$ are said to be equivalent if there exists a bijection $\sigma$ of $\{1, \ldots, |\V|\}$ such that $\sigma(i_j) = i_j'$ for all $1 \leq j \leq k$. In words, $\ii$ and $\ii'$ are equivalent if there exists a relabeling of $\G$ such that the word associated to $\ii$ is exactly $\ii'$. One can check that this defines an equivalence relation on words on labeled graphs. Recall that a planar rooted tree is a connected graph without cycles embedded in the plane, with a distinguished vertex called the root. A vertex at odd (resp. even) distance from the root will be called an odd (resp. even) vertex. An edge with an odd (resp. even) origin vertex will be called an odd (resp. even) edge. \begin{theorem}\label{Theorem A.S. Convergence} Suppose that for some $\gamma>0$, $A_k = O( \gamma^k )$ as $k\rightarrow +\infty$. Then there exists a probability law $\mu_{ \mathcal{A}, \alpha}$ depending only on $\mathcal{A}$ and $\alpha$, such that $\mu_{W_n}$ converges weakly to $\mu_{ \mathcal{A}, \alpha}$ in probability: for all $\varepsilon >0$ and all bounded continuous function $f: \R \rightarrow \R$, \begin{equation*} \PP \left( \left| \int_{\R} f \mathrm{d}\mu_{W_n} - \int_{\R} f \mathrm{d}\mu_{ \mathcal{A}, \alpha} \right| > \varepsilon \right) \underset{ n \rightarrow +\infty }{ \longrightarrow } 0.
\end{equation*} Moreover, the measure $\mu_{\mathcal{A}, \alpha}$ is characterized by its sequence of moments: \begin{equation} M_k( \mu_{ \mathcal{A},\alpha}) = \sum\limits_{a=1}^{k} \sum\limits_{l=1}^{a} \alpha^l \sum\limits_{ \substack{ \mathbf{b}=(b_1, \ldots, b_a) \\ b_1 \geq b_2 \geq \ldots \geq b_a \geq 2 \\ b_1 + b_2 + \cdots + b_a = 2k } } | \mathcal{W}_k(a,a+1,l,\mathbf{b})| \prod\limits_{i=1}^{a} A_{b_i}, \label{Moments formula in the Theorem} \end{equation} where $\mathcal{W}_k(a,a+1,l,\mathbf{b})$ is a set of representatives of the equivalence classes of closed words on labeled rooted planar trees having ``$a$'' edges, of which $l$ are odd edges, starting from the root and such that for all $1 \leq i \leq a$, one edge is browsed $b_i$ times. \end{theorem} \begin{remark} The theorem can be thought of as a universality result. Namely, if two sequences of probability laws $P_n$ and $P_n'$ have the same asymptotic $\mathcal{A}$, the limiting spectral measures of $W_n$ and $W_n'$ are the same (in probability). \end{remark} \begin{corollaire} If for all $k>2$, $A_k = 0$, the measures $\mu_{ \mathcal{A}, \alpha}$ and $\mu_{ \alpha }$ coincide. For example, this is the case when the laws $P_n$ are all equal. \end{corollaire} As the statement suggests, we are going to prove the result by the method of moments. Classically, we start with a computation of the average moments of $\mu_{W_n}$. For $k \geq 1$, we can write: \begin{equation} \E M_k(\mu_{W_n}) = \frac{1}{n^{k+1}M_2(P_n)^k} \sum\limits_{ \substack{1\leq i_1, \ldots, i_k \leq n \\ 1 \leq j_1, \ldots, j_k \leq m} } \E[ X(i_1,j_1) X(i_2,j_1) \cdots X(i_k,j_k)X(i_1,j_k) ]. \label{Equation Average Moment} \end{equation} Denote by $(\ii,\jj)$ the generic word $i_1j_1i_2j_2 \ldots i_kj_k$ appearing in \eqref{Equation Average Moment}.
We define the bipartite graph $\G=(\V,\EE)$ associated to the word $(\ii,\jj)$ by: \[ \V = \{ (i_r, \ii), (j_r, \jj); \, 1 \leq r \leq k \} \quad \text{and} \quad \EE = \big\{ \{(i_r, \ii), (j_r, \jj)\}, \{ (i_{r+1}, \ii), (j_r, \jj) \}; \, 1 \leq r \leq k \big\}, \] where we used the convention $k+1=1$. The abstract symbols $\ii$ and $\jj$ are needed to obtain a bipartite graph since the $i_r$'s and $j_r$'s can have common values (see Figure \ref{fig4} for illustration). We will refer to them as $\ii$- and $\jj$-letters. In words, the vertices of $\G$ are the letters of the word $( \ii, \jj)$ and two vertices are linked by an edge when they are consecutive in $(\ii,\jj)$. Denote by $s$ the number of vertices, $a$ the number of edges, $l$ the number of $\jj$-vertices and $\overline{l}$ the number of $\ii$-vertices in the word. Since $\G$ is connected, $s \leq a+1$. Moreover, since $P_n$ has zero mean, each edge must appear at least twice in the word to give a non-zero contribution in \eqref{Equation Average Moment}. As a consequence we obtain the bound $a \leq k$ because $(\ii,\jj)$ possesses $2k$ edges counted with multiplicity. \begin{center} \includegraphics[scale=0.65]{DessinExempleGraphe.pdf} \captionof{figure}{Example of a word $(\ii,\jj)$ with its associated graph and quantities.} \label{fig4} \end{center} Two words $(\ii,\jj)$ and $(\ii', \jj')$ are said to be equivalent if one can find a permutation $\sigma$ of $\{1, \ldots,n \}$ and another one $\tau$ of $\{1, \ldots, m \}$ such that \[ \forall p \in \{1, \ldots, k\}, \quad \sigma(i_p) = i_p' \, \, \, \text{and} \, \, \, \tau(j_p) = j_p'. \] One can check that this is an equivalence relation on the words appearing in \eqref{Equation Average Moment}. Note that $(\ii,\jj)$ has \[ C(s,l) = n(n-1) \cdots (n-s+l+1) \times m(m-1) \cdots (m-l+1) \sim \alpha^l n^s\] equivalent words. Fix $a \in \{1, \ldots, k\}$, $1 \leq s \leq a+1$ and $1 \leq l \leq a$.
Let $\mathcal{B}_{a,k}$ be the set of $a$-tuples $\mathbf{b}=(b_1, \ldots, b_a)$ of integers such that \begin{enumerate} \item $b_1 \geq b_2 \geq \cdots \geq b_a \geq 2$; \item $b_1+ \cdots +b_a = 2k$. \end{enumerate} For all $k \geq 1$ and $\mathbf{b} \in \mathcal{B}_{a,k}$, we introduce $\mathcal{W}_k(a,s,l,\mathbf{b})$ a set of representatives of the equivalence classes of words $(\ii,\jj)$ such that the associated graph has $a$ edges, $s$ vertices of which $l$ are $\jj$-vertices and such that for all $1 \leq i \leq a$ there is an edge in $\EE$ which has multiplicity $b_i$ in $(\ii,\jj)$. We can rewrite \eqref{Equation Average Moment} as: \begin{equation} \sum\limits_{a=1}^{k} \sum\limits_{s=1}^{a+1} \sum \limits_{l=1}^{s} \frac{C(s,l)}{n^{a+1}} \sum\limits_{b \in \mathcal{B}_{a,k}} \, \, \sum\limits_{ (\ii,\jj) \in \mathcal{W}(a,s,l,b)} \, \, \prod\limits_{1 \leq i \leq a} \frac{M_{b_i}(P_n)}{n^{b_i/2-1} M_2(P_n)^{b_i/2}}. \label{Average Moment and Graphs} \end{equation} From this equation we easily deduce the form of the limiting moments: \begin{lemme} An asymptotic contribution arises only if $s=a+1$ that is when the graph associated to $(\ii,\jj)$ is a tree. More precisely the limit of \eqref{Average Moment and Graphs} when $n \rightarrow +\infty$ is \begin{equation} M_k := \lim\limits_{n \rightarrow +\infty} \E M_k( \mu_{W_n} )= \sum\limits_{a=1}^{k} \sum\limits_{l=1}^{a} \alpha^l \sum\limits_{b \in \mathcal{B}_{a,k}} | \mathcal{W}_k(a,a+1,l,b)| \prod\limits_{i=1}^{a} A_{b_i}. \label{What is M_k equal to} \end{equation} \end{lemme} \begin{proof}[Proof] Since $C(s,l)n^{-a-1} \sim \alpha^l n^{s-a-1}$ when $n \rightarrow +\infty$ we deduce that when $s<a+1$ the asymptotic contribution is zero. Hence a possible non-zero contribution arises only when $s=a+1$. The formula is a consequence of \eqref{Assumption Limiting Moment}. 
\end{proof} \begin{remark}\label{Remark on the parity} The only non-zero contributions arise when $(\ii,\jj)$ is a walk on a tree that browses every edge and starts and finishes at the same vertex. Therefore each edge must be visited an even number of times: each $b_i$ in the tuple $\mathbf{b}$ is even. \end{remark} \begin{remark} The set $\mathcal{W}_k(a,a+1,l,\mathbf{b})$ is also the set of closed words on rooted planar trees having $a$ edges out of which $l$ are odd edges, starting from the root and such that for all $1 \leq i \leq a$, one edge is browsed $b_i$ times. Notice that the number of $\jj$-vertices is equal to the number of vertices in odd generations. \end{remark} In view of Theorem \ref{Theorem A.S. Convergence} we have to prove that $M_k( \mu_{W_n})$ concentrates around its mean. Since we are looking for a convergence in probability, it is sufficient to show that its variance vanishes when $n$ tends to infinity. \begin{lemme}\label{Lemme Variance Sommable} For all $k\geq 1$, $\Var( M_k(\mu_{W_n}) ) = O(n^{-1})$. In particular, $M_k(\mu_{W_n})$ converges to $M_k$ in probability. \end{lemme} \begin{proof}[Proof] Let $k \geq 1$. We can write $\E[ M_k(\mu_{W_n})^2 ] - \E[ M_k(\mu_{W_n})]^2$ as \begin{equation} \frac{1}{n^{2(k+1)}M_2(P_n)^{2k}} \sum\limits_{ (\ii,\jj),(\ii',\jj') } \Big( \E[P(\ii,\jj)P(\ii',\jj')] - \E[ P(\ii,\jj) ] \E[ P(\ii',\jj')] \Big), \label{Variance Write} \end{equation} where $P(\ii,\jj)$ is the product $X(i_1,j_1)X(i_2,j_1) \cdots X(i_1,j_k)$. We denote by $\G$ (resp. $\G'$) the graph associated to $(\ii,\jj)$ (resp. $(\ii',\jj')$), the corresponding quantities such as $s$ and $s'$ being defined as before. We also consider the graph $\G \cup \G'$ associated to $i_1 j_1 \ldots i_1 j_k i_1' j_1' \ldots i_1'j_k'$, and introduce $S$ its number of vertices, $A$ its number of edges and $L$ its number of $\jj$-vertices and $\jj'$-vertices.
Note that if $\G$ and $\G'$ have no edge in common, then the contribution is zero by independence of $P(\ii,\jj)$ and $P(\ii',\jj')$. We can therefore restrict the sum to pairs of words $\big( (\ii,\jj), (\ii',\jj') \big)$ sharing at least an edge. In this case $\G \cup \G'$ is connected, hence $A \geq S-1$. Moreover, each edge must appear at least twice otherwise the contribution is zero since $P_n$ has mean zero. Therefore, the sum \eqref{Variance Write} rewrites \begin{multline} \frac{1}{n^{2(k+1)} M_2(P_n)^{2k}}\sum\limits_{S=1}^{2k+1} \, \, \sum\limits_{L=1}^{S} \, \, \sum \limits_{A=S-1}^{2k} C(S,L) \\ \times \sum\limits_{\mathbf{B} \in \mathcal{B}_{A,2k}} \, \, \, \, \sum\limits_{ (\ii,\jj),(\ii',\jj') \in \mathcal{W}_k(A,S,L,\mathbf{B})} \Big( \E[P(\ii,\jj)P(\ii',\jj')] - \E[ P(\ii,\jj) ] \E[ P(\ii',\jj')] \Big). \label{Variance Write 2} \end{multline} Fix a generic couple $\big( (\ii,\jj),(\ii',\jj') \big)$. Let $e$ be an edge of $\G \cup \mathrm{G}'$. The corresponding $A$-tuple $\mathbf{B} \in \mathcal{B}_{A,2k}$ possesses a coefficient $B_i$ such that $e$ has multiplicity $B_i$. Note $b_i$ (resp. $b_i'$) the multiplicity of $e$ in $(\ii,\jj)$ (resp. $(\ii',\jj')$). We have the relation $b_i+b_i'=B_i$. The contribution of this generic couple in \eqref{Variance Write 2} is therefore \begin{equation*} \frac{C(S,L)}{n^{A+2}} \times \left( \frac{\E[P(\ii,\jj)P(\ii',\jj')]}{\prod\limits_{1 \leq i \leq A} n^{B_i/2-1}M_2^{B_i/2}} \right. \left. - \frac{\E[P(\ii,\jj)]}{ \prod\limits_{1 \leq i \leq A} n^{b_i/2-1}M_2^{b_i/2} } \times \frac{\E[ P(\ii',\jj')]}{\prod\limits_{1 \leq i \leq A} n^{b_i'/2-1}M_2^{b_i'/2} } \right). \end{equation*} By assumption \eqref{Assumption Limiting Moment}, the absolute value of the difference inside the parentheses is bounded. This gives the conclusion since $C(S,L) \sim \alpha^L n^S$ and $S \leq A+1$. \end{proof} In order to obtain Theorem \ref{Theorem A.S. 
Convergence}, it remains to show that the sequence $\{M_k\}_{k \geq 1}$ entirely determines a probability law. To that aim, it is enough to prove that $M_k$ does not grow faster than $k^{ck}$ for some positive constant $c$. First, remark that \[ |\mathcal{W}_k(a,a+1,l,\mathbf{b})| \leq \frac{(2k)^{2k}}{k+1} { 2k \choose k}. \] Indeed, there are $\frac{1}{a+1} { 2a \choose a}$ rooted planar trees having $a$ edges. Moreover, two elements $(\ii,\jj)$ and $(\ii',\jj')$ in $\mathcal{W}_k(a,a+1,l,\mathbf{b})$ inducing the same tree differ only by the order in which each edge is browsed in the reading of $(\ii,\jj)$ (resp. $(\ii',\jj')$). For a fixed multiplicity $b_i$ and its associated edge $e$, there are at most ${ 2k \choose b_i}$ different possibilities to place the occurrences of $e$ because a word has $2k$ edges counted with multiplicity. Therefore the number of $(\ii,\jj) \in \mathcal{W}_k(a,a+1,l,\mathbf{b})$ associated to a fixed tree is bounded by \[ \prod\limits_{1 \leq i \leq a} { 2k \choose b_i } \leq \prod\limits_{1 \leq i \leq a} \frac{(2k)^{b_i}}{b_i!} \leq (2k)^{2k}. \] Using formula \eqref{What is M_k equal to} and the assumption $A_i = O(\gamma^i)$, we obtain the estimate \[ M_k = O \left( \gamma^{k} \alpha^k k (2k)^{2k+1} \sum\limits_{a=1}^{k} | \mathcal{B}_{a,k} | \right). \] Finally the cardinality of $\mathcal{B}_{a,k}$ is bounded by the number of compositions of the integer $2k$. This last quantity is equal to \[ \sum\limits_{i=1}^{2k} { 2k-1 \choose i-1} = 2^{2k-1}, \] where we summed over the number of compositions of $2k$ into $i$ parts. As a result $M_k = O(k^{ck})$ for some constant $c >0$. This concludes the proof of Theorem \ref{Theorem A.S. Convergence}.\\ The proof of the almost sure convergence of $\mu_{W_n}$ would require a concentration result analogous, for instance, to \cite[Lemma 4.18]{bordenave2012around}.
Rather than proving this kind of result, which would be a technical task, we present an alternative approach, specific to the case where the entries have Bernoulli law, borrowed from Bordenave and Lelarge's paper \cite{bordenave2010resolvent}. \section{The Bernoulli case}\label{Section the Bernoulli case} In this section, we study the particular case where $P_n$ is the centered Bernoulli law of parameter $c/n$, $c$ being a positive number, that is: \[ P_n \left( - \frac{c}{n} \right) = 1 - \frac{c}{n} \quad \text{and} \quad P_n \left( 1 - \frac{c}{n} \right) = \frac{c}{n}. \] In this case, since the second moment of $P_n$ verifies $M_2(P_n) = c/n + o(c/n)$ as $n \rightarrow \infty$, we set \[ W_n = \frac{1}{c}X_n X_n^T \] to simplify notations. We first give another proof for the convergence of $\mu_{W_n}$, thanks to an interpretation of the hermitization of $X_n$ as the adjacency matrix of a random bipartite graph $\G_{n,m}$. This makes possible the use of the results of Bordenave and Lelarge in \cite{bordenave2010resolvent} after identifying the local limit of $\G_{n,m}$. In a second part, we give an asymptotic expansion in $1/c$ for the moments of the limiting spectral measure, inspired by Enriquez and Ménard (see \cite{enriquez2015spectra}). \subsection{Another proof of the convergence} To obtain the almost sure convergence of $\mu_{W_n}$, we will rather study the convergence of $W'_n = c^{-1} A_n A_n^T$, where $A_n$ is an $n \times m$ matrix having i.i.d. entries with (non-centered) Bernoulli law of parameter $c/n$. It is indeed sufficient because, denoting respectively $F$ and $F'$ the cumulative distribution functions of $\mu_{W_n}$ and $\mu_{W'_n}$, a consequence of Lidskii's inequalities is that: \[ || F - F' ||_{ \infty} \leq \frac{ \text{rk}(X_n - A_n)}{n}, \] where $\text{rk}$ is the rank operator. As announced before, we have the following theorem.
\begin{theorem}\label{Theorem Bernoulli case} There exists a probability law $\mu_{ \alpha,c}$ depending only on $\alpha$ and $c$ such that, almost surely, $\mu_{W'_n}$ converges weakly to $\mu_{ \alpha,c}$. Hence, $\mu_{W_n}$ converges weakly to $\mu_{ \alpha,c}$. \end{theorem} \begin{remark} It can be proved that the set of atoms of $\mu_{ \alpha,c}$ is dense in $\mathbf{R}_+$. More precisely, it is the image by $x \mapsto x^2$ of the set of totally real algebraic integers, which coincides with the set of eigenvalues of finite trees as proved in \cite{salez2015every} by Salez. Besides, a consequence of the results of Bordenave, Sen and Virag in \cite{bordenave2013mean} is that $\mu_{ \alpha, c}$ possesses a continuous part if and only if $c>1$. \end{remark} Define the hermitization of $A_n$ as the hermitian matrix: \begin{equation} H(A_n) = \begin{pmatrix} 0 & A_n \\ A_n^T & 0 \end{pmatrix}. \end{equation} Remark that the spectrum of $H(A_n)$ is $\{ \pm \sqrt{ \lambda_i(A_n A_n^T) } \}_{1 \leq i \leq n}$. Let $f$ be the bijection of $\mathbf{R}_+$ defined by $f(x)=\sqrt{x}$. For a measure $\nu$ on $\mathbf{R}_+$ we define $\mathrm{Sym}( \nu )( \cdot ) = ( \nu( \cdot ) + \nu( - \cdot ) )/2$ the symmetrized version of $\nu$. Then \[ \mu_{ H(A_n) } = (\mathrm{Sym} \circ f_{*}) \mu_{A_n A_n^T}, \] where $f_{*} \nu$ is the pushforward of a measure $\nu$ by $f$. Since $ \mathrm{Sym} \circ f_{*} $ defines a bijection between the measures which are supported on $\mathbf{R}_+$ and the symmetric measures on $\mathbf{R}$, it suffices to show the convergence of $\mu_{H(A_n)}$ to obtain Theorem \ref{Theorem Bernoulli case}. To avoid some unpleasant confusion, we will add an apostrophe to the asymptotic measures involved in the proof. Now, $H(A_n)$ can be interpreted as the adjacency matrix of a random bipartite graph. Let $\mathrm{K}_{n,m}$ be the complete bipartite graph with $n$ and $m$ vertices of each color.
The vertices of $\mathrm{K}_{n,m}$ will be denoted $1, \ldots, n+m$, two of them being linked by an edge if and only if one belongs to $\{1, \ldots, n \}$ and the other to $\{n+1, \ldots, n+m \}$. Perform a Bernoulli percolation with parameter $c/n$ on $K_{n,m}$: keep (independently) each edge with probability $c/n$ and remove it with probability $1-c/n$. We denote by $\G_{n,m}$ the resulting random graph. The adjacency matrix of $\G_{n,m}$ has the same law as $H(A_n)$. In the setting of local convergence introduced by Benjamini and Schramm \cite{benjamini2011recurrence} and Aldous and Steele \cite{aldous2004objective}, $\G_{n,m}$ converges in law to a random tree $\T_{ \alpha, c}$ for the local topology. To give a precise statement, we give some definitions in what follows. For any connected, locally finite graph $\G$ and any vertex $v \in \G$ we will note $(\G,v)$ the class of pointed graphs isomorphic to the graph $\G$ pointed in $v$. For any $r \geq 0$, $[\G,v]_r$ will denote the ball of radius $r$ around $v$ in $\G$ for the graph distance. This induces a topology (called the local topology) on the set $\mathcal{G}^*$ of pointed graphs (up to isomorphism) which are locally finite and connected, making it a separable and complete space. For all nonnegative real number $x$ let $\mathcal{P}(x)$ denote the Poisson law with parameter $x$. Let $\T_{ \alpha, c, 1 }$ be the random tree where each individual reproduces independently from each other and such that individuals of an even and an odd generation reproduce respectively according to the laws $\mathcal{P}(c)$ and $\mathcal{P}( \alpha c)$. Let $\T_{ \alpha, c, 2}$ be the random tree where each individual reproduces independently from each other and such that individuals of an even and an odd generation reproduce respectively according to the laws $\mathcal{P}( \alpha c)$ and $\mathcal{P}(c)$. 
Notice that $\T_{ \alpha, c, 2}$ has the same law as the random tree issued from a child of the root of $\T_{ \alpha, c, 1}$. \begin{center} \includegraphics[scale=0.65]{PoissonLimitTrees.pdf} \captionof{figure}{The recursive relation between $\T_{ \alpha, c, 1}$ and $\T_{ \alpha, c, 2}$. Here $N_1$ and $N_2$ are independent random variables with laws $\mathcal{P}(c)$ and $\mathcal{P}(\alpha c)$; and the $\T_{ \alpha, c, 1}^{(i)}$ (resp. the $\T_{ \alpha, c, 2}^{(i)}$) are i.i.d. copies of $\T_{ \alpha, c, 1}$ (resp. $\T_{ \alpha, c, 2}$).} \label{fig3} \end{center} Let $B$ be a Bernoulli random variable of parameter $1 / ( \alpha + 1)$ independent of $\T_{ \alpha, c, 1}$ and $\T_{ \alpha, c, 2}$. We define $\mu_{ \alpha, c}'$ as the law of the random tree $\T_{ \alpha, c} := \mathbf{1}_{B=1} \T_{ \alpha, c, 1} + \mathbf{1}_{B=0} \T_{ \alpha, c, 2}$. Let $o$ be a uniformly distributed vertex on $\G_{n,m}$. We define the random probability measure on $\mathcal{G}^*$ \[ U_o(\G_{n,m}) := \delta_{ (\G_{n,m}(o),o)} = \frac{1}{n+m} \sum\limits_{i=1}^{n+m} \delta_{(\G_{n,m}(i),i)}. \] Integrating with respect to the randomness of $\G_{n,m}$ gives a new measure $\E[ U_o(\G_{n,m}) ]$ which is characterized by the relation $ \E[ U_o(\G_{n,m}) ](A) = \PP( (\G_{n,m}(o),o) \in A )$ for every measurable set $A \in \mathcal{B}(\mathcal{G}^*)$. \begin{prop}\label{Local Convergence Theorem} The deterministic probability measure $\E[U_o(G_{n,m})]$ converges weakly to $\mu_{ \alpha, c}'$. Moreover, if $o_1$ and $o_2$ are two independent copies of $o$, the product $ \E[U_{o_1}(G_{n,m})] \otimes \E[U_{o_2}(G_{n,m})]$ converges weakly to $\mu_{ \alpha, c}' \otimes \mu_{ \alpha, c}'$. \end{prop} \begin{proof} The first part is a combinatorial argument that shows that $\PP( [\G_{n,m},1]_r \equiv t )$ converges to $\PP( [ \T_{\alpha,c,1}, \rho]_r \equiv t)$ as $n \rightarrow + \infty$ for all $r \geq 1$ and every rooted planar tree $t$ of depth $r$.
For the second part, it suffices to remark that two independent uniform vertices $o_1$ and $o_2$ are at distance greater than $r$ with probability tending to one as $n \rightarrow + \infty$, for any $r \geq 1$. \end{proof} Let us discuss the consequences of this proposition. It implies the validity of the main assumptions of the convergence theorem of Bordenave and Lelarge \cite[Theorem 5]{bordenave2010resolvent}, relative to the empirical spectral measure of the adjacency matrix of large graphs having a local limit. What remains to check is the uniform integrability of the sequence of degrees $\{\deg_{ \G_{n,m} }(o)\}_{n \geq 1}$, which holds. The existence of a probability law $\mu_c'$ such that almost surely $\mu_{H(A_n)}$ converges weakly to $\mu_c'$ is then a direct application of a result of Bordenave and Lelarge \cite[Theorem 5]{bordenave2010resolvent}. We also get a description of the Stieltjes transform of $\mu_c'$. Indeed \cite{bordenave2010resolvent} shows that there exists a unique pair of probability laws $(\mathcal{L}_1, \mathcal{L}_2)$ on the set of analytic functions on $\mathbf{C}_+ = \{ z \in \mathbf{C}: \, \, \mathrm{Im}(z) > 0 \}$ such that for all $z \in \mathbf{C}_+$: \[ \left\{ \begin{array}{l} X_1(z) \overset{ (d) }{=} - \left(z + \sum\limits_{i=1}^{N_1} X_2^{(i)}(z) \right)^{-1} \\ X_2(z) \overset{ (d) }{=} - \left(z + \sum\limits_{i=1}^{N_2} X_1^{(i)}(z) \right)^{-1}, \end{array} \right. \] where $X_1$ and $X_2$ are independent random variables having laws $\mathcal{L}_1$ and $\mathcal{L}_2$, the $X_1^{(i)} $ and $X_2^{(i)}$ are i.i.d. copies of $X_1$ (resp. $X_2$), $N_1$ has law $\mathcal{P}(c)$ and $N_2$ has law $\mathcal{P}( \alpha c)$; each of these variables being independent. Then, the Stieltjes transform of $\mu_c'$ is given by: \begin{equation} \forall z \in \mathbf{C}_+, \quad m_{\mu_c'}(z) := \int_{ \mathbf{R}} \frac{1}{x-z} \mathrm{d}\mu_c'(x) = \frac{1}{\alpha + 1} \E[ X_1(z) ] + \frac{\alpha}{\alpha + 1} \E[X_2(z)].
\end{equation} This concludes the proof of Theorem \ref{Theorem Bernoulli case}, the limiting law being $\mu_{\alpha,c} = (\mathrm{Sym} \circ f_*)^{-1}\mu_c'$. \subsection{Asymptotic expansion of the moments} Combining Theorem \ref{Theorem A.S. Convergence} with Theorem \ref{Theorem Bernoulli case}, we deduce that, almost surely, $\mu_{W_n}$ converges weakly to a probability law $\mu_{ \alpha, c}$ which is characterized by its sequence of moments. In that case, the asymptotic $\mathcal{A} = \{ A_k \}_{k \geq 1}$ of the laws $P_n$ which are Bernoulli laws of parameter $c/n$ is given by: \begin{equation*} A_k = \lim_{ n \rightarrow + \infty } \frac{1}{n^{k/2-1}} \frac{( 1 - c/n )^k (c/n) + (-c/n)^k (1-c/n) }{ [( 1 - c/n )^2 (c/n) + (-c/n)^2 (1-c/n)]^{k/2} } = c^{1 - k/2} \mathbf{1}_{ k > 1}. \end{equation*} This leads to the following formula for the $k$-th moment of $\mu_{ \alpha,c}$: \begin{equation} M_k( \mu_{ \alpha,c}) = \sum\limits_{a=1}^{k} \sum\limits_{l=1}^{a} \alpha^l \sum\limits_{ \substack{ b_1 \geq b_2 \geq \ldots \geq b_a \geq 2 \\ b_1 + b_2 + \cdots + b_a = 2k } } | \mathcal{W}_k(a,a+1,l,\mathbf{b})| \prod\limits_{i=1}^{a} c^{1 - b_i/2}. \label{Formula In The Bernoulli Case} \end{equation} When $c \rightarrow + \infty$, we retrieve the moments of the Marchenko-Pastur law $\mu_{ \alpha }$. It is therefore natural to try to understand how $\mu_{ \alpha, c}$ differs from $\mu_{\alpha}$ when $c$ is large but finite. We give an answer to this question by giving an asymptotic expansion in $1/c$ of the moments of $\mu_{ \alpha,c}$. This is done by a more careful treatment of equation \eqref{Formula In The Bernoulli Case}, which is combinatorial in nature. The method is inspired by the paper \cite{enriquez2015spectra} of Enriquez and Ménard, where the authors treated the case of adjacency matrices of Erdös-Rényi graphs with parameter $c/n$.
\begin{theorem}\label{Theorem Asympt Dvpt Bernoulli case} There exists a signed measure $\mu_{\alpha}^{(1)}$ such that for all $k \geq 1$, as $c \rightarrow +\infty$: \begin{equation} M_k(\mu_{\alpha,c}) = M_k \left( \mu_{ \alpha} + \frac{1}{c} \mu_{\alpha}^{(1)} \right) + o \left( \frac{1}{c} \right). \label{DVLPTAsymptoticTheorem} \end{equation} Moreover, the measure $\mu_{\alpha}^{(1)}$ has total mass zero and the following density: \begin{equation} \frac{x^2-2x(\alpha+1)+(\alpha^2+1)}{2 \alpha \sqrt{(b-x)(x-a)}} \mathbf{1}_{(a,b)}, \label{Density Of The Perturbation In the Theorem} \end{equation} where $a = (1 - \sqrt{ \alpha } )^2$ and $b = (1 + \sqrt{ \alpha } )^2$ are the edges of the support of the Marchenko-Pastur law $\mu_{\alpha}$. \end{theorem} \begin{proof} Fix an integer $k \geq 1$. First, Remark \ref{Remark on the parity} ensures that all the $b_i$'s in \eqref{Formula In The Bernoulli Case} are even. Therefore, we can rewrite: \begin{equation} M_k( \mu_{ \alpha,c}) = \sum\limits_{a=1}^{k} \sum\limits_{l=1}^{a} \alpha^l \sum\limits_{ \substack{ d_1 \geq d_2 \geq \ldots \geq d_a \geq 1 \\ d_1 + d_2 + \cdots + d_a = k } } | \mathcal{W}_k(a,a+1,l,2d)| \prod\limits_{i=1}^{a} c^{1 - d_i}. \label{Formula In The Bernoulli Case 2} \end{equation} As $c \rightarrow + \infty$, nonvanishing terms correspond to the case where all the $d_i$'s are equal to $1$. This forces $a$ to be equal to $k$ and leads to: \[ M_k( \mu_{ \alpha,c}) = \sum\limits_{l=1}^{k} \alpha^l | \mathcal{W}_k(k,k+1,l,(2, \ldots ,2))| + o(1) \] as $c \rightarrow +\infty$. Recall that $\mathcal{W}_k(k,k+1,l,(2, \ldots ,2))$ is a set of representatives of closed words starting at the root, of length $2k+1$ on labeled planar rooted trees having $k$ edges, $l$ of these being odd edges. This allows us to write: \[ M_k( \mu_{ \alpha,c}) = \sum\limits_{ \T \in \mathcal{T}_k } \alpha^{l(\T)} + o(1) \] as $c \rightarrow +\infty$, where $\mathcal{T}_k$ is the set of planar rooted trees having $k$ edges and $l(\T)$ the number of odd edges in a given tree $\T \in \mathcal{T}_k$.
For convenience, we introduce the notations \[ a_k := \sum\limits_{ \T \in \mathcal{T}_k } \alpha^{l(\T)} \quad \text{and} \quad b_k := \sum\limits_{ \T \in \mathcal{T}_k } \alpha^{ \overline{l}(\T)} , \] where $\overline{l}(\T)$ is the number of even edges of a given tree $\mathrm{T} \in \mathcal{T}_k$. It turns out that the $a_k$'s are the moments of $\mu_{ \alpha }$. To obtain the term of order $1/c$ we will need to compute the generating series of the $a_k$'s and $b_k$'s. Let $\T$ be a planar tree having $k+1$ edges. Let $\T_1$ be the tree induced by the first child of the root and $\T_2$ the connected component of the root after removing the edge between the root and its first child (see Figure \ref{fig5}). {\begin{center} \includegraphics[scale=0.5]{DecompositionArbre.pdf} \captionof{figure}{Decomposition of a planar tree.} \label{fig5} \end{center}} Denoting $p$ (resp. $q$) the number of edges of $\T_1$ (resp. $\T_2$), we have $p+q=k$. It is straightforward to obtain the relations $l(\T) = 1 + \overline{l}(\T_1) + l(\T_2)$ and $\overline{l}(\T) = l(\T_1) + \overline{l}(\T_2)$. Therefore \begin{equation} \left\{ \begin{array}{l} a_{k+1} = \alpha \sum\limits_{p+q=k} a_pb_q \\ b_{k+1} = \phantom{ \alpha} \sum\limits_{p+q=k} a_pb_q. \end{array} \right. \end{equation} Denoting $A(z) = \sum_{k \geq 0} a_k z^k$ and $B(z) = \sum_{k \geq 0} b_k z^k$ the generating functions of the $a_k$'s and the $b_k$'s we obtain the functional relations: \begin{equation} \left\{ \begin{array}{l} A = 1 + \alpha z A B \\ B = 1 + z A B . \end{array} \right. \label{LinearRelationBetweenAandB} \end{equation} It implies that $zA^2 + ( \alpha z - z -1)A +1 =0$. If we denote $S(z) := -z^{-1}A(z^{-1})$ the Stieltjes transform of the measure with moments $a_k$'s, then $S$ satisfies the equation: \begin{equation} zS^2 - (\alpha - z - 1) S +1 =0. 
\label{Equation on the Stieltjes transform} \end{equation} The function $S$ of the variable $z \in \mathbf{C}_+$ is the limit of the Stieltjes transform of the $\mu_{W_n}$ when $c \rightarrow +\infty$. The imaginary part of a Stieltjes transform is positive: this allows us to choose the right solution for equation \eqref{Equation on the Stieltjes transform}. For a complex $z$, if we denote by $\sqrt{z}$ the square root having a positive imaginary part on the upper half plane: \begin{equation} S(z) = \frac{\alpha - z - 1 + \sqrt{(z-b)(z-a)}}{2z}, \label{Stieltjes formula S} \end{equation} where $a = (1 - \sqrt{ \alpha } )^2$ and $ b = (1 + \sqrt{ \alpha } )^2$. This is the Stieltjes transform of the Marchenko-Pastur law $\mu_{ \alpha}$, as announced. \\ Let us compute the perturbation of order $1/c$. It arises when all the $d_i$'s are equal to $1$ except one which is equal to $2$ in \eqref{Formula In The Bernoulli Case 2}. This forces $a$ to be equal to $k-1$ and leads to the following expansion as $c \rightarrow +\infty$: \[ M_k( \mu_{\alpha,c} ) = M_k( \mu_{ \alpha} ) + \frac{1}{c} \sum\limits_{ l = 1}^{k-1} \alpha^{l} | \mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))| + o \left( \frac{1}{c} \right). \] In that case $\mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2)) $ is a set of representatives of the equivalence classes of closed words of length $2k+1$ on labeled planar rooted trees having $k-1$ edges, starting at the root and such that each edge is browsed exactly two times except one which is browsed four times. Let us denote \[ a_k^{(1)} = \sum\limits_{ l = 1}^{k-1} \alpha^{l} | \mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))|, \] and \[ b_k^{(1)} = \sum\limits_{ l = 1}^{k-1} \alpha^{k-1-l} | \mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))|. \] The associated generating series will be denoted $A^{(1)}$ and $B^{(1)}$. Remark that by definition $a_0^{(1)} = a_1^{(1)} = b_0^{(1)}= b_1^{(1)} = 0$. We are going to obtain a recursion linking the four generating series $A,B,A^{(1)}$ and $B^{(1)}$.
The idea is to use a first generation decomposition of the planar rooted tree on which the words are written, and then to distinguish whether or not the quadruple edge is an edge of this generation. For all $k \geq 1$, we use the partition \[ \mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2)) = \mathcal{W}_k^{(0)}(k-1,k,l,(4,2, \ldots, 2)) \bigsqcup \mathcal{W}_k^{(1)}(k-1,k,l,(4,2, \ldots, 2)), \] where $\mathcal{W}_k^{(0)}(k-1,k,l,(4,2, \ldots, 2))$ is the set of representatives belonging to $\mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))$ such that the quadruple edge is not a first generation edge, and $\mathcal{W}_k^{(1)}(k-1,k,l,(4,2, \ldots, 2))$ is the set of representatives belonging to $\mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))$ such that the quadruple edge is a first generation edge. The associated quantities will be denoted $a_k^{(1,0)},a_k^{(1,1)},A^{(1,0)},\ldots$ For example: \[ a_k^{(1,0)} = \sum\limits_{ l = 1}^{k-1} \alpha^{l} | \mathcal{W}_k^{(0)}(k-1,k,l,(4,2, \ldots, 2))|. \] A representative word $(\ii,\jj) \in \mathcal{W}_k(k-1,k,l,(4,2, \ldots, 2))$ can be written: \[ (\ii,\jj) = i_1 \mathbf{S}_1 \zeta \xi \mathbf{S}_2 \xi \zeta \mathbf{S}_3 \zeta \xi \mathbf{S}_4 \xi \zeta \mathbf{S}_5 i_1, \] where: \begin{enumerate} \item $i_1 \mathbf{S}_1 \zeta \mathbf{S}_5 i_1$ is the contour of a planar tree having $p_1$ edges; \item $\xi \mathbf{S}_2 \xi$ is the contour of a planar tree having $p_2$ edges; \item $ \zeta \mathbf{S}_3 \zeta$ is the contour of a planar tree having $p_3$ edges; \item $\xi \mathbf{S}_4 \xi$ is the contour of a planar tree having $p_4$ edges; \item $\xi \mathbf{S}_2 \xi \mathbf{S}_4 \xi$ is the contour of a planar tree having $p_2+p_4$ edges. \end{enumerate} The above integers satisfy $p_1+p_2+p_3+p_4 = k-2$. See Figure \ref{fig6} for an illustration.
\begin{center} \includegraphics[scale=0.75]{FourEdge.pdf} \captionof{figure}{The writing $(\ii,\jj)$ and its quadruple edge $\{ \zeta, \xi\}$.} \label{fig6} \end{center} All of these conditions are sufficient to define a class of canonical representatives. Let $\T$ be the planar rooted tree on which a representative word $(\ii,\jj)$ is written. Denote $e_4$ the quadruple edge, $\T \setminus e_4$ the connected component of the root after removing $e_4$ and $\T^{e_4}$ the planar rooted tree formed by the descendants of $e_4$. Then, the above conditions ensure that $(\ii,\jj)$ is such that $\T \setminus e_4$ and $\T^{e_4}$ are respectively browsed in lexicographic order. Let $(\ii,\jj) \in \mathcal{W}_k^{(0)}(k-1,k,l,(4,2, \ldots, 2))$. The underlying tree can have $p \in \{1, \ldots, k-2\}$ edges which are all browsed two times by $(\ii,\jj)$. One of the trees induced by the children of the root contains the quadruple edge, leading to $p$ different choices. On another side, if $(\ii,\jj) \in \mathcal{W}_k^{(1)}(k-1,k,l,(4,2, \ldots, 2))$ then the underlying tree can have $p \in \{1, \ldots, k-1\}$ edges out of which one is the quadruple edge. There are $\binom{p+1}{2}$ choices for the locations of the visits of the quadruple edge. See Figure \ref{fig7} for an illustration.
\begin{center} \includegraphics[scale=0.65]{FirstEdgeDecomposition.pdf} \captionof{figure}{First edge decomposition of a word respectively in $\mathcal{W}_k^{(0)}(k-1,k,l,(4,2, \ldots, 2))$ on the left and in $\mathcal{W}_k^{(1)}(k-1,k,l,(4,2, \ldots, 2))$ on the right, where the quadruple edge is in red.} \label{fig7} \end{center} As a consequence, we get the following recursions: \begin{equation*} a_k^{(1,0)} = \sum\limits_{p=1}^{k-2} \alpha^p p \sum\limits_{q_1 + \cdots + q_p = k-p-1} b_{q_1+1}^{(1)} b_{q_2} \cdots b_{q_p}, \phantom{bblbllbb} \label{Equation 1st gen out} \end{equation*} and \begin{equation*} a_k^{(1,1)} = \sum\limits_{p=1}^{k-2} \alpha^p {p+1 \choose 2} \sum\limits_{q_1 + \cdots + q_{p+1} = k-p-1} b_{q_1} b_{q_2} \cdots b_{q_{p+1}}. \end{equation*} This yields \[ A^{(1,0)} = \frac{\alpha z B^{(1)}}{(1- \alpha z B)^2} = \alpha z A^2 B^{(1)} \] and \[ A^{(1,1)}= \frac{\alpha z^2 B^2}{(1- \alpha z B)^3} = \alpha z^2 A^3 B^2, \] where we used equation \eqref{LinearRelationBetweenAandB}. The same arguments and computations give $B^{(1,0)} = z A^{(1)} B^2$ and $B^{(1,1)} = z^2 A^2 B^3$, to finally obtain \begin{equation} \left\{ \begin{array}{l} A^{(1)} = \alpha z A^2 B^{(1)} + \alpha z^2 A^3 B^2 \\ B^{(1)} = z A^{(1)} B^2 + z^2 A^2 B^3. \end{array} \right. \end{equation} We deduce, using equation \eqref{LinearRelationBetweenAandB}, that $A^{(1)}$ is given by: \begin{equation} A^{(1)} = \frac{\alpha (zAB)^2}{1- \alpha (zAB)^2}(zA^2B+A) = \frac{AB}{1- \alpha (zAB)^2} \alpha (zAB)^2. \label{premiere formule pour A1} \end{equation} To obtain a more explicit formula for $A^{(1)}$, one can compute $\alpha(zAB)^2$ using first that $B=(A+\alpha-1)/ \alpha$ and then that $zA^2 = (1-(\alpha-1)z)A-1$. 
After simplifications: \begin{equation} \alpha (zAB)^2 = \frac{(1- \alpha z - z)A +z -1}{\alpha z} = \frac{(\alpha^2+1)z^2-2z(\alpha+1)+1-(1-\alpha z - z) \sqrt{\delta}}{2 \alpha z^2}, \label{equation pour A} \end{equation} since $A = (2z)^{-1}(1-( \alpha-1)z-\sqrt{ \delta})$. Using that $\sqrt{\delta}=-2zA-(\alpha-1)z+1$, one can then check that $\sqrt{ \delta} AB = 1 - \alpha (zAB)^2$. From \eqref{equation pour A}, one can finally rewrite \eqref{premiere formule pour A1} as \begin{equation*} A^{(1)} = \frac{1}{\sqrt{\delta}} \frac{(\alpha^2+1)z^2-2z(\alpha+1)+1-(1-\alpha z - z) \sqrt{\delta}}{2 \alpha z^2}. \end{equation*} Therefore, the function $S^{(1)}(z) = -\frac{1}{z}A^{(1)}( \frac{1}{z})$ is given by \begin{equation} S^{(1)}(z) = - \frac{z^2-2z( \alpha+1) + (\alpha^2+1)}{2 \alpha \sqrt{(z-b)(z-a)}} + \frac{z-\alpha-1}{2 \alpha}. \end{equation} It corresponds to the Stieltjes transform of the measure $\mu_{\alpha}^{(1)}$ with density: \[ -\frac{1}{\pi} \lim\limits_{ \varepsilon \rightarrow 0} \mathrm{Im} \big( S^{(1)}(x+ i \varepsilon) \big) = \frac{x^2-2x(\alpha+1)+(\alpha^2+1)}{2 \alpha \pi \sqrt{(b-x)(x-a)}} \mathbf{1}_{(a,b)}. \] This concludes the proof of Theorem \ref{Theorem Asympt Dvpt Bernoulli case}. \end{proof} The case $\alpha =1$, which corresponds to asymptotically square matrices $X_n$, should be emphasized. In this setting the density is \[ \frac{1}{2 \pi} \frac{x^2-4x+2}{\sqrt{x(4-x)}} \mathbf{1}_{[0,4]}, \] which corresponds to the pushforward by $x \mapsto x^2$ of the density obtained in \cite{enriquez2015spectra} by Enriquez and Ménard for the Wigner case, as expected. \section{Heavy tailed random matrices}\label{Heavy tailed random matrices} In this section we use Theorem \ref{Theorem A.S. Convergence} to study the spectral measure associated to heavy tailed Wishart matrices. For all $n\geq 1$, let $X_n$ be a random matrix of size $n \times m$ having i.i.d. entries with heavy tailed law $P$.
As before, we suppose that the ratio $m/n$ converges to $\alpha>0$. We will consider the case where $P$ has density \[ \frac{C(\beta)}{1+|x|^{\beta}}, \] where $1< \beta <3$ and $C(\beta) = ( \int_{ \mathbf{R}} (1+|x|^{ \beta})^{-1} \mathrm{d}x )^{-1}$. Theorem $1.10$ of Belinschi, Dembo and Guionnet in \cite{belinschi2009spectral} ensures that, since $P$ is in the domain of attraction of a $(\beta-1)$-stable law, the spectral measure of \[ n^{ -\frac{2}{\beta-1} } X_nX_n^T\] converges to a deterministic probability law $\mu_{ \alpha, \beta}$ depending only on $\alpha$ and $\beta$. To apply Theorem \ref{Theorem A.S. Convergence}, let us consider the truncated version of $X_n$. For all $n \geq 1$, let $P_n$ be the probability law given by \[ P_n(\mathrm{d}x) = \frac{C(\beta)}{1+|x|^{ \beta}} \mathbf{1}_{[-B n^{1/(\beta-1)},B n^{1/(\beta-1)}]} \mathrm{d}x + Z(B,\beta) \big( \delta_{-B n^{1/(\beta-1)}}( \mathrm{d}x) + \delta_{B n^{1/(\beta-1)}} ( \mathrm{d}x) \big), \] where $B>0$ and $Z(B,\beta) = C(\beta) \int_{B n^{1/(\beta-1)} }^{+ \infty} (1+|x|^{ \beta})^{-1} \mathrm{d}x$, so that $P_n$ is indeed a probability measure. In words, $P_n$ is the truncation of $P$ at $-B n^{1/(\beta-1)}$ and $B n^{1/(\beta-1)}$. We will denote $Y_n$ the random matrix of size $n \times m$ with i.i.d. entries having law $P_n$. Let us compute the asymptotic $\mathscr{A} = \{A_k\}_{ k \geq 2}$ of the sequence $\{P_n\}_{n \geq 1}$.
For all $k \geq 1$, as $n$ tends to infinity: \begin{align*} \frac{M_k(P_n)}{n^{k/2-1} M_2(P_n)^{k/2}} &\sim n^{1-k/2} (2C)^{1-k/2} \, \frac{\int_1^{B n^{1/(\beta-1)}} x^{k-\beta} \mathrm{d}x + \frac{(B n^{1/(\beta-1)})^{k+1-\beta}}{1-\beta}}{( \int_1^{B n^{1/(\beta-1)}} x^{2-\beta} \mathrm{d}x + \frac{(B n^{1/(\beta-1)})^{3-\beta}}{1-\beta})^{k/2}} \\ &\sim n^{1-k/2} (2C)^{1-k/2} \, \frac{\frac{(Bn^{1/(\beta-1)})^{k+1-\beta}}{k+1-\beta} + \frac{(B n^{1/(\beta-1)})^{k+1-\beta}}{1-\beta}}{( \frac{(B n^{1/(\beta-1)})^{3-\beta}}{3-\beta} + \frac{(B n^{1/(\beta-1)} )^{3-\beta}}{1-\beta})^{k/2}} \\ &\sim n^{1-k/2} (2C)^{1-k/2} \, \frac{\frac{1}{k+1-\beta} + \frac{1}{1-\beta}}{ \frac{1}{3-\beta} + \frac{1}{1-\beta}} B^{1-\beta + \frac{k}{2}(\beta-1)} n^{ \frac{1}{\beta-1} \big( k+1-\beta + \frac{k}{2}(\beta-3) \big)}. \end{align*} We finally obtain: \[ \frac{M_k(P_n)}{n^{k/2-1} M_2(P_n)^{k/2}} \sim (2C)^{1-k/2} \, \frac{\frac{1}{k+1-\beta} + \frac{1}{1-\beta}}{ \frac{1}{3-\beta} + \frac{1}{1-\beta}} B^{1-\beta + \frac{k}{2}(\beta-1)}. \] The quantity $n^{1/( \beta-1)}$ corresponds to the largest $n$-th quantile of $P$. Therefore, our choice of law $P_n$ can be interpreted as a truncation of the largest entries in each rows of $X_n$. If one had chosen an order of truncation smaller than $n^{1/(\beta-1)}$, the $A_k$'s would have been all equal to zero which corresponds to the Marchenko-Pastur regime, meaning that the truncation is too large and leads to a non-heavy tailed behavior. On the contrary, if one had chosen an order of truncation larger than $n^{1/(\beta-1)}$, the $A_k$'s would have been all infinite, meaning that the truncation is not large enough to apply Theorem \ref{Theorem A.S. Convergence}. In this spirit, the parameter $B>0$ can be seen as a finer adjustment of the truncation. Theorem \ref{Theorem A.S. 
Convergence} ensures that there exists a probability law $\mu_{\mathscr{A}, \alpha} = \mu_{\alpha,\beta,B}$ such that the spectral measures $\mu_n$ associated to the Wishart matrices $\frac{1}{nM_2(P_n)}Y_nY_n^T$ converge weakly in probability to $\mu_{\alpha,\beta,B}$. Using equation \eqref{Moments formula in the Theorem}, we obtain an asymptotic development of the moments of $\mu_{\alpha,\beta,B}$: \begin{theorem}\label{Theorem Heavy Tailed} For all $k \geq 1$, as $B \rightarrow 0$: \[ M_k(\mu_{\alpha,\beta,B}) = M_k( \mu_{ \alpha }) + B^{ \beta-1} \frac{1}{2C( \beta )} \cdot \frac{(3-\beta)^2}{(2-\beta)(5-\beta)} M_k \left( \mu_{ \alpha }^{(1)} \right) + o\left( B^{\beta-1} \right). \] \end{theorem} \begin{remark} For simplicity we considered the explicit law $P(\mathrm{d}x) = \frac{C(\beta)}{1+|x|^{\beta}}$. However, using Karamata's estimates (Theorem 2, Section VIII.9 of \cite{feller1967introduction}) on truncated moments of regularly varying functions, one could have studied in a similar way the case when $P$ is in the domain of attraction of a $(\beta -1)$-stable law, for $1 < \beta <3$. \end{remark} \paragraph*{Acknowledgments.} The author would like to warmly thank his advisors Nathanaël Enriquez and Laurent Ménard for many helpful discussions and suggestions about this work. \noindent Nathan Noiry :\\ Laboratoire Modal'X, \\ UPL, Université Paris Nanterre,\\ F92000 Nanterre France \end{document}
\begin{document} \title{Intermediate and Extrapolated Spaces for Bi-Continuous Semigroups} \author{Christian Budde} \address{University of Wuppertal, School of Mathematics and Natural Sciences, Gaussstrasse 20, 42119 Wuppertal, Germany} \email{[email protected]} \author{B\'alint Farkas} \address{University of Wuppertal, School of Mathematics and Natural Sciences, Gaussstrasse 20, 42119 Wuppertal, Germany} \email{[email protected]} \begin{abstract} We discuss the construction of the full Sobolev (H\"older) scale for non-densely defined operators on a Banach space with rays of minimal growth. In particular, we give a construction for extrapolation- and Favard spaces of generators of (bi-continuous) semigroups, or which is essentially the same, Hille--Yosida operators on \emph{Saks spaces}. \end{abstract} \date{} \maketitle % The original shorthand \def's were expanded throughout the body during processing; % the leftover definitions no longer compiled (they attempted to redefine \mathrm, % \mathbb, \underline, etc.) and have therefore been removed. \section*{Introduction} Extrapolation spaces for generators of \emph{$C_0$-semigroups} (used here synonymously to \emph{`strongly continuous, one-parameter operator semigroups of bounded linear operators'}) on Banach spaces, or for more general operators, have been designed to study maximal regularity questions by Da Prato and Grisvard \cite{DAPRATO1984107}; see also Walter \cite{Walter}, Amann \cite{Amann}, van Neerven \cite{van1992adjoint}, Nagel, Sinestrari \cite{nagel1993inhomogeneous}, Nagel \cite{NagelSurvey}, and Sinestrari \cite{Sinestrari1996}.
These spaces (and the extrapolated operators) also play a central role in certain abstract perturbation results, most prominently for boundary-type perturbations, see e.g., Desch, Schappacher \cite{Desch}, Greiner \cite{Greiner}, Staffans, Weiss \cite{Staffans2004}, Adler, Bombieri, Engel \cite{adler2014}, and as a result their application area is vast. In this paper, we concentrate on the construction of extrapolation spaces for linear operators $A$ having non-empty resolvent set, but we do not assume the operator to be a Hille--Yosida operator or to be densely defined. For the densely defined case such a construction is known due to the seminal papers of Da Prato, Grisvard, \cite{DaPrato1982}, Amann \cite{Amann} and Nagel, Sinestrari \cite{nagel1993inhomogeneous}. In the case of non-densely defined, sectorial operators there is a very general---\emph{almost purely algebraic}---construction due to Haase \cite{Haase}, who also discusses universal extrapolation spaces. Here, we present a slightly different construction of extrapolation- and extrapolated Favard spaces, leading to the construction of \emph{extrapolated semigroups} in the absence of norm strong continuity. For a non-densely defined Hille--Yosida operator $A$ on the Banach space $X_0$ such a construction is possible by taking the part of $A$ in $\underline{X}_0:=\overline{D(A)}$, on which space the restricted operator becomes a generator of a $C_0$-semigroup, so one can construct an extrapolated semigroup on the extrapolation space $\underline{X}_{-1}$, see Nagel, Sinestrari \cite{NS}. But this semigroup will usually not leave the original Banach space $X_0$ invariant. This is why we restrict our attention to the situation where strong continuity of the semigroup is guaranteed with respect to some coarser locally convex topology. Here the framework of bi-continuous semigroups, or that of Saks spaces, (see K\"uhnemund \cite{Ku} and Section \ref{sec:bicont}) appears to be adequate. 
However, most of the results presented here are valid also for generators of other classes of semigroups: integrable semigroups of Kunze \cite{Kunze2009}, ``C-class'' semigroups of Kraaij \cite{Kraaij2016}, $\pi$-semigroups of Priola \cite{Priola}, weakly continuous semigroups of Cerrai \cite{Cerrai} to mention a few. Given a Banach space $X_0$, a Hausdorff locally convex topology $\tau$ on $X_0$ (with certain properties described in Section \ref{sec:bicont}), a bi-continuous semigroup $(T(t))_{t\geq 0}$ with generator $A$, we construct the full scale of abstract Sobolev (or H\"older) and Favard spaces $X_\alpha$, $\underline{X}_\alpha$, $F_\alpha$ for $\alpha\in \mathbb{R}$, and the corresponding extrapolated semigroups $(T_\alpha(t))_{t\geq 0}$. (If $\tau$ is the norm topology, there is nothing new here, and everything can be found in \cite[Section II.5]{EN}.) These constructions, along with some applications, form the main content of this paper. Here we illustrate the results on the following well-known example (see also Nagel, Nickel, Romanelli \cite{NagelIdent} and Section \ref{sec:examp} for details): Consider the Banach space $X_0:=\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ of bounded, continuous functions and the (left) translation semigroup $(S(t))_{t\geq 0}$ thereon, defined by $(S(t)f)(x)=f(x+t)$, $x\in\mathbb{R}$, $t\geq 0$, $f\in X_0$. 
For $\alpha\in (0,1)$ we have \[ \mathrm{C}_{\mathrm{b}}^1(\mathbb{R})\hookrightarrow \mathrm{Lip}_{\mathrm{b}}(\mathbb{R}) \hookrightarrow\mathrm{h}_b^{\alpha}(\mathbb{R})\hookrightarrow \mathrm{h}_{b,\text{loc}}^{\alpha}(\mathbb{R})\hookrightarrow\mathrm{C}_{\mathrm{b}}^{\alpha}(\mathbb{R})\hookrightarrow\mathrm{UC}_{\mathrm{b}}(\mathbb{R})\hookrightarrow \mathrm{C}_{\mathrm{b}}(\mathbb{R})\hookrightarrow \mathrm{L}^{\infty}(\mathbb{R}), \] where $\mathrm{C}_{\mathrm{b}}^1$ is the space of differentiable functions with derivative in $\mathrm{C}_{\mathrm{b}}$, $\mathrm{Lip}_{\mathrm{b}}(\mathbb{R})$ is the space of bounded, Lipschitz functions, $\mathrm{h}_b^{\alpha}$ is the space of bounded, little-H\"older continuous functions, $\mathrm{h}_{b,\text{loc}}^{\alpha}$ is the space of bounded, locally little-H\"older continuous functions, $\mathrm{C}_{\mathrm{b}}^\alpha$ is the space of bounded, H\"older continuous functions, $\mathrm{UC}_{\mathrm{b}}(\mathbb{R})$ is the space of bounded, uniformly continuous functions. From the abstract perspective and using our notation this corresponds to the inclusions of Banach spaces: \[ X_1\hookrightarrow F_1\hookrightarrow \underline{X}_{\alpha}\hookrightarrow X_{\alpha}\hookrightarrow F_{\alpha}\hookrightarrow\underline{X}_0\hookrightarrow X_0\hookrightarrow F_0. \] The extension of the previous diagram for the full scale $\alpha\in \mathbb{R}$ is possible by extrapolation. The spaces $\underline{X}_\alpha$ and $F_\alpha$ ($\alpha\in (0,1)$) are well studied and we refer to the books by Lunardi \cite{Lunardi} and by Engel, Nagel \cite[Section II.5]{EN} for a systematic treatment. However, the definition of $X_\alpha$ is new, and requires a recollection of results concerning the other two kinds of spaces.
Extrapolated Favard spaces are not only important from the perturbation theoretic point of view: They sometimes help to reduce problems concerning semigroups that are not strongly continuous, to the study of the underlying $C_0$-semigroup. This perspective is propagated by Nagel and Sinestrari in \cite{NS}: For any Hille--Yosida operator on $X_0$ one can construct a Banach space $F_0$ (the Favard class) containing $X_0$ as a closed subspace, and a semigroup $(T(t))_{t\geq 0}$ on $F_0$. We adopt this point of view also in this paper. In particular, we provide an alternative (and short) proof of the Hille--Yosida type generation theorem for bi-continuous semigroups (due to K\"uhnemund \cite{Ku}) by employing solely the $C_0$-theory. Note, however, that the semigroup $(T(t))_{t\geq 0}$ defined on $F_0$ may not leave $X_0$ invariant in general; this is the issue where the additional topology $\tau$ can be helpful. Applications of the Sobolev (H\"older) scale, as presented here, to perturbation theory, in the spirit of the results of Desch, Schappacher \cite{Desch}, or of Jacob, Wegner, Wintermayr \cite{JWW}, will be presented in a forthcoming paper. This work is organized as follows: In Section \ref{sec:invert} we recall the standard constructions and results for extrapolation spaces for densely defined (invertible) operators. Moreover, we construct extrapolation spaces for not densely defined operators $A$ with $D(A^2)$ dense in $D(A)$ for the norm of $X_0$. Our argumentation differs from the one in Haase \cite{Haase} in that we build the space $X_{-1}$ based on $\underline{X}_{-2}$ (which, in turn, arises from $\underline{X}_0$ and $\underline{X}_{-1}$), i.e., in a bottom-to-top and back-to-bottom manner, resulting in the continuous inclusions \[ \underline{X}_0\hookrightarrow X_0\hookrightarrow \underline{X}_{-1}\hookrightarrow X_{-1}\hookrightarrow\underline{X}_{-2}. \] (All these inclusions are not surjective in general.)
This approach becomes convenient when we compare the arising extrapolation spaces $X_{-1}$ and $\underline{X}_{-1}$ and construct the extrapolated semigroups. In Section \ref{sec:minimal} we turn to intermediate spaces; the results there are classical, but are put in the general perspective of this paper. We also present a method for a `concrete' representation of extrapolation spaces. Section \ref{sec:sgrps} discusses the Sobolev (H\"older) scale for semigroup generators, and has a survey character. In Section \ref{sec:bicont} we recall the concept of bi-continuous semigroups, construct the corresponding extrapolated semigroups and give a direct proof of the Hille--Yosida generation theorem (due to K\"uhnemund \cite{Ku}) which uses extrapolation techniques. We conclude this paper with some examples in Section \ref{sec:examp}, where we determine the extrapolation spaces of concrete semigroup generators. Among others, the previously mentioned example of the translation semigroup (complementing results of Nagel, Nickel, Romanelli \cite[Sec.3.1, 3.2]{NagelIdent}) and then \emph{implemented semigroups} (cf.{} Alber \cite{Alber2001}) are discussed in detail. \section{Spaces for invertible operators}\label{sec:invert} In this section we construct abstract Sobolev (H\"older) and extrapolation spaces (the so-called Sobolev scale) for a boundedly invertible linear operator $A$ defined on a Banach space. Some of the results are well-known and nowadays even standard, but we chose to include them here for the sake of completeness, and also because a review of these is necessary for the construction of spaces when we deal with not densely defined operators. The emphasis will be, however, put on the case when the construction of these extrapolation spaces is new, see Section \ref{sec:extra} below. We also note that everything in what follows is also valid for operators on Fr\'echet spaces, and except for some assertions one does not even need metrizability.
\noindent Let $X_0$ be a Banach space, and let $A:D(A)\to X_0$ be a not necessarily densely defined linear operator with non-empty resolvent set $\rho(A)\neq\emptyset$. As a matter of fact, for convenience we suppose $0\in \rho(A)$. If this were not so, then by taking $\lambda\in \rho(A)$ we may consider $A-\lambda$ instead of $A$ and carry out the constructions for this new operator, for which in fact $0\in \rho(A-\lambda)$. The arising spaces will not depend on $\lambda\in\rho(A)$ (up to isomorphism). \subsection{Abstract Sobolev spaces} The material presented here is known, see Nagel \cite{NagelSobolev}, Nagel, Nickel, Romanelli \cite{NagelIdent} or Engel, Nagel \cite[Section II.5]{EN}, and some parts are valid even for operators on locally convex spaces. We set $X_1:=D(A)$ which becomes a Banach space if endowed with the graph norm \[ \|x\|_{A}:=\|x\|+\|Ax\|. \] An equivalent norm is given by $\|x\|_{X_1}:=\|Ax\|$, since we have assumed $0\in\rho(A)$. Then we have the isometric isomorphism \[ A:X_1\to X_0\quad \text{with inverse}\quad A^{-1}:X_0\to X_1. \] \begin{definition} Suppose $0\in\rho(A)$ here and throughout in the following. Let $n\in \mathbb{N}$. \begin{abc} \item We define \[ X_n:=D(A^n)\quad\text{and}\quad \|x\|_{X_n}:=\|A^n x\|\quad\text{for $x\in X_n$}. \] If we want to stress the dependence on $A$, then we write $X_{n}(A)$ and $\|\cdot\|_{X_{n}(A)}$. \item Let \[ X_{\infty}(A):=\bigcap_{n\in\mathbb{N}} X_n, \] often abbreviated as $X_\infty$. \item We further set \[ \underline{X}_0:=\overline{D(A)}, \quad \underline{A}:=A|_{\underline{X}_0}, \] the part of $A$ in $\underline{X}_0$, i.e., \[ D(\underline{A})=\bigl\{x\in D(A):Ax\in \underline{X}_0\bigr\}. \] Moreover, we let \[ \underline{X}_n:=D (\underline{A}^n), \quad \|x\|_{\underline{X}_n}:=\|\underline{A}^n x\|. \] To be specific about the underlying operator $A$ we write $\underline{X}_n(A)$ and $\|x\|_{\underline{X}_n(A)}$.
\item For $n\in\mathbb{N}$ we set $A_{n}:=A|_{X_n}$, the part of $A$ in $X_n$, in particular $A_0=A$. Similarly, we let $\underline{A}_{n}:=\underline{A}|_{\underline{X}_n}$, for example $\underline{A}_0=\underline{A}$. By this notation we also understand implicitly that the surrounding space is $X_n(A)$ respectively $\underline{X}_n(A)$ with its norm, see the next Remark \ref{rem:1}. \end{abc} \end{definition} \begin{remark}\label{rem:1} \begin{num} \item The choice of the notation $X_0$ and $\underline{X}_0$ and the like should be self-explanatory: By ``underlining'' we always indicate an object which is in some sense smaller than the one without underlining. The space $\underline{X}_0(A)$ is connected with the domain of $D(A)$, and the whole issue of distinguishing between $X_0$ and $\underline{X}_0$ becomes interesting only if $A$ is not densely defined but its part $\underline{A}$ \emph{is} (cf. Remark \ref{rem:2}). We will stick to the notation $\underline{A}$ for the part of the operator $A$ instead of $A|_{\underline{X}_0}$, because it fits better with our general notation. \item If $A$ is densely defined, then $X_n(A)=\underline{X}_n(A)$ for each $n\in \mathbb{N}$. In particular, if $\underline{X}_1(A)=D(\underline{A})$ is dense in $\underline{X}_0(A)$, then $\underline{X}_n(A)=\underline{X}_n(\underline{A})$ for each $n\in\mathbb{N}$. \item For $n\in\mathbb{N}$ we evidently have $X_1(A^n)=X_n(A)$. Also $\underline{X}_1(A^n)=\underline{X}_n(A)$ holds, because $D(\underline{A}^n)=D(\underline{A^n})$. Indeed, the inclusion ``$D(\underline{A}^n)\subseteq D(\underline{A^n})$'' is trivial. While for $x\in D(\underline{A^n})$ we have $x\in \underline{X}_0$ and $A^nx\in \underline{X}_0$, implying $A^{n-1}x\in D(\underline{A})$, and then recursively $x\in D(\underline{A}^n)$. \item For $x\in D(A_n)=D(A^{n+1})$ we have $\|x\|_{X_1(A_n)}=\|A_nx\|_{X_n(A)}=\|A^{n+1}x\|=\|x\|_{X_{n+1}(A)}$. Similarly $D(\underline{A}_n)=D(\underline{A}^{n+1})$. 
\end{num} \end{remark} \begin{proposition}Suppose $\underline{A}$ is densely defined in $\underline{X}_0$. \begin{abc} \item For $n\in \mathbb{N}$ the mappings $A^n:X_n\to X_0$ and $\underline{A}^n:\underline{X}_n\to \underline{X}_0$ are isometric isomorphisms. \item For $n\in\mathbb{N}$ the operators $A_n:X_{n+1}\to X_{n}$ and $\underline{A}_n:\underline{X}_{n+1}\to \underline{X}_{n}$ are isometric isomorphisms that intertwine $A_{n+1}$ and $A_n$, respectively, $\underline{A}_{n+1}$ and $\underline{A}_n$. \item If $D(\underline{A})$ is dense in $\underline{X}_0$, then $X_\infty$ is dense in $\underline{X}_n$ for each $n\in\mathbb{N}$. As a consequence $\underline{X}_m$ is dense in $\underline{X}_n$ for each $m,n\in \mathbb{N}$ with $m\geq n$. \end{abc} \end{proposition} \begin{proof} The statements (a) and (b) are trivial by construction. \noindent (c) This is \cite[Thm. 6.2]{Arendt94} due to Arendt, El-Mennaoui and K\'eyantuo, because $\underline{A}$ is densely defined in $\underline{X}_0$. \end{proof} \begin{remark} We note that the proof of the previous assertion (c) in \cite[Thm. 6.2]{Arendt94} is based on a Mittag-Leffler type result due to Esterle \cite{Esterle} which is valid in complete metric spaces. Hence the previous statements (a), (b) and (c) all remain true for Fr\'echet spaces with verbatim the same proof as in \cite{Arendt94}. \end{remark} Henceforth, another standing assumption in this paper (though not everywhere needed) is that $\underline{A}$ is densely defined in $\underline{X}_0$, i.e., \[ \overline{D(\underline{A})}=\underline{X}_0. \] \begin{remark}\label{rem:2} The condition of $D(\underline{A})$ being dense in $\underline{X}_0$ can be for example assured if there are $M,\omega>0$ such that $(\omega,\infty)\subseteq \rho(A)$ and \begin{equation}\label{eq:wHY1} \|\lambda R(\lambda,A)\|\leq M\quad\text{for all $\lambda>\omega$}.
\end{equation} Indeed, in this case we have for $x\in D(A)$ \[ \|\lambda R(\lambda, A)x-x\|=\|R(\lambda,A)Ax\|\leq \frac{M\|Ax\|}{\lambda}\to 0\quad\text{for $\lambda\to \infty$}. \] Hence $D(A^2)\subseteq D(\underline{A})$ is dense in $D(A)$ for the norm of $X_0$, and this implies the density of $D(\underline{A})$ in $\underline{X}_0$. An operator $A$ satisfying \eqref{eq:wHY1} is often said to have a \emph{ray of minimal growth}, see, e.g., \cite[Chapter 3]{Lunardi}, and also Section \ref{sec:minimal} below. Another term being used is \emph{``weak Hille--Yosida operator''}. \end{remark} \begin{proposition} If $T\in\mathscr{L}(X_0)$ is a linear operator commuting with $A^{-1}$, then the spaces $X_n$ and $\underline{X}_n$ are $T$-invariant, and $T\in \mathscr{L}(X_n)$ for $n\in \mathbb{N}$. \end{proposition} \begin{proof}The condition means that $Tx\in D(A)$ for each $x\in D(A)$ and for such $x$ we have $ATx=TAx$. This implies the invariance of $X_1$ and that $\|Tx\|_{X_1(A)}\leq \|T\|\|x\|_{X_{1}(A)}$. Using the boundedness assumption we see that $\underline{X}_1$ too stays invariant under $T$. For general $n\in \mathbb{N}$ we may argue by recursion, or simply invoke Remark \ref{rem:1}. \end{proof} \subsection{Extrapolation spaces}\label{sec:extra} The construction for the extrapolation spaces here is known and more or less standard if $A$ is densely defined, or if $A$ is a Hille--Yosida operator, see, e.g., \cite{NS}. For $x\in X_0$ we define $\|x\|_{\underline{X}_{-1}(A)}:=\|A^{-1}x\|$.
Then the surjective mapping \[ A:(D(A),\|\cdot\|)\to (X_0,\|\cdot\|_{\underline{X}_{-1}(A)}) \] becomes isometric, and hence has a uniquely determined continuous extension \[ \underline{A}_{-1}:(\underline{X}_0,\|\cdot\|)\to(\underline{X}_{-1},\|\cdot\|_{\underline{X}_{-1}(\underline{A})}), \] which is then an isometric isomorphism, where $(\underline{X}_{-1},\|\cdot\|_{\underline{X}_{-1}(\underline{A})})$ denotes a completion of \\$(\underline{X}_0,\|\cdot\|_{\underline{X}_{-1}(A)})$. Of course, uniqueness is given as soon as the completion is fixed. By construction we obtain immediately: \begin{proposition}\label{prop:altercompl} $X_0$ is continuously and densely embedded in $\underline{X}_{-1}$. If $\underline{A}$ is densely defined in $\underline{X}_0$, then also $X_\infty$ is dense in $\underline{X}_{-1}$. As a consequence $(\underline{X}_{-1},\|\cdot\|_{\underline{X}_{-1}(\underline{A})})$ is the completion of $(\underline{X}_0,\|\underline{A}^{-1}\cdot \|)$. \end{proposition} \begin{proof} Of course $X_0$ is dense in $\underline{X}_{-1}$ by construction of the completion. For $x\in X_0$ we have \[ \|x\|_{\underline{X}_{-1}(\underline{A})}=\|AA^{-1}x\|_{\underline{X}_{-1}(\underline{A})}=\|\underline{A}_{-1}A^{-1}x\|_{\underline{X}_{-1}(A)}\leq \|\underline{A}_{-1}\|\cdot \|A^{-1}x\|\leq \|\underline{A}_{-1}\|\cdot \|A^{-1}\|\cdot \|x\|, \] showing the continuity of the embedding. The last assertion follows since $X_\infty$ is dense in $D(A)$ with respect to $\|\cdot\|$.
\end{proof} Of course one can iterate the whole procedure and obtain the following chain of dense and continuous embeddings \[ \underline{X}_{0}\hookrightarrow \underline{X}_{-1}\hookrightarrow \underline{X}_{-2}\hookrightarrow \cdots\hookrightarrow \underline{X}_{-n} \quad\text{for $n\in \mathbb{N}$}, \] where for $n\geq 1$ the space $\underline{X}_{-n}$ is a completion of $\underline{X}_{-n+1}$ with respect to the norm $\|x\|_{\underline{X}_{-n}(\underline{A})}$ defined by $\|x\|_{\underline{X}_{-n}(\underline{A})}=\|\underline{A}^{-1}_{-n+1}x\|_{\underline{X}_{-n+1}(\underline{A})}$ and \[ \underline{A}_{-n}:\underline{X}_{-n+1}\to \underline{X}_{-n} \] is the unique continuous extension of $\underline{A}_{-n+1}:D(\underline{A}_{-n+1})\to \underline{X}_{-n+1}$ to $\underline{X}_{-n}$. \noindent These spaces, just as well as the ones in the next definition, are called \emph{extrapolation spaces} for the operator $A$, see, e.g., \cite{NS} or \cite[Section II.5]{EN} for the case of semigroup generators. The spaces $\underline{X}_{-1}$ and $\underline{X}_{-2}$, just as well as the operator $\underline{A}_{-2}$ will be used to define the extrapolation space $X_{-1}(A)$. To this purpose we identify $X_0$ with a subspace of $\underline{X}_{-1}$ and of $\underline{X}_{-2}$. \begin{definition}\label{def:extra} Consider $X_0$ as a subspace of $\underline{X}_{-2}$, and define \[ X_{-1}:=\underline{A}_{-2}(X_0):=\bigl\{\underline{A}_{-2}x:\ x\in X_0\bigr\}\quad \text{and}\quad \|x\|_{X_{-1}}:=\|\underline{A}_{-2}^{-1}x\|. \] Furthermore we set $A_{-1}:=\underline{A}_{-2}|_{X_0}$, the part of $\underline{A}_{-2}$ in $X_0$. Again, $X_{-1}(A)$ and $\|\cdot\|_{X_{-1}(A)}$ make the notation unambiguous. \end{definition} In what follows, we will define higher order extrapolation spaces and prove that all these spaces line up in a scale, where one can switch between the levels with the help of (a version of) the operator $A$ (or $A_{-1}$).
\begin{proposition} The operator $A_{-1}$ is an extension of $\underline{A}_{-1}$, $(X_{-1},\|\cdot\|_{X_{-1}})$ is a Banach space, the norms of $\underline{X}_{-1}$ and $X_{-1}$ coincide on $\underline{X}_{-1}$, and $\underline{X}_{-1}$ is a closed subspace of $X_{-1}$. The mapping $A_{-1}:X_0\to X_{-1}$ is an isometric isomorphism. \end{proposition} \begin{proof}The first assertion is true because $\underline{A}_{-2}$ is an extension of $\underline{A}_{-1}$. That $X_{-1}$ is a Banach space is immediate from the definition. Since $\underline{A}_{-2}^{-1}\underline{A}_{-1}=I$ on $\underline{X}_0$, for $x\in \underline{X}_{-1}$ we have $\underline{A}_{-1}^{-1}x\in \underline{X}_0\subseteq X_0$, so that $\|\underline{A}_{-2}^{-1}x\|=\|\underline{A}_{-2}^{-1}\underline{A}_{-1}\underline{A}^{-1}_{-1}x\|=\|\underline{A}^{-1}_{-1}x\|=\|x\|_{\underline{X}_{-1}}$. This establishes that the norms coincide. Since $\underline{X}_{-1}$ is a Banach space (with its own norm), it is a closed subspace of $X_{-1}$. That $A_{-1}$ is an isometric isomorphism follows from the definition. \end{proof} \begin{remark}\label{rem:iter-1} For any $n\in\mathbb{N}$ we have by construction that $\underline{X}_{-1}(\underline{A}_{-n})=\underline{X}_{-(n+1)}(\underline{A})$ and $X_{-1}(A_{-n})=X_{-(n+1)}(A)$. \end{remark} \begin{proposition}\label{prop:intertwine+1} For $n\in\mathbb{Z}$ the operators $A_n:X_{n+1}\to X_{n}$ and $\underline{A}_n:\underline{X}_{n+1}\to \underline{X}_{n}$ are isometric isomorphisms that intertwine $A_{n+1}$ and $A_n$, respectively, $\underline{A}_{n+1}$ and $\underline{A}_n$. \end{proposition} \begin{proof} For $n\in \mathbb{N}$ these have been proved in Proposition \ref{prop:intertwine+1}. So we assume $n<0$. For $n=-1$ the statement about isometric isomorphisms is just the definition, and the intertwining property is also evident. By recursion we obtain the validity of the assertion for general $n\leq -1$ and for the operator $\underline{A}_n$. 
By Remark \ref{rem:iter-1} it suffices to prove that $A_{-1}$ intertwines $A_{-1}$ and $A_0$. For $x\in D(A_0)=D(A)$ we have $A_{-1}x\in X_0=D(A_{-1})$ and $Ax=A^{-1}_{-1}A_{-1}A_{-1}x$. \end{proof} Thus for $n\in \mathbb{N}$ we have the following chain of embeddings (continuous, dense, denoted by $\mathrm{h}ookrightarrow$) and inclusions as closed subspaces (denoted by $\subseteq$): \[ \cdots\mathrm{h}ookrightarrow \underline{X}_{n}\subseteq X_n\mathrm{h}ookrightarrow \underline{X}_0\subseteq X_0\mathrm{h}ookrightarrow \underline{X}_{-1}\subseteq X_{-1}\mathrm{h}ookrightarrow \underline{X}_{-2}\subseteq X_{-2}\mathrm{h}ookrightarrow \cdots \underline{X}_{-n}\subseteq X_{-n}\mathrm{h}ookrightarrow\cdots,\] where in general the inclusions are strict (see the examples in Section \ref{sec:examp}). We also have the following chain of isometric isomorphisms \[ \cdots\longrightarrow\underline{X}_{n+1}\stackrel{\underline{A}_n^{-1}}{\longrightarrow} \underline{X}_{n}\longrightarrow\cdots \longrightarrow\underline{X}_{1}\stackrel{\underline{A}_{0}^{-1}}{\longrightarrow} \underline{X}_{0}\stackrel{\underline{A}_{-1}^{-1}}{\longrightarrow}\underline{X}_{-1}{\longrightarrow}\cdots \longrightarrow \underline{X}_{-n+1}\stackrel{\underline{A}_{-n}^{-1}}{\longrightarrow}\underline{X}_{-n}\longrightarrow \cdots \] and \[ \cdots\longrightarrow X_{n+1}\stackrel{A_n^{-1}}{\longrightarrow} X_{n}\longrightarrow\cdots \longrightarrow X_{1}\stackrel{A_{0}^{-1}}{\longrightarrow} X_{0}\stackrel{A_{-1}^{-1}}{\longrightarrow}X_{-1}{\longrightarrow}\cdots \longrightarrow X_{-n+1}\stackrel{A_{-n}^{-1}}{\longrightarrow}X_{-n}\longrightarrow \cdots. \] \begin{proposition} \begin{abc} \item $\underline{X}_1({\underline{A}_{-1}})=\underline{X}_0$ and $X_1(A_{-1})=X_0$ with the same norms. \item $\underline{X}_{-1}({\underline{A}_{1}})=\underline{X}_0$ with the same norms. \item $(\underline{A}_1)_{-1}=\underline{A}$. \item $X_{-1}(A_{1})=X_0$ with the same norms, and $(A_1)_{-1}=A$. 
\end{abc} \end{proposition} \begin{proof} (a) By definition $X_1(A_{-1})=D(A_{-1})=X_0$ with the graph norm of $A_{-1}$. Since $A_{-1}$ extends $A$, for $x\in X_0$ we have $\|A_{-1}x\|_{X_{-1}(A)}=\|Ax\|_{-1}=\|A^{-1}Ax\|=\|x\|$. The first statement then follows as well, because $\underline{X}_1({\underline{A}_{-1}})=X_1(\underline{A}_{-1})=\overline{D(\underline{A})}=\underline{X}_0$ with the same norms. \noindent (b) For $x\in \underline{X}_1(\underline{A})=D(\underline{A}^2)$ we have \[ \|x\|_{\underline{X}_{-1}(\underline{A}_1)}=\|\underline{A}_1^{-1}x\|_{\underline{X}_1(\underline{A})}=\|\underline{A} \underline{A}_{1}^{-1}x\|=\|x\|, \] which can be extended by density for all $x\in \underline{X}_0$, showing also the equality of the spaces $\underline{X}_{-1}({\underline{A}_{1}})=\underline{X}_0$ (with the same norm). \noindent (c) By construction the operator $(\underline{A}_1)_{-1}:\underline{X}_1(A)\to \underline{X}_{-1}(\underline{A}_1)$ is the unique continuous extension of \[ \underline{A}_1:D(\underline{A}_1)=D(\underline{A}^2)\to \underline{X}_1(A), \] and $(\underline{A}_1)_{-1}$ is an isometric isomorphism. For $x\in \underline{X}_1(A)$ we have $\|x\|_{\underline{X}_{-1}(A_1)}=\|\underline{A}_{1}^{-1}x\|_{\underline{X}_1(A)}=\|x\|$. But then it follows that $(\underline{A}_1)_{-1}=\underline{A}:D(\underline{A})\to \underline{X}_0$. \noindent (d) The space $X_{-1}(A_{1})$ is defined by \[ X_{-1}(A_{1}):=(\underline{A_1})_{-2}(X_1(A))={((\underline{A_1})_{-1})}_{-1}(X_1(A))=\underline{A}_{-1}(X_1(A))=A X_1(A)=X_0, \] by part (c). For the norm equality let $x\in X_0$. Then \[ \|x\|=\|AA^{-1}x\|=\|A^{-1}x\|_{X_1(A)}=\|\underline{A}_{-1}^{-1}x\|_{X_1(A)}=\|(\underline{A}_{1})_{-2}^{-1}x\|_{X_1(A)}=\|x\|_{X_{-1}(A_1)}. \] The last assertion is equally easy to prove: $(A_1)_{-1}=(\underline{A}_1)_{-2}|_{X_1(A)}=A$. \end{proof} Recall the standing assumption that $\underline{A}=A|_{\underline{X}_0}$ is densely defined in $\underline{X}_0=\overline(D(A))$. 
The following proposition plays the key role for the extension of operators on the extrapolation spaces, particularly for the construction of extrapolated semigroups in Section \ref{sec:sgrps}. \begin{proposition} \label{prop:extT} \begin{abc} \item Let $n\in \mathbb{N}$.If $T\in\mathscr{L}(X_0)$ is a linear operator commuting with $A^{-1}$, then the operator $T$ has a unique continuous extension to $\underline{X}_{-n}$ denoted by $\underline{T}_{-n}$. The operator $\underline{T}_{-n}$ is the restriction of $\underline{T}_{-n-1}$. $X_{-n}$ is invariant under $\underline{T}_{-n-1}$, its restriction is denoted by $T_{-n}$, for which $T_{-n}\in \mathscr{L}(X_{-n})$. For $k,n\in -\mathbb{N}$ the operators $\underline{T}_n$, $\underline{T}_k$ are all similar; the same holds for $T_n$ and $T_k$. \item Let $\underline{T}\in\mathscr{L}(\underline{X}_0)$ such that it leaves $D(A)$ invariant and it commutes with $\underline{A}^{-1}=A^{-1}|_{X_0}$. Then $\underline{T}_{-1}x=A\underline{T} A^{-1}x$ for each $x\in X_0$, and as a consequence, $\underline{T}_{-1}:\underline{X}_{-1}\to \underline{X}_{-1}$ leaves $X_0$ invariant (and, of course, extends $\underline{T}$). \end{abc} \end{proposition} \begin{proof}(a) For $x\in X_0$ we have $\|Tx\|_{X_{-1}(A)}=\|A^{-1}Tx\|=\|TA^{-1}x\|\leq \|T\|\cdot \|A^{-1}x\|=\|T\|\cdot \|x\|_{X_{-1}(A)}$. So that $T:(X_0,\|\cdot\|_{X_{-1}(A)})\to (X_0,\|\cdot\|_{X_{-1}(A)})$ is continuous, and hence has a unique continuous extension $\underline{T}_{-1}$ to $\underline{X}_{-1}$. This extension commutes with $\underline{A}^{-1}_{-1}$, because $T$ commutes with $A^{-1}$ and $\underline{A}_{-1}^{-1}$ is the unique continuous extension of $A^{-1}$. By iteration we obtain the continuous extensions $T_{-n}$ onto $\underline{X}_{-n}$, which then all commute with the corresponding $\underline{A}_{-n}^{-1}$. By construction $\underline{T}_{-n}$ is a restriction of $\underline{T}_{-n-1}$. We prove that $X_{-1}$ is invariant under $\underline{T}_{-2}$. 
Let $x\in X_{-1}$, hence $x=\underline{A}_{-2}y$ for some $y\in X_0$. Then $Ty=\underline{T}_{-2}y=\underline{T}_{-2}\underline{A}_{-2}^{-1}x=\underline{A}_{-2}^{-1}\underline{T}_{-2}x$, hence $\underline{T}_{-2}x=\underline{A}_{-2}Ty\in X_{-1}$, i.e., the invariance of $X_{-1}$ is proved. We have for $x\in X_{-1}$ that $\|T_{-1}x\|_{X_{-1}}=\|A_{-2}^{-1}T_{-1}x\|=\|A_{-2}^{-1}\underline{T}_{-2}x\|=\|\underline{T}_{-2}A_{-2}^{-1}x\|\leq \|\underline{T}_{2}\|\cdot \|\underline{A}_{-2}^{-1}x\|=\|\underline{T}_{2}\|\cdot \|x\|_{X_{-1}}$, therefore $T_{-1}\in \mathscr{L}(X_{-1})$. The assertion about $T_{-n}$ follows by recursion. It is enough to prove the similarity of $T_{0}=T$ and $T_{-1}$, and the similarity of $\underline{T}_{0}$ and $\underline{T}_{-1}$. The latter assertions can be proved as follows: For $x\in D(A)$ we have \[ \underline{A}_{-1}^{-1}\underline{T}_{-1}\underline{A}_{-1}x=\underline{A}_{-1}^{-1}\underline{T}_{-1}Ax=\underline{A}_{-1}^{-1}TAx=\underline{A}_{-1}^{-1}ATx=\underline{A}_{-1}^{-1}A_{-1}Tx=\underline{T} x, \] then by continuity and denseness the equality follows even for $x\in \underline{X}_0$. For the similarity of $T$ and $T_{-1}$ take $x\in X_0$. Then \[ A_{-1}^{-1}T_{-1}A_{-1}x=\underline{A}_{-2}^{-1}\underline{T}_{-2}\underline{A}_{-2}x=\underline{T}_{-1}x=T x. \] \noindent (b) Let $x\in X_0\subseteq \underline{X}_{-1}$. Then there is a sequence $(x_n)$ in $\underline{X}_0$ with $x_n\to x$ in $\underline{X}_{-1}$ (see Proposition \ref{prop:altercompl}). But then $A^{-1}x_n\to A^{-1}x$ in $\underline{X}_0$ and $\underline{T} x_n\to \underline{T}_{-1}x$ in $\underline{X}_{-1}$ by part (a). These imply $\underline{T} A^{-1}x_n=A^{-1}\underline{T} x_n\to \underline{A}^{-1}_{-1}\underline{T}_{-1}x$. Hence we conclude $\underline{T} A^{-1}x=\underline{A}^{-1}_{-1}\underline{T}_{-1}x$ and $A\underline{T} A^{-1}x=\underline{T}_{-1}x$ for $x\in X_0$. 
\end{proof} Haase in \cite{Haase} and Wegner in \cite{W} have constructed the so-called \emph{universal extrapolation space} $\underline{X}_{-\infty}$ as follows: Suppose $A$ is densely defined (this assumption is \emph{not} posed by Haase), then $X_n=\underline{X}_n$ for each $n\in \mathbb{Z}$, and let $X_{-\infty}$ be the inductive limit of the sequence of Banach spaces $(X_{-n})_{n\in\mathbb{N}}$ (algebraic inductive limit in \cite{Haase}). One can extend the operator $A$ to an operator $A_{-\infty}:X_{-\infty}\to X_{-\infty}$, for which \[ A_{-\infty}|_{X_n}=A_n,\quad n\in\mathbb{Z}. \] We now look at a converse situation, and our starting point is the following: Let $\mathscr{E}$ be a locally convex space and suppose that we have a continuous operator $\mathcal{A}:\mathscr{E}\to\mathscr{E}$ such that we can embed the Banach space $X_0$ continuously in $\mathscr{E}$, i.e., there is a continuous injective map $i:X_0\to\mathscr{E}$, and so we can identify $X_0$ with a subspace of $\mathscr{E}$. We also assume that $\lambda-\mathcal{A}:i(X_0)\to\mathscr{E}$ is injective and that \[ D(A)=\{x\in X_0:\ \mathcal{A}\circ i(x)\in i(X_0)\}, \] and \[ i\circ A=\mathcal{A}\circ i|_{D(A)}. \] As a matter of fact, this setting can also be used to construct the extrapolation spaces $\underline{X}_{-n}$, $X_{-n}$ for $n\in \mathbb{N}$ similarly to our Definition \ref{def:extra}, as indicated by the next theorem, which is proved here based on the results in Section \ref{sec:invert}. Notice that we do not assume that $A$ is a Hille--Yosida operator or densely defined. \begin{theorem}\label{thm:iden} Let $X_0$ be a Banach space with a continuous embedding $i:X_0\to\mathscr{E}$ into a locally convex space $\mathscr{E}$, let $A:D(A)\to X_0$ be a linear operator with $\lambda\in\rho(A)$ such that $A=\mathcal{A}_{|X_0}$ (after identifying $X_0$ with a subspace of $\mathscr{E}$ as described above). We suppose furthermore that $\lambda-\mathcal{A}$ is injective on $X_0$.
There is a continuous embedding $i_{-1}:X_{-1}\to\mathscr{E}$ which extends $i$. After identifying $X_{-1}$ with a subspace of $\mathscr{E}$ (under $i_{-1}$) we have \begin{align*} X_{-1}=\{(\lambda-\mathcal{A})x:\ x\in X_0\},\quad \underline{X}_{-1}=\{(\lambda-\mathcal{A})x:\ x\in\underline{X}_0\}\quad \text{and}\quad A_{-1}=\mathcal{A}|_{X_{-1}}. \end{align*} \end{theorem} \begin{proof} Without loss of generality we may assume that $\lambda=0$. Recall that ${A_{-1}}|_{X_0}=A$ and $A_{-1}$ is an isometric isomorphism $A_{-1}:X_0\to X_{-1}$. We now define the embedding $i_{-1}:X_{-1}\to\mathscr{E}$ by \[ i_{-1}:=\mathcal{A}\circ i\circ A_{-1}^{-1}, \] which is indeed injective and continuous by assumption. Of course, $i_{-1}$ extends $i$ since we have $i=\mathcal{A}\circ i\circ A^{-1}$. We can write \[ i_{-1}\circ A_{-1}=\mathcal{A}\circ i\circ A_{-1}^{-1}\circ A_{-1}=\mathcal{A}\circ i, \] which yields the following commutative diagram: \begin{align*} \xymatrix { X_0\ar[rr]^{i}\ar[dd]^{A_{-1}} & & \mathscr{E}\ar[dd]_{\mathcal{A}} \\ & &\\ X_{-1}\ar[rr]_{i_{-1}} & & \mathscr{E} } \end{align*} All the assertions follow from this. \end{proof} The last corollary in this section can be proved by recursion based on the facts proved in Section \ref{sec:invert}. \begin{corollary} Let $\mathcal{A}$, $X_0$ and $\mathscr{E}$ be as in Theorem \ref{thm:iden}. Then $X_{n}\subseteq\mathscr{E}$ and $A_{n}=\mathcal{A}|_{X_{n}}$ for each $n\in\mathbb{Z}$ (identifying $X_n$ under an embedding $i_n$ with a subspace of $\mathscr{E}$). \end{corollary} \section{Intermediate spaces for operators with rays of minimal growth}\label{sec:minimal} The following definition of intermediate, and as a matter of fact interpolation spaces, just as well as many results in this section are standard, and we refer, e.g., to the book of Lunardi \cite[Chapter 3]{Lunardi}, and to Engel, Nagel \cite[Section II.5]{EN} for the case of semigroup generators.
\begin{definition}\label{def:Fav} Let $A$ be a linear operator on the Banach space $X_0$ with a ray of minimal growth, i.e., suppose that $(0, \infty)\subseteq\rho(A)$ and for some $M\geq 0$ \begin{equation}\label{eq:weakHY} \|\lambda R(\lambda,A)\|\leq M\quad\text{for all $\lambda>0$}. \end{equation} For $\alpha\in\left(0,1\right]$ and $x\in X_0$ we define \[ \|x\|_{F_\alpha(A)}:=\sup_{\lambda>0}\|\lambda^\alpha AR(\lambda,A)x\|, \] and the abstract Favard space of order $\alpha$ \[ F_\alpha(A):=\bigl\{x\in X_0:\|x\|_{F_\alpha(A)}<\infty\bigr\}. \] In the literature the notation $D_A(\alpha,\infty)$ is more common, see, e.g., \cite{Lunardi}, but for notational convenience we stick to our notation in this paper. We further set \[ F_0(A)=F_1(A_{-1}), \] see \cite[Section II.5(b)]{EN} for the case of semigroup generators. \end{definition} The standing assumption in this section will be that $A$ satisfies \eqref{eq:weakHY}. \begin{proposition} \begin{abc} \item The Favard space $F_\alpha(A)$ becomes a Banach space if endowed with the norm $\|\cdot\|_{F_\alpha(A)}$. \item $X_0$ is isomorphic to a closed subspace of $F_0(A)$. \end{abc} \end{proposition} For the case when $A$ is a Hille--Yosida operator, the statement that $X_0$ is a closed subspace of $F_0(A)$ is due to Nagel and Sinestrari \cite[Proof of Prop. 2.7]{NS}. \begin{proof} (a) is an easy checking of properties. \noindent (b) For $x\in X_0$ we have \[\|\lambda A_{-1}R(\lambda,A_{-1})x\|_{X_{-1}(A)}=\|\lambda A R(\lambda,A)x\|_{X_{-1}(A)}=\|\lambda A^{-1}A R(\lambda,A)x\|\leq M\|x\|,\] yielding \[ \|x\|_{F_0(A)}=\|x\|_{F_1(A_{-1})}\leq M\|x\|. \] On the other hand, since $A$ and $A_{-1}$ are similar, we have $\sup_{\lambda>0}\|\lambda R(\lambda,A_{-1})\|\leq M'$ for some $M'\geq0$ and for all $\lambda>0$. In particular, by Remark \ref{rem:2}, $\lambda R(\lambda, A_{-1})x\to x$ for each $x\in \underline{X}_{-1}$.
From which we obtain for $x\in X_0$ that \begin{align*} \|x\|&=\|A_{-1}x\|_{X_{-1}(A)}=\Bigl\|\lim_{\lambda\to 0} \lambda R(\lambda,A_{-1})A_{-1}x\Bigr\|_{X_{-1}(A)}\leq \sup_{\lambda>0}\|\lambda A_{-1}R(\lambda,A_{-1})x\|_{X_{-1}(A)}\\ &=\|x\|_{F_1(A_{-1})}=\|x\|_{F_0(A)}, \end{align*} showing the equivalence of norms $\|\cdot\|$ and $\|x\|_{F_0(A)}$ on $X_0$. \end{proof} We will also need the following well-known result, see, e.g., \cite[Chapters 1 and 3]{Lunardi}, for which we give a direct, short proof. \begin{proposition} For $\alpha\in (0,1]$ we have $F_\alpha(A)\subseteq \overline{D(A)}=\underline{X}_0$. \end{proposition} \begin{proof} We have \[ AR(\lambda,A)x=\lambda R(\lambda,A)x-x, \] so that \[ \|\lambda R(\lambda,A)x-x\|\leq \frac{\|x\|_{F_\alpha(A)}}{\lambda^\alpha}\to 0\quad \text{as $\lambda\to \infty$.} \] \end{proof} \begin{definition}\label{def:XHol} Let $A$ be a linear operator on the Banach space $X_0$ satisfying \eqref{eq:weakHY}. For $\alpha\in (0,1)$ we set \begin{align*} \XHol_\alpha(A)&:=\Bigl\{x\in F_\alpha(A):\lim_{\lambda\to\infty} \lambda^\alpha AR(\lambda,A)x=0\Bigr\},\\ \intertext{and we recall from Section \ref{sec:invert}} \XHol_0(A)&:=\overline{D(A)},\quad \XHol_1(A)=D(A|_{\XHol_0(A)}). \end{align*} \end{definition} The proof of the next proposition is straightforward, but also well-known. \begin{proposition} For $\alpha,\beta\in (0,1)$ with $\alpha>\beta$ we have \[ \XHol_1(A)\mathrm{h}ookrightarrow \XHol_\alpha(A)\subseteq F_\alpha(A) \mathrm{h}ookrightarrow \XHol_\beta(A)\subseteq F_\beta(A)\mathrm{h}ookrightarrow \XHol_0(A)\subseteq X_0(A) \] with $\mathrm{h}ookrightarrow$ denoting continuous and dense embeddings of Banach spaces, and $\subseteq$ denoting inclusion of closed subspaces. 
\end{proposition} \begin{proof} For $x\in F_\alpha(A)$ we have \[ \|\lambda^{\beta} AR(\lambda,A)x\|=\lambda^{\beta-\alpha}\|\lambda^{\alpha} AR(\lambda,A)x\|\leq \lambda^{\beta-\alpha}\|x\|_\alpha\to 0\quad\text{as $\lambda\to\infty$}, \] and this proves also the continuity of $F_\alpha(A)\hookrightarrow \XHol_\beta(A)$. The other statements can be proved by similar reasoning. \end{proof} \begin{proposition} \begin{abc} \item The spaces $F_\alpha(A)$ and $\XHol_\alpha(A)$ are invariant under each $T\in\mathscr{L}(X_0)$ which commutes with $A^{-1}$. \item Let $T\in\mathscr{L}(X_0)$ commute with $A^{-1}$. The space $F_0(A)$ is invariant under $T_{-1}$. \end{abc} \end{proposition} \begin{proof} (a) Suppose that $T\in\mathscr{L}(X_0)$ commutes with $R(\cdot, A)$ and let $x\in \XHol_\alpha(A)$. We have to show that $Tx\in\XHol_\alpha(A)$. Since $T$ is assumed to be bounded we make the following observation: \[ \|\lambda^{\alpha}AR(\lambda,A)Tx\|=\|\lambda^{\alpha}TAR(\lambda,A)x\|\leq\|T\|\cdot\|\lambda^{\alpha}AR(\lambda,A)x\|. \] This implies both assertions. \noindent (b) Follows from part (a) applied to $T_{-1}$ on the space $X_{-1}$. \end{proof} \begin{definition} For $\alpha\in \mathbb{R}$ we write $\alpha=m+\beta$ with $m\in \mathbb{Z}$ and $\beta\in (0,1]$, and define \[ F_\alpha(A):=F_{\beta}(A_m), \] with the corresponding norms. For $\alpha\not\in\mathbb{Z}$ we define \[ \XHol_\alpha(A):=\XHol_{\beta}(A_m), \] also with the corresponding norms. \end{definition} In particular we have for $\alpha\in (0,1)$ that \[ \XHol_{-\alpha}(A)=\XHol_{1-\alpha}(A_{-1})\quad \text{and}\quad F_{-\alpha}(A)=F_{1-\alpha}(A_{-1}). \] This definition is consistent with Definitions \ref{def:Fav} and \ref{def:XHol}.
The following property of these spaces can be directly deduced from the definitions and the previous assertions (by using recursion): \begin{proposition} For any $\alpha,\beta\in \mathbb{R}$ with $\alpha>\beta$ we have \[ \XHol_\alpha(A)\subseteq F_\alpha(A) \mathrm{h}ookrightarrow \XHol_\beta(A)\subseteq F_\beta(A) \] with $\mathrm{h}ookrightarrow$ denoting continuous and dense embeddings of Banach spaces, and $\subseteq$ denoting inclusion of closed subspaces. \end{proposition} Now we put these spaces in a more general context presented at the end of Section \ref{sec:invert}. \begin{proposition}\label{cor:ExtFav} \begin{abc} \item For $\alpha\in\left(0,1\right]$ we have $A_{-1}F_{\alpha}=F_{\alpha-1}$ and $A_{-1}\underline{X}_{\alpha}=\underline{X}_{\alpha-1}$. \item For $\alpha\in\left(0,1\right]$ and $\mathcal{A}$, $\lambda$ and $\mathscr{E}$ as in Theorem \ref{thm:iden} we have \[ F_{-\alpha}=\Bigl\{(\lambda-\mathcal{A})y\in X_{-1}:\ y\in F_{1-\alpha}\Bigl\}. \] If $\alpha\in\left(0,1\right)$, then \begin{align*} \underline{X}_{-\alpha}&=\Bigl\{(\lambda-\mathcal{A})y\in X_{-1}:\ y\in \underline{X}_{1-\alpha}\Bigl\}. \end{align*} \end{abc} \end{proposition} \section{Semigroup generators}\label{sec:sgrps} In this section we consider intermediate and extrapolation spaces when the linear operator $A:D(A)\to X_0$ is the generator of a semigroup $(T(t))_{t\geq 0}$ (meaning that $T:[0,\infty)\to \mathscr{L}(X_0)$ is a monoid homomorphism) in the following sense: \begin{assumption}\label{asp:sgrps}\begin{num} \item Let $X_0$ be a Banach space, and let $Y\subseteq X_0'$ be a norming subspace, i.e., \[ \|x\|=\sup_{y\in Y, \|y\|\leq1}|\dprod{ x}{y}|\quad\text{for each $x\in X_0$}. 
\] \item Let $T:[0,\infty)\to \mathscr{L}(X_0)$ be a semigroup of contractions, which is not necessarily supposed to be strongly continuous, but for which a generator $A:D(A)\to X_0$ exists in the sense that \begin{equation}\label{eq:laplace} R(\lambda, A)x=\int_0^\infty \mathrm{e}^{-\lambda s}T(s)x\ \mathrm{d} s \end{equation} exists for each $\lambda\geq 0$ as a weak integral with respect to the dual pair $(X_0,Y)$, i.e., for each $y\in Y$ and $x\in X_0$ \[ \dprod{R(\lambda,A)x}{y}=\int_0^\infty\mathrm{e}^{-\lambda s}\dprod{T(s)x}{y}\ \mathrm{d} s, \] and $R(\lambda,A)\in\mathscr{L}(X_0)$ is in fact the resolvent of a linear operator $A$. \item We also suppose that $T(t)$ commutes with $A^{-1}$ for each $t\geq 0$. \end{num} \end{assumption} If the semigroup $(T(t))_{t>0}$ is only exponentially bounded of type $(M,\omega)$, that is \[ \|T(t)\|\leq M\mathrm{e}^{\omega t}\quad\text{for all $t\geq0$,} \] then one can rescale it (consider $(\mathrm{e}^{-(\omega+1) t}T(t))_{t\geq 0}$), and renorm the space such that the rescaled semigroup becomes a contraction semigroup. Moreover, the new semigroup has negative growth bound, meaning that $T(t)\to 0$ in norm exponentially fast as $t\to \infty$, and has an invertible generator. \begin{remark} \begin{abc} \item There are several important classes of semigroups, whose elements satisfy Assumption \ref{asp:sgrps}, hence can be treated in a unified manner: $\pi$-semigroups of Priola \cite{Priola}, weakly continuous semigroups of Cerrai \cite{Cerrai}, bi-continuous semigroups of K\"uhnemund \cite{Ku}. We will concentrate on this latter class of semigroups in Section \ref{sec:bicont}. In this framework Kunze \cite{Kunze2009} introduced the notion of integrable semigroups, which we briefly describe next. \item Since we have \[ \|y\|=\sup_{x\in X_0, \|x\|\leq1}{|\langle x,y\rangle|} \] and by the norming assumption \[ \|x\|=\sup_{y\in Y, \|y\|\leq1}{|\langle x,y\rangle|}, \] the pair $(X_0,Y)$ is called a norming dual pair.
Kunze has worked out the theory of semigroups on such norming dual pairs in \cite{Kunze2009}, we recall at least the basic definitions here: We assume without loss of generality that $Y$ is a Banach space and consider the weak topology $\sigma(X_0,Y)$ on $X_0$. An \emph{integrable semigroup} on the pair $(X_0,Y)$ is a semigroup $(T(t))_{t\geq0}$ of $\sigma$-continuous linear operators of type $(M,\omega)$ such that: \begin{num} \item $(T(t))_{t\geq0}$ is a semigroup, i.e. $T(t+s)=T(t)T(s)$ and $T(0)=I$ for all $t,s\geq0$. \item $(T(t))_{t\geq0}$ is exponentially bounded, i.e. there exists $M\geq1$ and $\omega\in\mathbb{R}$ such that $\|T(t)\|\leq M\mathrm{e}^{\omega t}$ for all $t\geq0$. We then say that $(T(t))_{t\geq0}$ is of type $(M,\omega)$. \item For all $\lambda$ with $\text{Re}(\lambda)>\omega$, there exists an $\sigma$-continuous linear operator $R(\lambda)$ such that for all $x\in X_0$ and all $y\in Y$ \[ \dprod {R(\lambda)x}{y}=\int_0^{\infty}\mathrm{e}^{-\lambda t}\dprod{T(t)x}{y} \ \mathrm{d} t. \] \end{num} Kunze defines the generator $A$ of the semigroup as the (unique) operator $A:D(A)\to X_0$ (if it exists at all) with $R(\lambda)=(\lambda-A)^{-1}$, precisely as we did in Assumption \ref{asp:sgrps}. Note that $\sigma$-continuity of $T(t)$ can be used to assure that $Y$ is invariant under $T'(t)$, cf.{} the next remark. 
\end{abc} \end{remark} Some further consequences of the previous assumptions follow: \begin{remark} The commutation property can be verified easily if $Y$ can be chosen such that it is invariant under $T'(t)$ for each $t\geq 0$: \begin{align*} \dprod{A^{-1}T(t)x}{y}&=\int_0^{\infty}\dprod{T(s)T(t)x}{y}\ \mathrm{d} s=\int_0^{\infty}\dprod{T(s+t)x}{y}\ \mathrm{d} s\\ &=\dprod{\int_0^{\infty}T(s)x\ \mathrm{d} s}{T'(t)y}=\dprod{T(t)A^{-1}x}{y}.\end{align*} \end{remark} \begin{remark} \begin{num} \item From \eqref{eq:laplace} it follows that for each $x\in X_0$ \begin{equation}\label{eq:midnight} T(t)x-x=A\int_0^t T(s)x\ \mathrm{d} s. \end{equation} Indeed, we have by \eqref{eq:laplace} that \begin{align*} x&=A\int_0^\infty T(s)x\ \mathrm{d} s\\ T(t)x&=A\int_0^\infty T(s)T(t)x\ \mathrm{d} s=A\int_t^\infty T(s)x\ \mathrm{d} s. \end{align*} Subtracting the first of these equations from the second one, we obtain the statement. \item If moreover $A$ commutes with $T(t)$ for each $t\geq0$, then for each $x\in D(A)$ we have \begin{equation}\label{eq:midnightDA} T(t)x-x=\int_0^t T(s)Ax\ \mathrm{d} s. \end{equation} Indeed, as in (1) we have by \eqref{eq:laplace} \begin{align*} -x&=-A^{-1}Ax=\int_0^\infty T(s)Ax\ \mathrm{d} s\\ -T(t)x&=-A^{-1}T(t)Ax=\int_0^\infty T(s)T(t)Ax\ \mathrm{d} s=\int_t^\infty T(s)Ax\ \mathrm{d} s. \end{align*} By a simple subtraction we obtain the statement. \end{num} \end{remark} The next lemma and its proof are standard for various classes of semigroups. \begin{lemma}\label{lem:spacecont} If $(T(t))_{t\geq 0}$ is (locally) norm bounded, then \[ X_{\mathrm{cont}}:=\bigl\{x\in X_0: t\mapsto T(t)x\text{ is}\ \|\cdot\|\text{-continuous}\bigr\} \] is a closed subspace of $X_0$ invariant under the semigroup. Under Assumption \ref{asp:sgrps} we have \[ \underline{X}_0=\overline{D(A)}=X_{\mathrm{cont}}. \] \end{lemma} \begin{proof} The closedness and invariance of $X_{\mathrm{cont}}$ are evident.
We first show $D(A)\subseteq X_{\mathrm{cont}}$, which implies $\overline{D(A)}\subseteq X_{\mathrm{cont}}$ by closedness of $X_{\mathrm{cont}}$. Let $x\in D(A)$. By \eqref{eq:midnightDA} we conclude that $T(t)x-x=\int_0^t{T(s)Ax\ \mathrm{d} s}$. As $t\to0$ the integral here tends to zero in $\|\cdot\|$: \begin{align*} \|T(t)x-x\|=\sup_{\|y\|\leq1}|\dprod{T(t)x-x}{y}|\leq \sup_{\|y\|\leq1}\int_0^t|\dprod{T(s)Ax}{y}|\ \mathrm{d} s\leq t\|Ax\|. \end{align*} Whence we conclude that $D(A)\subseteq X_{\mathrm{cont}}$. For the converse inclusion suppose that $x\in X_{\mathrm{cont}}$. Again by \eqref{eq:midnight} we obtain that the sequence of vectors $x_n:=n\int_0^{\frac{1}{n}}{T(s)x\ \mathrm{d} s}\in D(A)$ ($n\in\mathbb{N}$) converges to $x$. Indeed: \begin{align*} \|x_n-x\|=\sup_{\|y\|\leq1}|\dprod{ x_n-x}{y}|\leq\sup_{\|y\|\leq1}n\int_0^{\frac{1}{n}}|\dprod {T(s)x-x}{y}|\ \mathrm{d} s\leq n\int_0^{\frac{1}{n}}{\|T(s)x-x\|\ \mathrm{d} s}. \end{align*} By the continuity of $s\mapsto T(s)x$ we obtain the inclusion $X_{\mathrm{cont}}\subseteq\overline{D(A)}$. \end{proof} Based on this lemma one can prove the following characterization of the Favard and H\"older spaces: \begin{proposition}\label{prop:FavCont} Let $(T(t))_{t\geq0}$ be a semigroup satisfying Assumption \ref{asp:sgrps} with negative growth bound and generator $A$.
For $\alpha\in(0,1]$ define \begin{equation}\label{equ:Fav} \TFav_{\alpha}(T):=\Bigl\{x\in X_0:\sup_{s>0}\frac{\|T(s)x-x\|}{s^{\alpha}}<\infty\Bigr\}=\Bigl\{x\in X_0:\sup_{s\in(0,1)}\frac{\|T(s)x-x\|}{s^{\alpha}}<\infty\Bigr\}, \end{equation} and for $\alpha\in (0,1)$ define \begin{align}\label{equ:Hol} \THol_{\alpha}(T)&:=\Bigl\{x\in X_0:\sup_{s>0}\frac{\|T(s)x-x\|}{s^{\alpha}}<\infty\text{ and }\lim_{s\downarrow 0}\frac{\|T(s)x-x\|}{s^{\alpha}}=0\Bigr\}\\ \notag &=\Bigl\{x\in X_0:\lim_{s\downarrow 0}\frac{\|T(s)x-x\|}{s^{\alpha}}=0\Bigr\}, \end{align} which become Banach spaces if endowed with the norm \[ \|x\|_{\alpha}:=\sup_{s>0}\frac{\|T(s)x-x\|}{s^{\alpha}}. \] The space $ \THol_{\alpha}(T)$ is a closed subspace of $\TFav_\alpha(T)$. These spaces are invariant under the semigroup $(T(t))_{t\geq0}$, and $\THol_\alpha(T)$ is the space of $\|\cdot\|_\alpha$-strong continuity in $\TFav_\alpha(T)$. For $\alpha\in (0,1]$ we have $F_\alpha(A)=\TFav_\alpha(T)$ and for $\alpha\in (0,1)$ we have $\XHol_\alpha(A)=\THol_\alpha(T)$ with equivalent norms. \end{proposition} \begin{proof} The invariance of the spaces $\TFav_\alpha$ can be proved as follows: For $x\in\TFav_{\alpha}$ we have: \begin{align*} \|T(t) x\|_{{\alpha}}&=\sup_{s>0}{\frac{\|T(s)T(t)x-T(t)x\|}{s^{\alpha}}}\leq\|T(t)\|\cdot\sup_{s>0}{\frac{\|T(s)x-x\|}{s^{\alpha}}}\leq M\|x\|_{{\alpha}}. \end{align*} Similar reasoning proves the invariance of $\THol_\alpha$. Since $\TFav_\alpha\subseteq X_{\mathrm{cont}}=\underline{X}_0=\overline{D(A)}$ and $F_\alpha(A)\subseteq \underline{X}_0=\overline{D(A)}$, the rest of the assertions follow from the corresponding results concerning $C_0$-semigroups, see, e.g., \cite[Sec. II.5]{EN}. 
\end{proof} To complete the picture we recall the next result from \cite[Chapter 5]{Lunardi}, which is formulated there only for $C_0$-semigroups as a theorem, but Lunardi also remarks, without stating the precise assumptions, that this result still holds if one omits the strong continuity assumption. We require here the conditions from Assumption \ref{asp:sgrps}, under which the proof is verbatim the same as for the $C_0$-case, and is based on formulas \eqref{eq:midnight} and \eqref{eq:midnightDA}. \begin{proposition}Let $A$ generate the semigroup $(T(t))_{t>0}$ of negative growth bound in the sense described in Assumption \ref{asp:sgrps}. Then for $p\in [1,\infty]$ and $\alpha\in (0,1)$ we have for the interpolation space: \[ (X,D(A))_{\alpha,p}=\{x\in X:\ t\mapsto\psi(t):=t^{-\alpha}\|T(t)x-x\|\in \mathrm{L}^p_*(0,\infty)\}, \] where $\mathrm{L}^p_*(0,\infty)$ denotes the $\mathrm{L}^p$-space with respect to the Haar measure $\frac {\mathrm{d} t}{t}$ on the multiplicative group $(0,\infty)$. Moreover, the norms $\|x\|_{\alpha,p}$ and \[ \|x\|^{**}_{\alpha,p}=\|x\|+\|\psi\|_{\mathrm{L}^p_*(0,\infty)} \] are equivalent. \end{proposition} We conclude this section with the construction of the extrapolated semigroup as a direct consequence of the results in Section \ref{sec:invert}, particularly of Proposition \ref{prop:extT}. \begin{proposition} Let $A$ generate the semigroup $(T(t))_{t\geq 0}$ of negative growth bound in the sense of Assumption \ref{asp:sgrps}. Then there is an extension $(T_{-1}(t))_{t\geq 0}$ of the semigroup $(T(t))_{t\geq 0}$ on the extrapolated space $X_{-1}$, whose generator is $A_{-1}$. \end{proposition} \section{Bi-continuous semigroups}\label{sec:bicont} In this section we concentrate on extrapolation spaces for generators of \emph{bi-continuous semigroups}. Such semigroups were introduced by K\"uhnemund in \cite{Ku} and possess generators as described in Section~\ref{sec:sgrps}.
The following assumptions, as proposed by K\"uhnemund, will be made during the whole section. \begin{assumption}\label{asp:bicontspace} Consider a triple $(X_0,\|\cdot\|,\tau)$ where $X_0$ is a Banach space, \begin{num} \item $\tau$ is a locally convex Hausdorff topology coarser than the norm-topology on $X_0$, i.e. the identity map $(X_0,\|\cdot\|)\to(X_0,\tau)$ is continuous; \item $\tau$ is sequentially complete on the closed unit ball; \item The dual space of $(X_0,\tau)$ is norming for $X_0$, i.e., \begin{equation}\label{eq:norm} \|x\|=\sup_{\substack{\varphi\in(X_0,\tau)'\\\|\varphi\|\leq1}}{|\varphi(x)|}.\end{equation} \end{num} \end{assumption} \begin{remark} \label{rem:seminorm}\label{rem:pnorming} \begin{num} \item There is a related notion of so-called Saks spaces, see \cite{CooperSaks}. By definition a \emph{Saks space} is a triple $(X_0,\|\cdot\|,\tau)$ such that $X_0$ is a vector space with a norm $\|\cdot\|$ and locally convex topology $\tau$ in such a way that $\tau$ is weaker than the $\|\cdot\|$-topology, but the closed unit ball is $\tau$-complete. In particular, $X_0$ is a Banach space. \item There is also a connection to the the norming dual pairs we discussed in the previous section. In particular, $(X_0,Y)$ with $Y=(X_0,\tau)'$ is a norming dual pair. \item Kraaij puts this setting in a more general framework of locally convex spaces with mixed topologies, see \cite[Sec.{} 4]{Kraaij2016}, and also \cite[App.{} A]{FaPHD} \item Assumption \eqref{eq:norm} is equivalent to the following: There is a set $\mathcal{P}$ of $\tau$-continuous seminorms defining the topology $\tau$, such that \begin{equation}\label{eq:semisnorm} \|x\|=\sup_{p\in\mathcal{P}}p(x). \end{equation} This description is also used by Kraaij in \cite{Kraaij2016}, cf.{} his Lemma 4.4. Note also that by this remark and by Lemma 3.1 in \cite{CooperSaks} we see that a Saks space satisfies Assumption \ref{asp:bicontspace}. 
Indeed, assume \eqref{eq:norm} and let $\mathcal{P}$ be the collection of \emph{all} $\tau$-continuous seminorms $p$ such that $p(x)\leq \|x\|$. Then $|\varphi(\cdot)|\in \mathcal{P}$ for each $\varphi\in(X_0,\tau)'$ with $\|\varphi\|\leq 1$, and \eqref{eq:semisnorm} is trivially satisfied. If $q$ is any $\tau$-continuous seminorm, then $q(x)\leq M\|x\|$ for some constant $M$ and for all $x\in X_0$, so that $q/M\in \mathcal{P}$, proving that $\mathcal{P}$ defines precisely the topology $\tau$. For the converse implication suppose that \eqref{eq:semisnorm} holds. Then by the application of the Hahn--Banach theorem we obtain \eqref{eq:norm}. \end{num} \end{remark} Now we are in a position to formulate the definition of a bi-continuous semigroup. \begin{definition}[K\"uhnemund \cite{Ku}]\label{def:bicontsemi} Let $X_0$ be a Banach space with norm $\|\cdot\|$ together with a locally convex topology $\tau$, such that conditions in Assumption \ref{asp:bicontspace} are satisfied. We call $(T(t))_{t\geq0}$ a \emph{bi-continuous semigroup} if \begin{num} \item $ T(t+s)=T(t)T(s)$ and $T(0)=I$ for all $s,t\geq 0$. \item $(T(t))_{t\geq0}$ is strongly $\tau$-continuous, i.e. the map $\varphi_x:[0,\infty)\to(X_0,\tau)$ defined by $\varphi_x(t)=T(t)x$ is continuous for every $x\in X_0$. \item $(T(t))_{t\geq0}$ is exponentially bounded, i.e., has type $(M,\omega)$ for some $M\geq 1$ and $\omega\in \mathbb{R}$. \item $(T(t))_{t\geq0}$ is locally-bi-equicontinuous, i.e., if $(x_n)_{n\in\mathbb{N}}$ is a norm-bounded sequence in $X_0$ which is $\tau$-convergent to $0$, then also $(T(s)x_n)_{n\in\mathbb{N}}$ is $\tau$-convergent to $0$ uniformly for $s\in[0,t_0]$ for each fixed $t_0\geq0$. \end{num} \end{definition} As in the case of $C_0$-semigroups we can define a generator for a bi-continuous semigroup in the following way: \begin{definition}Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X_0$.
The generator $A$ is defined by \[Ax:=\mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t}}\] with the domain \[D(A):=\Bigl\{x\in X_0:\ \mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t}}\ \text{exists and} \ \sup_{t\in(0,1]}{\frac{\|T(t)x-x\|}{t}}<\infty\Bigr\}.\] \end{definition} This definition of the generator leads to a couple of important properties; important examples in this context are evolution semigroups on $\mathrm{C}_{\mathrm{b}}(\mathbb{R},X)$, semigroups induced by flows, adjoint semigroups and the Ornstein--Uhlenbeck semigroup on $\mathrm{C}_{\mathrm{b}}(\mathcal{H})$. The following theorem sums up some properties of bi-continuous semigroups and their generators (see \cite{Ku},\cite{FaStud}): \begin{theorem} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup with generator $A$. Then the following hold: \begin{abc} \item $A$ is bi-closed, i.e., whenever $x_n\stackrel{\tau}{\to}x$ and $Ax_n\stackrel{\tau}{\to}y$ and both sequences are norm-bounded, then $y\in D(A)$ and $Ax=y$. \item $D(A)$ is bi-dense in $X_0$, i.e., for each $x\in X_0$ there exists a norm-bounded sequence $(x_n)_{n\in\mathbb{N}}$ in $D(A)$ such that $x_n\stackrel{\tau}{\to}x$. \item For $x\in D(A)$ we have $T(t)x\in D(A)$ and $T(t)Ax=AT(t)x$ for all $t\geq0$. \item For $t>0$ and $x\in X_0$ one has \begin{align}\int_0^t{T(s)x\ \mathrm{d} s}\in D(A)\ \ \text{and}\ \ A\int_0^t{T(s)x\ \mathrm{d} s}=T(t)x-x. \end{align} \item For $\lambda>\omega_0(T)$ one has $\lambda\in\rho(A)$ (thus $A$ is closed) and for $x\in X_0$ one has: \begin{align}\label{eq:bicontlaplace} R(\lambda,A)x=\int_0^{\infty}{\mathrm{e}^{-\lambda s}T(s)x\ \mathrm{d} s}\end{align} where the integral is a $\tau$-improper integral. \end{abc} \end{theorem} Recall the following result of K\"uhnemund from \cite{Ku}, whose proof is originally based on integrated semigroups. We present here a short proof based on extrapolation spaces.
\begin{theorem}[K\"uhnemund]\label{thm:bistrong} Let $(X_0,\|\cdot\|,\tau)$ be a triple satisfying Assumption \ref{asp:bicontspace}, and let $A$ be a linear operator on the Banach space $X_0$. The following are equivalent: \begin{iiv} \item $A$ is the generator of a bi-continuous semigroup $(T(t))_{t\geq0}$ of type $(M,\omega)$. \item $A$ is a Hille--Yosida operator of type $(M,\omega)$, i.e., \[ \|R(s,A)^{k}\|\leq\frac{M}{(s-\omega)^k} \] for all $k\in\mathbb{N}$ and for all $s>\omega$. $A$ is bi-densely defined and the family \begin{equation}\label{eq:resbiequi} \bigl\{(s-\alpha)^kR(s,A)^k:\ k\in\mathbb{N},\ s\geq\alpha\bigr\} \end{equation} is bi-equicontinuous for each $\alpha>\omega$, meaning that for each norm bounded $\tau$-null sequence $(x_n)$ one has $(s-\alpha)^kR(s,A)^kx_n\to 0$ in $\tau$ uniformly for $k\in\mathbb{N}$ and $s\geq\alpha$ as $n\to \infty$. \end{iiv} In this case we have the Euler formula \[ T(t)x:=\mathop{\tau\mathrm{lim}}_{m\to\infty}\left(\frac{m}{t}R\left(\frac{m}{t},A\right)\right)^mx \] for each $x\in X_0$. Moreover, the subspace $\underline{X}_0:=\overline{D(A)}\subseteq X_0$ is the space of norm strong continuity for $(T(t))_{t\geq0}$, it is invariant under the semigroup, and $(\underline{T}(t))_{t\geq0}:=(T(t)|_{\underline{X}_0})_{t\geq0}$ is the strongly continuous semigroup on $\underline{X}_0$ generated by the part of $A$ in $\underline{X}_0$. \end{theorem} \begin{proof} That $\underline{X}_0$ is the space of norm strong continuity for a bi-continuous semigroup $(T(t))_{t\geq 0}$ follows from Lemma \ref{lem:spacecont}. \noindent We only prove the implication (ii) $\Rightarrow$ (i) and Euler formula; the other implication is standard. We may suppose that $\omega<0$. Since $A$ is a Hille--Yosida operator, the part $\underline{A}_0$ of $A$ in $\underline{X}_0$ generates a $C_0$-semigroup $(\underline{T}(t))_{t\geq0}$ on the space $\underline{X}_0:=\overline{D(A)}$. 
Define the function \[ F(s):=\begin{cases} \frac1sR(\frac1s,A)&\text{for } s>0,\\ I&\text{for } s=0, \end{cases} \] which is strongly continuous on $\underline{X}_0$ by Remark \ref{rem:2}. Moreover, we have the Euler formula \[ \underline{T}(t)x=\lim_{m\to\infty}{F\bigl(\tfrac{t}{m}\bigr)^mx} \] for $x\in \underline{X}_0$ with convergence being uniform for $t$ in compact intervals $[0,t_0]$, see, e.g., \cite[Section~III.5(a)]{EN}. Since $R(\lambda,A)|_{\underline{X}_0}=R(\lambda, \underline{A}_0)$ and since $D(A)$ is bi-dense in $X_0$, by the local bi-equicontinuity assumption in \eqref{eq:resbiequi} we conclude that for $x\in X_0$ and $t>0$ the limit \begin{equation}\label{eq:taueuler} S(t)x:=\mathop{\tau\mathrm{lim}}_{m\to\infty}F\bigl(\tfrac{t}{m}\bigr)^mx \end{equation} exists, and the convergence is uniform for $t$ in compact intervals $[0,t_0]$. It follows that $t\mapsto S(t)x$ is $\tau$-strongly continuous for each $x\in X_0$. The operator family $(S(t))_{t\geq 0}$ is locally bi-equicontinuous because of the bi-equicontinuity assumption in \eqref{eq:resbiequi}. \noindent Next, we prove that $\underline{T}(t)$ leaves $D(A)$ invariant. Let $x\in D(A)$, so that $x=A^{-1}y$ for some $y\in X_0$, and insert $x$ in the previous formula \eqref{eq:taueuler} to obtain \begin{equation}\label{eq:invA} \underline{T}(t)x=S(t)A^{-1}y=\mathop{\tau\mathrm{lim}}_{m\to\infty}F\bigl(\tfrac{t}{m}\bigr)^m A^{-1}y=A^{-1}\mathop{\tau\mathrm{lim}}_{m\to\infty}F\bigl(\tfrac{t}{m}\bigr)^m y=A^{-1}S(t)y\in D(A), \end{equation} where we have used the bi-continuity of $A^{-1}$ and the boundedness of $(\bigl[\tfrac{m}{t}R\bigl(\tfrac{m}t,A\bigr)\bigr]^my)_{m\in \mathbb{N}}$. By Proposition \ref{prop:extT} (b) we can extend $\underline{T}(t)$ to $X_0$ by setting $T(t):=A\underline{T}(t)A^{-1}\in \mathscr{L}(X_0)$. It follows that $(T(t))_{t\geq 0}$ is a semigroup. By formula \eqref{eq:invA}, we have then $T(t)y=A\underline{T}(t)A^{-1}y=AA^{-1}S(t)y=S(t)y$ for each $y\in X_0$.
Thus $(T(t))_{t\geq 0}$, coinciding with $(S(t))_{t\geq0}$, is locally bi-equicontinuous, and hence a bi-continuous semigroup. It remains to show that the generator of $(T(t))_{t\geq 0}$ is precisely $A$. Let $B$ denote the generator of $(T(t))_{t\geq 0}$. Then, for large $\lambda>0$ and $x\in \underline{X}_0$, we have \[ R(\lambda,B)x=\int_0^\infty \mathrm{e}^{-\lambda s}T(s)x\ \mathrm{d} s=\int_0^\infty \mathrm{e}^{-\lambda s}\underline{T}(s)x\ \mathrm{d} s=R(\lambda, \underline{A}_0)x=R(\lambda,A)x. \] Since $R(\lambda,B)$ and $R(\lambda,A)$ are sequentially $\tau$-continuous on norm bounded sets and since $D(A)$ is bi-dense in $X_0$, we obtain $R(\lambda,B)=R(\lambda,A)$. This finishes the proof. \end{proof} The first statement in the next proposition is proved by Nagel and Sinestrari, see \cite{NagelSurvey} and \cite{nagel1993inhomogeneous}, while the second one follows directly from the results in Section \ref{sec:invert}. \begin{proposition} Let $A$ be a Hille--Yosida operator on the Banach space $X_0$ with domain $D(A)$. Denote by $(\underline{T}(t))_{t\geq 0}$ the $C_0$-semigroup on $\underline{X}_0=\overline{D(A)}$ generated by the part $\underline{A}$ of $A$. \begin{abc} \item There is a one-parameter semigroup $(\overline{T}(t))_{t\geq 0}$ on $F_0(A)$ which extends $(\underline{T}(t))_{t\geq 0}$. This semigroup is strongly continuous for the $\|\cdot\|_{-1}$ norm. \item Suppose for each $t\geq 0$ the operator $\underline{T}(t)$ leaves $D(A)$ invariant. The space $X_0$ is invariant under the semigroup operators $\overline{T}(t)$ for every $t\geq 0$, i.e., $T(t)\in\mathscr{L}(X_0)$ for $T(t):=\overline{T}(t)|_{X_0}$. \end{abc} \end{proposition} \subsection{Extrapolated semigroups} Next we extend a bi-continuous semigroup on $X_0$ to the extrapolation space $X_{-1}$ as a bi-continuous semigroup.
We have to handle two topologies, and the next proposition provides means to describe an additional locally convex topology on $X_{-1}$ still satisfying Assumption \ref{asp:bicontspace}. The definition of extrapolated spaces can be based on this proposition as was suggested by Haase \cite{Haase}. \begin{proposition}\label{prop:Edef} Let the triple $(X_0,\|\cdot\|,\tau)$ satisfy Assumption \ref{asp:bicontspace}, let $\mathcal{P}$ be as in Remark \ref{rem:seminorm}, let $E$ be a vector space over $\mathbb{C}$, and let $B:X_0\to E$ be a bijective linear mapping. We define for $e\in E$ and $p\in \mathcal{P}$ \[ \|e\|_E:=\|B^{-1}e\|,\quad\text{and}\quad p_{E}(e):=p(B^{-1}e). \] Then the following assertions hold: \begin{abc} \item $\|\cdot\|_E$ is a norm, $p_E$ is a seminorm for each $p\in \mathcal{P}$. \item With $\mathcal{P}_E:=\{p_E:p\in \mathcal{P}\}$ and with $\tau_E$ being the topology defined by $\mathcal{P}_E$, the triple $(E,\|\cdot\|_E,\tau_E)$ satisfies the conditions in Assumption \ref{asp:bicontspace}. \item If $(T(t))_{t\geq 0}$ is a bi-continuous semigroup on $X_0$ with respect to the topology $\tau$, then by $T_E(t):=BT(t)B^{-1}$ we define a bi-continuous semigroup on $E$. If $A$ is the generator of $(T(t))_{t\geq 0}$, then $BAB^{-1}$ is the generator of $(T_E(t))_{t\geq 0}$. \end{abc} \end{proposition} \begin{proof} Assertion (a) is evident. The conditions (1){} and (2){} from Assumption \ref{asp:bicontspace} are obviously satisfied by the definition of $\|\cdot\|_E$ and $p_E$. For condition (3) note that \[ \|e\|_E=\|B^{-1}e\|=\sup_{p\in \mathcal{P}}p(B^{-1}e)=\sup_{p_E\in \mathcal{P}_E}p_E(e), \] so that, by Remark \ref{rem:pnorming}, (3){} in Assumption \ref{asp:bicontspace} is fulfilled. The proof of (b) is complete. \noindent (c) For $e\in E$ we have $\|T_E(t)e\|_E=\|B^{-1}BT(t)B^{-1}e\|=\|T(t)B^{-1}e\|\leq \|T(t)\|\cdot \|e\|_E$, showing that $T_E(t)\in\mathscr{L}(E)$. That $(T_E(t))_{t\geq 0}$ satisfies the semigroup property is evident.
For $e\in E$ and $p_E\in \mathcal{P}_E$ we have \[ p_E(T_E(t)e-e)=p(B^{-1}BT(t)B^{-1}e-B^{-1}e)=p(T(t)B^{-1}e-B^{-1}e)\to 0\quad \text{for $t\to 0$}, \] showing the $\tau_E$-strong continuity of $(T_E(t))_{t\geq 0}$. If $(e_n)$ is a $\|\cdot\|_E$-bounded, $\tau_E$-null sequence, then $(B^{-1}e_n)$ is a $\|\cdot\|$-bounded $\tau$-null sequence, so that by assumption $T_E(t)e_n=T(t)B^{-1}e_n\to 0$ uniformly for $t$ in compact intervals. If $A$ is the generator of $(T(t))_{t\geq 0}$, then by means of \eqref{eq:bicontlaplace} we can conclude that $BAB^{-1}$ is the generator of $(T_E(t))_{t\geq 0}$. \end{proof} \begin{definition}\label{def:bicontext} Let $(T(t))_{t\geq 0}$ be a bi-continuous semigroup on $X_0$ with generator $A$. \begin{abc} \item For $B=A^{-1}:X_0\to X_1$ and $E=X_1$ in Proposition \ref{prop:Edef} define $\mathcal{P}_1:=\mathcal{P}_E$, $\tau_{1}:=\tau_E$, $(T_1(t))_{t\geq 0}=(T_E(t))_{t\geq 0}$. \item For $B=A_{-1}:X_0\to X_{-1}$ and $E=X_{-1}$ in Proposition \ref{prop:Edef} define $\mathcal{P}_{-1}:=\mathcal{P}_E$, $\tau_{-1}:=\tau_E$, $(T_{-1}(t))_{t\geq 0}=(T_E(t))_{t\geq 0}$. \end{abc} \end{definition} We immediately obtain the following. \begin{proposition}\label{prop:T-1bicont} By construction $(T_1(t))_{t\geq 0}$ and $(T_{-1}(t))_{t\geq 0}$ are bi-continuous semigroups on $X_1$ and $X_{-1}$, with generators $A_1=A|D(A)$ and $A_{-1}$, respectively. \end{proposition} Iterating the procedure in Definition \ref{def:bicontext} we obtain the (extrapolated) semigroups $(T_n(t))_{t\geq 0}$ for the full scale $n\in \mathbb{Z}$. \begin{definition}\label{def:bicontext2} Let $(T(t))_{t\geq 0}$ be a bi-continuous semigroup on $X_0$ with generator $A$. Recursively we make the following definitions: \begin{abc} \item For $B=A_n^{-1}:X_n\to X_{n+1}$ and $E=X_{n+1}$ in Proposition \ref{prop:Edef} define $\mathcal{P}_{n+1}:=\mathcal{P}_E$, $\tau_{n+1}:=\tau_E$, $(T_{n+1}(t))_{t\geq 0}=(T_E(t))_{t\geq 0}$.
\item For $B=A_{-n-1}:X_{-n}\to X_{-n-1}$ and $E=X_{-n-1}$ in Proposition \ref{prop:Edef} define $\mathcal{P}_{-n-1}:=\mathcal{P}_E$, $\tau_{-n-1}:=\tau_E$, $(T_{-n-1}(t))_{t\geq 0}=(T_E(t))_{t\geq 0}$. \end{abc} \end{definition} \begin{proposition} For each $n\in \mathbb{Z}$ the semigroup $(T_n(t))_{t\geq 0}$ is bi-continuous on $X_n$ with generator $A_n:X_{n+1}\to X_n$. Its space of norm strong continuity is $\underline{X}_n$. \end{proposition} \begin{proof} The first statement follows directly from Proposition \ref{prop:T-1bicont} by recursion. For $n=0$ the second assertion is the content of Lemma \ref{lem:spacecont}, for general $n\in \mathbb{Z}$ one can argue recursively. \end{proof} The following diagram summarizes the situation: \begin{align*} \xymatrix{ \underline{X}_{-2}\ar[rr]^{\underline{T}_{-2}(t)}& & \underline{X}_{-2}\ar@/^2pc/[dd]^{\underline{A}^{-1}_{-2}}\\ X_{-1}\ar[u]\ar[rr]^{T_{-1}(t)}& & X_{-1}\ar[u]\ar@/^2pc/[dd]^{A^{-1}_{-1}}\\ \underline{X}_{-1}\ar[u]\ar[rr]^{\underline{T}_{-1}(t)}\ar@/^2pc/[uu]^{\underline{A}_{-2}} & & \underline{X}_{-1}\ar[u]\ar@/^2pc/[dd]^{\underline{A}^{-1}_{-1}}\\ X_0\ar[u]\ar[rr]^{T(t)}\ar@/^2pc/[uu]^{A_{-1}} & & X_0\ar[u]\ar@/^2pc/[dd]^{A^{-1}}\\ \underline{X}_0\ar[u]\ar[rr]^{\underline{T}(t)}\ar@/^2pc/[uu]^{\underline{A}_{-1}} & & \underline{X}_0\ar[u]\ar@/^2pc/[dd]^{\underline{A}^{-1}}\\ X_1\ar[rr]^{T_1(t)}\ar@/^2pc/[uu]^{A}\ar[u] & & X_1\ar[u]\\ \underline{X}_1\ar[u]\ar[rr]^{\underline{T}_1(t)}\ar@/^2pc/[uu]^{\underline{A}} & & \underline{X}_1\ar[u]\\ } \end{align*} The spaces $\underline{X}_{n+1}$ are bi-dense in $X_n$ for the topology $\tau_{X_n}$ and dense in $\underline{X}_n$ for the norm $\|\cdot\|_{X_n}$. The semigroups $(T_n(t))_{t\geq 0}$ are bi-continuous on $X_n$, while $(\underline{T}_n(t))_{t\geq 0}$ are $C_0$-semigroups (strongly continuous for the norm) on $\underline{X}_n$.
\subsection{H\"older spaces of bi-continuous semigroups} Suppose $A$ generates the bi-continuous semigroup $(T(t))_{t\geq0}$ of negative growth bound on $X_0$. Recall from Theorem \ref{thm:bistrong} that the restricted operators $\underline{T}(t):=T(t)|_{\underline{X}_0}$ form a $C_0$-semigroup $(\underline{T}(t))_{t\geq0}$ on $\underline{X}_0$. Also recall from Proposition \ref{prop:FavCont} that for $\alpha\in(0,1]$ \[ F_{\alpha}(A)=\Bigl\{x\in\underline{X}_0:\ \sup_{t>0}{\frac{\|\underline{T}(t)x-x\|}{t^{\alpha}}}<\infty\Bigr\}=\Bigl\{x\in X_0:\ \sup_{t>0}{\frac{\|T(t)x-x\|}{t^{\alpha}}}<\infty\Bigr\}\] with the norm \[ \|x\|_{\alpha}=\sup_{t>0}{\frac{\|\underline{T}(t)x-x\|}{t^{\alpha}}}, \] and for $\alpha\in(0,1)$: \[ \XHol_{\alpha}(A):=\Bigl\{x\in\underline{X}_0:\ \lim_{t\to0}{\frac{\|\underline{T}(t)x-x\|}{t^{\alpha}}}=0\Bigr\}=\Bigl\{x\in X_0:\ \lim_{t\to0}{\frac{\|T(t)x-x\|}{t^{\alpha}}}=0\Bigr\}. \] We have the following (continuous) inclusions: \[ \underline{X}_1\hookrightarrow X_1\to \XHol_{\alpha}(A)\hookrightarrow F_{\alpha}(A)\to\underline{X}_0\hookrightarrow X_0; \] all these spaces are invariant under $(T(t))_{t\geq0}$. We now extend this diagram by a space which lies between $\XHol_{\alpha}(A)$ and $F_{\alpha}(A)$. \begin{definition}\label{def:biHoelder} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup of negative growth bound on a Banach space $X_0$ with respect to a locally convex topology $\tau$ that is generated by a family $\mathcal{P}$ of seminorms satisfying \eqref{eq:semisnorm}.
For $\alpha\in(0,1)$ we define the space \begin{align} &X_{\alpha}:=\Bigl\{x\in X_0:\ \mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t^{\alpha}}}=0\ \text{and}\ \sup_{t>0}{\frac{\|T(t)x-x\|}{t^{\alpha}}}<\infty\Bigr\}, \end{align} and endow it with the norm $\|\cdot\|_{F_{\alpha}}$. We further equip $F_{\alpha}$ and $X_\alpha$ with the locally convex topology $\tau_{F_{\alpha}}$ generated by the family of seminorms $\mathcal{P}_{F_{\alpha}}:=\{p_{F_{\alpha}}:\ p\in\mathcal{P}\}$, where $p_{F_{\alpha}}$ is given by \begin{align} p_{F_{\alpha}}(x):=\sup_{t>0}{\frac{p(T(t)x-x)}{t^{\alpha}}}. \end{align} \end{definition} It is easy to see that $X_\alpha$ is a Banach space, since it is a closed subspace of $F_\alpha$. By construction we have that $\XHol_{\alpha}(A)\subseteq X_{\alpha}\subseteq F_{\alpha}(A)$, which was our first goal. Next we discuss some properties of this space. \begin{lemma} \begin{abc} \item Let $(x_n)$ be a $\|\cdot\|_{F_\alpha}$-norm bounded sequence in $F_\alpha$ with $x_n\to x\in X_0$ in the topology $\tau$. Then $x\in F_\alpha$. \item The triple $(F_{\alpha},\|\cdot\|_{F_\alpha},\tau_{F_\alpha})$ satisfies the conditions in Assumption \ref{asp:bicontspace}. \item $X_{\alpha}$ is bi-closed in $F_{\alpha}$, i.e., every $\|\cdot\|_{F_\alpha}$-bounded and $\tau_{F_\alpha}$-convergent sequence in $X_{\alpha}$ has its limit in $X_{\alpha}$. \end{abc} \end{lemma} \begin{proof} \noindent (a) The statement follows from the fact that the norm $\|\cdot\|_{F_\alpha}$ is lower semicontinuous for the topology $\tau$. In detail: Let $M\geq 0$ be such that \[\frac{\|T(t)x_n-x_n\|}{t^{\alpha}}\leq \|x_n\|_{F_\alpha}\leq M\] for each $n\in\mathbb{N}$ and $t>0$.
Hence we can estimate as follows: \begin{align*} \sup_{t>0}\frac{\|T(t)x-x\|}{t^{\alpha}}&=\sup_{t>0}\sup_{p\in\mathcal{P}}p\Bigl(\frac{T(t)x-x}{t^{\alpha}}\Bigr)=\sup_{t>0}\sup_{p\in\mathcal{P}}\lim_{n\to\infty}p\Bigl(\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr)\\ &\leq \sup_{t>0}\sup_{p\in\mathcal{P}}\limsup_{n\to\infty}\Bigl\|\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr\|\leq \sup_{t>0}\sup_{n\in\mathbb{N}}\Bigl\|\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr\|\leq M. \end{align*} \noindent (b) We have for $p\in\mathcal{P}$ and $x\in F_\alpha$ that \[ p_{F_{\alpha}}(x)=\sup_{t>0}{\frac{p(T(t)x-x)}{t^{\alpha}}}\leq\sup_{t>0}{\frac{\|T(t)x-x\|}{t^{\alpha}}}=\|x\|_{F_{\alpha}}. \] This proves that $\tau_{F_{\alpha}}$ is weaker than the $\|\cdot\|_{F_{\alpha}}$-topology, but $\tau_{F_\alpha}$ is still Hausdorff by construction. For the second property of Assumption \ref{asp:bicontspace} let $(x_n)_{n\in\mathbb{N}}$ be a $\tau_{F_{\alpha}}$-Cauchy sequence in $F_{\alpha}$ such that there exists $M>0$ with $\|x_n\|_{F_{\alpha}}\leq M$ for each $n\in\mathbb{N}$. Since $\tau$ is weaker than $\tau_{F_\alpha}$, we conclude that $(x_n)$ is $\tau$-Cauchy sequence which is also bounded in $\|\cdot\|_{F_\alpha}$ hence in $\|\cdot\|$. By assumption there is $x\in X_0$ such that $x_n\to x$ in $\tau$. By part (a) we obtain $x\in F_\alpha$. It remains to prove that $x_n\to x$ in $\tau_{F_\alpha}$. Let $\varepsilon>0$, and take $N\in\mathbb{N}$ such that for each $n,m\in \mathbb{N}$ with $n,m\geq N$ we have $p_{F_\alpha}(x_n-x_m)<\varepsilon$. For $t>0$ \[ p\Bigl(\frac{T(t)(x_n-x)-(x_n-x)}{t^{\alpha}}\Bigr)=\lim_{m\to\infty} p\Bigl(\frac{T(t)(x_n-x_m)-(x_n-x_m)}{t^{\alpha}}\Bigr)\leq p_{F_\alpha}(x_n-x_m)<\varepsilon \] for each $n\geq N$. Taking supremum in $t>0$ we obtain $p_{F_\alpha}(x-x_n)\leq \varepsilon$ for each $n\geq N$. 
\noindent The norming property in \eqref{eq:norm} follows again simply by applying the argumentation from Remark \ref{rem:seminorm} and the fact that the family $\mathcal{P}$ is already norming by assumption. \noindent (c) Let $(x_n)_{n\in\mathbb{N}}$ be a $\|\cdot\|_{F_{\alpha}}$-bounded and $\tau_{F_{\alpha}}$-convergent sequence in $X_{\alpha}$ with limit $x\in X_0$. For $p\in \mathcal{P}$ we then have \[ \sup_{t>0}{p\Bigl(\frac{T(t)(x_n-x)-(x_n-x)}{t^{\alpha}}\Bigr)}\to 0. \] Since $x_n\in X_{\alpha}$ for each $n\in\mathbb{N}$, we have \[ \lim_{t\to0}{p\Bigl(\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr)}=0, \quad\text{and}\quad \sup_{t>0}{\Bigl\|\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr\|}<\infty. \] We now can conclude for a fixed $p\in \mathcal{P}$ \begin{align*} p\Bigl(\frac{T(t)x-x}{t^{\alpha}}\Bigr)&=p\Bigl(\frac{T(t)(x-x_n)-(x-x_n)+T(t)x_n-x_n}{t^{\alpha}}\Bigr)\\ &\leq p\Bigl(\frac{T(t)(x-x_n)-(x-x_n)}{t^{\alpha}}\Bigr)+p\Bigl(\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr)\\ &\leq p_{F_{\alpha}}(x-x_n)+p\Bigl(\frac{T(t)x_n-x_n}{t^{\alpha}}\Bigr)<\frac{\varepsilon}2+\frac{\varepsilon}2=\varepsilon, \end{align*} where we first fix $n\in\mathbb{N}$ such that $p_{F_{\alpha}}(x-x_n)<\frac{\varepsilon}{2}$, and then we take $\delta>0$ such that $0<t<\delta$ implies $p(\frac{T(t)x_n-x_n}{t^{\alpha}})<\frac{\varepsilon}{2}$. \end{proof} The next goal is to see that $(T(t))_{t\geq 0}$ can be restricted to $X_{\alpha}$ to obtain a bi-continuous semigroup with respect to the topology $\tau_{F_\alpha}$. \begin{remark} For the proof of the next lemma we notice that we have an equivalent formulation of the fact that $$\displaystyle{{\mathop{\tau\mathrm{lim}}}_{t\to0}\frac{T(s)x-x}{s^{\alpha}}=0}$$ by means of sequences in $\mathbb{R}$. In fact, to prove this convergence to $0$ we only have to check that $$\frac{p(T(s_n)x-x)}{s_n^{\alpha}}\to0$$ for $n\to\infty$ for every null-sequence $(s_n)_{n\in\mathbb{N}}$ in $[0,\infty)$ and for each $p\in \mathcal{P}$.
\end{remark} \begin{lemma} If $(T(t))_{t\geq0}$ is a bi-continuous semigroup, then $X_{\alpha}$ is invariant under the semigroup. \end{lemma} \noindent \begin{proof} Let $x\in X_{\alpha}$. Then we have that $y_n:=\frac{T(s_n)x-x}{s_n^{\alpha}}$ converges to $0$ with respect to $\tau$ if $(s_n)_{n\in\mathbb{N}}$ is any null-sequence and $n\to\infty$. Moreover, this sequence $(y_n)_{n\in\mathbb{N}}$ is $\|\cdot\|$-bounded by the assumption that $x\in X_{\alpha}$. Whence we conclude \[ \mathop{\tau\mathrm{lim}}_{n\to\infty}T(t)y_n=\mathop{\tau\mathrm{lim}}_{n\to\infty}\frac{T(s_n)T(t)x-T(t)x}{s_n^{\alpha}}=0, \] so that $T(t)x\in X_{\alpha}$. \end{proof} To prove that $(T(t))_{t\geq0}$ is bi-continuous on $X_{\alpha}$ we have to show that the semigroup satisfies all conditions from Definition \ref{def:bicontsemi}. Notice that the local boundedness and the semigroup property are trivial. \begin{proposition} \label{prop:strongcontalpha} If $(T(t))_{t\geq0}$ is a bi-continuous semigroup on $X_0$ and $\alpha\in(0,1)$, then $(T(t))_{t\geq0}$ is strongly $\tau_{F_{\alpha}}$-continuous on $X_{\alpha}$. \end{proposition} \begin{proof}We have to show that $p_{F_{\alpha}}(T(t_n)x-x)\to 0$ for all $p\in \mathcal{P}$ whenever $t_n\downarrow 0$. Let $s_n,t_n>0$ with $s_n,t_n\to 0$. Then \begin{align} \notag\frac{p(T(s_n)T(t_n)x-T(s_n)x-T(t_n)x+x)}{s_n^{\alpha}}&\leq\frac{p(T(t_n)T(s_n)x-T(t_n)x)}{s_n^{\alpha}}+\frac{p(T(s_n)x-x)}{s_n^{\alpha}}\\ &\label{eq:last} =\frac{p(T(t_n)(T(s_n)x-x))}{s_n^{\alpha}}+\frac{p(T(s_n)x-x)}{s_n^{\alpha}}. \end{align} The sequence $(y_n)$ given by $y_n:=\frac{T(s_n)x-x}{s_n^{\alpha}}$ is $\|\cdot\|$-bounded and $\tau$-convergent to $0$, because $x\in X_\alpha$. So that the last term in the previous equation \eqref{eq:last} converges to $0$. But since $\{T(t_n):n\in \mathbb{N}\}$ is bi-equicontinuous, also the first term in \eqref{eq:last} converges to $0$. This proves strong continuity with respect to $\tau_{F_{\alpha}}$.
\end{proof} To conclude with the result that $(T(t))_{t\geq0}$ is bi-continuous on $X_{\alpha}$ we have to show that this restriction is in particular locally bi-equicontinuous. \begin{proposition}Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X_0$. Then $(T(t))_{t\geq0}$ is locally bi-equicontinuous on $F_{\alpha}$.\end{proposition} \begin{proof}Let $(x_n)_{n\in\mathbb{N}}$ be a $\|\cdot\|_{F_{\alpha}}$-bounded sequence which converges to zero with respect to $\tau_{F_{\alpha}}$ and assume that $(T(t)x_n)_{n\in\mathbb{N}}$ does not converge to zero uniformly for $t\in[0,t_0]$ for some $t_0>0$. Hence there exists $p\in\mathcal{P}$, $\delta>0$ and a sequence $(t_n)_{n\in\mathbb{N}}$ of positive real numbers such that \[ p_{F_{\alpha}}(T(t_n)x_n)>\delta \] for all $n\in\mathbb{N}$. As a consequence there exists a sequence $(s_n)_{n\in\mathbb{N}}$ in $\mathbb{R}$ which is a null-sequence such that \[ \frac{p(T(s_n)T(t_n)x_n-T(t_n)x_n)}{s_n^{\alpha}}>\delta \] for each $n\in\mathbb{N}$. Now notice that the sequence $(y_n)_{n\in\mathbb{N}}$ defined by $y_n:=\frac{T(s_n)x_n-x_n}{s_n^{\alpha}}$ is a $\tau$-null sequence since for $q\in\mathcal{P}$: \[ \frac{q(T(s_n)x_n-x_n)}{s_n^{\alpha}}\leq\sup_{s>0}\frac{q(T(s)x_n-x_n)}{s^{\alpha}} \] and the term on the right hand side converges to zero as $n\to\infty$ by assumption. Using the local bi-equicontinuity of the semigroup $(T(t))_{t\geq0}$ with respect to $\tau$, we conclude that $\frac{T(t)T(s_n)x_n-T(t)x_n}{s_n^{\alpha}}$ converges to zero uniformly for $t\in[0,t_0]$, a contradiction. Hence we conclude that $(T(t))_{t\geq0}$ is locally bi-equicontinuous on $F_{\alpha}$. \end{proof} \begin{remark} Notice that the local bi-equicontinuity with respect to $\tau_{F_\alpha}$ holds on the whole space $F_{\alpha}$, while strong $\tau_{F_\alpha}$-continuity holds on $X_{\alpha}$ only. In particular, we will see in Theorem \ref{thm:strong}, that $X_{\alpha}$ is the space of strong $\tau_{F_{\alpha}}$-continuity.
\end{remark} We can summarize the previous results in the following theorem. \begin{theorem} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X_0$. Then the restricted operators $T_\alpha(t):=T(t)|_{X_\alpha}$ to $X_{\alpha}$ form a bi-continuous semigroup. Moreover, the generator $A_\alpha$ of $(T_\alpha(t))_{t\geq 0}$ is the part of $A$ in $X_{\alpha}$ which is continuous with respect to $\tau_{F_{\alpha}}$ and $\tau_{F_{\alpha-1}}$. \end{theorem} \begin{proof} We only have to prove the last part of the theorem, namely that the part of $A$ in $X_{\alpha}$ generates the restricted semigroup on $X_{\alpha}$. The proof goes similarly to that of the proposition in \cite[Chap.~II, Par.~2.3]{EN}. Since the embedding $X_{\alpha}\subseteq X_0$ is continuous for the topologies $\tau_{F_\alpha}$ and $\tau$, we conclude that $A_\alpha\subseteq A|_{X_{\alpha}}$. For the converse take $\lambda\in\mathbb{R}$ large enough such that \[ R(\lambda,A_\alpha)x=\int_0^{\infty}{\mathrm{e}^{-\lambda s}T(s)x\ \mathrm{d} s}=R(\lambda,A)x,\ \ x\in X_{\alpha}. \] For $x\in D(A_{|X_{\alpha}})$ we obtain \[ x=R(\lambda,A)(\lambda-A)x=R(\lambda,A_\alpha)(\lambda-A)x\in D(A_\alpha) \] and hence $A_{|X_{\alpha}}\subseteq A_\alpha$. This proves that the part of $A$ in $X_{\alpha}$ generates the restricted semigroup. \end{proof} By similar reasoning as in Lemma \ref{lem:spacecont} one can prove the following result: \begin{theorem}\label{thm:strong} Let $\alpha\in(0,1)$ and let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X_0$. Then $D(A)$ is $\tau_{F_{\alpha}}$-bi-dense in $X_{\alpha}$ and \begin{equation}\label{eq:xalphacont} X_{\alpha}=\bigl\{x\in F_{\alpha}:\ \tau_{F_{\alpha}}\lim_{t\to0}T(t)x=x\bigr\}, \end{equation} i.e., for $x\in F_\alpha$ the mapping $t\mapsto T(t)x$ is $\tau_{F_\alpha}$-continuous, if and only if $x\in X_\alpha$. \end{theorem} \begin{proof} Denote by $X_{\alpha,\mathrm{cont}}$ the right-hand side of \eqref{eq:xalphacont}, i.e., the space of $\tau_{F_\alpha}$-strong continuity.
Notice first of all that $D(A)\subseteq \underline{X}_\alpha\subseteq X_\alpha\subseteq X_{\alpha,\mathrm{cont}}$. \noindent Suppose $x\in X_{\alpha,\mathrm{cont}}$. For each $n\in \mathbb{N}$ we have \begin{align*}x_n:={n\int_0^\frac{1}{n}{T_\alpha(t)x\ \mathrm{d} t}}={n\int_0^\frac{1}{n}{T(t)x\ \mathrm{d} t}}\in D(A)\end{align*} as a $\tau$- and $\tau_{F_\alpha}$-convergent Riemann integral. Whence it follows that $x_n\stackrel{\tau_{F_{\alpha}}}\to x$, whereas the $\|\cdot\|_{F_{\alpha}}$-boundedness of $(x_n)_{n\in\mathbb{N}}$ is clear. We conclude that $x\in X_\alpha$ (because $X_\alpha$ is bi-closed in $F_\alpha$), implying $X_{\alpha,\mathrm{cont}}\subseteq X_\alpha$. As a byproduct we also obtain that $D(A)$ is bi-dense in $X_\alpha$. \end{proof} \begin{proposition} For $0\leq \alpha< \beta\leq 1$ we have \[ X_1=D(A)\hookrightarrow F_\beta\hookrightarrow \underline{X}_\alpha\subseteq X_\alpha, \] where the embeddings are continuous for the respective norms and for the respective topologies $\tau_{1}$, $\tau_{F_\beta}$, $\tau_{F_\alpha}$. The space $D(A)$ is bi-dense in $X_\alpha$, and as a consequence $X_\beta$ is bi-dense in $X_\alpha$. \end{proposition} \subsection{Representation of H\"older spaces by generators} Analogously to Proposition \ref{prop:FavCont} we have a representation of the H\"older space $X_{\alpha}$ by means of the semigroup generator. \begin{theorem} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup with negative growth bound and generator $A$. For $\alpha\in(0,1)$ we have \begin{equation} \label{eq:Xalphadef} X_{\alpha}=\Bigl\{x\in X_0:\ \mathop{\tau\mathrm{lim}}_{\lambda\to\infty}{\lambda^{\alpha}AR(\lambda,A)x}=0\text{ and } \sup_{\lambda>0}{\|\lambda^{\alpha}AR(\lambda,A)x\|}<\infty\Bigr\}.\end{equation} \end{theorem} \begin{proof} Suppose $x\in X_{\alpha}$. From Proposition \ref{prop:FavCont} we deduce immediately \[ \sup_{\lambda>0}{\|\lambda^{\alpha}AR(\lambda,A)x\|}<\infty. \] Let now $\varepsilon>0$ be arbitrary.
Then, since $x\in X_{\alpha}$, for $p\in\mathcal{P}$ we can find $\delta>0$ such that $0\leq t<\delta$ implies that $\frac{p(T(t)x-x)}{t^{\alpha}}<\varepsilon$. Recall the following formula for the resolvent: \begin{align*}\lambda^{\alpha}AR(\lambda,A)x=\lambda^{\alpha+1}\int_0^{\infty}{\mathrm{e}^{-\lambda s}(T(s)x-x)\ \mathrm{d} s}.\end{align*} From this we deduce \begin{align*} p(\lambda^{\alpha}AR(\lambda,A)x)&\leq\lambda^{\alpha+1}\int_0^{\infty}{\mathrm{e}^{-\lambda s}\cdot\frac{p(T(s)x-x)}{s^{\alpha}}s^{\alpha}\ \mathrm{d} s}\\ &=\lambda^{\alpha+1}\int_0^{\delta}{\mathrm{e}^{-\lambda s}\cdot\frac{p(T(s)x-x)}{s^{\alpha}}s^{\alpha}\ \mathrm{d} s}+\lambda^{\alpha+1}\int_{\delta}^{\infty}{\mathrm{e}^{-\lambda s}\cdot\frac{p(T(s)x-x)}{s^{\alpha}}s^{\alpha}\ \mathrm{d} s}\\ &<\lambda^{\alpha+1}\varepsilon\int_0^{\delta}{\mathrm{e}^{-\lambda s}s^{\alpha}\ \mathrm{d} s}+\lambda^{\alpha+1}\int_{\delta}^{\infty}{\mathrm{e}^{-\lambda s}\cdot\frac{\|T(s)x-x\|}{s^{\alpha}}s^{\alpha}\ \mathrm{d} s}\\ &\leq\lambda^{\alpha+1}\varepsilon\int_0^{\delta}{\mathrm{e}^{-\lambda s}s^{\alpha}\ \mathrm{d} s}+\|x\|_{F_\alpha}\lambda^{\alpha+1}\int_{\delta}^{\infty}{\mathrm{e}^{-\lambda s}\cdot s^{\alpha}\ \mathrm{d} s}\\ &=\varepsilon\int_{0}^{\lambda\delta}{\mathrm{e}^{-t}t^{\alpha}\ \mathrm{d} t}+\|x\|_{F_\alpha}\int_{\lambda\delta}^{\infty}{\mathrm{e}^{-t}t^{\alpha}\ \mathrm{d} t}\\ &\leq L\varepsilon+\|x\|_{F_\alpha}\int_{\lambda\delta}^{\infty}{\mathrm{e}^{-t}t^{\alpha}\ \mathrm{d} t} \end{align*} where $L:=\int_0^{\infty}{\mathrm{e}^{-t}t^{\alpha}\ \mathrm{d} t}<\infty$. Notice that the last part of the sum tends to zero if $\lambda\to\infty$ since we fixed $\delta>0$ in the beginning. So that $\mathop{\tau\mathrm{lim}}_{\lambda\to\infty} \lambda^{\alpha}AR(\lambda,A)x=0$.
\noindent For the converse inclusion suppose that $\mathop{\tau\mathrm{lim}}_{\lambda\to\infty}{\lambda^{\alpha}AR(\lambda,A)x}=0$ and $\sup_{\lambda>0}{\|\lambda^{\alpha}AR(\lambda,A)x\|}<\infty$, the latter immediately implying $\|x\|_{F_\alpha}<\infty$ (see Proposition \ref{prop:FavCont}). We have to show now that $\mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t^{\alpha}}}=0$. For $\lambda>0$ define $x_{\lambda}=\lambda R(\lambda,A)x$ and $y_{\lambda}=AR(\lambda,A)x$, then we have $$x=\lambda R(\lambda,A)x-AR(\lambda,A)x=x_{\lambda}-y_{\lambda}.$$ Let $\varepsilon>0$ be arbitrary. First notice that for $p\in\mathcal{P}$ \begin{equation}\label{eq:xl} \frac{p(T(t)x_{\lambda}-x_{\lambda})}{t^{\alpha}}\leq\frac{1}{t^{\alpha}}p(T(t)\lambda R(\lambda,A)x-\lambda R(\lambda,A)x)\leq\frac{\lambda^{1-\alpha}}{t^{\alpha}}\int_0^t{p(T(s)\lambda^{\alpha}AR(\lambda,A)x)\ \mathrm{d} s}. \end{equation} By assumption the term $\lambda^{\alpha}AR(\lambda,A)x$ is norm-bounded and converges in the topology $\tau$ to zero as $\lambda\to\infty$, hence by the local bi-equicontinuity we conclude that $p(T(s)\lambda^{\alpha}AR(\lambda,A)x)\to0$ uniformly for $s\in [0,1]$. Now let $\lambda_0>1$ be so large that for $\lambda>\lambda_0$ and $s\in [0,1]$ we have $p(T(s)\lambda^{\alpha}AR(\lambda,A)x)<\varepsilon$. If $t<\frac{1}{\lambda_0}$, then $\lambda:=\frac{1}{t}>\lambda_0$ and we obtain that the expression in \eqref{eq:xl} becomes smaller than $\varepsilon$. For the estimate of the second part we observe: \begin{equation*} \frac{p(T(t)y_{\lambda}-y_{\lambda})}{t^{\alpha}}\leq\frac{1}{(t\lambda)^{\alpha}}p(T(t)\lambda^{\alpha}AR(\lambda,A)x)+\frac{1}{(t\lambda)^{\alpha}}p(\lambda^{\alpha}AR(\lambda,A)x). 
\end{equation*} Now by taking $t<\frac1{\lambda_0}$ and $\lambda:=\frac{1}{t}$ we obtain the estimate: \begin{equation}\label{eq:yl} \frac{p(T(t)y_{\lambda}-y_{\lambda})}{t^{\alpha}}\leq p(T(\tfrac{1}{\lambda})\lambda^{\alpha}AR(\lambda,A)x)+p(\lambda^{\alpha}AR(\lambda,A)x)<\varepsilon+\varepsilon, \end{equation} by the choice of $\lambda_0$. Altogether we obtain for $t<\frac1{\lambda_0}$ that $ \frac{p(T(t)x-x)}{t^{\alpha}}<3\varepsilon$, showing \[ \mathop{\tau\mathrm{lim}}_{t\to0}\frac{T(t)x-x}{t^{\alpha}}=0, \] i.e., $x\in X_\alpha$ as required. \end{proof} \begin{remark} We remark that it is possible to define the space $X_{\alpha}(A)$ as the right-hand side of \eqref{eq:Xalphadef} for operators $A$ which are not necessarily generators of bi-continuous semigroups but whose resolvent fulfills certain continuity assumptions with respect to a topology satisfying, say, Assumption \ref{asp:bicontspace}. \end{remark} Again, we put our spaces $X_{\alpha}$ in the general context of Theorem \ref{thm:iden}. \begin{proposition}\label{cor:ExtFav2} For $\alpha\in\left(0,1\right)$ and $\mathcal{A}$, $\lambda$ and $\mathscr{E}$ as in Theorem \ref{thm:iden} we have \begin{align*} X_{-\alpha}&=\Bigl\{(\lambda-\mathcal{A})y\in X_{-1}:\ \sup_{t>0}{\frac{\left\|T(t)y-y\right\|}{t^{1-\alpha}}}<\infty,\ \mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)y-y}{t^{1-\alpha}}}=0\Bigl\}. \end{align*} \end{proposition} Finally, we extend the scale of spaces $X_{\alpha}$ to the whole range $\alpha\in\mathbb{R}$. \begin{definition} For $\alpha\in\mathbb{R}\setminus\mathbb{Z}$ we write $\alpha=m+\beta$ with $m\in\mathbb{Z}$ and $\beta\in (0,1]$, and define \[ X_{\alpha}(A):=X_{\beta}(A_m), \] also with the corresponding norms. The locally convex topology on $X_{\alpha}$ comes from $X_{\beta}$ via the mapping $A_m$. 
\end{definition} \begin{remark} We summarize all previous results in the following diagram: \footnotesize{ \begin{align*} \xymatrix{ X_1\ar@/^2pc/[rrrrr]^{A}\ar[r]\ar[d]&\underline{X}_{\alpha}\ar[r]\ar[d]&X_{\alpha}\ar[r]\ar[d]\ar@/^2pc/[rrrrr]^{A_{\alpha-1}}&F_{\alpha}\ar[r]\ar[d]&\underline{X}_0\ar[r]\ar[d]&X_0\ar[r]\ar[d]&\underline{X}_{\alpha-1}\ar[r]\ar[d]&X_{\alpha-1}\ar[r]\ar[d]&F_{\alpha-1}\ar[r]\ar[d]&X_{-1}\ar[d]\\ X_1\ar[r]&\underline{X}_{\alpha}\ar[r]\ar@/_2pc/[rrrrr]_{\underline{A}_{\alpha-1}}&X_{\alpha}\ar[r]&F_{\alpha}\ar@/_2pc/[rrrrr]_{{A_{-1}}_{|F_{\alpha}}}\ar[r]&\underline{X}_0\ar[r]&X_0\ar[r]\ar@/_2pc/[rrrr]_{A_{-1}}&\underline{X}_{\alpha-1}\ar[r]&X_{\alpha-1}\ar[r]&F_{\alpha-1}\ar[r]&X_{-1} } \end{align*} }\\\normalsize where $\alpha\in\left(0,1\right)$. Here $A_{\alpha-1}$ and $\underline{A}_{\alpha-1}$ are defined to be the part of $A_{-1}$ in $X_{\alpha-1}$ and the part of $\underline{A}_{-1}$ in $\underline{X}_{\alpha-1}$, respectively and are all continuous with respect to the norms and topologies on the spaces. In addition we recall that $X_{\alpha-1}$ and $\underline{X}_{\alpha-1}$ are the extrapolation spaces of $X_{\alpha}(A_{-1})$ and $\underline{X}_{\alpha}(A_{-1})$, respectively. This shows that we can extend the space $X_{\alpha}$ from $\alpha\in\left(0,1\right)$ to $\alpha\in\mathbb{R}$ by extra- and interpolation. All horizontal arrows are inclusions which are all continuous, whereas the vertical arrows are the action(s) of the semigroup(s). All spaces are dense in the underlined ones that contain them, while the spaces without underlining are bi-dense in each of the bigger ones. \end{remark} \section{Examples}\label{sec:examp} In this section we present examples for extrapolation and intermediate spaces for (generators of) bi-continuous semigroups. We will use Theorem \ref{thm:iden} and its variants to identify the space $X_{\alpha}$ with $\alpha<0$. 
\subsection{The translation semigroup} Let $X_0=\mathrm{C}_{\mathrm{b}}(\mathbb{R})$, the space of bounded and continuous functions equipped with the supremum norm $\|\cdot\|_{\infty}$ and consider thereon the compact-open topology $\tau_{\mathrm{co}}$ generated by the family of seminorms $\mathcal{P}=\{p_K:\ K\subseteq\mathbb{R}\ \text{compact}\}$, where \[ p_K(f)=\sup_{x\in K}{|f(x)|},\quad f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}). \] The left translation semigroup $(T(t))_{t\geq0}$ defined by \[ T(t)f(x)=f(x+t),\quad t\geq0 \] is bi-continuous on $X_0$ with respect to $\tau_{\mathrm{co}}$. The generator $A$ of this semigroup is given by the first derivative $Af=f'$ on the domain (see \cite{Ku}) \[ D(A)=\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ f\ \text{is differentiable and}\ f'\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})\}. \] The space of strong continuity is $\underline{X}_0=\mathrm{UC}_{\mathrm{b}}(\mathbb{R})$, the space of all bounded, uniformly continuous functions. We use Theorem \ref{thm:iden} to determine the corresponding extrapolation spaces. To this purpose let $\mathscr{E}=\mathscr{D}'(\mathbb{R})$ be the space of all distributions on $\mathbb{R}$, let $\mathcal{A}:\mathscr{D}'(\mathbb{R})\to\mathscr{D}'(\mathbb{R})$ be the distributional derivative and let $i:\mathrm{C}_{\mathrm{b}}(\mathbb{R})\to\mathscr{D}'(\mathbb{R})$ be the regular embedding. From Theorem \ref{thm:iden} it then follows \begin{align*} \underline{X}_{-1}&=\{F\in\mathscr{D}'(\mathbb{R}):\ F=f-Df\ \text{for some}\ f\in \mathrm{UC}_{\mathrm{b}}(\mathbb{R})\},\\ X_{-1}&=\{F\in\mathscr{D}'(\mathbb{R}):\ F=f-Df\ \text{for some}\ f\in \mathrm{C}_{\mathrm{b}}(\mathbb{R})\}. 
\end{align*} For the Favard and H\"older spaces we have \begin{align*} F_{\alpha}&=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ \sup_{\substack{x,y\in\mathbb{R}\\x\neq y}}{\frac{|f(x)-f(y)|}{|x-y|^{\alpha}}}<\infty\Bigl\}=\mathrm{C}_{\mathrm{b}}^{\alpha}(\mathbb{R}),\\ \underline{X}_{\alpha}&=\Bigl\{f\in\mathrm{UC}_{\mathrm{b}}(\mathbb{R}):\ \lim_{t\to0}{\sup_{\substack{x,y\in\mathbb{R}\\0<|x-y|<t}}{\frac{|f(x)-f(y)|}{|x-y|^{\alpha}}}}=0\Bigl\}=\mathrm{h}_b^{\alpha}(\mathbb{R}). \end{align*} Hence $F_{\alpha}$ can be identified with the space of bounded $\alpha$-H\"older-continuous functions and $\underline{X}_{\alpha}$ with the so-called little H\"older space $\mathrm{h}_b^{\alpha}(\mathbb{R})$. For the abstract H\"older space $X_{\alpha}$ corresponding to the bi-continuous semigroup we obtain the local version $\mathrm{h}_{b,\text{loc}}^{\alpha}(\mathbb{R})$ of the little H\"older space: \begin{align*} \mathrm{h}_{b,\text{loc}}^{\alpha}&=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}^{\alpha}(\mathbb{R}):\ \lim_{t\to0}{\sup_{\substack{x,y\in K\\0<|x-y|<t}}{\frac{|f(x)-f(y)|}{|x-y|^{\alpha}}}}=0\ \text{for each}\ K\subseteq\mathbb{R}\ \text{compact} \Bigr\}. \end{align*} Then $X_{\alpha}=h_{b,\text{loc}}^{\alpha}(\mathbb{R})$. It is easy to see $\underline{X}_{\alpha}\subsetneq X_{\alpha}\subsetneq F_{\alpha}$. The extrapolated Favard class $F_0$ can be identified with $\mathrm{L}^{\infty}(\mathbb{R})$. We know from the general theory that $F_0(T)=(1-D)F_1(T)$ where $F_1(T)$ are precisely the bounded Lipschitz functions on $\mathbb{R}$. Now using the fact that $\mathrm{Lip}_{\mathrm{b}}(\mathbb{R})=\mathrm{W}^{1,\infty}(\mathbb{R})$ with equivalent norms we obtain the result. For an alternative proof we refer to \cite[Chapter~II.5(b)]{EN}. 
Moreover, we obtain \[ F_{-\alpha}=\Bigl\{F\in\mathscr{D}'(\mathbb{R}):\quad F=f-Df\ \text{for some}\ f\in\mathrm{C}_{\mathrm{b}}^{1-\alpha}(\mathbb{R})\Bigr\}, \] and \[ X_{-\alpha}=\Bigl\{F\in\mathscr{D}'(\mathbb{R}):\quad F=f-Df\ \text{for some}\ f\in\mathrm{h}_{b,\text{loc}}^{1-\alpha}(\mathbb{R})\Bigr\}, \] which follow from Corollary \ref{cor:ExtFav}. We summarize this example by the following diagram: \[ \mathrm{C}_{\mathrm{b}}^1(\mathbb{R})\hookrightarrow\mathrm{Lip}_{\mathrm{b}}(\mathbb{R})\hookrightarrow\mathrm{h}_b^{\alpha}(\mathbb{R})\hookrightarrow\mathrm{h}_{b,\text{loc}}^{\alpha}(\mathbb{R})\hookrightarrow\mathrm{C}_{\mathrm{b}}^{\alpha}(\mathbb{R})\hookrightarrow\mathrm{UC}_{\mathrm{b}}(\mathbb{R})\hookrightarrow\mathrm{C}_{\mathrm{b}}(\mathbb{R})\hookrightarrow\mathrm{L}^{\infty}(\mathbb{R}) \] according to the following abstract chain of spaces \[ X_1\hookrightarrow F_1\hookrightarrow\underline{X}_{\alpha}\hookrightarrow X_{\alpha}\hookrightarrow F_{\alpha}\hookrightarrow \underline{X}_0\hookrightarrow X_0\hookrightarrow F_0 \] for $\alpha\in(0,1)$. For the higher order spaces we have: \begin{align*} X_n:=D(A^n)&=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ f\ \text{is}\ n\text{-times differentiable and}\ f^{(n)}\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})\Bigr\}\\ &=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ f^{(k)}\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}),\ k=1,\dots,n\Bigr\}=\mathrm{C}_{\mathrm{b}}^n(\mathbb{R}) \end{align*} for $n\in\mathbb{N}$. We denote by $F_{n+\alpha}$ the Favard space which belongs to the restricted semigroup on $X_n$. 
\begin{align*} F_{n+\alpha} =\Bigl\{f\in\mathrm{C}_{\mathrm{b}}^n(\mathbb{R}):\ \sup_{\substack{x,y\in\mathbb{R}\\x\neq y}}{\frac{|f^{(n)}(x)-f^{(n)}(y)|}{|x-y|^{\alpha}}}<\infty\Bigr\}=\mathrm{C}_{\mathrm{b}}^{n,\alpha}(\mathbb{R}) \end{align*} This example extends Nagel, Nickel, Romanelli \cite[Sec.{} 3.2]{NagelIdent}. \subsection{The multiplication semigroup} Let $\Omega$ be a locally compact space and $X_0=\mathrm{C}_{\mathrm{b}}(\Omega)$. Let $q:\Omega\to\mathbb{C}$ be continuous such that $\sup_{x\in\Omega}{\text{Re}(q(x))}<0$. We define the multiplication operator $M_q:D(M_q)\to\mathrm{C}_{\mathrm{b}}(\Omega)$ by $M_qf=qf$ on the maximal domain \[ D(M_q)=\{f\in \mathrm{C}_{\mathrm{b}}(\Omega):\ qf\in\mathrm{C}_{\mathrm{b}}(\Omega)\}. \] This operator generates the semigroup $(T_q(t))_{t\geq0}$ defined by \[ (T_q(t)f)(x)=\mathrm{e}^{tq(x)}f(x),\quad t\geq0, x\in\Omega, f\in\mathrm{C}_{\mathrm{b}}(\Omega), \] which is bi-continuous on $\mathrm{C}_{\mathrm{b}}(\Omega)$ with respect to the compact-open topology. Now let $\mathscr{E}=\mathrm{C}(\Omega)$ be the space of all continuous functions on $\Omega$, let $\mathcal{M}_q:\mathrm{C}(\Omega)\to\mathrm{C}(\Omega)$ be the multiplication operator $\mathcal{M}_qf:=qf$ and let $i:\mathrm{C}_{\mathrm{b}}(\Omega)\to\mathrm{C}(\Omega)$ be the identity. Then by Theorem \ref{thm:iden} we obtain \[ X_{-1}=\{g\in\mathrm{C}(\Omega):\ q^{-1}g\in\mathrm{C}_{\mathrm{b}}(\Omega)\}. \] For $\alpha\in(0,1)$, the (abstract) Favard space is given by \[ F_{\alpha}=\{f\in\mathrm{C}_{\mathrm{b}}(\Omega):\ |q|^{\alpha}f\in\mathrm{C}_{\mathrm{b}}(\Omega)\}. \] To see this suppose first that $f\in F_{\alpha}$, hence $\|f\|_{\alpha}<\infty$ which means in particular that \[ \sup_{t>0}\sup_{x\in\Omega}{\frac{|\mathrm{e}^{tq(x)}f(x)-f(x)|}{t^{\alpha}}}<\infty. 
\] By specializing $t=\frac{1}{|q(x)|}$ we obtain that \[ {\bigl|\mathrm{e}^{\frac{q(x)}{|q(x)|}}-1\bigr|\cdot|f(x)|\cdot|q(x)|^{\alpha}} \] is bounded uniformly in $x$, since \begin{align}\label{eqn:expf} \frac{|\mathrm{e}^{tq(x)}f(x)-f(x)|}{t^{\alpha}}=\frac{|\mathrm{e}^{tq(x)}-1|\cdot|f(x)||q(x)|^{\alpha}}{|q(x)|^{\alpha}t^{\alpha}}. \end{align} Hence $|q|^{\alpha}f\in\mathrm{C}_{\mathrm{b}}(\Omega)$. For the converse assume that $|q|^{\alpha}f\in\mathrm{C}_{\mathrm{b}}(\Omega)$. Since the function $g(z)=\frac{|\mathrm{e}^z-1|}{|z|^{\alpha}}$ is bounded on the left half plane we obtain that $f\in F_{\alpha}$ by \eqref{eqn:expf}. This proves the equality. We also conclude that $F_{\alpha}=X_{\alpha}$ since \[ \sup_{x\in K}{\left|\frac{\mathrm{e}^{tq(x)}f(x)-f(x)}{t^{\alpha}}\right|}=\sup_{x\in K}{\left|\frac{\mathrm{e}^{tq(x)}-1}{tq(x)}\right|\cdot\left|f(x)\right|\cdot\left|q(x)\right|^{\alpha}t^{1-\alpha}} \] for each compact set $K\subseteq\Omega$. The extrapolated Favard spaces are then given by \[ F_{-\alpha}=\bigl\{f\in\mathrm{C}_{\mathrm{b}}(\Omega):\ |q|^{1-\alpha} f\in\mathrm{C}_{\mathrm{b}}(\Omega)\bigr\}=X_{-\alpha}. \] The spaces $\underline{X}_{\alpha}$ are more difficult to describe in general, since the space of strong continuity $\underline{X}_0$ depends substantially on the choice of $q$. For example, if $\frac{1}{q}\in\mathrm{C}_0(\Omega)$, then $\underline{X}_0=\mathrm{C}_0(\Omega)$. To see this first notice that $\mathrm{C}_0(\Omega)\subseteq\underline{X}_0$ trivially. On the other hand \[ \left|f\right|=\left|\frac{1}{q}\right|\cdot\left|fq\right| \] which shows that $D(M_q)\subseteq\mathrm{C}_0(\Omega)$ and hence that $\underline{X}_0\subseteq\mathrm{C}_0(\Omega)$. Now one obtains \[ \underline{X}_{\alpha}=\{f\in\mathrm{C}_0(\Omega): |q|^{\alpha}f\in\mathrm{C}_0(\Omega)\}, \] and \[ \underline{X}_{-\alpha}=\{f\in\mathrm{C}_0(\Omega):\ |q|^{1-\alpha}f\in\mathrm{C}_0(\Omega)\}. \] This example extends Nagel, Nickel, Romanelli \cite[Sec.{} 3.2]{NagelIdent}. 
\subsection{The Gau\ss{}-Weierstra\ss{} semigroup} On $X_0=\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)$ ($d\geq1$) we consider the Gau\ss{}-Weierstra\ss{} semigroup, defined by $T(0)=I$ and \begin{align}\label{eqn:Gauss} T(t)f(x)=\frac{1}{(4\pi t)^{\frac{d}{2}}}\int_{\mathbb{R}^d}{\mathrm{e}^{-\frac{\left|x-y\right|^2}{4t}}f(y)\ \mathrm{d} y},\quad t>0,\quad x\in\mathbb{R}^d. \end{align} If one equips $\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)$ again with the compact-open topology one concludes that $(T(t))_{t\geq0}$ defined by \eqref{eqn:Gauss} is bi-continuous and its space of strong continuity is $\mathrm{UC}_{\mathrm{b}}(\mathbb{R}^d)$. From \cite[Proposition~2.3.6]{LB} we know that the generator $A$ of this semigroup is given by $Af=\Delta f$ on the maximal domain \[ D(A)=\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d):\quad \Delta f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)\}, \] where $\Delta$ is the distributional Laplacian. Now the extrapolation space can again be obtained by Theorem \ref{thm:iden}. Take $\mathscr{E}$ again to be the space of all distributions $\mathscr{D}'(\mathbb{R}^d)$ on $\mathbb{R}^d$ with $\mathcal{A}$ the distributional Laplacian and $i:\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)\to\mathscr{D}'(\mathbb{R}^d)$ the regular embedding. Applying Theorem \ref{thm:iden} we obtain \[ X_{-1}=\{F\in\mathscr{D}'(\mathbb{R}^d):\ F=f-\Delta f\ \text{for some}\ f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)\}. \] The domain of the generator can explicitly be written down, see, e.g., \cite{LB} or \cite{Lunardi}. For $d=1$ the domain is given by \[ D(\Delta)=\mathrm{C}_{\mathrm{b}}^2(\mathbb{R}), \] while for $d\geq2$ \[ D(\Delta)=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)\cap\mathrm{W}^{2,p}_{\text{loc}}(\mathbb{R}^d),\ \text{for all}\ p\in[1,\infty),\ \text{and}\ \Delta f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d)\Bigr\}. 
\] For $\alpha\in(0,1)\setminus\{\frac{1}{2}\}$ the Favard spaces are \[ F_{\alpha}=\mathrm{C}_{\mathrm{b}}^{2\alpha}(\mathbb{R}^d) \] while for $\alpha=\frac{1}{2}$ one obtains \[ F_{\frac{1}{2}}=\Bigl\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}^d):\ \sup_{x\neq y}{\frac{|f(x)+f(y)-2f(\frac{x+y}{2})|}{|x-y|}}<\infty\Bigr\}. \] From Corollary \ref{cor:ExtFav} it follows that \[ F_{-\alpha}=\Bigl\{F\in\mathscr{D}'(\mathbb{R}^d):\ F=f-\Delta f\ \text{for some}\ f\in\mathrm{C}_{\mathrm{b}}^{2(1-\alpha)}(\mathbb{R}^d)\Bigr\} \] and \[ F_{-\frac{1}{2}}=\Bigl\{F\in\mathscr{D}'(\mathbb{R}^d):\ F=f-\Delta f\ \text{for some}\ f\in F_{\frac{1}{2}}\Bigr\}. \] \subsection{The left implemented semigroup} \label{subs:implem} Let $X_0:=\mathscr{L}(E)$ be the space of bounded linear operators on a Banach space $E$. We equip $\mathscr{L}(E)$ with the operator norm and the strong topology $\tau_{\text{stop}}$ generated by the family of seminorms $\mathcal{P}=\{p_x:\ x\in E\}$, where \[ p_x(B)=\|Bx\|,\quad B\in\mathscr{L}(E). \] Let $(S(t))_{t\geq0}$ be a $C_0$-semigroup with negative growth bound on a Banach space $E$. The semigroup $(\mathcal{U}(t))_{t\geq0}$ on $X_0$ defined by \[ \mathcal{U}(t)B=S(t)B,\quad B\in X_0,\quad t\geq0, \] is called the semigroup left implemented by $(S(t))_{t\geq0}$. Note that $(\mathcal{U}(t))_{t\geq0}$ has negative growth bound and is a bi-continuous semigroup if $(S(t))_{t\geq0}$ is a $C_0$-semigroup. We determine the intermediate and extrapolation spaces for this semigroup. We can write: \begin{align*} \|B\|_{F_{\alpha}(\mathcal{U})}&=\sup_{t>0}{\frac{\|\mathcal{U}(t)B-B\|}{t^{\alpha}}}=\sup_{t>0}{\frac{\|S(t)B-B\|}{t^{\alpha}}}\\ &=\sup_{t>0}{\sup_{\|x\|\leq1}{\frac{\|S(t)Bx-Bx\|}{t^{\alpha}}}} =\sup_{\|x\|\leq1}{\sup_{t>0}{\frac{\|S(t)Bx-Bx\|}{t^{\alpha}}}} =\sup_{\|x\|\leq1}{\|Bx\|_{F_{\alpha}(S)}}. \end{align*} From this we conclude the following. 
\begin{proposition}\label{prop:implFav} Let $(\mathcal{U}(t))_{t\geq0}$ be the semigroup which is left-implemented by $(S(t))_{t\geq0}$. Then \[F_{\alpha}(\mathcal{U})=\mathscr{L}(E,F_{\alpha}(S))\] with the same norms.\end{proposition} From the definition we obtain: \begin{align*} X_{\alpha}(\mathcal{U})&=\Bigl\{B\in\mathscr{L}(E):\ \mathop{\tau\mathrm{lim}}_{t\to0}{\frac{\mathcal{U}(t)B-B}{t^{\alpha}}}=0,\ \|B\|_{F_{\alpha}(\mathcal{U})}<\infty\Bigr\}\\ &=\Bigl\{B\in\mathscr{L}(E):\ \lim_{t\to0}{\frac{\|S(t)Bx-Bx\|}{t^{\alpha}}}=0\quad \text{for all } x\in E\Bigr\},\\ \underline{X}_{\alpha}(\mathcal{U})&=\Bigl\{B\in \mathscr{L}(E):\ \lim_{t\to0}{\frac{\|S(t)B-B\|}{t^{\alpha}}}=0\Bigr\}. \end{align*} \begin{proposition}Let $(\mathcal{U}(t))_{t\geq0}$ be the semigroup which is left-implemented by $(S(t))_{t\geq0}$. Then \[X_{\alpha}(\mathcal{U})=\mathscr{L}(E,X_{\alpha}(S))\] with the same norms.\end{proposition} After the discussion of abstract Favard and H\"older spaces of the implemented semigroup, we turn to extrapolation spaces. These spaces have been studied by Alber in \cite{Alber2001} but only for the $C_0$-semigroup $(\underline{\mathcal{U}}(t))_{t\geq 0}$ on the space $\underline{X}_0=\overline{D(\mathcal{G})}$ which depends on the semigroup $(\underline{\mathcal{U}}(t))_{t\geq 0}$. First we recall a result from \cite{Alber2001}: The generator $\mathcal{G}$ of $(\mathcal{U}(t))_{t\geq0}$ is given by \[ \mathcal{G}V=A_{-1}V, \] on \[ D(\mathcal{G})=\left\{V\in\mathscr{L}(E):\quad A_{-1}V\in\mathscr{L}(E)\right\}, \] where $A_{-1}$ denotes the generator of the extrapolated $C_0$-semigroup $(S_{-1}(t))_{t\geq 0}$ on $E_{-1}$. The extrapolation spaces $X_{-1}$ and $\underline{X}_{-1}$ can now be obtained by Theorem \ref{thm:iden}. 
For that let \[ \mathscr{E}=\bigl\{S:E\to E_{-\infty}:\ \text{linear and continuous}\bigr\}, \] where $E_{-\infty}$ is the universal extrapolation space of $(S(t))_{t\geq 0}$ (see the paragraph preceding Theorem \ref{thm:iden}), and let $i:\mathscr{L}(E)\to\mathscr{E}$ be the identity. Consider the operator-valued multiplication operator \[ \mathcal{A}V=A_{-\infty}V,\quad V\in\mathscr{E} \] where $A_{-\infty}x=A_{-(n-1)}x$ for $x\in E_{-n}$. Notice that $\lambda-\mathcal{A}:X_0\to\mathscr{E}$ is injective for $\lambda>0$ since $A_{-\infty}$ and $A_{-1}$ coincide on $E$. Hence by applying Theorem \ref{thm:iden} we obtain \[ X_{-1}=\left\{A_{-1}V:\quad V\in\mathscr{L}(E)\right\} \] and \[ \underline{X}_{-1}=\left\{A_{-1}V:\quad V\in\underline{X}_0\right\}. \] From this we conclude the following description for $X_{-1}$: \[ X_{-1}=\Bigl\{V\in\mathscr{L}(E,E_{-1}):\ \exists(V_n)_{n\in\mathbb{N}}\subseteq\mathscr{L}(E)\text{ with } V_n\to V\text{ strongly}\Bigr\}=\overline{\mathscr{L}(E)}^{\mathscr{L}_{\mathrm{stop}}(E,E_{-1})}. \] And similarly for $\underline{X}_{-1}$: \begin{align*} \underline{X}_{-1}&=\Bigl\{V\in\mathscr{L}(E,E_{-1}):\ \exists(V_n)_{n\in\mathbb{N}}\subseteq\mathscr{L}(E)\text{ with } V_n\to V\text{ in $\mathscr{L}(E,E_{-1})$}\Bigr\}=\overline{\mathscr{L}(E)}^{\mathscr{L}(E,E_{-1})}. \end{align*} This is a result of Alber, see \cite{Alber2001}, which we could recover as a simple consequence of the abstract techniques described in this paper. Finally, we obtain by Corollary \ref{cor:ExtFav} that for $\alpha\in (0,1)$ \[ F_{-\alpha}(\mathcal{U})=A_{-1}\mathscr{L}(E,F_{1-\alpha}(S)) \quad\text{and}\quad X_{-\alpha}(\mathcal{U})=A_{-1}\mathscr{L}(E,X_{1-\alpha}(S)). 
\] \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \parindent0pt \end{document}
\begin{document} \title{Boundary Values of Functions of Dirichlet Spaces $L^1_2$ on Capacitary Boundaries} \author{V.~Gol'dshtein and A.~Ukhlov} \begin{abstract} We prove that any weakly differentiable function with square integrable gradient can be extended to a capacitary boundary of any simply connected plane domain $\Omega\ne\mathbb R^2$ except a set of a conformal capacity zero. For locally connected at boundary points domains the capacitary boundary coincides with the Euclidean one. A concept of a capacitary boundary was proposed by V.~Gol'dshtein and S.~K.~Vodop'yanov in 1978 for a study of boundary behavior of quasi-conformal homeomorphisms. We prove in details the main properties of the capacitary boundary. An abstract version of the extension property for more general classes of plane domains is discussed also. \end{abstract} \maketitle \footnotetext{\textbf{Key words and phrases:} Sobolev Spaces, Conformal Mappings.} \footnotetext{\textbf{2000 Mathematics Subject Classification:} 46E35, 30C65, 30C85.} \section{Introduction } Let $\Omega$ be a domain in $ \mathbb R^2$. We consider a Dirichlet space (a uniform Sobolev space) $L^1_2(\Omega)$ of locally integrable functions with the square integrable weak gradient $\nabla u \in L_2(\Omega)$ equipped with the seminorm $$ \|u | L^1_2(\Omega)\|= \|\nabla u | L_2(\Omega)\|. $$ The paper is devoted to study of the boundary behavior for functions $u \in L^1_2(\Omega)$. By the standard definition functions of $L^1_2(\Omega)$ are defined only up to a set of measure zero, but they can be redefined quasieverywhere i.~e. up to a set of conformal capacity zero. Indeed, every function $u\in L^1_2(\Omega)$ has a unique quasicontinuous representation $\tilde{u}\in L^1_2(\Omega)$. 
A function $\tilde{u}$ is termed quasicontinuous if for any $\varepsilon >0$ there is an open set $U_{\varepsilon}$ such that the conformal capacity of $U_{\varepsilon}$ is less than $\varepsilon$ and the function $\tilde{u}$ is continuous on the set $\Omega\setminus U_{\varepsilon}$ (see, for example, \cite{HKM,Maz}). The concept of quasicontinuity can be obviously extended to the closure $\overline{\Omega}$ of $\Omega$. In this paper we deal with quasicontinuous representations of functions $u\in L^1_2(\Omega)$. One of the main results of the paper is: \begin{thm} Let $\Omega\subset\mathbb R^2$, $\Omega\ne\mathbb R^2$, be a simply connected domain which is locally connected at any boundary point $x\in\partial\Omega$. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ such that $\tilde{u}|_{\Omega}=u$. \end{thm} \begin{rem} The quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ is defined at any point of $\partial \Omega$ except a set of conformal capacity zero (i.e. quasieverywhere). \end{rem} The main ingredient of our method is a well-known concept of the conformal capacity and a less known concept of the conformal capacitary boundary introduced by V.~Go\-l'd\-sh\-tein and S.~K.~Vodop'yanov \cite{GV} for quasiconformal homeomorphisms. In the plane case ``points'' of the conformal capacitary boundary coincide with the Caratheodory prime ends. Main properties of the space $L^1_2(\mathbb D)$ where $\mathbb D\subset\mathbb R^2$ is the unit disc are well known. Dirichlet spaces $L^1_2(\Omega)$ are conformal invariants. Therefore the Riemann Mapping Theorem permits us to transfer necessary information about boundary behavior of spaces $L^1_2(\Omega)$ from $L^1_2(\mathbb D)$ in the case of simply connected domains $\Omega$. More precisely, we extend the concept of quasicontinuity to a ``capacitary'' completion of a domain $\Omega$. 
We construct a conformal capacitary boundary as a completion $\big\{ \widetilde{\Omega}_{\rho},\rho\big\}$ of a metric space $\big\{{\Omega_{\rho}},\rho\big\}$ for a conformal capacitary metric $\rho$ (see Section 1). Roughly speaking, an ``ideal'' capacitary boundary point is a boundary continuum of conformal capacity zero. Our method allows us to treat the general case of simply connected plane domains $\Omega\subset\mathbb R^2$. We prove that any function $u\in L^1_2(\Omega)$ has a quasicontinuous extension onto the conformal capacitary boundary $H_{\rho}=\widetilde{\Omega}_{\rho}\setminus{\Omega}_{\rho}$. The main result is: \begin{thm} Let $\Omega\subset\mathbb R^2$ be a simply connected domain, $\Omega\ne\mathbb R^2$. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\widetilde{\Omega}_{\rho}\to \mathbb R$ such that $\tilde{u}|_{\Omega}=u$. \end{thm} \begin{rem} A concept of the conformal capacitary metric $\rho$ and the conformal capacitary boundary was proposed in \cite{GV}. By a quasi-invariance of the conformal capacity under (quasi)conformal homeomorphisms any such homeomorphism $\varphi: \Omega\to\Omega'$ is a bi-Lipschitz homeomorphism $\varphi: (\Omega, \rho)\to(\Omega', \rho)$ for corresponding conformal metrics and can be extended to a homeomorphism $\tilde{\varphi}: (\tilde{\Omega}, \rho)\to(\tilde{\Omega'}, \rho)$ of the capacitary completions \cite{GV}. Recall that the paper \cite{GV} is a short note and contains only sketches of proofs. \end{rem} There is a vast literature concerning ``ideal'' boundaries of plane domains in the context of conformal homeomorphisms. We discuss a few such concepts in the last section. The paper is organized as follows: Main properties of the conformal capacitary metric are proved in Section 2. The focus is on the local properties of the metric at boundary points and its dependence on the local topological properties of the boundary. 
In section 3 we discuss an analog of the Luzin property for the capacitary metric. In section 4 we discuss a sufficient condition for existence of an extension of functions $u\in L_{2}^{1}(\Omega)$ to the capacitary boundary. We call this condition as a strong Luzin property for the capacitary metric. We prove this condition for comparatively large classes of domains that include extension domains for $ L_{2}^{1}(\Omega)$. In Section 5 we apply the abstract construction of Section 4 to simply connected plane domains and we prove main results about extension of functions $u\in L_{2}^{1}(\Omega)$ to the capacitary boundary. {\it In terminology of the theory of Sobolev spaces we solved the classical trace problem for $ L_{2}^{1}(\Omega)$ in simply connected plane domains.} \begin{rem} The classical trace problem for Sobolev spaces is of essential interest, mainly due to its important applications to boundary-value problems for partial differential equations. Boundary value problems can be specified with the help of traces to $\partial\Omega$ of Sobolev functions. There is an extensive literature devoted to the trace problem of Sobolev spaces. Among the multitude of results we mention the monographs of P.~Grisvard \cite{Gri}, J.~L.~Lions and E.~Magenes \cite{LM}, V.~G.~Maz'ya and S.~Poborchi \cite{Maz}, \cite{MazP}, and the papers \cite{AS}, \cite{Be}, \cite{Ga}, \cite{J1}, \cite{J2}, \cite{JW}, \cite{MazP1}, \cite{MazP2}, \cite{MazP3}, \cite{MazPN}, \cite{Nik}, \cite{P1}, \cite{Vas}, \cite{Yak1}, \cite{Yak2}. For smooth domains the traces of Sobolev functions are Besov spaces. In the case of Lipschitz domains the traces can be described also in terms of Besov spaces. For arbitrary non Lipschitz domain the trace problem is open. For cusp type singularities a description of traces can be found in \cite{GVas} in terms of weighted Sobolev spaces. 
\end{rem} \section{ Conformal Capacitary Metric } Let $\Omega$ be a plane domain and $F_{0}$, $F_{1}$ two disjoint compact subsets of $\Omega$. We call the triple $E=(F_{0},F_{1};\Omega)$ a condenser. The value \[ \cp(E)=\cp(F_{0},F_{1};\Omega)=\inf\int\limits _{\Omega}|\nabla v|^{2}~dx, \] where the infimum is taken over all nonnegative functions $v\in C(\Omega)\cap L_{2}^{1}(\Omega)$, such that $v=0$ in a neighborhood of the set $F_{0}$, and $v\geq1$ in a neighborhood of the set $F_{1}$, is called the conformal capacity of the condenser $E=(F_{0},F_{1};\Omega)$. For finite values of capacity $0<\cp(F_{0},F_{1};\Omega)<+\infty$ there exists a unique function $u_{0}$ (an extremal function) such that: \[ \cp(F_{0},F_{1};\Omega)=\int\limits _{\Omega}|\nabla u_{0}|^{2}~dx.\] An extremal function is continuous in $\Omega$, monotone in the domain $\Omega\setminus(F_{0}\cup F_{1})$, equal to zero on $F_{0}$ and is equal to one on $F_{1}$ \cite{HKM,VGR}. \begin{defn} A homeomorphism $\varphi:\Omega\to\Omega'$ between plane domains is called $K$-quasiconformal if it preserves orientation, belongs to the Sobolev class $L^1_{2,loc}(\Omega)$ and the distortion inequality $$ \max\limits_{|\xi|=1}|D\varphi(x)\cdot\xi|\leq K\min\limits_{|\xi|=1}|D\varphi(x)\cdot\xi| $$ holds for almost all $x\in\Omega$. \end{defn} Infinitesimally, quasiconformal homeomorphisms carry circles to ellipses with eccentricity uniformly bounded by $K$. If $K=1$ we recover conformal homeomorphisms, while for $K>1$ plane quasiconformal mappings need not be smooth. The theory of quasiconformal mappings can be found, for example, in \cite{Va}. It is well known that the conformal capacity is quasi-invariant under the action of plane quasiconformal homeomorphisms. \subsection{Definition of the conformal capacitary metrics} A connected closed (with respect to $\Omega$) set is called a continuum. 
Fix a continuum $F$ in the domain $\Omega\subset\mathbb{R}^{2}$ and a compact domain $V$ such that $F\subset V\subset\overline{V}\subset\Omega$, and the boundary $\partial V$ is an image of the unit circle $S(0,1)$ under some quasiconformal homeomorphism of $\mathbb{R}^{2}$. \begin{defn} Choose arbitrary points $x,y\in\Omega\subset\mathbb R^2$ and join $x,y$ by a rectifiable curve $l(x,y)$. Define the conformal capacitary distance between $x$ and $y$ in $\Omega$ with respect to the pair $(F,V)$ as the following quantity \[ \rho_{(F,V)}(x,y)=\inf\limits _{l(x,y)}\{\cp^{\frac{1}{2}}(F,l(x,y)\setminus V;\Omega)+\cp^{\frac{1}{2}}(\partial\Omega,l(x,y)\cap V;\Omega)\}\] where the infimum is taken over all curves $l(x,y)$ satisfying the above conditions. \end{defn} This definition was first introduced in \cite{GV}, where it was claimed that the distance $\rho_{(F,V)}(x,y)$ is a metric in $\Omega$ which is quasi-invariant under quasiconformal homeomorphisms and is invariant under conformal ones. Denote by $\big\{ \widetilde{\Omega},\rho_{(F,V)}\big\} $ the standard completion of the metric space $\big\{ \Omega,\rho_{(F,V)}\big\} $ and by $H_{\rho}$ the set $\big\{ \widetilde{\Omega},\rho_{(F,V)}\big\}\setminus \big\{ \Omega,\rho_{(F,V)}\big\}$. We call $H_{\rho}$ a conformal capacitary boundary of $\Omega$. It will be proved that the topology of $H_{\rho}$ does not depend on the choice of a pair $(F,V)$. Moreover, two conformal capacitary metrics are equivalent for any different choice of pairs $(F_{1},V_{1})$ and $(F_{2},V_{2})$. This is a justification of the notation $H_{\rho}$ for the conformal capacitary boundary. \subsection{Equivalence of different conformal capacitary distances} We start from an important technical observation: \begin{lem} \label{lem:QuasiEst} Let $\Omega$ be a domain in $\mathbb R^2$ and $F_{1}$ be a compact set in $\Omega$. 
Then there exists a constant $0<K<\infty$ such that \[ \frac{1}{K}\cp(F_{02},F_1;\Omega)\leq \cp(F_{01},F_1;\Omega)\leq {K}\cp(F_{02},F_1;\Omega) \] for all compact sets $F_{01}\subset\Omega$, $F_{02}\subset\Omega$ such that the compact sets $F_{01}$, $F_{02}$, $F_{1}$ are mutually disjoint. \end{lem} \begin{proof} Let $U_{1}\supset F_{01}$ be an $\varepsilon$-neighborhood of the set $F_{01}$ in $\Omega$ and $U_{2}\supset F_{02}$ be an $\varepsilon$-neighborhood of the set $F_{02}$ in $\Omega$ such that $\overline{U_1}\subset\Omega$ and $\overline{U_2}\subset\Omega$. Because the set $F_{01}$ is compact there exists a finite covering $\left\{ B_{i}\right\} _{i=1,\dots,N}$ of $F_{01}$ by balls $B_{i}\subset\Omega$ of radius $\varepsilon$. It means that $\Omega\supset\bigcup_{i=1}^{N}B_{i}\supset F_{01}$. Then \[ \cp(F_{01},F_1;\Omega)\leq\cp(\bigcup_{i=1}^{N}\overline{B_{i}},F_1;\Omega).\] For every ball $B_i$, $i=2,\dots,N$, we can construct a bi-Lipschitz homeomorphism $\psi_i$ of $\Omega$ onto itself that maps $B_i$ onto $B_1$. Using quasi-invariance of the conformal capacity under bi-Lipschitz homeomorphisms we have \[ \cp(F_{01},F_1;\Omega)\leq\cp(\bigcup_{i=1}^{N}\overline{B_{i}},F_1;\Omega)\leq C_{1}\cdot\cp(\overline{B_1},F_1;\Omega)\] where the constant $C_{1}$ depends only on the multiplicity of the covering and $F_{1}$. Applying the same construction to $F_{02}$ we can construct a finite covering $\left\{ \tilde{B}_{i}\right\} _{i=1}^{N}$ of $F_{02}$ by balls $\tilde{B}_{i}\subset\Omega$ of radius $\varepsilon$ such that $\Omega\supset\bigcup_{i=1}^{N}\tilde{B}_{i}\supset F_{02}$. Then \[\cp(F_{02},F_1;\Omega)\leq C_{2}\cdot\cp(\overline{\tilde{B}_1},F_1;\Omega)\] where the constant $C_{2}$ depends only on the multiplicity of the covering of $F_{02}$ and on $F_{1}$. To combine both estimates we construct a bi-Lipschitz homeomorphism $\varphi$ of $\Omega$ that maps the ball $B_1\subset U_{1}$ onto a ball $\tilde{B}_1\subset U_{2}$. 
Using quasi-invariance of the conformal capacity under bi-Lipschitz homeomorphisms we obtain \[ \cp(\overline{B_1},F_1;\Omega)\leq M\cp(\overline{\tilde{B}_1},F_1;\Omega)\leq M\cp(\overline{U_{2}},F_1;\Omega) \] where $M$ is the Lipschitz constant of $\varphi$. Hence \[ \cp(F_{01},F_1;\Omega)\leq C_{1}M\cp(\overline{U_{2}},F_1;\Omega). \] Therefore \begin{equation} \cp(F_{01},F_1;\Omega)\leq\lim_{\varepsilon\to 0}K\cp(\overline{U_{2}},F_1;\Omega)=K\cp(F_{02},F_1;\Omega).\label{eq:Pcap} \end{equation} Using the same construction and the inverse bi-Lipschitz homeomorphism $\varphi^{-1}$ we obtain \begin{equation} \cp(F_{02},F_1;\Omega)\leq\lim_{\varepsilon\to 0}C_{2}{M}\cp(\overline{U_{1}},F_1;\Omega)=C_{2}{M}\cp(F_{01},F_1;\Omega).\label{eq:Pcap2} \end{equation} Using inequalities \eqref{eq:Pcap} and \eqref{eq:Pcap2} we obtain \[ \frac{1}{K}\cp(F_{02},F_1;\Omega)\leq \cp(F_{01},F_1;\Omega)\leq {K}\cp(F_{02},F_1;\Omega). \] \end{proof} Using this lemma we prove \begin{thm} \label{thm:QuasiInvMet} Let $\Omega$ be a domain in $\mathbb R^2$. Suppose that $\rho_{(F_{1},V_{1})}$ and $\rho_{(F_{2},V_{2})}$ are two conformal capacitary distances on $\Omega$. Then there exists a constant $0<K<\infty$ such that \[ \frac{1}{K}\rho_{(F_{2},V_{2})}(x,y)\leq\rho_{(F_{1},V_{1})}(x,y)\leq K\rho_{(F_{2},V_{2})}(x,y)\,\,\,\text{for any}\,\,\,x,y\in \Omega. \] \end{thm} \begin{proof} Consider two continua $F_{1}$ and $F_{2}$ in the domain $\Omega$. Without loss of generality we can suppose that any curve $l(x,y)$ admissible for the conformal capacitary metric does not intersect $V_{1}$ and $V_{2}$. Then by Lemma \ref{lem:QuasiEst} \[ \frac{1}{K}\cp(F_{2},l(x,y);\Omega)\leq \cp(F_{1},l(x,y);\Omega)\leq {K}\cp(F_{2},l(x,y);\Omega) \] for any curve $l(x,y)$ admissible for the conformal capacitary metric. Hence \[ \frac{1}{K}\rho_{(F_{2},V_{2})}(x,y)\leq\rho_{(F_{1},V_{1})}(x,y)\leq K\rho_{(F_{2},V_{2})}(x,y)\] for any $x,y\in \Omega$. 
\end{proof} \subsection{Conformal capacitary distance is a metric} The sketch of the proof can be found in \cite{GV}. For readers convenience we prove this fact in details. \begin{lem} \label{lem:CapContin} Let $\Omega$ be a domain in $\mathbb R^2$ and $F$ be a compact subdomain of $\Omega$. Suppose that $x\in\Omega\setminus\overline{F}$, $B(x,2r)\subset\Omega\setminus\overline{F}$, $r>0$ and curve $\gamma$ joints $x$ and $S(x,2r)$. Then $\cp(F,\gamma;\Omega)>c(r)>0$. \end{lem} \begin{proof} Choose a continuum $F_r$, such that $F_r\cap\gamma=\emptyset$, which connects spheres $S(x,r)$ and $S(x,2r)$. By Lemma~\ref{lem:QuasiEst} $$ \cp(F,\gamma;\Omega)\geq Q\cp(F_r,\gamma;\Omega). $$ Using the properties of capacity and Proposition~4.6 from \cite{GGR} we obtain $$ \cp(F_r,\gamma;\Omega)\geq \cp(F_r,\gamma\cap\{z:r\leq |x-z|\leq 2r \};\Omega)\geq c(r)>0. $$ Hence $\cp(F,\gamma;\Omega)\geq c(r)>0$. \end{proof} \begin{thm} \label{thm:ProofMetric} Let $\Omega$ be a domain in $\mathbb R^2$. Then the $\rho_{(F,V)}(x,y)$ is a metric in $\Omega$. \end{thm} \begin{proof} By standard properties of the conformal capacity \cite{Maz} $\rho_{(F,V)}(x,x)=0$. Let $\rho_{(F,V)}(x,y)=0$. Assume by contradiction that $x\ne y$ and denote the Euclidean distance $\dist(x,y)$ as $r$. Then there exists a sequence of curves $\{l_{k}(x,y)\}$, $k=1,2,...$, such that $$\cp(F,l_{k}(x,y)\setminus V;\Omega)\rightarrow 0\,\,\,\text{while}\,\,\, k\rightarrow\infty $$ and $$\cp(\partial\Omega,l_{k}(x,y)\cap V;\Omega)\rightarrow 0\,\,\,\text{while}\,\,\,k\rightarrow\infty. $$ Hence a sequence of extremal functions $u_{k}\in L_{2}^{1}(\Omega)$ for the capacities $$ \cp (F,l_{k}(x,y)\setminus V;\Omega) $$ tends to zero in the space $L_{2}^{1}(\Omega)$ and a sequence of extremal functions $v_{k}\in L_{2}^{1}(\Omega)$ for the capacities $$ \cp(\partial\Omega,l_{k}(x,y)\cap V;\Omega) $$ tends to zero in the space $L_{2}^{1}(\Omega)$. 
So, the sequence $u_{k}$ tends to zero except on a set of the conformal capacity zero and the sequence $v_{k}$ tends to zero except on a set of the conformal capacity zero. But by Lemma~\ref{lem:CapContin} $$ \|u_k| L^1_2(\Omega)\|+\|v_k| L^1_2(\Omega)\|\geq c(r)>0\quad \text{for all} \quad k. $$ This is a contradiction. Therefore $x=y$. By the definition of the conformal capacity we have \begin{multline} \cp^{\frac{1}{2}}(F,l(x,y)\setminus V;\Omega)+\cp^{\frac{1}{2}}(\partial\Omega,l(x,y)\cap V;\Omega)\\ =\cp^{\frac{1}{2}}(F,l(y,x)\setminus V;\Omega)+\cp^{\frac{1}{2}}(\partial\Omega,l(y,x)\cap V;\Omega). \nonumber \end{multline} Hence $\rho_{(F,V)}(x,y)=\rho_{(F,V)}(y,x)$. Let us prove the triangle inequality. Choose arbitrary points $x,y,z\in\Omega$. By the subadditive property of the capacity \cite{Maz} we obtain \begin{multline} \cp^{\frac{1}{2}}(F,(l(x,z)\setminus V)\cup(l(z,y)\setminus V);\Omega)\\ \leq\cp^{\frac{1}{2}}(F,(l(x,z)\setminus V);\Omega)+\cp^{\frac{1}{2}}(F,(l(z,y)\setminus V);\Omega) \nonumber \end{multline} and \begin{multline} \cp^{\frac{1}{2}}(\partial\Omega,(l(x,z)\cap V)\cup(l(z,y)\cap V);\Omega)\\ \leq\cp^{\frac{1}{2}}(\partial\Omega,(l(x,z)\cap V);\Omega)+\cp^{\frac{1}{2}}(\partial\Omega,(l(z,y)\cap V);\Omega). \nonumber \end{multline} Hence $\rho_{(F,V)}(x,y)\leq\rho_{(F,V)}(x,z)+\rho_{(F,V)}(y,z)$. Therefore $\rho_{(F,V)}$ is a metric. \end{proof} \begin{thm} \label{thm:CoinTopol} The topology induced by the conformal capacitary metric $\rho_{(F,V)}$ on the domain $\Omega\subset\mathbb R^2$ coincides with the Euclidean topology. \end{thm} \begin{proof} Let $U\subset\Omega$ be an open set with respect to the Euclidean metric. 
Let $x_{0}\in U$ be such that \[ \overline{B(x_{0},3r)}=\overline{\{x\in\Omega:|x-x_{0}|<3r\}}\subset U.\] Then for every point $y\in\partial B(x_{0},r)$ we have that \begin{multline} \rho_{(F,V)}(x_{0},y)=\\ \inf\limits _{l(x_{0}y)}\{\cp^{\frac{1}{2}}(F,l(x_{0}y)\setminus V;\Omega)+\cp^{\frac{1}{2}}(\partial\Omega,l(x_{0}y)\cap V;\Omega)\}>0, \nonumber \end{multline} since the $H^{1}$-Hausdorff measure of the set $l(x_{0}y)\setminus V$ (or of $l(x_{0}y)\cap V$) is positive. Hence $U$ is an open set with respect to the conformal capacitary metric in the domain $\Omega$. The inverse inclusion can be proved similarly. \end{proof} We complete in the standard manner the metric space $\Omega_{\rho}=\Omega_{\rho_{(F,V)}}$. In the completion $\tilde{\Omega}_{\rho}$ we define the conformal capacitary boundary of $\Omega$ as $H_{\rho}=\tilde{\Omega}_{\rho}\setminus{\Omega_{\rho}}$. It means that a boundary element $h\in H_{\rho}$ is a class of fundamental (in the metric $\rho$) sequences $\{x_{n}\}_{n=1}^{\infty}$. \vskip 0.3cm \begin{thm} \label{thm:IsometryMetric} Suppose that $\rho_{(F_{1},V_{1})}$ and $\rho_{(F_{2},V_{2})}$ are two conformal capacitary metrics on $\Omega$. The metric spaces $\left\{ H_{\rho},\rho_{(F_{1},V_{1})}\right\} $ and $\left\{ H_{\rho},\rho_{(F_{2},V_{2})}\right\} $ are quasi-isometric, i.e.\ there exists a constant $0<K<\infty$ such that \[ \frac{1}{K}\rho_{(F_{2},V_{2})}(x,y)\leq\rho_{(F_{1},V_{1})}(x,y)\leq K\rho_{(F_{2},V_{2})}(x,y)\] for any $x,y\in H_{\rho}$. \end{thm} \begin{proof} Consider two continua $F_{1}$ and $F_{2}$ in the domain $\Omega$ and suppose that sequences $\{x_{k}\}$ and $\{y_{k}\}$ are fundamental sequences in the metric $\rho_{(F_{2},V_{2})}$. If the sequences $\{x_{k}\}$ and $\{y_{k}\}$ have limit points $x,y\in\Omega$, then the sequences are fundamental in the metric $\rho_{(F_{1},V_{1})}$ because in the domain $\Omega$ conformal capacitary topologies are equivalent to the Euclidean topology. 
Let fundamental sequences $\{x_{k}\in\Omega\}$ and $\{y_{k}\in\Omega\}$ have limit points $x,y\in\left\{ H_{\rho},\rho_{(F_{2},V_{2})}\right\} $. By Theorem~\ref{thm:QuasiInvMet} for any $x_k, y_k\in\Omega$ there exists a constant $0<K<\infty$ such that $$ \frac{1}{K}\rho_{(F_{2},V_{2})}(x_k,y_k)\leq\rho_{(F_{1},V_{1})}(x_k,y_k)\leq K\rho_{(F_{2},V_{2})}(x_k,y_k). $$ Passing to the limit as $k\to\infty$ we conclude the proof. \end{proof} \begin{rem} Both metrics are equivalent in completions of $\Omega$. It can be proved in the same way but it is more technical. For our main aims this fact is not important. \end{rem} \vskip 0.3cm \subsection{Asymptotic behavior of the conformal capacitary metric} To study the asymptotic behavior of the conformal capacitary metric we need a few well-known estimates of the conformal capacity (see, for example \cite{Maz}). For the reader's convenience we reproduce simple proofs of these facts adapted to the conformal capacitary metric study: \begin{lem} Consider the unit disc $\mathbb D(0,1)\subset\mathbb R^2$ and continua $$ F_0=(-1,-\frac{1}{2}]\subset \mathbb D\,\,\,\text{and}\,\,\, F_1=[0,\varepsilon]\subset \mathbb D, \,\,0<\varepsilon<\frac{1}{4}. $$ Then $$ \cp(F_0, F_1; \mathbb R^2) \geq c_1\ln(1+\varepsilon),\,\,\, c_1=const. $$ \end{lem} \begin{proof} Consider the conformal mapping $$ \varphi: \mathbb C\to \mathbb C,\,\,\, \varphi(z)=\frac{1}{z}. $$ Then by the capacity estimates \cite{GR} \begin{multline} \cp(F_0, F_1; \mathbb R^2)=\cp(\varphi(F_0), \varphi(F_1); \mathbb R^2)\geq \cp([-2,-1], [\frac{1}{\varepsilon},1+\frac{1}{\varepsilon}]; \mathbb R^2)\\ =c_1\ln{\frac{1+\frac{1}{\varepsilon}}{\frac{1}{\varepsilon}}}=c_1\ln(1+\varepsilon). \nonumber \end{multline} \end{proof} \begin{lem} Consider the unit disc $\mathbb D(0,1)\subset\mathbb R^2$ and continua $F_0=\{z: |z|\geq 1\}$ and $F_1=[0,\varepsilon]\subset \mathbb D$, $0<\varepsilon<\frac{1}{4}$. 
Then $$ \cp(F_0, F_1; \mathbb R^2) \leq c_2\biggl(\ln\frac{1}{\varepsilon}\biggr)^{-1},\,\,\, c_2=const. $$ \end{lem} \begin{proof} By the capacity estimates \cite{GR} $$ \cp(F_0, F_1; \mathbb R^2)\leq\cp(F_0, \overline{D(0,\varepsilon)}, \mathbb R^2)=c_2\biggl(\ln\frac{1}{\varepsilon}\biggr)^{-1}. $$ \end{proof} From these lemmas we immediately obtain: \begin{prop} Let $x=(0,0)$ and $y=(\varepsilon,0)$ be points of the unit disc $\mathbb D(0,1)\subset\mathbb R^2$. Then we have the following asymptotic behavior of the conformal capacitary metric in the unit disc $\mathbb D(0,1)$: $$ \lim\limits_{x\to y}\frac{\rho(x,y)}{|x-y|}\geq \lim\limits_{\varepsilon\to 0}c_1\frac{\ln(1+\varepsilon)}{\varepsilon}=c_1. $$ and $$ \lim\limits_{x\to y}\frac{\rho(x,y)}{|x-y|}\leq \lim\limits_{\varepsilon\to 0}c_2 \frac{\biggl(\ln\frac{1}{\varepsilon}\biggr)^{-1}}{\varepsilon}<+\infty. $$ \end{prop} Now we study topological properties of the boundary $H_{\rho}$. \begin{defn} For an arbitrary point $h\in H_{\rho}$ we consider disks $D(h,\varepsilon)$, $\varepsilon>0$, defined in terms of the conformal capacitary metric $\rho_{(F,V)}$. Call the set \[ s_{h}=\bigcap_{\varepsilon>0}\overline{D(h,\varepsilon)\cap\Omega}\subset\overline{\mathbb{R}^{2}}\] the realization (impression) of a boundary element $h\in H_{\rho}$. \end{defn} Recall that a domain $\Omega$ is called locally connected at a point $z_0\in \partial \Omega$ if $z_0$ has arbitrarily small connected neighborhoods in $\Omega$. By C.~Carath\'eodory \cite{Cr} the domain $\Omega$ is locally connected at every boundary point if and only if every prime end has a trivial realization. \begin{lem} \label{lem:OnePoint}Let the realization $s_{h}$ of a boundary element $h\in H_{\rho}$ be a single point. Then for every sequence $\{x_{m}\in\Omega\}$ it follows from $\rho_{(F,V)}(x_{m},h)\rightarrow0$ that $|x_{m}-s_{h}|\rightarrow0$ (while $m\rightarrow\infty$). \end{lem} \begin{proof} Suppose that $\rho_{(F,V)}(x_{m},h)\rightarrow0$ while $m\rightarrow\infty$. 
Because the realization $s_{h}$ of a boundary element $h\in H_{\rho}$ is a point, then $$ \operatorname{diam}\left(\overline{D(h,\varepsilon)\cap\Omega}\right)=\sup\limits_{x,y\in \overline{D(h,\varepsilon)\cap\Omega}}|x-y| \to 0\,\,\,\text{while}\,\,\,\varepsilon\to 0. $$ The sequence $\{x_{m}\}$ belongs to a boundary element $h\in H_{\rho}$ and so $$ |x_m-x_n|\to 0\,\,\,\text{while}\,\,\,m,n\to\infty. $$ Hence, the sequence $\{x_n\}$ is a Cauchy sequence in the Euclidean metric, and we have that $|x_{m}-s_{h}|\rightarrow0$ while $m\rightarrow\infty$. \end{proof} \begin{lem} \label{lem:LocConnected} Let a domain $\Omega$ be locally connected at a point $x\in\partial\Omega$ and let $x\in s_h$ for some $h\in H_{\rho}$. Then for every sequence $\{x_{m}\in\Omega\}$ such that $|x_{m}-x|\rightarrow 0$ we have $\rho_{(F,V)}(x_{m},h)\rightarrow 0$ (while $m\rightarrow\infty$). \end{lem} \begin{proof} Since the domain $\Omega$ is locally connected at the point $x\in\partial\Omega$, any two points $x_{k},x_{m}$ from the sequence $\{x_{n}\}$ can be connected by a geodesic path $l(x_{k},x_{m})$ such that its length tends to zero for $k,m\to\infty$. Without loss of generality we can suppose that $l(x_{k},x_{m})\cap V=\emptyset$. Hence $\cp(F,l(x_{k},x_{m});\Omega)$ tends to zero for $k,m\to\infty$ and therefore\[ \lim\limits _{n\rightarrow\infty}\rho_{(F,V)}(x_{n},h)=0.\] \end{proof} From these lemmas we obtain: \begin{thm} \label{LocCon} Let a domain $\Omega$ be locally connected at every point $x\in\partial\Omega$. Then the identical mapping $i:\Omega\to\Omega$ can be extended to a homeomorphism $\tilde{i_\rho}: \overline{\Omega}\to \tilde{\Omega}_{\rho}$ if and only if all realizations $s_{h}$ of boundary elements $h\in H_{\rho}$ are one-point sets. \end{thm} \begin{proof} Suppose that the identical mapping $i:\Omega\to\Omega$ can be extended to a homeomorphism $\tilde{i_\rho}: \overline{\Omega}\to \tilde{\Omega}_{\rho}$. 
Then every boundary element $h\in H_{\rho}$ coincides with a point $x\in \partial\Omega$ and so has an one-point realization. Inversely, let all realizations $s_{h}$ of a boundary elements $h\in H_{\rho}$ are one-points. Then extending an identical mapping $i:\Omega\to\Omega$ to the mapping $\tilde{i_{\rho}}:\tilde{\Omega}_{\rho}\to\overline{\Omega}$ by the rule $\tilde{i_{\rho}}(h)=s_h$ we obtain a one-to-one correspondence $\tilde{i_{\rho}}: \tilde{\Omega}_{\rho}\to\overline{\Omega}$. Let us check continuity of $\tilde{i_{\rho}}$ and ${\tilde{i_p}}^{-1}$. Suppose that $x_k\to x$ in $\overline{\Omega}$. Because all realizations $s_{h}$ of boundary elements $h\in H_{\rho}$ are one-points and $x\in s_h$ then by Lemma~\ref{lem:LocConnected} follows $\rho_{(F,V)}(x_{m},h)\rightarrow 0$, while $m\rightarrow\infty$. We prove that $\tilde{i_{\rho}}$ is continuous. Suppose $h_k\to h_0$ in $\tilde{\Omega}_{\rho}$. Because $\Omega$ is locally connected then realizations of $h_k$ and $h_0$ are one point sets and we can identify $h_k$ and $h_0$ with their realizations. By Lemma~\ref{lem:OnePoint} $h_k\to h_0$ in $\Omega$. Therefore ${\tilde{i_{\rho}}}^{-1}$ is also continuous. Therefore $\tilde{i_{\rho}}$ is a homeomorphism. \end{proof} \subsection{Conformal capacitary boundary and Carath\'eodory prime ends} The notion of the ideal boundary in the terms of prime end was introduced by Carath\'eodory \cite{Cr}. The Cartheodory prime ends represent a compactification of plane domains in the relative distance introduced by Lavrentiev \cite{Lv}. (A detailed historical sketch can be found in \cite{Mi}). We prove that the capacitary boundary is homeomorphic to the Carath\'eodory boundary. \begin{thm}\label{thm:CapCar} Let $\Omega\subset\mathbb R^2$ be a simply connected domain, $\Omega\ne \mathbb R^2$. Then the capacitary boundary $H_{\rho}$ is homeomorphic to the Carath\'eodory boundary $\partial_{C}\Omega$. 
\end{thm} \begin{proof} The Carath\'eodory boundary $\partial_{C}\Omega$ is homeomorphic to the boundary of the unit disc $\partial \mathbb D$. The capacitary boundary is homeomorphic to the boundary of the unit disc $\partial \mathbb D$ also. Hence the capacitary boundary $H_{\rho}$ is homeomorphic to the Carath\'eodory boundary $\partial_{C}\Omega$. \end{proof} On the basis of this theorem we give some examples \cite{Eps} of boundary elements $h\in H_{\rho}$ of the conformal capacitary boundary. \begin{exa} Let $$ X=\{(x,y): y=1/3^n\,\, \text{for some}\,\, n\geq 1\,\, \text{and}\,\, -1\leq x\leq 2\} $$ and $$ Y=\{(x,y): y=2/3^n\,\, \text{for some}\,\, n\geq 1\,\, \text{and}\,\, -2\leq x\leq 1\}. $$ Let $\Omega = (-2,2)\times(0,1)\setminus(X\cup Y)$. The boundary element of this domain is $h=\{(x,0): -1\leq x\leq 1\}$. \end{exa} \begin{exa} Let $\Omega=\mathbb R^2\setminus K$, where $K$ is given in polar coordinates by \begin{multline} K=\{(r,\theta): \theta=2\pi p/2^n\,\,\text{for some integer}\,\, n\geq 1\,\, \text{and some odd integer}\,\,p\,\,\\ \text{such that}\,\, 0<p<2^n, \, 0\leq r\leq 1/2^n\}. \nonumber \end{multline} The boundary element $h\in H_{\rho}$ of this domain at the origin is homeomorphic to a Cantor set. \end{exa} By C.~Carath\'eodory \cite{Cr} the domain $\Omega$ is locally connected at every boundary point if and only if every boundary element has a trivial realization. Hence we have the following corollary of Theorem \ref{LocCon}: \begin{cor} \label{LocConSim} Let a simply connected domain $\Omega$ be locally connected at every point $x\in\partial\Omega$. Then the identical mapping $i:\Omega\to\Omega$ can be extended to a homeomorphism $\tilde{i_\rho}: \tilde{\Omega}_{\rho}\to\overline{\Omega}$. \end{cor} \section{ Strong Luzin Property for the Capacitary Metric and Boundary Values of Sobolev functions} Recall the notion of the conformal capacity of a set $E\subset \Omega$. 
Let $\Omega$ be a domain in $\mathbb R^2$ and let $F\subset\Omega$ be a compact set. The conformal capacity of the compact $F$ is defined by $$ \cp(F;\Omega)=\inf\{\|u|L^1_2(\Omega)\|^2,\,\,u\geq 1\,\, \text{on}\,\, F, \,\,u\in C_0(\Omega)\}. $$ In a similar way we can define the conformal capacity of open sets. For an arbitrary set $E\subset\Omega$ we define an inner conformal capacity as $$ \underline{\cp}(E;\Omega)=\sup\{\cp(e;\Omega),\,\,e\subset E\subset\Omega,\,\, e\,\,\text{is a compact set}\}, $$ and an outer conformal capacity as $$ \overline{\cp}(E;\Omega)=\inf\{\cp(U;\Omega),\,\,E\subset U\subset\Omega,\,\, U\,\,\text{is an open set}\}. $$ A set $E\subset\Omega$ is called conformal capacity measurable if $\underline{\cp}(E;\Omega)=\overline{\cp}(E;\Omega)$. The value $$ \cp(E;\Omega)=\underline{\cp}(E;\Omega)=\overline{\cp}(E;\Omega) $$ is called the conformal capacity of the set $E\subset\Omega$. The classical Luzin theorem asserts that every measurable function is {\bf uniformly} continuous if it is restricted to the complement of an open set of arbitrarily small measure. It is reasonable to conjecture that every function $u\in L^{1}_{2}(\Omega)$ is {\bf uniformly} continuous if it is restricted to the complement of an open subset of $\Omega\subset \mathbb R^{2}$ of arbitrarily small conformal capacity. Unfortunately this conjecture is wrong for an arbitrary domain and is correct only under additional conditions on $\Omega$. The weak version of the Luzin theorem is correct for the capacity: \begin{thm} \label{thm:WeakLuzin} (Weak Luzin theorem for the conformal capacity \cite{Maz}) Let $\Omega\subset \mathbb R^2$ be an open set. For any function $u\in L^1_2(\Omega)$ and for any $\varepsilon>0$ there exists an open set $U_{\varepsilon}\subset\Omega$, $\cp(U_{\varepsilon};\Omega)<\varepsilon$, such that $u|_{\Omega\setminus U_{\varepsilon}}$ is continuous. \end{thm} We discuss here the strong version of the Luzin property for the capacity: \begin{defn} {\bf Strong Luzin capacitary property}. 
A domain $\Omega\subset\mathbb R^2$ possesses a strong Luzin capacitary property if for every function $u\in L^{1}_{2}(\Omega)$ and for any $\varepsilon>0$ there exists an open set $U_{\varepsilon}$ of the conformal capacity less than $\varepsilon$ such that the restriction of the function $u$ to $\Omega \setminus U_{\varepsilon}$ is uniformly continuous for the conformal capacitary metric. \end{defn} This property looks very restrictive, but, in reality, it is correct for a large set of domains. We prove in this section that any extension domain possesses the strong Luzin capacitary property and we prove in the next section that any quasiconformal homeomorphism preserves the strong Luzin capacitary property. Our main motivation for a study of this property is the following result: \begin{thm} \label{thm:Tietz} Let a domain $\Omega\subset \mathbb R^{2}$ possess the strong Luzin capacitary property. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\widetilde{\Omega}_{\rho}\to \mathbb R$ defined quasi-everywhere on $H_{\rho}$ such that $\tilde{u}|_\Omega=u$. \end{thm} \begin{proof} Because $\Omega$ possesses the strong Luzin capacitary property, for every $\varepsilon>0$ there exists an open set $U_{\varepsilon}\subset\Omega$ such that $\cp(U_{\varepsilon})<\varepsilon$ and any function $u\in L^1_2(\Omega)$ is uniformly continuous for the conformal capacitary metric on the closed (with respect to $\Omega$) set $\Omega^{\varepsilon}=\Omega\setminus U_{\varepsilon}$. Consider the completion $\widetilde{\Omega}^{\varepsilon}$ of the set $\Omega^{\varepsilon}$ in the complete metric space $\left(\widetilde{\Omega}_{\rho},\rho\right)$. The function $u\in L_{2}^{1}(\Omega)$ will be uniformly continuous on the metric space $\left(\Omega^{\varepsilon}_{\rho},\rho\right)$. Hence by the Tietze theorem there exists an extension $\tilde{u}_{\varepsilon}$ of $u$ to $\widetilde{\Omega}^{\varepsilon}$. 
Put $\widetilde{\Omega}^{0}=\cup_{\varepsilon>0}\widetilde{\Omega}^{\varepsilon}$. Then the function $u$ possesses an extension $\widetilde{u}$ to the metric space $\left(\widetilde{\Omega}^{0},\rho\right)$ and $\cp(\widetilde{\Omega}_{\rho}\setminus\widetilde{\Omega}^{0})=0$ because $\Omega^{\varepsilon_{1}}\supset\Omega^{\varepsilon_{2}}$ if ${\varepsilon_{1}}<{\varepsilon_{2}}$. Therefore $\widetilde{u}|_{H_{\rho}}$ is defined quasi-everywhere on $H_{\rho}$ and represents the boundary value of the function $u\in L^1_2(\Omega)$ on the capacitary boundary $H_{\rho}$. \end{proof} \begin{rem} The function $\tilde{u}$ is defined quasi-everywhere on $H_{\rho}$ in the following sense. For any $\varepsilon>0$ there exists an open set $U_{\varepsilon}\subset\Omega$ such that the function $u$ is uniformly continuous on $\Omega\setminus U_{\varepsilon}$, $\cp(U_{\varepsilon})<\varepsilon$, and the continuous extension of $\widetilde{u}:\Omega\setminus U_{\varepsilon}\to \mathbb R$ to its completion $\left(\widetilde{\Omega\setminus U_{\varepsilon}},\rho\right)$ coincides with $\tilde{u}$ on $H_{\rho}\cap\left(\widetilde{\Omega\setminus U_{\varepsilon}},\rho\right)$. \end{rem} Combining the previous Theorem and Corollary \ref{LocConSim} we obtain immediately \begin{thm} \label{thm:Tietz1} Let a domain $\Omega\subset \mathbb R^{2}$ possess the strong Luzin capacitary property and be locally connected at every boundary point. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ defined quasi-everywhere on $\partial \Omega$ such that $\tilde{u}|_\Omega=u$. \end{thm} The strong Luzin capacitary property is valid for a large class of domains, namely extension domains. The class of extension domains includes domains with smooth or Lipschitz boundaries (see for example \cite{Maz}). 
\begin{defn} A domain $\Omega\subset \mathbb R^{2}$ is said to be a Sobolev $L_{2}^{1}$-extension domain if there exists a bounded linear operator $E:L_{2}^{1}(\Omega)\to L_{2}^{1}(\mathbb R^{2})$ such that for any $u\in L_{2}^{1}(\Omega)$ the condition $E(u)|_{\Omega}=u$ holds. \end{defn} We call the operator $E$ an extension operator. It is known that a simply connected domain $\Omega\subset\mathbb R^2$ is a $L^1_2$-extension domain if and only if $\Omega$ is a quasidisc \cite{GV1}. Recall that a domain $\Omega\subset\mathbb R^2$ is called a quasidisc if there exists a quasiconformal homeomorphism $\varphi:\mathbb R^2\to \mathbb R^2$ such that $\Omega=\varphi(\mathbb D)$. \begin{thm} \label{thm:HomForExtension} If a bounded domain $\Omega\subset\mathbb R^2$ is a $L_{2}^{1}$-extension domain then the identity mapping $id:H_{\rho}\to\partial{\Omega}$ is a homeomorphism. \end{thm} \begin{proof} Because $\Omega$ is a $L_{2}^{1}$-extension domain then there exists an extension operator $$ E:L_{2}^{1}(\Omega)\to L_{2}^{1}(\mathbb R^{2}) $$ such that for any $u\in L_{2}^{1}(\Omega)$ we have $E(u)|_\Omega=u$. Hence \[ \frac{1}{\left\Vert E\right\Vert }\left\Vert E(u)\right\Vert _{L_{2}^{1}(\mathbb R^{2})}\leq\left\Vert u\right\Vert _{L_{2}^{1}(\Omega)}\leq\left\Vert E(u)\right\Vert _{L_{2}^{1}(\mathbb R^{2})}.\] By the definition of the conformal capacity for any condenser $(F_{0},F_{1};\Omega)$ the following inequality \[ \frac{1}{\left\Vert E\right\Vert ^{2}}\cp(F_{0},F_{1};\mathbb R^{2})\leq \cp(F_{0},F_{1};\Omega)\leq \cp(F_{0},F_{1};\mathbb R^{2}) \] holds. So, by the definition of the conformal capacitary metric for any points $x,y\in\Omega$ and any pair $(F,V)$ from the previous inequality follows \[ \frac{1}{\left\Vert E\right\Vert^2 }\hat{\rho}_{(F,V)}(x,y)\leq\rho_{(F,V)}(x,y)\leq\hat{\rho}_{(F,V)}(x,y) \] where $\hat{\rho}_{(F,V)}(x,y)$ is the conformal capacitary metric in $\mathbb R^{2}$ and $\rho_{(F,V)}(x,y)$ is the conformal capacitary metric in $\Omega$. 
It means that the metric $\rho_{(F,V)}(x,y)$ is equivalent to the metric $\hat{\rho}_{(F,V)}(x,y)$ on $\Omega$. By Theorem~\ref{thm:CoinTopol} the topology induced by the metric $\hat{\rho}_{(F,V)}(x,y)$ on $\mathbb R^2$ coincides with the Euclidean topology and so the topology of $H_{\rho}$ coincides with the Euclidean topology of $\partial\Omega$. Because the metrics $\rho_{(F,V)}(x,y)$ and $\hat{\rho}_{(F,V)}(x,y)$ are equivalent on $\Omega$ the theorem is proved. \end{proof} \begin{thm} \label{thm:ExtLuzin} Let $\Omega\subset\mathbb R^2$ be a bounded $L_{2}^{1}$-extension domain. Then $\Omega$ possesses the strong Luzin capacitary property. \end{thm} \begin{proof} Choose arbitrarily a function $u\in L_{2}^{1}(\Omega)$. Because $\Omega$ is an extension domain there exists an extension $\widehat{u}\in L_{2}^{1}(\mathbb R^{2})$ of $u$. By Theorem~\ref{thm:WeakLuzin} for any $\varepsilon>0$ there exists an open set $U_{\varepsilon}\subset \mathbb R^{2}$ of conformal capacity less than $\varepsilon$ such that the function $\widehat{u}$ is continuous on $\mathbb R^{2}\setminus U_{\varepsilon}$. Because the domain $\Omega$ is bounded the function $\hat{u}|_{\bar{\Omega}\setminus U_{\varepsilon}}$ is uniformly continuous. Hence the function $u$ is uniformly continuous on $\Omega\setminus U_{\varepsilon}$. By the monotonicity of the conformal capacity $\cp(U_{\varepsilon}\cap\Omega)<\cp(U_{\varepsilon})<\varepsilon$. By the previous Theorem~\ref{thm:HomForExtension} the function $u$ is uniformly continuous for the conformal capacitary metric in $\Omega\setminus U_{\varepsilon}$ also. \end{proof} Combining Theorem \ref{thm:Tietz}, Theorem \ref{thm:ExtLuzin} and Theorem \ref{thm:HomForExtension} we obtain \begin{thm} \label{thm:TietzExtension} Let a domain $\Omega\subset \mathbb R^{2}$ be a bounded $L_{2}^{1}$-extension domain. 
Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ defined quasieverywhere on $\partial \Omega$ such that $\tilde{u}|_\Omega=u$. \end{thm} Theorem \ref{thm:Tietz}, Theorem \ref{thm:ExtLuzin} and \ref{thm:HomForExtension} can be easily extended to a more flexible class of so-called quasi-extension domains: \begin{defn} A domain $\Omega\subset \mathbb R^{2}$ is said to be a Sobolev $L_{2}^{1}$ -quasi-extension domain if for any $\varepsilon>0$ there exist such open set $U_{\varepsilon}$ of conformal capacity less then $\varepsilon$ that $\Omega\setminus\bar{U_{\varepsilon}}$ is a $L_{2}^{1}$ -extension domain. \end{defn} Typical examples of such domains are domains with boundary singularities of conformal capacity zero. \begin{thm} If a bounded domain $\Omega$ is a $L_{2}^{1}$ quasi-extension domain then the identity mapping $id:\partial{\Omega}\to H_{\rho}$ is a homeomorphism. \end{thm} \begin{proof} Follows directly from Theorem~\ref{thm:HomForExtension} and the countable subadditivity of capacity. \end{proof} \begin{thm} Let $\Omega\subset\mathbb R^2$ be a bounded $L_{2}^{1}$ quasi-extension domain. Then $\Omega$ possesses the strong Luzin capacitary property. \end{thm} \begin{proof} Follows directly from Theorem~\ref{thm:HomForExtension} and the countable subadditivity of capacity. \end{proof} \begin{thm} Let a domain $\Omega\subset \mathbb R^{2}$ be a bounded $L_{2}^{1}$ quasi-extension domain. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ defined quasieverywhere on $\partial \Omega$ such that $\tilde{u}|_\Omega=u$. \end{thm} \section{Boundary Values of Sobolev Functions for Simply Connected Domains} Using the Riemann Mapping Theorem we prove that any simply connected domain possess the strong Luzin capacitary property, that permits to extend main results to any simply connected domain. 
The unit disk $\mathbb D(0,1)\subset\mathbb R^2$ is an $L^1_2$-extension domain and possesses the strong Luzin capacitary property. Remember that the conformal capacity of condensers is a quasi-invariant for quasiconformal homeomorphisms $\varphi:\Omega \to \Omega'$ between two plane domains $\Omega$ and $\Omega'$. Hence the conformal capacitary metric is also a quasi-invariant for quasiconformal homeomorphisms. Moreover, from this remark immediately follows \begin{prop} (\cite{GV}) \label{QuasiInv} Any quasiconformal homeomorphism $\varphi:\Omega \to \Omega'$ between two plane domains $\Omega$ and $\Omega'$ induces a quasi-isometry of $\widetilde{\Omega}_{\rho}$ and $\widetilde{\Omega'}_{\rho}$. \end{prop} \begin{cor} Let $\varphi: \mathbb D \to \Omega$ be a quasiconformal homeomorphism of the unit disc $\mathbb D$ onto a domain $\Omega \subset \mathbb R^2$. Then $\Omega$ possesses the strong Luzin capacitary property. \end{cor} \begin{proof} Choose a function $u\in L^1_2(\Omega)$. Because $\varphi:\mathbb D \to \Omega$ is a quasiconformal homeomorphism, the composition $v:=u \circ \varphi$ belongs to $L^1_2(\mathbb D)$ (see, for example \cite{GR}). Because $\mathbb D$ possesses the strong Luzin capacitary property, for any $\varepsilon >0$ there exists an open set $V_{\varepsilon}$ of the conformal capacity less than $\varepsilon$ such that $v|_{\mathbb D \setminus V_{\varepsilon}}$ is uniformly continuous. The conformal capacity is a quasi-invariant for a quasiconformal homeomorphism $\varphi$. It means that there exists a constant $Q$ which depends only on the quasiconformal distortion of $\varphi$ and such that the conformal capacity of $U_{\varepsilon}:=\varphi(V_{\varepsilon})$ is less than $Q \varepsilon$. By the previous proposition $\varphi^{-1}$ induces a quasi-isometry of $\widetilde{\Omega}_{\rho}$ and $\widetilde{\mathbb D}_{\rho}$. Therefore $u=v \circ \varphi^{-1}$ is uniformly continuous on $\Omega \setminus U_{\varepsilon}$. 
We have proved that $\Omega$ possesses the strong Luzin capacitary property. \end{proof} From the previous proposition and Theorem \ref{thm:Tietz} immediately follows \begin{thm} \label{MainTh} Let $\Omega\subset\mathbb R^2$ be a simply connected domain, $\Omega\ne\mathbb R^2$. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\widetilde{\Omega}_{\rho}\to \mathbb R$ defined quasi-everywhere on the capacitary boundary $H_{\rho}$ such that $\tilde{u}|_{\Omega}=u$. \end{thm} We also state its version for simply connected domains locally connected at any boundary point: \begin{thm} \label{MainTh1} Let $\Omega\subset\mathbb R^2$, $\Omega\ne\mathbb R^2$ be a simply connected domain locally connected at any boundary point. Then for any function $u\in L_{2}^{1}(\Omega)$ there exists a quasicontinuous function $\widetilde{u}:\overline{\Omega} \to \mathbb R$ defined quasi-everywhere on the boundary $\partial \Omega$ such that $\tilde{u}|_{\Omega}=u$. \end{thm} For the reader's convenience we repeat some basic facts about quasidiscs. \begin{defn} A domain $\Omega$ is called a $K$-quasidisc if it is the image of the unit disc $\mathbb{D}$ under a $K$-quasiconformal homeomorphism of the plane onto itself. \end{defn} It is well known that the boundary of any $K$-quasidisc $\Omega$ admits a $K^{2}$-quasiconformal reflection and thus, for example, any conformal homeomorphism $\varphi:\mathbb{D}\to\Omega$ can be extended to a $K^{2}$-quasiconformal homeomorphism of the whole plane to itself. Boundaries of quasidiscs are called quasicircles. It is known that there are quasicircles for which no segment has finite length. The Hausdorff dimension of quasicircles was first investigated by Gehring and V\"ais\"al\"a (1973) \cite{GV73}, who proved that it can take all values in the interval $[1,2)$. S.~Smirnov proved recently \cite{Smi10} that the Hausdorff dimension of any $K$-quasicircle is at most $1+k^2$, where $k = (K-1)/(K+1)$. 
Ahlfors's 3-point condition \cite{Ahl63} gives a complete geometric characterization: a Jordan curve $\gamma$ in the plane is a quasicircle if and only if for any two points $a, b$ on $\gamma$ the (smaller) arc between them has diameter comparable to $|a-b|$. This condition is easily checked for the snowflake. On the other hand, every quasicircle can be obtained by an explicit snowflake-type construction (see \cite{Roh01}). Because any quasidisc is an $L_2^1$-extension domain, we can reformulate the previous results in terms of quasidiscs. \begin{prop} \label{prop:HomForExtension} Let a domain $\Omega\subset\mathbb R^2$ be a quasidisc. Then the identity mapping $id:H_{\rho}\to\partial{\Omega}$ is a homeomorphism. \end{prop} \begin{prop} \label{prop:ExtLuzin} Let a domain $\Omega\subset\mathbb R^2$ be a quasidisc. Then $\Omega$ possesses the strong Luzin capacitary property. \end{prop} \section {Historical Sketch and Conclusions} The concept of ideal boundaries is common to geometry and analysis. The Poincar\'e disc is a model of the hyperbolic plane that provides a geometrical realization of the ideal boundary of the hyperbolic plane with the help of a conformal homeomorphism. By the Riemann Mapping Theorem any simply connected plane domain $\Omega\ne\mathbb R^2$ is conformally equivalent to the unit disc. However, the boundary behavior of plane conformal homeomorphisms cannot be described in terms of Euclidean boundaries, but it can be described in terms of ideal boundary elements (prime ends) that were introduced by C.~Carath\'eodory. By the Carath\'eodory Theorem any conformal homeomorphism $\varphi: \mathbb{D}\to \Omega$ induces a one-to-one correspondence of prime ends. M.~A.~Lavrentiev \cite{Lv} introduced a metric (a relative distance) for prime ends. 
G.~D.~Suvorov \cite{Su} constructed a counterexample that demonstrates the absence of the triangle inequality for the Lavrentiev relative distance and proposed a more accurate concept of relative distance that supports the triangle inequality. In terms of this metric the Carath\'eodory prime ends are a geometric representation of the ``ideal'' boundary points of the compactification. There exists a number of different conformally invariant intrinsic metrics. A detailed survey can be found in the paper of V.~M.~Miklyukov \cite{Mi}. For dimensions greater than two, by the Liouville theorem the class of conformal homeomorphisms coincides with the M\"obius transformations. Even for quasiconformal homeomorphisms, nothing similar to the Riemann Mapping Theorem holds. In our opinion, two main constructions of a quasiconformally invariant ``ideal'' boundary were proposed. The first one was in the spirit of Banach algebras. Recall that the Royden algebra $\mathbb{R}(\Omega)$ was shown to be a quasiconformal invariant by M.~Nakai \cite{Na} for dimension two and by L.~G.~Lewis \cite{Le} for arbitrary dimension. Like any Banach algebra, the Royden algebra produces a compactification of $\Omega$, and any quasiconformal homeomorphism induces a homeomorphism of such compactifications. The second one is the so-called capacitary boundary proposed by V.~Gol'd\-shtein and S.~K.~Vodop'janov \cite{GV}. Its construction is based on the notion of the conformal capacity. Recall that the conformal capacity is a quasi-invariant of quasiconformal homeomorphisms. By \cite{GV}, any quasiconformal homeomorphism can be extended to a homeomorphism of the domains with their capacitary boundaries. The Royden compactification does not coincide with the Carath\'eodory compactification. The ``ideal'' elements of the capacitary boundary are Carath\'eodory prime ends. Necessary and sufficient conditions for the existence of continuous traces of functions in $L^1_p(\Omega)$, $p>2$, were obtained by Shvartsman \cite{Sh} in terms of quasi-hyperbolic metrics. \end{document}
\begin{document} \title[Identities common to four abelian group operations] {Groupoid identities common to four abelian group operations} \author{David Kelly} \address{Department of Mathematics\\ University of Manitoba\\ Winnipeg, Manitoba, Canada R3T 2N2} \keywords{finitely based, finite basis, medial groupoid, variety} \subjclass[2000]{08B05} \date{July 6, 2008} \begin{abstract} We exhibit a finite basis $\mathcal{M}$ for a certain variety $\mathbf{V}$ of medial groupoids. The set $\mathcal{M}$ consists of the medial law $(xy)(zt)=(xz)(yt)$ and five other identities involving four variables. The variety $\mathbf{V}$ is generated by the four groupoids $\pm x\pm y$ on the integers. Since $\mathbf{V}$ is a very natural variety, proving it to be finitely based should be of interest. In an earlier paper, we made a conjecture which implies that $\mathbf{V}$ is finitely based. In this paper, we show that $\mathbf{V}$ is finitely based by proving that $\mathcal{M}$ is a basis. Based on our proof, we think that our conjecture will be difficult to prove. As we explain in the paper, the variety $\mathbf{V}$ corresponds to the Klein $4$-group. We use this group to show that $\mathbf{V}$ has a basis consisting of interchange laws. (We define ``interchange law'' in the introduction.) We give more examples of finite groups where such a basis exists for the corresponding groupoid variety. We also give examples of finite groups where such a basis is impossible. The second case is a further challenge to anyone who tries to prove our conjecture. We used four medial groupoids to define $\mathbf{V}$. We also present a finite basis for the variety generated by any proper subset of these four groupoids. In an earlier paper with R.~Padmanabhan, we gave the corresponding finite bases when the constant zero is allowed. 
\end{abstract} \maketitle \setcounter{section}{-1} \section{Introduction} The overview given in the abstract was designed to motivate the reading of our intricate arguments. In the next paragraph, we define the sets $\mathcal{M}$ and $\Sigma$ of identities. In fact, $\Sigma$ is the set of identities valid in the variety $\mathbf{V}$ that was defined in the abstract. Although it is ``obvious'' that $\mathcal{M}$ is a basis for $\mathbf{V}$, a proof is required. The conjecture we made in \cite{dK08}, described later in this introduction, implies that $\mathbf{V}$ is finitely based. Let $\Sigma$ be the set of groupoid identities that are satisfied by the four binary operations $\pm x\pm y$ in every abelian group. Theorem 1.1 states that the following six identities form an independent basis for $\Sigma$. \par\textup{(M1)} \ $(xy)(zt)=(xz)(yt)$ \par\textup{(M2)} \ $(xy)(zt)=(ty)(zx)$ \par\textup{(M3)} \ $((xy)z)t=((xt)z)y$ \par\textup{(M4)} \ $(x(yz))t=(x(tz))y$ \par\textup{(M5)} \ $x((yz)t)=z((yx)t)$ \par\textup{(M6)} \ $x(y(zt))=z(y(xt))$ \par\noindent The identity (M1) is called the \emph{medial} \emph{law}. Let $\mathcal{M}$ denote the set of the above six ``mutation laws.'' When the constant zero is allowed, Kelly and Padmanabhan \cite{KP85} found a finite basis for the corresponding set of identities. When $G$ is a multiplicative abelian group generated by $\alpha$ and $\beta$, we write $\Sigma(G;\alpha,\beta)$ for the set of groupoid identities that are satisfied in the integral group ring $\mathbb{Z}[G]$ when the binary operation is $\alpha x+\beta y$. Kelly and Padmanabhan \cite{KP85} showed that $\Sigma$ equals $\Sigma(\mathbf{K}\mathbf{L};\alpha,\beta)$, where $\mathbf{K}\mathbf{L}=\{\mspace{1mu}\alpha,\beta,\gamma,1\mspace{1mu}\}$ is the Klein $4$-group. Our result for $\Sigma$ supports the conjecture of \cite{dK08} that $\Sigma(G;\alpha,\beta)$ is finitely based whenever $G$ is finite. A term is \emph{linear} when no variable occurs more than once. 
If $p$ is a linear term and we interchange two variables in $p$ to form $q$, then $p=q$ is an \emph{interchange law}. Observe that each identity in $\mathcal{M}$ is an interchange law. We present finite bases for the identities satisfied by any proper subset of the four abelian group operations $\pm x\pm y$. All these bases are shown in Table 1 of \S2. (When the constant zero is allowed, the corresponding finite bases appear in \cite{KP85}.) To justify Table 1, four bases must be verified, which is done in Sections 3, 5, 6 and 7. Sections 5 and 6 each require a technical result from \S4. For finite $G$, Theorem 2.2 of \cite{dK08} characterizes when $\Sigma(G;\alpha,\beta)$ has a basis consisting of interchange laws. For certain finite groups---including the Klein $4$-group---Theorem 9.2 simplifies this characterization. The final two sections of the paper concern this new characterization. An identity is \emph{balanced} when each variable occurs equally often on each side. Any set of balanced identities is called \emph{balanced}. An identity is \emph{linear} if it is balanced and each side is linear. We allow $G$ to be an arbitrary $2$-generated abelian group. (In Sections 1 to 7, $G$ is always the Klein $4$-group.) Each identity $p=q$ in $\Sigma(G;\alpha,\beta)$ is balanced. Each identity in $\Sigma(G;\alpha,\beta)$ can be obtained by identifying variables in a linear identity that is in $\Sigma(G;\alpha,\beta)$. Thus, the linear identities of $\Sigma(G;\alpha,\beta)$ form a basis for $\Sigma(G;\alpha,\beta)$. A \emph{tree} always means a full binary tree, i.e., a finite rooted tree (growing downwards) in which each non-leaf has exactly two children. Every subterm of a linear term $p$ corresponds to a vertex of the corresponding tree $P$ and vice-versa. (An uppercase letter always denotes the corresponding tree.) A variable corresponds to a trivial tree. 
The tree for the linear term $pq$ is obtained by substituting the trees $P$ and $Q$ for the leaves of the two-leaved tree. The \emph{rank} of a term is the number of its variable occurrences and the \emph{rank} of a tree is the number of its leaves. A \emph{left edge} (or $\alpha$-\emph{edge}) of a tree is an edge that descends to a left child. A vertex that is not a leaf is called \emph{internal}. The \emph{color} of a variable in a linear term is its coefficient in the polynomial ring over $\mathbb{Z}[G]$ when the binary operation $xy$ is replaced by $\alpha x+\beta y$. We color the vertices of the corresponding tree with elements of $G$. We color the root with the identity element and then descend the tree; the color for the left child is $\alpha$ times that of the parent and, for the right child, $\beta$ times. On the leaves of the tree, this coloring agrees with the coloring of the variables in the linear term. A linear identity $p=q$ is in $\Sigma(G;\alpha,\beta)$ iff every variable has the same color in $p$ and $q$. Thus, an interchange law is in $\Sigma(G;\alpha,\beta)$ exactly when the two interchanged variables have the same color. Figure 1 shows the tree for each mutation law. Black-filled circles correspond to the interchanged variables; their common color (an element of the Klein $4$-group) is also shown. \begin{figure} \caption{Trees for the mutation laws} \end{figure} Whenever we prove an interchange law from a set of interchange laws, we stop immediately after successfully interchanging the two distinguished variables in some derived term. Such a proof can be completed by re-applying, in the reverse order, all the other interchanges that were used. In any proof of an interchange law by induction on the rank, we can stop whenever the two variables are in a proper subterm; we shall say that the two variables are ``closer.'' We can also stop when the corresponding two leaves are in a proper subtree. 
(By replacing a suitable internal vertex by a leaf, the original two leaves are in a tree of lower rank.) Let $x$, $r$ and $s$ be vertices of the same color in a tree. If $x$ is a leaf, and $r$ and $s$ are incomparable, then we can replace $r$ with $x$ by using interchange laws. The verification is easy. If $r$ does not contain $x$, then interchange $r$ and $x$. If $r$ does contain $x$, then first interchange $r$ and $s$. This simple observation is called the ``double rule.'' The notation $p\equiv q$ means that the terms $p$ and $q$ are identical. We write $r\le p$ to indicate that $r$ is a subterm of $p$. We shall use the ``local'' rule for equational derivation of McNulty \cite{gM82}. A \emph{substitution instance} of an identity or a term is produced by replacing its variables by terms. We fix a set of identities $\Pi$ in an arbitrary type and write $p\sim q$ when the term $q$ is the result of replacing one occurrence of the subterm $r$ in $p$ by the term $s$, where $r=s$ or its opposite is a substitution instance of an identity in $\Pi$. The identity $p=q$ is a consequence of $\Pi$ iff there is a sequence $p\equiv p_1\sim p_2\sim\dots\sim p_n\equiv q$ for some $n\ge1$. Each term $p$ has a \emph{dual} $\widetilde{p}$, obtained by replacing the groupoid operation by its opposite. Forming the dual interchanges the colors $\alpha$ and $\beta$. The \emph{dual} of an identity $p=q$ is the identity $\widetilde{p}=\widetilde{q}$. A set of identities that is closed under duality is called \emph{self-dual}. In particular, $\Sigma=\Sigma(\mathbf{K}\mathbf{L};\alpha,\beta)$ is self-dual. The \emph{dual} of a tree is its mirror image. Henceforth, colors are elements of the Klein $4$-group. \section{Independent finite basis for $\Sigma$} Let $S$ be the semigroup with 1 that is freely generated by the ``letters'' $\alpha$ and $\beta$. 
For each $\sigma\in S$, we define (inductively) a linear term $\overline{\sigma}x$ in the variable $x$ and the \emph{auxiliary variables} $z_1$, $z_2$, $z_3$, \dots . For $\sigma\in S$, we write $|\sigma|$ for its length. We begin by defining $\overline{1}x\equiv x$. For $\sigma\in S$, $\overline{\alpha\sigma}x\equiv(\overline{\sigma}x)z_{|\sigma |+1}$ and $\overline{\beta\sigma}x\equiv z_{|\sigma |+1}(\overline{\sigma}x)$. Observe that the auxiliary variables are numbered beginning at the maximum depth. An example is $\overline{\beta\beta\alpha}x\equiv z_3(z_2(xz_1))$. (This definition is from \cite{dK08}.) Following \cite{dK08}, the \emph{signature} of a descending path from $u$ to $v$ in a tree is the product in $S$ (from left to right) of the edge labels $(\alpha$ or $\beta)$ starting at $u$. We allow $u$ and $v$ to be equal (in which case, 1 is the signature). In the tree for the linear term $\overline{\sigma}x$, the path to $x$ has signature $\sigma$. (When the initial vertex is unspecified, it is understood to be the root.) If there is a descending path in a tree with signature $\sigma$, then $\sigma$-\emph{terminator} is our name for final vertex of this path. In this section, we call a signature \emph{compressed} when it is compressed modulo $\mathcal{M}$ in the sense of \cite{dK08}. A signature is not compressed exactly when two vertices of the same color in the tree for $\overline{\sigma}x$ can be interchanged (using $\mathcal{M}$) so that the new tree has a shorter path to $x$. Of course, one of the interchanged vertices must be an auxiliary variable. \begin{lemma} The compressed signatures modulo $\mathcal{M}$ are $\alpha^k$, $\beta^k$, $\alpha\beta^k$ and $\beta\alpha^k$ for $k\ge0$. \end{lemma} \begin{proof} In the tree for $\overline{\alpha^k}x$, the internal vertices and $x$ have color 1 or $\alpha$, while each auxiliary variable has color $\beta$ or $\gamma$. 
In the tree for $\overline{\alpha\beta^k}x$, the internal vertices and $x$ have color $\alpha$ or $\gamma$, while each auxiliary variable has color 1 or $\beta$. Thus, by duality, all the given signatures are compressed. In $\overline{\alpha^2\beta}x$ or $\overline{\alpha\beta\alpha}x$, the variables $x$ and $z_3$ can be interchanged by (M3) or (M4). Therefore, by duality, the semigroup subterms $\alpha^2\beta$, $\alpha\beta\alpha$, $\beta^2\alpha$ and $\beta\alpha\beta$ must be excluded. The listed signatures are exactly the ones that remain. \end{proof} The following lemma is a special case of Theorem 9.2. We shall give a proof that only uses the characterization theorem of \cite{dK08}. The matrix in the following proof is explained in \S9, where we shall also calculate---in a very simple way---its determinant. \begin{lemma} The interchange laws form a basis for $\Sigma$. \end{lemma} \begin{proof} Since the following matrix is nonsingular, the interchange laws form a basis for $\Sigma$ by Theorem 2.2 of \cite{dK08}. \begin{equation*} \left[ \begin{array}{rrrr} -1 &1 &1 &0\\ 1 &-1 &0 &1\\ 1 &0 &-1 &1\\ 0 &1 &1 &-1 \end{array} \right] \qedhere \end{equation*} \end{proof} \begin{theorem} The set $\mathcal{M}$ is an independent basis for $\Sigma$. \end{theorem} \begin{proof} By Lemma 1.2, the interchange laws form a basis for $\Sigma$. Therefore, it suffices to derive each interchange law from $\mathcal{M}$ . Let $x$ and $y$ be distinct variables of the same color $c$ in the linear term $p$. We can assume that $p\equiv qr$ , with $x\le q$ and $y\le r$. By induction on the rank of $p$, we shall show that $\mathcal{M}$ allows us to interchange $x$ and $y$ in $p$. Let $\sigma$ be the signature of the path from $q$ to $x$ and let $\tau$ be the signature of the path from $r$ to $y$. By induction, we can assume that $q\equiv\overline{\sigma}x$, $r\equiv\overline{\tau}y$, and that both $\sigma$ and $\tau$ are compressed. 
(New auxiliary variables are used in $\overline{\tau}y$.) We shall consider the four possible values for $c$. For each value of $c$, Lemma 1.1 determines the possible values for $\sigma$ and $\tau$, subject to the condition that $\alpha\sigma$ and $\beta\tau$ both evaluate to $c$ in $\mathbf{K}\mathbf{L}$. If $\sigma=\alpha^k$ for $k\ge2$, then interchange $r$ and the $\alpha^2\beta$-terminator by (M3) to bring $x$ and $y$ closer. If $\sigma=\beta^k$ for $k\ge2$, then interchange $r$ and the $\alpha\beta\alpha$-terminator by (M4) to bring $x$ and $y$ closer. Therefore, we can assume that $k$ is 0 or 1 whenever $\sigma=\alpha^k$ or $\sigma=\beta^k$. By duality, $k$ is 0 or 1 whenever $\tau=\alpha^k$ or $\tau=\beta^k$. We call the procedures of this paragraph ``exponent reduction.'' Let $c=\alpha$. By exponent reduction, we can assume that $\sigma=1$. In other words, $q\equiv x$. Let $\tau=\beta\alpha^l$ for odd $l$. By (M6), we can interchange $x$ and the $\beta^2\alpha$-terminator. We have either interchanged $x$ and $y$ or the new value of $\sigma$ is $\alpha^{l-1}$ for $l\ge3$. In the latter case, apply exponent reduction. We can now assume that $\tau=\alpha\beta^l$ for odd $l$. By (M5), interchange $x$ and the $\beta\alpha\beta$-terminator. If $l=1$, then we have just interchanged $x$ and $y$. Otherwise, $l\ge3$ and the new value of $\sigma$ is $\beta^{l-1}$, so that we are done by exponent reduction. The $c=\beta$ case now follows by duality. In the two remaining cases, neither $\sigma$ nor $\tau$ is trivial. If $\sigma$ and $\tau$ begin with the same letter, then the medial law can be used to bring $x$ and $y$ closer. Therefore, we can assume that $\sigma$ and $\tau$ begin with different letters. Let $c=\gamma$. We first assume that $\sigma=\beta\alpha^k$ for even $k$. Therefore, $\tau=\alpha\beta^l$ for even $l$. Use the medial law to transform $\sigma$ into $\beta^{l+1}$ and $\tau$ into $\alpha^{k+1}$. 
By exponent reduction, $k=l=0$, so that we can interchange $x$ and $y$ by the medial law. The remaining case is that $\sigma=\beta^k$ and $\tau=\alpha^l$ with $k$ and $l$ odd. By exponent reduction, $k=l=1$ and we can apply the medial law to interchange $x$ and $y$. Finally, let $c=1$. We first assume that $\sigma=\alpha\beta^k$ for even $k$. Therefore, $\tau=\beta\alpha^l$ for even $l$. Use (M2) to transform $\sigma$ into $\alpha^{l+1}$ and $\tau$ into $\beta^{k+1}$. By exponent reduction, $k=l=0$, so that we can interchange $x$ and $y$ by (M2). The remaining case is that $\sigma=\alpha^k$ and $\tau=\beta^l$ with $k$ and $l$ odd. By exponent reduction, $k=l=1$ and we can apply (M2) to interchange $x$ and $y$. This completes the proof that $\mathcal{M}$ is a basis. We now show that $\mathcal{M}$ is independent. We consider local derivations using $\mathcal{M}$ without one of its identities. Without (M1), $\{\mspace{1mu}(ux)(yu)\mspace{1mu}\}$ is closed. Without (M2), $\{\mspace{1mu}(xu)(uy)\mspace{1mu}\}$ is closed. Let $p=q$ be one of the four remaining identities. Since each side of every identity in $\mathcal{M}$ has rank 4, no other identity in $\mathcal{M}$ can be used in a local derivation of $p=q$. (See Figure 1.) \end{proof} \section{Subsets of abelian group operations} We write the four abelian group operations as follows: $f_1(x,y)=x+y$, $f_2(x,y)=x-y$, $f_3(x,y)=-x+y$ and $f_4(x,y)=-x-y$. For any proper subset $K$ of $\{\mspace{1mu}1,2,3,4\mspace{1mu}\}$, we write $\Sigma_K$ for the groupoid identities that are satisfied in $\mathbb{Z}$ by $f_k$ for every $k\in K$. If $1\in K$, then $\Sigma_K$ is balanced because it is a subset of $\Sigma_1$. On the other hand, $\Sigma_{2,3,4}$ is not balanced. Table 1 gives a finite basis for every $\Sigma_K$. Observe that duality interchanges $f_2$ and $f_3$. The source for each basis is also given in the table. Up to duality, there are four new results in the table. 
\begin{table} \begin{center} \begin{tabular}{c l l} \hline $K$ &Basis for $\Sigma_K$ &Reference \\ \hline 1 &$x(yz)=(xy)z$, $xy=yx$ &folklore \\ 2 &$x(y(z(xy)))=z$ &Tarski \cite{aT38} \\ 3 &$(((yx)z)y)x=z$ &duality \\ 4 &(M1), $xy=yx$, $x(xy)=y$ &Je\v{z}ek and Kepka \cite{JK83} \\ $1, 2$ &(M1), $(xy)z=(xz)y$, $x(zy)=y(zx)$ &Kelly \cite{dK08} \\ $1, 3$ &(M1), $z(yx)=y(zx)$, $(yz)x=(xz)y$ &duality \\ $1, 4$ &(M1), $xy=yx$, $x(z(ty))=y(z(tx))$ &Kelly \cite{dK08} \\ $2, 3$ &(M1), $x^2=y^{2}$, $(xx^2)x^2=x$ &Kelly \&{} Padmanabhan \cite{KP85} \\ $2, 4$ &(M2), $x(xy)=y$ &Theorem 3.2 \\ $3, 4$ &(M2), $(yx)x=y$ &duality \\ $1,2,3$ &$\mathcal{M}$, $(x^2y)z^2=(z^2y)x^2$, $(xy^2)z^2=(xy^2)z^2$ &Theorem 5.1 \\ $1,2,4$ &$\mathcal{M}$, $x(x(yz))=(x(zy))x$ &Theorem 6.1 \\ $1,3,4$ &$\mathcal{M}$, $((zy)x)x=x((yz)x)$ &duality \\ $2,3,4$ &(M1), (M2), $(xy^2)y^2=x$ &Theorem 7.2 \\ \hline \\ \end{tabular} \caption{Finite bases for all selections of abelian group operations} \end{center} \end{table} In fact, Gr\"atzer and Padmanabhan \cite{GP78} proved that $\Sigma_{2,3}$ is one-based. Padmanabhan \cite{rP69} determined all the terms $p$ of rank five such that $\{\mspace{1mu}p=x\mspace{1mu}\}$ is a basis for $\Sigma_2$; moreover, he showed that five is the minimum rank for a term $p$ so that $\{\mspace{1mu}p=x\mspace{1mu}\}$ is a basis for $\Sigma_2$. For a term $p$ in variables $X$, we write $[p]$ for its value $\sum($ $a_xx\mid x\in X)$ when the product $xy$ is replaced by $\alpha x+\beta y$. In particular, $p=q$ is in $\Sigma$ iff $[p]=[q]$. Observe that each coefficient $a_x$ is in $\mathbb{N}[\mathbf{K}\mathbf{L}]$ and that only finitely many coefficients are nonzero. Our proof of the following result uses \cite{KP85}, where the constant zero was allowed. \begin{lemma} In the following three statements, each $n_x$ is a suitable integer. For terms $p$ and $q$, \begin{enumerate}[\rm(i)] \item $p=q$ is in $\Sigma_{1,2,3}$ iff \ $[p]-[q]=\sum(n_x(\alpha+\beta-\gamma-1)x\mid x\in X)$. 
\item $p=q$ is in $\Sigma_{1,2,4}$ iff \ $[p]-[q]=\sum(n_x(\alpha-\beta+\gamma-1)x\mid x\in X)$. \item $p=q$ is in $\Sigma_{2,3,4}$ iff \ $[p]-[q]=\sum(n_x(\alpha+\beta+\gamma+1)x\mid x\in X)$. \end{enumerate} \end{lemma} \begin{proof} We require some results from Table 2 of \cite{KP85}. If $p=q$ is in $\Sigma_{1,2,3}$, then from that table, $[p]-[q]=\sum(r_x(\alpha+\beta-\gamma-1)x\mid x\in X)$, where each $r_x$ is in $\mathbb{Z}[\mathbf{K}\mathbf{L}]$. Since $(a\alpha+b\beta+c\gamma+d)(\alpha+\beta-\gamma-1)=(-a-b+c+d)(\alpha+\beta-\gamma-1)$, condition (i) follows. The argument is similar for the other two cases. \end{proof} Later, we shall apply the following immediate consequence of Lemma 2.1. \begin{lemma} For each of $\Sigma_{1,2,3}$ and $\Sigma_{1,2,4}$, a basis consists of the identities $p=q$ that satisfy the following conditions. The symbol $x$ denotes any variable that occurs in $p$ or $q$. If $x$ occurs exactly once in both $p$ and $q$, then the color of $x$ is the same in $p$ and $q$. Whenever $x$ does not occur exactly once in $p$ and $q$, then $x$ occurs exactly twice in both $p$ and $q$. Moreover, when $x$ occurs twice, then: \begin{enumerate}[\rm(i)] \item for $\Sigma_{1,2,3}$, it occurs with colors $\alpha$ and $\beta$ in one term and with colors $\gamma$ and $1$ in the other; \item for $\Sigma_{1,2,4}$, it occurs with colors $\alpha$ and $\gamma$ in one term and with colors $\beta$ and $1$ in the other. \end{enumerate} \end{lemma} Let us call the identities described in Lemma 2.2 \emph{general identities}. Thus, $\Sigma_{1,2,3}$ and $\Sigma_{1,2,4}$ each has a basis consisting of general identities (where the meaning of ``general'' depends on the context). Any identity in each of these two sets can be obtained by identifying the variables in some general identity. \section{Independent finite basis for the operations $x-y$ and $-x-y$} Let $\mathcal{B}_{2,4}=\mathcal{M}\cup\{\mspace{1mu}x(xy)=y\mspace{1mu}\}$. 
For $\Sigma_{2,4}$, we first show that $\mathcal{B}_{2,4}$ is a basis and we then find an independent basis. We require the following result, which is similar to Lemma 2.1. \begin{lemma} For terms $p$ and $q$, $p=q$ is in $\Sigma_{2,4}$ iff $[p]-[q]=\sum((m_x(\alpha+\gamma)+n_x(\beta+1))x\mid x\in X)$ for integers $m_x$ and $n_x$. \end{lemma} \begin{proof} By Table 2 of \cite{KP85}, $p=q$ is in $\Sigma_{2,4}$ iff $[p]-[q]=\sum(r_x(\beta+1)x\mid x\in X)$, where each $r_x$ is in $\mathbb{Z}[\mathbf{K}\mathbf{L}]$. Since $(a\alpha+b\beta+c\gamma+d)(\beta+1)=(a+c)(\alpha+\gamma)+(b+d)(\beta+1)$, the result follows. \end{proof} \begin{theorem} The set $\mathcal{B}_{2,4}$ is a basis for $\Sigma_{2,4}$. \end{theorem} \begin{proof} Since $\mathcal{M}$ is a basis for $\Sigma$ by Theorem 1.1, we can calculate modulo $\Sigma$. Let $p=q$ be in $\Sigma_{2,4}$. Let $t$ be a fixed variable. If a variable $x$ occurs in $p$ with colors $\alpha$ and $\gamma$, then form $p'$ by replacing these two occurrences of $x$ with $t$. Since $t(tp)=x(xp')$ is in $\Sigma$, it follows that $p=p'$. Using (M4), $(x(yz))x=(x(xz))y=zy$. Consequently, $(xy)x=(y(x(xy)))y=y^2y$. Therefore, $(xy)x=(zy)z$. If $x$ has colors $\beta$ and 1 in $p$ and we form $p'$ by replacing these two occurrences of $x$ with $t$, then $p=p'$ because $p\equiv p_1p_2=(t(p_2p_1))t=(x(p'_2p'_1))x=p'$. Make all possible such variable replacements in both $p$ and $q$. By Lemma 3.1, $[p]-[q]=(m(\alpha+\gamma)+n(\beta+1))t$ \ for integers $m$ and $n$. Let $r$ be any term. If we define $r_i$ by $r_0\equiv r$ and $r_{i+1}\equiv t(tr_i)$, then $r=r_i$ is a consequence of $\mathcal{B}_{2,4}$. If $m<0$, replace $p$ by $p_{|m|}$, and if $m>0$, replace $q$ by $q_m$. Thus, we have reduced to the case that $m=0$. If either $p$ or $q$ is a variable, then replace $p$ by $t(tp)$ and replace $q$ by $t(tq)$. We now define $(uv)^*\equiv(t(vu))t$ for terms $u$ and $v$. We define a new sequence of terms: $r_0\equiv r$ and $r_{i+1}\equiv(r_i)^*$. 
Clearly, $r=r_i$ is a consequence of $\mathcal{B}_{2,4}$. If $n<0$, replace $p$ by $p_{|n|}$, and if $n>0$, replace $q$ by $q_n$. Since we have reduced to the case that $n=0$, the transformed identity is in $\Sigma$ and we are done. \end{proof} \begin{theorem} An independent basis for $\Sigma_{2,4}$ consists of \textup{(M2)} and $x(xy)=y$. \end{theorem} \begin{proof} Any consequence of (M2) is balanced. Moreover, the second projection satisfies $x(xy)=y$, but fails (M2) in a $2$-element set. Thus, the two identities are independent. Assume both (M2) and $x(xy)=y$. By Theorem 3.1, it suffices to derive the five remaining identities of $\mathcal{M}$. The second identity allows us to cancel on the left. By (M2), $(xy)((zy)z)=(zy)((zy)x)=x$. Thus, $(xy)((xy)x)=(xy)((zy)z)$ and we can cancel on the left to conclude that $(xy)x=(zy)z$. Calculating, $y^2(x^2y^2)=y^2(yx)^2=((yx)y)^2=(x^2x)^2=x^2(x^2x^2)=x^2$. Since $y^2(y^2x^2)=x^2$, the identity $x^2y^2=y^2x^2$ follows by left cancellation. Calculating, $(xy)((xz)(yt))=((yt)y)((xz)x)=(t^2t)(z^2z)=(zt)(z^2t^2)=(zt)(t^2z^2)=(zt)(zt)^2=zt$. Since $(xy)((xy)(zt))=zt$, we conclude, by left cancellation, that (M1) holds. We can now use both (M1) and (M2). The calculation $((xy)z)t=((xy)z)(tt^2)=((xy)t)(zt^2)=(t^2t)(z(xy))=((xt)x)(z(xy))=((xt)z)(x(xy))=((xt)z)y$ proves (M3). The calculation $(x(yz))t=(x(yz))(tt^2)=(t^2(yz))(tx)=((ty)(tz))(tx)=(x(tz))(t(ty))=(x(tz))y$ proves (M4). The calculation $x((yz)t)=(xx^2)((yz)t)=(tx^2)((yz)x)=(tx^2)((yz)(xx^2))=(tx^2)((yx)(zx^2))=((zx^2)x^2)((yx)t)=((xx^2)(xz))((yx)t)=(x(xz))((yx)t)=z((yx)t)$ proves (M5). The calculation $x(y(zt))=(xx^2)(y(zt))=((zt)x^2)(yx)=((xt)(xz))(yx)=(x(xz))(y(xt))=z(y(xt))$ proves (M6). \end{proof} \section{Two results about terms} We shall apply the results of this section in \S5 and \S6. Let $p$ be a term in which no variable occurs more than once with the same color. There is an obvious tree $P$ associated with $p$ (which extends the definition given for linear terms). 
The variable \emph{occurrences} in $p$ now correspond to the leaves of $P$. For example, if a variable $x$ occurs with colors $\alpha$ and $\beta$ in $p$, then the $\alpha$-leaf $x$ and the $\beta$-leaf $x$ are two distinct leaves of $P$. As before, the subterms of $p$ uniquely correspond to subtrees of $P$. For a tree $T$, let $g^\#$ denote the number of its $g$-leaves, where $g\in\mathbf{K}\mathbf{L}$. We set $\lambda(T)=(\alpha^\#,\beta^\#,\gamma^\#,1^\#)$ and we call $\lambda(T)$ the \emph{total color} of $T$. A $4$-tuple of natural numbers is called \emph{representable} if it equals $\lambda(T)$ for some tree $T$. We first characterize the representable $4$-tuples. The second result of this section concerns subterms. Let $m$ and $n$ be nonnegative integers. We define two functions: $\varphi_1(m,n)=(2m+n-1)/3$ and $\varphi_2(m,n)=(m+2n-2)/3$. Each function returns an integer when $m$ and $n$ satisfy $2m+n\equiv1 \pmod{3}$. We also define these two functions on $4$-tuples by defining $\varphi_i(a,b,c,d)$ to be $\varphi_i(a+b,c+d)$. For a tree $T$, we write $\varphi_i(T)$ for $\varphi_i(\lambda(T))$. By the following result, $\varphi_1(T)$ and $\varphi_2(T)$ are nonnegative integers for any tree $T$. \begin{theorem} \begin{enumerate}[\rm(i)] \item If $\lambda(T)=(a,b,c,d)$ for a tree $T$, then $2m+n\equiv1\pmod{3}$, where $m=a+b$ and $n=c+d$. \item Every tree $T$ has $\,\varphi_1(T)$ \ $\alpha$-vertices, $\,\varphi_1(T)$ \ $\beta$-vertices, $\varphi_2(T)$ \ $\gamma$-vertices and \\ $(\varphi_2(T)+1)$ \ $1$-vertices. \item In any nontrivial tree $T$, $\alpha^\#\le\varphi_1(T)$, $\beta^\#\le\varphi_1(T)$, $\gamma^\#\le\varphi_2(T)$ and \\ $1^\#\le\varphi_2(T)$. \item A $4$-tuple $(a,b,c,d)$ of nonnegative integers different from $(0,0,0,1)$ is representable iff $2a+2b+c+d\equiv1\pmod{3}$, $a\le\varphi_1(a,b,c,d)$, $b\le\varphi_1(a,b,c,d)$, $c\le\varphi_2(a,b,c,d)$ and $d\le\varphi_2(a,b,c,d)$. 
\end{enumerate} \end{theorem} \begin{proof} We prove (i) and (ii) simultaneously by induction on the rank of $T$. Both statements hold for the trivial tree which has the total color $(0,0,0,1)$. We can now assume that $T$ is nontrivial. Consider a pair of sibling leaves at the maximum depth in $T$ and assume the result for the tree $S$ obtained by removing these two leaves. Let $\lambda(S)=(a',b',c',d')$. If the maximum depth is odd, then $a=a'+1$, $b=b'+1$ and $c+d=c'+d'-1$. Thus, (i) holds for $T$, $\varphi_1(T)=\varphi_1(S)+1$ and $\varphi_2(T)=\varphi_2(S)$. Since $T$ has one more $\alpha$-vertex and one more $\beta$-vertex than $S$, condition (ii) holds for $T$. The proof for even maximum depth is similar. Condition (iii) follows immediately from (ii). Moreover, the necessity in condition (iv) follows from (i) and (iii). Let $(a,b,c,d)$ be a $4$-tuple of nonnegative integers different than $(0,0,0,1)$ that satisfies the five parts of condition (iv). Let $m=a+b$ and $n=c+d$. Thus, $2m+n\equiv1 \pmod{3}$. Observe that $\varphi_1(m,n)+\varphi_2(m,n)=m+n-1$. By induction on $m+n$, we show that there is a tree $T$ with $\lambda(T)=(a,b,c,d)$. If there is a nontrivial tree with total color $(a,b,c,d)$, then there are also trees with total colors $(b,a,c,d)$, $(b,a,d,c)$ and $(a,b,d,c)$. (Apply the dual in the first case and replace the tree for $pq$ by the tree for $\emph{qp}$ in the second.) Therefore, we can assume that $a\le b$ and $c\le d$. We shall write $\varphi_i$ for $\varphi_i(m,n)$. If $a$ and $c$ were both zero, then $m+n=b+d\le\varphi_1+\varphi_2=m+n-1$, a contradiction. We first assume that $c>0$. If $a=\varphi_1$, then $a=b$ and $3b=4b+c+d-1$, which is impossible because $c+d\ge2$. Therefore, $a<\varphi_1$. Since $(a+1,b,c-1,d-1)$ satisfies (iv), we are done by induction. (In the representing tree, replace an $\alpha$-leaf by the tree of rank 2.) We can now assume that $a>0$. 
If $c=\varphi_2$, then $c=d$ and $3d=a+b+4d-2$, implying that $(a,b,c,d)$ equals $(1,1,0,0)$, the total color of the tree of rank 2. Thus, we can assume that $c<\varphi_2$. Since $(a-1,b-1,c+1,d)$ satisfies (iv), it is representable. Replace a $\gamma$-leaf to complete the proof. \end{proof} \begin{theorem} If a linear term contains variables of all four colors, then modulo $\Sigma$, the term has a subterm of the form $((xy)v)(zt)$ or $(u(yx))(zt)$, where $x$, $y$, $z$, $t$, $u$ are variables and $v$ is a term. \end{theorem} \begin{proof} Assume that variables $x$, $y$, $z$ and $t$ occur in the linear term $p$ with colors $\alpha$, $\beta$, $\gamma$ and 1, respectively. All calculations with identities are modulo $\Sigma$. We induct on the rank of $p$. We first assume there are no internal vertices of color $\alpha$ in the tree $P$. In particular, $p=xq$. Since $Q$ contains a $\beta$-leaf, it also contains an $\alpha$-vertex, say $u$. (All colors are calculated in $P$.) By our assumption, $u$ is a leaf. Interchange $x$ and the variable $u$ so that $p=ur$. The variables $x$, $y$, $z$ and $t$ occur in the term $r$ with colors $\gamma$, 1, $\beta$ and $\alpha$, respectively. Thus, by induction, $r$ has a subterm with one of the two given forms and we are done. By duality, we can now assume that $P$ has internal vertices of color $\alpha$ and of color $\beta$. By the double rule, we can assume that $p\equiv(qr)(zt)$ for terms $q$ and $r$. We first assume that $q\equiv q_1q_2$ and $r\equiv r_1r_2$. Using (M1) and (M2), we can assume that $x\le r$ and $y\le r$. Interchange $x$ and $q_1$, and $y$ and $q_2$ to give a term of the first form. We now assume that the tree $P$ has no internal $\gamma$-vertices. In particular, $r$ is a variable. As before, $q\equiv q_1q_2$. We are done if both $q_1$ and $q_2$ are variables. Firstly, we assume that $q_1$ is not a variable. In $P$, descend from $q_1$ by $\alpha$-edges until we reach a leaf $u$. 
If $u$ has color 1, then interchange the parent of $u$ and the leaf $x$. (Since there are no internal $\gamma$-vertices, the sibling of $u$ is a leaf.) Hence, we can assume that $u\equiv x$. Let $v$ be the sibling of $x$. Since $v$ and $q_2$ both have color $\beta$, we can use the double rule to replace $v$ with $y$. Thus, $((xy)s)w$ is now a subterm. Interchange $w$ and $zt$ to complete the proof in this case. Secondly, we can assume that $q_1\equiv x$ and that $q_2$ is not a variable. Descend from $q_2$ by $\beta$-edges until we reach a leaf $u$. Arguing as before, we can assume that $u\equiv y$. Interchange $x$ and the sibling of $y$ to make $xy$ a subterm. Now interchange $q$ and $t$. Interchange $r$ and $z$ to make $tz$ a subterm. Thus, there is a subterm $w(s(xy))$. Interchange $w$ and $tz$ to obtain $(tz)(s(xy))$ as a subterm. Since $(tz)(s(xy))=(ts)(z(xy))=((xy)s)(zt)$, we are done in this case. Finally, we can assume that the tree $P$ has no internal $1$-vertices. In particular, $q$ is a variable. Similarly as before, $r\equiv r_1r_2$ and we are done if both $r_1$ and $r_2$ are variables. Firstly, we assume that $r_1$ is not a variable and we descend from $r_1$ by $\alpha$-edges until we come to a leaf $u$. As before, we can assume that $u\equiv y$. Since $r_2$ has color $\alpha$, the double rule allows us to assume that $yx$ is a subterm. Interchange $q$ and $t$, and also $r$ and $z$. In particular, $tz$ is now a subterm. We now have the subterm $((yx)s)w$. Interchange $w$ and $tz$ to obtain $((yx)s)(tz)$ as a subterm. Secondly, we can assume that $r_1\equiv y$ and that $r_2$ is not a variable. Descend from $r_2$ by $\beta$-edges until we come to a leaf $u$, which as before, we can assume is $x$. Now make $yx$ a subterm by interchanging $y$ and the sibling of $x$. Thus, there is a subterm $w(s(yx))$. Interchange $w$ and $zt$ to obtain $(zt)(s(yx))$ as a subterm. Since $(zt)(s(yx))=(zs)(t(yx))=((yx)s)(tz)$, the proof is complete. 
\end{proof} \section{Independent finite basis for all operations except $-x-y$} Let $\mathcal{B}_{1,2,3}=\mathcal{M}\cup\{\mspace{1mu}(x^2z)y^2=(y^2z)x^2,\:(zx^2)y^2=(zy^2)x^2\mspace{1mu}\}$. We shall show that $\mathcal{B}_{1,2,3}$ is a basis for $\Sigma_{1,2,3}$. \begin{lemma} If $\psi(x,y)$ is a term in which the variable $x$ occurs once with color $\alpha$ and once with color $\beta$, and the variable $y$ occurs once with color $\gamma$ and once with $1$, then the identity $\psi(x,y)=\psi(y,x)$ is a consequence of $\mathcal{B}_{1,2,3}$. \end{lemma} \begin{proof} Let $p\equiv\psi(x,y)$ be the term described above. We can assume that $x$ and $y$ each occur exactly twice and that no other variable occurs more than once in $p$. Since $\mathcal{M}$ is a basis for $\Sigma$ by Theorem 1.1, we can calculate with identities modulo $\Sigma$. By Theorem 4.2, there is a term $q$ such that $p=q$ modulo $\Sigma$ and $q$ contains a subterm $r$ of the form $((x_1x_2)v)(y_1y_2)$ or $(u(x_2x_1))(y_1y_2)$, where $x_1$, $x_2$, $y_1$ and $y_2$ are variables. For later use, record a sequence of vertex interchanges that takes us from $P$ to $Q$. Let $c$ be the color of the vertex $r$ in $Q$. If $c\in\{\mspace{1mu}\gamma,1\mspace{1mu}\}$, then use interchanges to replace both $x_1$ and $x_2$ by the variable $x$. For example, if $c=1$, then interchange the $\alpha$-occurrence of $x$ and the variable $x_1$ (unless $x$ is already $x_1$). If $c\in\{\mspace{1mu}\alpha,\beta\mspace{1mu}\}$, replace both $x_1$ and $x_2$ by $y$. In the same way, replace $y_1$ and $y_2$ by $x$ or $y$, as appropriate. By the suitable identity of $\mathcal{B}_{1,2,3}$, we can interchange the subterms $xx$ and $yy$ in the modified subterm $r$. Now redo all the previous interchanges in the reverse order to obtain $\psi(y,x)$. \end{proof} \begin{theorem} The set $\mathcal{B}_{1,2,3}$ is an independent basis for $\Sigma_{1,2,3}$. \end{theorem} \begin{proof} Let $p=q$ be in $\Sigma_{1,2,3}$. 
We shall show that $p=q$ follows from $\mathcal{B}_{1,2,3}$. Since $\mathcal{M}$ is a basis for $\Sigma$ by Theorem 1.1, we can calculate with identities modulo $\Sigma$. We can assume that $p=q$ satisfies condition (i) of Lemma 2.2. Of the variables that occur twice in $p$, let $X$ be those that have colors $\alpha$ and $\beta$, and let $Y$ be those that have colors $\gamma$ and 1. (In $q$, the variables in $X$ have colors $\gamma$ and 1, while the variables in $Y$ have colors $\alpha$ and $\beta$.) By symmetry, we can assume that $|X|\le |Y|$. Let $f:X\to Y$ be a one-to-one function. For each $x\in X$, Lemma 5.1 shows that both occurrences of $x$ in $p$ can be exchanged with both occurrences of $f(x)$ in $p$. Thus, we have reduced to the case that $X$ is empty. If $Y$ is empty, we are done. Therefore, we can assume that the cardinality $n$ of $Y$ is nonzero. Let $\lambda(P)=(a,b,c,d)$. Thus, $\lambda(Q)=(a+n,b+n,c-n,d-n)$. By Theorem 4.1, $2(a+b)+c+d\equiv2(a+b+2n)+c+d-2n\equiv1 \pmod{3}$. Therefore, $n\equiv0 \pmod{3}$. Let $n=3k$ with $k>0$. From $a+3k\le\varphi_1(Q)=\varphi_1(P)+2k$, it follows that $a+k\le\varphi_1(P)$. Therefore, $a+1\le\varphi_1(P)$, so that $a+3\le\varphi_1(P)+2$. Similarly, $b+3\le\varphi_1(P)+2$. Since $c\le\varphi_2(P)$ and $d\le\varphi_2(P)$, the sequence $(a+3,b+3,c-3,d-3)$ is representable by Theorem 4.1. By this observation and induction on $k$, we can assume that $n=3$. In particular, $\varphi_1(Q)=\varphi_1(P)+2$, $\varphi_2(Q)=\varphi_2(P)-2$, $c\ge3$ and $d\ge3$. Let $Y=\{\mspace{1mu}x,y,z\mspace{1mu}\}$. Let $\mathbf{s}=(a+1,b+1,c-2,d-2)$. Clearly, $\varphi_1(\mathbf{s})=\varphi_1(P)$ and $\varphi_2(\mathbf{s})=\varphi_2(P)-2$. Since both $a+1$ and $b+1$ are at most $\varphi_1(P)$ by the previous paragraph, $\mathbf{s}$ is representable by Theorem 4.1. Let $\psi(t,z)$ be a term of total color $\mathbf{s}$ whose variables are those of $p$ with $x$ and $y$ removed, and $t$ added. 
Moreover, each old variable occurs with the same colors in $p$ and $\psi(t,z)$. The new variable $t$ occurs with colors $\alpha$ and $\beta$ in $\psi(t,z)$. Since $[p]=[\psi(xy,z)]$, the identity $p=\psi(xy,z)$ is in $\Sigma$. By Lemma 5.1, $\psi(t,z)=\psi(z,t)$ is a consequence of $\mathcal{B}_{1,2,3}$. Substituting $xy$ for $t$, we obtain $p=\psi(xy,z)=\psi(z,xy)\equiv r$. Since $[r]=[q]$, the identity $r=q$ is in $\Sigma$, and we have shown that $\mathcal{B}_{1,2,3}$ is a basis. We now show that $\mathcal{B}_{1,2,3}$ is independent. Let $\varepsilon_1$ denote $(x^2z)y^2=(y^2z)x^2$ and let $\varepsilon_2$ denote $(zx^2)y^2=(zy^2)x^2$. Since neither side of $\varepsilon_1$ or $\varepsilon_2$ is linear, these identities cannot be used in a local derivation of any identity in $\mathcal{M}$. Thus, since $\mathcal{M}$ is independent by Theorem 1.1, no identity in $\mathcal{M}$ can be omitted from $\mathcal{B}_{1,2,3}$. Suppose that there is a local derivation of $\varepsilon_1$ from $\mathcal{B}_{1,2,3}-\{\mspace{1mu}\varepsilon_1\mspace{1mu}\}$. Since $\varepsilon_1$ is not in $\Sigma$, the identity $\varepsilon_2$ must be used in this derivation; let $p\sim q$ be the first time that $\varepsilon_2$ was used. Therefore, $\lambda(P)=(1,1,2,1)$. Since $p$ and $\varepsilon_2$ both have rank 5, $p$ is a substitution instance of $(zx^2)y^2$ in which $x$, $y$ and $z$ are replaced by variables. Hence, $\lambda(P)=(1,1,1,2)$, a contradiction. Therefore, $\varepsilon_1$ cannot be omitted. Similarly, $\varepsilon_2$ cannot be omitted. \end{proof} \section{Independent finite basis for all operations except $-x+y$} Let $\mathcal{B}_{1,2,4}=\mathcal{M}\cup\{\mspace{1mu}x(x(yz))=(x(zy))x\mspace{1mu}\}$. We shall show that $\mathcal{B}_{1,2,4}$ is a basis for $\Sigma_{1,2,4}$. \begin{lemma} Both $((xy)z)(xy)=((yx)z)(yx)$ and $(z(yx))(xy)=(z(xy))(yx)$ are consequences of $\mathcal{B}_{1,2,4}$. \end{lemma} \begin{proof} Let $\varepsilon$ denote $x(x(yz))=(x(zy))x$. 
Since $((xy)z)(xy)=((x(zy))x)y$ is in $\Sigma$, we can derive it from $\mathcal{M}$ (by Theorem 1.1). The identity $((x(zy))x)y=(x(x(yz)))y\equiv p$ is a consequence of $\varepsilon$. Since $x$ and $y$ occur with the same colors in $p$, the identity $p=(y(y(xz)))x$ is in $\Sigma$. Thus, we have derived $((xy)z)(xy)=(y(y(xz)))x$. By interchanging $x$ and $y$ in this identity, we obtain $((yx)z)(yx)=p$. Hence, $((xy)z)(xy)=((yx)z)(yx)$. We now give the argument for the second identity. The identity $(z(yx))(xy)=x((y(zx))y)$ is in $\Sigma$. Consequently, $x((y(zx))y)=x(y(y(xz)))\equiv q$ using $\varepsilon$. Since the identity $q=y(x(x(yz)))$ is in $\Sigma$, we have derived $(z(yx))(xy)=y(x(x(yz)))$. Interchange $x$ and $y$ to obtain $(z(xy))(yx)=q$. Hence, $(z(yx))(xy)=(z(xy))(yx)$. \end{proof} \begin{lemma} If $\psi(x,y)$ is a term in which the variable $x$ occurs with colors $\alpha$ and $\gamma$, and the variable $y$ occurs with colors $\beta$ and 1, then the identity $\psi(x,y)=\psi(y,x)$ is a consequence of $\mathcal{B}_{1,2,4}$. \end{lemma} \begin{proof} We use the two identities of Lemma 6.1. The rest of the proof is a slight modification of the proof of Lemma 5.1. In this proof, we use condition (ii) of Lemma 2.2 and we interchange the subterms $xy$ and $yx$ (rather than $xx$ and $yy$). Also, the two pairs of colors are now $\{\mspace{1mu}\alpha,\gamma\mspace{1mu}\}$ and $\{\mspace{1mu}\beta,1\mspace{1mu}\}$. (Multiplication by any element of the Klein $4$-group permutes these two sets.) \end{proof} \begin{theorem} The set $\mathcal{B}_{1,2,4}$ is an independent basis for $\Sigma_{1,2,4}$. \end{theorem} \begin{proof} We write $\varepsilon$ for the identity $x(x(yz))=(x(zy))x$. Let $p=q$ be in $\Sigma_{1,2,4}$. We shall show that $p=q$ follows from $\mathcal{B}_{1,2,4}$. Since $\mathcal{M}$ is a basis for $\Sigma$ by Theorem 1.1, we can calculate with identities modulo $\Sigma$. We can assume that $p=q$ satisfies condition (ii) of Lemma 2.2. 
Of the variables that occur exactly twice in $p$, let $X$ be those having colors $\alpha$ and $\gamma$, and let $Y$ be those having colors $\beta$ and 1. (In $q$, the variables in $X$ have colors $\beta$ and 1, while the variables in $Y$ have colors $\alpha$ and $\gamma$.) By symmetry, we can assume that $|X|\le |Y|$. Let $f:X\to Y$ be a one-to-one function. For each $x\in X$, Lemma 6.2 shows that both occurrences of $x$ in $p$ can be exchanged with both occurrences of $f(x)$ in $p$. Thus, we have reduced to the case that $X$ is empty. If $Y$ is empty, we are done. Therefore, we can assume that the cardinality $n$ of $Y$ is nonzero. Let $\lambda(P)=(a,b,c,d)$. Thus, $\lambda(Q)=(a+n,b-n,c+n,d-n)$. For $i=1,2$, let us write $\varphi_i$ for the common values of $\varphi_i(P)$ and $\varphi_i(Q)$. Observe that $\varphi_i(a,b-1,c,d-1)=\varphi_i-1$. Since $(0,1,0,2)$ is not representable, the $4$-tuple $(a, b-1,c,d-1)$ is not $(0,0,0,1)$. From Theorem 4.1, $a+n\le\varphi_1$ and $c+n\le\varphi_2$. Thus, by Theorem 4.1, $(a, b-1,c,d-1)$ is representable. Let $rs$ be a term whose total color is $(a, b-1,c,d-1)$. We impose additional conditions on the variables in the term $rs$. Choose some $x\in Y$ and let the variables of $rs$ be those of $p$ without $x$. Moreover, each remaining variable occurs exactly as many times in $rs$ as it does in $p$, and with the same colors. Consequently, $[p]=[(x(sr))x]$, which means that $p=(x(sr))x$ is in $\Sigma$. By $\varepsilon$, $(x(sr))x=x(x(rs))$. Thus, we have derived $p=p'\equiv x(x(rs))$, where $\lambda(P')=(a+1,b-1,c+1,d-1)$. Observe that the variable $x$ occurs with the same colors in $p'$ and $q$. By induction on $n$, we have shown that $\mathcal{B}_{1,2,4}$ is a basis. We now show that $\mathcal{B}_{1,2,4}$ is independent. As in the proof of Theorem 5.1, no identity in $\mathcal{M}$ can be omitted. Since $\varepsilon$ is not in $\Sigma$, it can also not be omitted. 
\end{proof} \section{Finite basis for all operations except $x+y$} \begin{theorem} The set $\mathcal{B}_{2,3,4}=\mathcal{M}\cup\{\mspace{1mu}(xy^2)y^2=x\mspace{1mu}\}$ is a basis for $\Sigma_{2,3,4}$. \end{theorem} \begin{proof} Let $p=q$ be in $\Sigma_{2,3,4}$. We shall show that $p=q$ follows from $\mathcal{B}_{2,3,4}$. Recall that $\mathcal{M}$ is a basis for $\Sigma$ by Theorem 1.1. Let $t$ be a fixed variable. If a variable $x$ occurs with all four colors in $p$, then the calculation $p=(pt^2)t^2=(p'x^2)x^2=p'$ shows that we can replace these four occurrences of $x$ by $t$. (The term $p'$ is the term $p$ with this replacement; the identity $(pt^2)t^2=(p'x^2)x^2$ is in $\Sigma$.) Repeat this replacement as often as possible on both $p$ and $q$. Thus, we can assume that $t$ is the only variable that occurs with all four colors in either $p$ or $q$. By Lemma 2.1, $[p]-[q]=n(\alpha+\beta+\gamma+1)t$ for some integer $n$. By symmetry, we can assume that $n\ge0$. Repeat the operation $r\mapsto(rt^2)t^2$ $n$ times on $q$ to obtain $q'$. Since $p=q'$ is in $\Sigma$, we have shown that $\mathcal{B}_{2,3,4}$ is a basis. \end{proof} \begin{theorem} The set $\mathcal{B}=\{\mspace{1mu}\textup{(M1)},\:\textup{(M2)},\:(xy^2)y^2=x\mspace{1mu}\}$ is a basis for $\Sigma_{2,3,4}$. \end{theorem} \begin{proof} By Theorem 7.1, it suffices to derive (M3) to (M6) from $\mathcal{B}$. It is easy to derive $y^2(y^2x)=x$, the dual of $(xy^2)y^2=x$, from $\mathcal{B}$. Thus, we can apply duality. Consequently, it suffices to derive (M3) and (M4). Using $\mathcal{B}$, $((xy)z)t=((xy)z)((tu^2)u^2)=((xy)(tu^2))(zu^2)=((xt)(yu^2))(zu^2)$, so that (M3) is a consequence. Similarly, $(x(yz))t=(x(yz))(u^2(u^2t))=(xu^2)((yz)(u^2t))=(xu^2)((tz)(u^2y))$, so that (M4) is a consequence. \end{proof} \section{Multicirculant matrices} In the next section, we shall apply Theorem 8.1 below. This theorem is due to P.J. Davis (see \S5.8 of \cite{pD79}). 
We include an elementary proof of Davis's result. For $k\ge1$ and a sequence $\mathbf{s}=(s_1, s_2,\dots, s_k)$ of positive integers, let $\mathcal{G}(\mathbf{s})=S_1\times S_2\times\dots\times S_k$, where for $1\le i\le k$, $S_i$ is the additive group of integers modulo $s_i$. Let $n=s_1s_2\dots s_k$. We also define a bijection from the group $\mathcal{G}(\mathbf{s})$ onto the set $\{\mspace{1mu}0,1,2, \dots ,n-1\mspace{1mu}\}$ by \begin{equation*} (x_1, x_2, x_3,\dots, x_k)^*=x_1+x_2s_1+x_3s_1s_2+\dots+x_k(s_1s_2\dots s_{k-1}). \end{equation*} Observe that $(0,0, \dots , 0)^*=0$. We shall define an $n\times n$ matrix $\mathcal{M}(\mathbf{s})=[a_{i,j}]$, where $0\le i,j<n$. The top row (when $i=0$) is arbitrary. For nonzero $i=(x_1, x_2, \dots , x_k)^*$ and any $j=(y_1, y_2,\dots, y_k)^*$, $a_{i,j}=a_{0,t}$ where $t=(y_1-x_1, y_2-x_2,\dots, y_k-x_k)^*$. We call $\mathcal{M}(\mathbf{s})$ a \emph{multicirculant} matrix of \emph{level} $k$. Let $G$ be a finite abelian group, written additively. For $g\in G$, let $\chi_g$ be the character associated with $g$. It is well known that $\sum(\chi_g(h)\mid h\in G)$ equals $|G|$ when $g=0$, and equals zero for every other $g$. \begin{theorem} For $k\ge1$, let $\mathbf{s}=(s_1, s_2,\dots, s_k)$ be a sequence of positive integers whose product is $n$. For each $\mathbf{x}=(x_1, x_2, \dots , x_k)\in\mathcal{G}(\mathbf{s})$, let $c_\mathbf{x}$ be a complex number. Let $A=\mathcal{M}(\mathbf{s})$ be the multicirculant matrix of level $k$ that is defined by setting $a_{0,\mathbf{x}^*}=c_\mathbf{x}$ for every $\mathbf{x}\in\mathcal{G}(\mathbf{s})$. The eigenvalues of $A$ (including multiplicities) are $\sum(c_\mathbf{x}\xi^{x_1}_1\xi^{x_2}_2\cdots\xi^{x_k}_k\mid\mathbf{x}\in\mathcal{G}(\mathbf{s}))$ as each $\xi_i$ runs over all $s_i$-th roots of unity. 
\end{theorem} \begin{proof} Let $\lambda=\sum(c_\mathbf{x}\xi^{x_1}_1\xi^{x_2}_2\cdots\xi^{x_k}_k\mid\mathbf{x} \in\mathcal{G}(\mathbf{s}))$, where $\xi_i$ is an $s_i$-th root of unity for $1\le i\le k$. Also, let $\mathbf{v}=(v_0, v_1,\dots, v_{n-1})$, where $v_{\mathbf{x}^*}=\xi^{x_1}_1\xi^{x_2}_2\cdots\xi^{x_k}_k$. We first show that $\mathbf{v}$ is an eigenvector for $\lambda$. The dot product of row $\mathbf{y}^{*}$ of $A$ and the vector $\mathbf{v}$ is \begin{align*} \sum&(c_{\mathbf{x}-\mathbf{y}}\xi^{x_1}_1\xi^{x_2}_2\cdots\xi^{x_k}_k\mid\mathbf{x}\in\mathcal{G}(\mathbf{s}))\\ =&\sum(c_\mathbf{z}\xi^{y_1+z_1}_1\xi^{y_2+z_2}_2\cdots\xi^{y_k+z_k}_k\mid\mathbf{z}\in\mathcal{G}(\mathbf{s}))\\ =&\left(\sum(c_\mathbf{z}\xi^{z_1}_1\xi^{z_2}_2\cdots\xi^{z_k}_k\mid\mathbf{z}\in\mathcal{G}(\mathbf{s}))\right)\xi^{y_1}_1\xi^{y_2}_2\cdots\xi^{y_k}_k=\lambda v_{\mathbf{y}^*}. \end{align*} Let $\omega_i$ ($1\le i\le k$) be a primitive $s_i$-th root of unity. For any $\mathbf{y}=(y_1, y_2,\dots, y_k)$, let $\xi_i=\omega^{y_i}_i$ and define the eigenvector $\mathbf{v}$ as above. Consequently, the component $\mathbf{x}^{*}$ of $\mathbf{v}$ equals $\chi_\mathbf{y}(\mathbf{x})$. Let $P$ be the matrix whose rows are the eigenvectors $\mathbf{v}$, indexed by $\mathbf{y}^{*}$ for $\mathbf{y}\in\mathcal{G}(\mathbf{s})$, and let $Q$ be the matrix whose columns are the same eigenvectors, but indexed by $(-\mathbf{z})^{*}$ for $\mathbf{z}\in\mathcal{G}(\mathbf{s})$. The $(\mathbf{y}^{*},\mathbf{z}^{*})$-entry of $PQ$ is $\sum(\chi_{(\mathbf{y}-\mathbf{z})^*}(\mathbf{x})\mid\mathbf{x}\in\mathcal{G}(\mathbf{s}))$. By the well-known result mentioned above, $PQ=nI$. Hence, $P$ is nonsingular, which means that the eigenvectors are linearly independent. \end{proof} \section{Basis of interchange laws} Theorem 9.2 below is a significant generalization of Lemma 1.2. Let $G$ be a finite abelian group generated by $\alpha$ and $\beta$. 
In particular, $G$ is isomorphic to the direct product of two cyclic groups. We recall some notation from \cite{dK08}. A \emph{vector} means a function from $G$ to the integers. For each $g\in G$, let $\mathbf{e}_{g}$ be the vector that is 1 at $g$ and 0 elsewhere. For each $g\in G$, we define $\mathbf{v}_{g}=-\mathbf{e}_{g}+\mathbf{e}_{\alpha g}+\mathbf{e}_{\beta g}$. By Theorem 2.2 of \cite{dK08}, the following two conditions are equivalent: \begin{itemize} \item A basis for $\Sigma(G;\alpha,\beta)$ consists of its interchange laws. \item The set $\{\mspace{1mu}\mathbf{v}_{g}\mid g\in G\mspace{1mu}\}$ is linearly independent. \end{itemize} \begin{theorem} Let $G$ be $\langle\delta\rangle\times\langle\varepsilon\rangle$, the direct product of cyclic groups of orders $m$ and $n$, respectively, and assume that $\alpha=\delta^a\varepsilon^{a'}$ and $\beta=\delta^b\varepsilon^{b'}$. The interchange laws form a basis for $\Sigma(G;\alpha,\beta)$ iff $-1+\omega^a\xi^{a'}+\omega^b\xi^{b'}$ is never zero whenever $\omega$ is an $m$-th root of unity and $\xi$ is an $n$-th root of unity. \end{theorem} \begin{proof} The group $G$ is isomorphic to $\mathcal{G}(m,n)$, where we convert to addition and replace 1 by 0. Consequently, $\mathbf{a}=(a,a')$ and $\mathbf{b}=(b,b')$ are the images of $\alpha$ and $\beta$, respectively. We use our bijection and index our vectors by $0, 1, \dots , mn-1$ rather than by $G$. Thus, for $g\in \mathcal{G}(m,n)$, $\mathbf{e}_{g}$ is now the vector that is 1 at $g^*$ and 0 elsewhere. For $g\in \mathcal{G}(m,n)$, the definition of $\mathbf{v}_{g}$ is now $\mathbf{v}_{g}=-\mathbf{e}_{g}+\mathbf{e}_{a+g}+\mathbf{e}_{b+g}$. Let $\mathbf{v}_{0}$ be the top row of the multicirculant matrix $A=\mathcal{M}(m,n)$. Observe that the rows of $A$ are the vectors $\mathbf{v}_{g}$ for $g$ in $\mathcal{G}(m,n)$. 
By Theorem 8.1, each eigenvalue of $A$ equals $-1+\omega^a\xi^{a'} +\omega^b\xi^{b'}$, where $\omega$ is an $m$th root of unity and $\xi$ is an $n$th root of unity. Thus, $\{\mspace{1mu}\mathbf{v}_{g}\mid g\in \mathcal{G}(m,n)\mspace{1mu}\}$ is linearly independent iff $-1+\omega^a\xi^{a'}+\omega^b\xi^{b'}$ is never zero when $\omega$ and $\xi$ are as previously specified. Now apply Theorem 2.2 of \cite{dK08}, which we described above. \end{proof} \begin{theorem} Let $G=\langle\alpha\rangle\oplus\langle\beta\rangle$, a direct sum, where $\alpha$ has order $m$ and $\beta$ has order $n$. The interchange laws form a basis for $\Sigma(G;\alpha,\beta)$ iff $m$ and $n$ are not both multiples of $6$. \end{theorem} \begin{proof} By Theorem 9.1, we must determine when $-1+\omega+\xi=0$ for an $m$th root of unity $\omega$ and an $n$th root of unity $\xi$. Since the absolute values of the imaginary parts of $\omega$ and $\xi$ are equal, so are the absolute values of their real parts. Thus, $1/2$ is the real part of both $\omega$ and $\xi$. The result now follows. \end{proof} When $m=n=2$ in Theorem 9.2, we obtain Lemma 1.2. Let $A$ be the matrix in the proof of Lemma 1.2. Clearly, $A=\mathcal{M}(2,2)$ and its top row is as in the proof of Theorem 9.1 when $\mathbf{a}=(1,0)$ and $\mathbf{b}=(0,1)$. Since the eigenvalues of $A$ are $1,-1,-1,-3$ by Theorem 8.1, the determinant of $A$ is $-3$. For a finite \emph{cyclic} group $G$, Theorem 3.1 of \cite{dK08} determines exactly when the interchange laws form a basis for $\Sigma(G;\alpha,\beta)$. \end{document}
\begin{document} \title{Topologies and all that --- A Tutorial} \begin{abstract} This is a brief introduction to the basic concepts of topology. It includes the basic constructions, discusses separation properties, metric and pseudometric spaces, and gives some applications arising from the use of topology in computing. \end{abstract} \tableofcontents { \section{Topological Spaces} A topology formalizes the notion of an open set; call a set open iff each of its members leaves a little room like a breathing space around it. This gives immediately a hint at the structure of the collection of open sets --- they should be closed under finite intersections, but under arbitrary unions, yielding the base for a calculus of observable properties, as outlined in~\cite[Chapter 1]{Smyth} or in~\cite{Vickers}. It makes use of properties of topological spaces, but puts its emphasis subtly away from the classic approach, e.g., in mathematical analysis or probability theory, by stressing different properties of a space. The traditional approach, for example, stresses separation properties like being able to separate two distinct points through an open set. Such a strong emphasis is not necessarily observed in the computationally oriented use of topologies, where for example pseudometrics for measuring the conceptual distance between objects are important, when it comes to find an approximation between Markov transition systems. We give in this short treatise a brief introduction to some of the main properties of topological spaces, given that we have touched upon topologies already in the context of the Axiom of Choice~\SetCite{Sect. 1.5.8}. The objective is to provide the tools and methods offered by set-theoretic topology to an application oriented reader, thus we introduce the very basic notions of topology, and hint at applications of these tools. 
Some connections to logic and set theory are indicated, but as Moschovakis writes ``General (pointset) topology is to set theory like parsley to Greek food: some of it gets in almost every dish, but there are no 'parsley recipes' that the good Greek cook needs to know.''~\cite[6.27, p. 79]{Moschovakis-Notes}. In this metaphor, we study the parsley here, so that it can get into the dishes which require it. The goal of making topology useful suggests the following core areas: one should first discuss the \emph{basic notion} of a topology and its construction, including bases and subbases. Since compactness has been made available very early, compact spaces serve occasionally as an exercise ground. Continuity is an important topic in this context, and the basic constructions like product or quotients which are enabled by it. Since some interesting and important topological constructions are tied to filters, we study \emph{filters and convergence}, comparing in examples the sometimes more easily handled nets to the occasionally more cumbersome filters, which, however, offer some conceptual advantages. Talking about convergence, \emph{separation properties} suggest themselves; they are studied in detail, providing some classic results like Urysohn's Theorem. It happens so often that one works with a powerful concept, but that this concept requires assumptions which are too strong, hence one has to weaken it in a sensible way. This is demonstrated in the transition from compactness to local compactness; we discuss local compact spaces, and we give an example of a compactification. Quantitative aspects enter when one measures openness through a pseudometric; here many concepts are seen in a new, sharper light, in particular the problem of completeness comes up --- you have a sequence the elements of which are eventually very close to each other, and you want to be sure that a limit exists. 
This is possible on complete spaces, and, even better, if a space is not complete, then you can complete it. Complete spaces have some very special properties, for example the intersection of countably many open dense sets is dense again. This is Baire's Theorem, we show through a Banach-Mazur game played on a topological space that being of first category can be determined through Demon having a winning strategy. This completes the round trip of basic properties of topological spaces. We then present a small gallery in which topology is in action. The reason for singling out some topics is that we want to demonstrate the techniques developed with topological spaces for some interesting applications. For example, Gödel's Completeness Theorem for (countable) first order logic has been proved by Rasiowa and Sikorski through a combination of Baire's Theorem and Stone's topological representation of Boolean algebras. This topic is discussed. The calculus of observations, which is mentioned above, leads to the notion of topological systems, as demonstrated by Vickers. This hints at an interplay of topology and order, since a topology is after all a complete Heyting algebra. Another important topic is the approximation of continuous functions by a given class of functions, like the polynomials on an interval, leading quickly to the Stone-Weierstraß Theorem on a compact topological space, a topic with a rich history. Finally, the relationship of pseudometric spaces to general topological spaces is reflected again, we introduce uniform spaces as a rich class of spaces which is more general than pseudometric spaces, but less general than their topological cousins. Here we find concepts like completeness or uniform continuity, which are formulated for metric spaces, but which cannot be realized in general topological ones. 
This gallery could be extended, for example, Polish spaces could be discussed here with considerable relish, but it seemed to be more adequate to discuss these spaces in the context of their measure theoretic use. \begin{center} \fbox{We assume throughout that the Axiom of Choice is valid.} \end{center} \subsection{Defining Topologies} \label{sec:top-deftops} \def\mathcal{\mathcal} \def\mathit{} Recall that a topology $\tau$ on a carrier set $X$ is a collection of subsets which contains both $\emptyset$ and $X$, and which is closed under finite intersections and arbitrary unions. The elements of $\tau$ are called the \emph{open sets}. Usually a topology is not written down as one set, but it is specified what an open set looks like. This is done through a base or a subbase. Recall\MMP{Base, subbase} that a \emph{\index{topology!base}base} $\beta$ for $\tau$ is a subset of $\tau$ such that for any open set $G\in\tau$ and any $x\in G$ there exists $B\in\beta$ with $x\in B\subseteq G$. A subbase is a family of sets for which the finite intersections form a base. Not every family of subsets qualifies as a subbase or a base. We have the following characterization of a base. \BeginProposition{when-is-a-base} A family $\beta$ of sets is the base for a topology on $X = \bigcup\beta$ iff given $U, V\in\beta$ and $x\in U\cap V$, there exists $W\in \beta$ with $x\in W\subseteq U\cap V$. \end{proposition} Kelley~\cite[p. 47]{Kelley} gives the following example: Put $X := \{0, 1, 2\}$, $A := \{0, 1\}$ and $B := \{1, 2\}$, then $\beta := \{X, A, B, \emptyset\}$ cannot be the base for a topology. Assume it is, then the topology must be $\beta$ itself, but $A\cap B\not\in \beta$. So we have to be a bit careful. Let us have a look at the proof. \begin{proof} Checking the properties for a base shows that the condition is certainly necessary. Suppose that the condition holds, and define \begin{equation*}\textstyle \tau :=\{\bigcup \beta_{0}\mid \beta_{0}\subseteq\beta\}. 
\end{equation*} Then $\emptyset, X\in \tau$, and $\tau$ is closed under arbitrary unions, so that we have to check whether $\tau$ is closed under finite intersections. In fact, let $x\in U\cap V$ with $U, V\in \tau$, then we can find $U_{0}, V_{0}\in \beta$ with $x\in U_{0}\cap V_{0}$. By assumption there exists $W\in \beta$ with $x\in W\subseteq U_{0}\cap V_{0}\subseteq U\cap V$, so that $U\cap V$ can be written as union of elements in $\beta$. \end{proof} We perceive a base and a subbase, resp., relative to a topology, but it is usually clear what the topology looks like, once a basis is given. Let us have a look at some examples to clarify things. \BeginExample{ex-topol-bases-real} Consider the real numbers $\mathbb{R}$ with the Euclidean topology $\tau$. We say that a set $G$ is open iff given $x\in G$, there exists an open interval $]a, b[$ with $x\in ]a, b[\ \subseteq G$. Hence the set $\bigl\{]a, b[\mid a, b\in \mathbb{R}, a < b\bigr\}$ forms a base for $\tau$; actually, we could have chosen $a$ and $b$ as rational numbers, so that we have even a countable base for $\tau$. Note that although we can find a closed interval $[v, w]$ such that $x\in [v, w]\ \subseteq\ ]a, b[\ \subseteq G$, we could not have used the closed intervals for a description of $\tau$, since otherwise the singleton sets $\{x\} = [x, x]$ would be open as well. This is both undesirable and counter intuitive: in an open set we expect that each element has some breathing space around it. {\Large\ding{44}} \end{example} The next example looks at Euclidean spaces; here we do not have intervals directly at our disposal, but we can measure distances as well, which is a suitable generalization, given that the interval $]x-r, x+r[$ equals $\{y\in \mathbb{R} \mid |x-y|< r\}$. \BeginExample{ex-topol-bases-eucl} Consider the three dimensional space $\mathbb{R}^{3}$, and define for $x, y\in \mathbb{R}^{3}$ their distance \begin{equation*} d(x, y) := \sum_{i=1}^{3}|x_{i}- y_{i}|. 
\end{equation*} Call $G\subseteq\mathbb{R}^{3}$ open iff given $x\in G$, there exists $r > 0$ such that $\{y\in \mathbb{R}^{3}\mid d(x, y) < r\}\subseteq G$. Then it is clear that the set of all open sets forms a topology: \begin{itemize} \item Both the empty set and $\mathbb{R}^{3}$ are open. \item The union of an arbitrary collection of open sets is open again. \item Let $G_{1}, \ldots, G_{k}$ be open, and $x\in G_{1}\cap\ldots\cap G_{k}$. Take an index $i$; since $x\in G_{i}$, there exists $r_{i}>0$ such that $K(d, x, r_{i}) := \{y\in \mathbb{R}^{3}\mid d(x, y) < r_{i}\}\subseteq G_{i}$. Let $r := \min\{r_{1}, \ldots, r_{k}\}$, then \begin{equation*} \{y\in \mathbb{R}^{3}\mid d(x, y) < r\}=\bigcap_{i=1}^{k}\{y\in \mathbb{R}^{3}\mid d(x, y) < r_{i}\} \subseteq\bigcap_{i=1}^{k}G_{i}. \end{equation*} Hence the intersection of a finite number of open sets is open again. \end{itemize} This argument would not work with a countable number of open sets, by the way. We could have used other measures for the distance, e.g., \begin{align*} d'(x, y) & := \sqrt{\sum_{i}|x_{i}- y_{i}|^{2}},\\ d''(x, y) & := \max_{1\leq i \leq 3}|x_{i}- y_{i}|. \end{align*} Then it is not difficult to see that all three describe the same collection of open sets. This is so because we can find for $x$ and $r>0$ some $r'>0$ and $r'' > 0$ with $K(d', x, r') \subseteq K(d, x, r)$ and $K(d'', x, r'') \subseteq K(d, x, r)$, similarly for the other combinations. It is noted that $3$ is not a magical number here, we can safely replace it with any positive $n$, indicating an arbitrary finite dimension. Hence we have shown that $\mathbb{R}^{n}$ is for each $n\in \mathbb{N}$ a topological space in the Euclidean topology. {\Large\ding{44}} \end{example} The next example uses also some notion of distance between two elements, which are given through evaluating real valued functions.
Think of $f(x)$ as the numerical value of attribute $f$ for object $x$, then $|f(x) - f(y)|$ indicates how far apart $x$ and $y$ are with respect to their attribute values. \BeginExample{ex-topol-bases-weak} Let $X$ be an arbitrary non-empty set, and ${\mathcal E}$ be a non-empty collection of functions $f: X\to \mathbb{R}$. Define for the finite collection ${\mathcal F}\subseteq {\mathcal E}$, for $r>0$, and for $x\in X$ the base set \begin{equation*} W_{{\mathcal F}; r} (x) := \{y\in X \mid |f(x)-f(y)|<r\text{ for all }f\in {\mathcal F}\}. \end{equation*} We define as a base $\beta := \{W_{{\mathcal F}; r} (x) \mid x\in X, r > 0, {\mathcal F}\subseteq{\mathcal E}\text{ finite}\}$, and hence call $G\subseteq X$ open iff given $x\in G$, there exists ${\mathcal F}\subseteq{\mathcal E}$ finite and $r>0$ such that $W_{{\mathcal F}; r}(x)\subseteq G$. It is immediate that the finite intersection of open sets is open again. Since the other properties are checked easily as well, we have defined a topology\MMP{Weak topology}, which is sometimes called the \emph{weak \index{topology!weak}topology} on $X$ induced by ${\mathcal E}$. It is clear that in the last example the argument would not work if we restrict ourselves to single functions $g\in{\mathcal E}$ for defining the base, i.e., to sets of the form $W_{\{g\}; r}$. These sets, however, have the property that they form a subbase, since finite intersections of these sets form a base. {\Large\ding{44}} \end{example} The next example shows that a topology may be defined on the set of all partial functions from some set to another one. In contrast to the previous example, we do without any numerical evaluations.
\BeginExample{partial-maps-top} { \def\partMap#1#2{\ensuremath{{#1}\rightharpoonup{#2}}} Let $A$ and $B$ be non-empty sets, define $$\partMap{A}{B} := \{f\subseteq A\times B \mid f\text{ is a partial map}\}.$$ A set $G\subseteq\partMap{A}{B}$ is called open iff given $f\in G$ there exists a finite $f_{0}\in\partMap{A}{B}$ such that \begin{equation*} f \in N(f_{0}) := \{g\in \partMap{A}{B} \mid f_{0}\subseteq g\} \subseteq G. \end{equation*} Thus we can find for $f$ a finite partial map $f_{0}$ which is extended by $f$ such that all extensions of $f_{0}$ are contained in $G$. Then this is in fact a topology. The collection of open sets is certainly closed under arbitrary unions, and both the empty set and the whole set $\partMap{A}{B}$ are open. Let $G_{1}, \ldots, G_{n}$ be open, and $f\in G := G_{1}\cap\ldots\cap G_{n}$, then we can find finite partial maps $f_{1}, \ldots, f_{n}$ which are extended by $f$ such that $N(f_{i})\subseteq G_{i}$ for $1 \leq i \leq n$. Since $f$ extends all these maps, $f_{0} := f_{1}\cup\ldots\cup f_{n}$ is a well defined finite partial map which is extended by $f$, and \begin{equation*} f\in N(f_{0}) = N(f_{1})\cap\ldots\cap N(f_{n})\subseteq G. \end{equation*} Hence the finite intersection of open sets is open again. A base for this topology is the set $\{N(f)\mid f\text{ is finite}\}$, a subbase is the set $\bigl\{N(\{\langle a, b\rangle\})\mid a\in A, b\in B\bigr\}$. } {\Large\ding{44}} \end{example} The next example deals with a topology which is induced by an order structure. Recall that a chain in a partially ordered set is a non-empty totally ordered subset, and that in an inductively ordered set each chain has an upper bound. \BeginExample{scott-open} Let $(P, \leq)$ be an inductively ordered set. Call $G\subseteq P$ \emph{Scott \index{open!Scott}open} iff \begin{enumerate} \item $G$ is upward closed (hence $x\in G$ and $x\leq y$ imply $y\in G$).
\item If $S\subseteq P$ is a chain with $\sup S \in G$, then $S\cap G\not=\emptyset$. \end{enumerate} Again, this defines a \index{topology!Scott}topology on $P$. In fact, it is enough to show that $G_{1}\cap G_{2}$ is open, if $G_{1}$ and $G_{2}$ are. Let $S$ be a chain with $\sup S\in G_{1}\cap G_{2}$, then we find $s_{i}\in S$ with $s_{i}\in G_{i}$. Since $S$ is a chain, we may and do assume that $s_{1}\leq s_{2}$, hence $s_{2}\in G_{1}$, because $G_{1}$ is upward closed. Thus $s_{2}\in S\cap (G_{1}\cap G_{2})$. Because $G_{1}$ and $G_{2}$ are upward closed, so is $G_{1}\cap G_{2}$. As an illustration, we show that the set $F := \{x\in P\mid x \leq t\}$ is Scott closed for each $t\in P$. Put $G := P\setminus F$. Let $x\in G$, and $x\leq y$, then obviously $y \not\in F$, so $y\in G$. If $S$ is a chain with $\sup S\in G$, then there exists $s\in S$ such that $s\not\in F$, hence $S\cap G\not=\emptyset$. {\Large\ding{44}} \end{example} \subsubsection{Continuous Functions} A continuous map between topological spaces is compatible with the topological structure. This is familiar from real functions, but we cannot copy the definition, since we have no means of measuring the distance between points in a topological space. All we have is the notion of an open set. So the basic idea is to say that given an open neighborhood $U$ of the image, we want to be able to find an open neighborhood $V$ of the inverse image so that all elements of $V$ are mapped to $U$. This is a direct translation of the familiar $\epsilon$-$\delta$-definition from calculus. Since we are concerned with continuity as a global concept (as opposed to one which focusses on a given point), we arrive at this definition, and show in the subsequent example that it is really a faithful translation. \BeginDefinition{fnct-continuous} Let $(X, \tau)$ and $(Y, \vartheta)$ be topological spaces.
A map $f: X\to Y$ is called $\tau$-$\vartheta$-\emph{\index{continuous}continuous} iff $\InvBild{f}{H}\in\tau$ for all $H\in\vartheta$ holds; we write this also as $f: (X, \tau)\to (Y, \vartheta)$. \end{definition} If the context is clear, we omit the reference to the topologies. Hence we say that the inverse image of an open set under a continuous map is an open set again. Let us have a look at real functions. \BeginExample{ex-cont-fncts-real} Endow the reals with the Euclidean topology, and let $f: \mathbb{R}\to \mathbb{R}$ be a map. Then the definition of continuity given above coincides with the usual $\epsilon$-$\delta$-definition. 1. Assuming the \MMP{$\epsilon$-$\delta$?}$\epsilon$-$\delta$-definition, we want to show that the inverse image of an open set is open. In fact, let $G\subseteq\mathbb{R}$ be open, and pick $x\in \InvBild{f}{G}$. Since $f(x)\in G$, we can find $\epsilon>0$ such that $]f(x)-\epsilon, f(x)+\epsilon[\ \subseteq G$. Pick $\delta>0$ for this $\epsilon$, hence $x'\in\ ]x-\delta, x+\delta[$ implies $f(x')\in\ ]f(x)-\epsilon, f(x)+ \epsilon[\ \subseteq G$. Thus $x\in\ ]x-\delta, x+\delta[\ \subseteq\InvBild{f}{G}$. 2. Assuming that the inverse image of an open set is open, we want to establish the $\epsilon$-$\delta$-definition. Given $x\in \mathbb{R}$, let $\epsilon>0$ be arbitrary, we want to show that there exists $\delta>0$ such that $|x-x'|<\delta$ implies $|f(x)-f(x')|<\epsilon$. Now $]f(x)-\epsilon, f(x)+\epsilon[$ is an open set, hence $H := \InvBild{f}{]f(x)-\epsilon, f(x)+\epsilon[}$ is open by assumption, and $x\in H$. Select $\delta>0$ with $]x-\delta, x+\delta[\ \subseteq H$, then $|x-x'|<\delta$ implies $x'\in H$, hence $f(x')\in\ ]f(x)-\epsilon, f(x)+\epsilon[$. {\Large\ding{44}} \end{example} Thus we work on familiar ground, when it comes to the reals. Continuity may be tested on a subbase: \BeginLemma{cont-for-subbase} Let $(X, \tau)$ and $(Y, \vartheta)$ be topological spaces, $f: X\to Y$ be a map.
Then $f$ is $\tau$-$\vartheta$-continuous iff $\InvBild{f}{S}\in \tau$ for each $S\in \sigma$ with $\sigma\subseteq\vartheta$ a subbase. \end{lemma} \begin{proof} Clearly, the inverse image of a subbase element is open, whenever $f$ is continuous. Assume, conversely, that the $\InvBild{f}{S} \in \tau$ for each $S\in\sigma$. Then $\InvBild{f}{B}\in \tau$ for each element $B$ of the base $\beta$ generated from $\sigma$, because $B$ is the intersection of a finite number of subbase elements. Now, finally, if $H\in\vartheta$, then $H = \bigcup\{B\mid B\in\beta, B\subseteq H\}$, so that $\InvBild{f}{H} = \bigcup\{\InvBild{f}{B}\mid B\in\beta, B\subseteq H\}\in\tau$. Thus the inverse image of an open set is open. \end{proof} \BeginExample{partial-maps-top-cont} { Take the topology from Example~\ref{partial-maps-top} on the space $\partMap{A}{B}$ of all partial maps. A map $q: (\partMap{A}{B})\to (\partMap{C}{D})$ is continuous in this topology iff the following condition holds: whenever $q(f)(c)=d$, then there exists $f_{0}\subseteq f$ finite such that $q(f_{0})(c) = d$. In fact, let $q$ be continuous, and $q(f)(c) = d$, then $G := \InvBild{q}{N(\{\langle c, d\rangle\})}$ is open and contains $f$, thus there exists $f_{0}\subseteq f$ with $f\in N(f_{0})\subseteq G$, in particular $q(f_{0})(c) = d$. Conversely, assume that $H\subseteq \partMap{C}{D}$ is open, and we want to show that $G := \InvBild{q}{H}\subseteq \partMap{A}{B}$ is open. Let $f\in G$, thus $q(f)\in H$, hence there exists $g_{0}\subseteq q(f)$ finite with $q(f)\in N(g_{0})\subseteq H$. $g_{0}$ is finite, say $g_{0} = \{\langle c_{1}, d_{1}\rangle, \ldots, \langle c_{n}, d_{n}\rangle\}$. By assumption there exists $f_{0}\in\partMap{A}{B}$ with $q(f_{0})(c_{i}) = d_{i}$ for $1\leq i\leq n$, then $f\in N(f_{0}) \subseteq G$, so that the latter set is open. } {\Large\ding{44}} \end{example} Let us have a look at the Scott topology. 
\BeginExample{scott-continuous-map} Let $(P, \leq)$ and $(Q, \leq)$ be inductively ordered sets, then $f: P\to Q$ is Scott continuous (i.e., continuous, when both ordered sets carry their respective Scott topology) iff $f$ is monotone, and if $f(\sup S) = \sup\Bild{f}{S}$ holds for every chain $S$. Assume that $f$ is Scott continuous. If $x\leq x'$, then every open set which contains $x$ also contains $x'$, so if $x\in\InvBild{f}{H}$ then $x'\in\InvBild{f}{H}$ for every Scott open $H\subseteq Q$; thus $f$ is monotone. If $S\subseteq P$ is a chain, then $\sup S$ exists in $P$, and $f(s)\leq f(\sup S)$ for all $s\in S$, so that $\sup\Bild{f}{S}\leq f(\sup S)$. For the other inequality, assume that $f(\sup S)\not\leq \sup\Bild{f}{S}$. We note that $G := \InvBild{f}{\{q\in Q\mid q\not\leq \sup\Bild{f}{S}\}}$ is open with $\sup S\in G$, hence there exists $s\in S$ with $s\in G$. But this is impossible. On the other hand, assume that $H\subseteq Q$ is Scott open, we want to show that $G := \InvBild{f}{H}\subseteq P$ is Scott open. $G$ is upper closed, since $x\in G$ and $x\leq x'$ implies $f(x)\in H$ and $f(x)\leq f(x')$, thus $f(x')\in H$, so that $x'\in G$. Let $S\subseteq P$ be a chain with $\sup S\in G$, hence $f(\sup S)\in H$. Since $\Bild{f}{S}$ is a chain, and $f(\sup S) = \sup\Bild{f}{S}$, we infer that there exists $s\in S$ with $f(s)\in H$, hence there is $s\in S$ with $s\in G$. Thus $G$ is Scott open in $P$, and $f$ is Scott continuous. {\Large\ding{44}} \end{example} The interpretation of modal logics in a topological space is interesting, when we interpret the transition which is associated with the diamond operator through a continuous map; thus the next step of a transition is uniquely determined, and it depends continuously on its argument. 
\BeginExample{interpr-modal-logics-top} The syntax of our modal logics is given through \begin{equation*} \varphi ::= \top~\mid~p~\mid~\varphi_{1}\vee \varphi_{2}~\mid~\varphi_{1}\wedge\varphi_{2}~\mid~\neg \varphi~\mid \Diamond\varphi \end{equation*} with $p\in\Phi$ an atomic proposition. The logic has the usual operators, viz., disjunction and negation, and $\Diamond$ as the modal operator. For interpreting the logic, we take a topological state space $(S, \tau)$ and a continuous map $f: S\to S$, and we associate with each atomic proposition $p$ an open set $V_{p}$ as the set of all states in which $p$ is true. We want the validity set $\Gilt$ of all those states in which formula $\varphi$ holds to be open, and define inductively the validity of a formula in a state in the following way. \begin{align*} \Gilt[\top] & := S\\ \Gilt[p] & := V_{p}, \text{ if $p$ is atomic}\\ \Gilt[\varphi_{1}\vee\varphi_{2}] & := \Gilt[\varphi_{1}]\cup\Gilt[\varphi_{2}]\\ \Gilt[\varphi_{1}\wedge\varphi_{2}] & := \Gilt[\varphi_{1}]\cap\Gilt[\varphi_{2}]\\ \Gilt[\neg \varphi] & := \Interior{(S\setminus \Gilt)}\\ \Gilt[\Diamond \varphi] & := \InvBild{f}{\Gilt} \end{align*} All definitions but the last two are self explanatory. The interpretation of $\Gilt[\Diamond\varphi]$ through $\InvBild{f}{\Gilt}$ suggests itself when considering the graph of $f$ in the usual interpretation of the diamond in modal logics, see~\CategCite{Sect. 2.7}. Since we want $\Gilt[\neg\varphi]$ to be open, we cannot take the complement of $\Gilt$ and declare it as the validity set for $\neg\varphi$, because the complement of an open set is not necessarily open. Instead, we take the largest open set which is contained in $S\setminus\Gilt$ (this is the best we can do), and assign it to $\neg \varphi$. One shows easily through induction on the structure of formula $\varphi$ that $\Gilt$ is an open set. But now look at this.
Assume that $X := \mathbb{R}$ in the usual topology, $V_{p} = \Gilt[p] = ]0, +\infty[$, then $\Gilt[\neg p] = \Interior{]-\infty, 0]} = ]-\infty, 0[$, thus $\Gilt[p\vee\neg p] = \mathbb{R}\setminus\{0\} \not= \Gilt[\top]$. Thus the law of the excluded middle does not hold in this model. {\Large\ding{44}} \end{example} Returning to the general discussion, the following fundamental property is immediate. \BeginProposition{cont-closed-under-compos} The identity $(X, \tau)\to (X, \tau)$ is continuous, and continuous maps are closed under composition. Consequently, topological spaces with continuous maps form a category. \QED \end{proposition} Continuous maps can be used to define topologies. \BeginDefinition{initial-and-final-tops} Given a family ${\mathcal F}$ of maps $f: A\to X_{f}$, where $(X_{f}, \tau_{f})$ is a topological space for each $f\in {\mathcal F}$, the \emph{initial \index{topology!initial}topology} $\tau_{\mathit{in} , {\mathcal F}}$ on $A$ with respect to ${\mathcal F}$ is the smallest topology on $A$ so that $f$ is $\tau_{\mathit{in}, {\mathcal F}}$-$\tau_{f}$-continuous for every $f\in{\mathcal F}$. Dually, given a family ${\mathcal G}$ of maps $g: X_{g}\to Z$, where $(X_{g}, \tau_{g})$ is a topological space for each $g\in{\mathcal G}$, the \emph{final \index{topology!final}topology} $\tau_{\mathit{fi}, {\mathcal G}}$ on $Z$ is the largest topology on $Z$ so that $g$ is $\tau_{g}$-$\tau_{\mathit{fi},{\mathcal G}}$-continuous for every $g\in{\mathcal G}$. \end{definition} In the case of the initial topology for just one map $f: A\to X_{f}$, note that $\PowerSet{A}$ is a topology which renders $f$ continuous, so there exists in fact a smallest topology on $A$ with the desired property; because $\{\InvBild{f}{G}\mid G\in \tau_{f}\}$ is a topology that satisfies the requirement, and because each such topology must contain it, this is in fact the smallest one.
If we have a family ${\mathcal F}$ of maps $A\to X_{f}$, then each topology making all $f\in{\mathcal F}$ continuous must contain $$\xi := \bigcup_{f\in {\mathcal F}}\{\InvBild{f}{G}\mid G\in \tau_{f}\},$$ so the initial topology with respect to ${\mathcal F}$ is just the smallest topology on $A$ containing $\xi$. Similarly, being the largest topology rendering each $g\in {\mathcal G}$ continuous, the final topology with respect to ${\mathcal G}$ must contain the set $\bigcup_{g\in{\mathcal G}}\{H\mid \InvBild{g}{H}\in\tau_{g}\}$. An easy characterization of the initial resp. the final topology is proposed here: \BeginProposition{initial-final} Let $(Z, \tau)$ be a topological space, and ${\mathcal F}$ be a family of maps $A\to X_{f}$ with $(X_{f}, \tau_{f})$ topological spaces; $A$ is endowed with the initial topology $\tau_{\mathit{in}, {\mathcal F}}$ with respect to ${\mathcal F}$. A map $h: Z\to A$ is $\tau$-$\tau_{\mathit{in}, {\mathcal F}}$-continuous iff $f\circ h: Z\to X_{f}$ is $\tau$-$\tau_{f}$-continuous for every $f\in {\mathcal F}$. \end{proposition} \begin{proof} 1. Certainly, if $h: Z\to A$ is $\tau$-$\tau_{\mathit{in}, {\mathcal F}}$ continuous, then $f\circ h: Z\to X_{f}$ is $\tau$-$\tau_{f}$-continuous for every $f\in {\mathcal F}$ by Proposition~\ref{cont-closed-under-compos}. 2. Assume, conversely, that $f\circ h$ is continuous for every $f\in{\mathcal F}$; we want to show that $h$ is continuous. Consider \begin{equation*} \zeta := \{G\subseteq A\mid \InvBild{h}{G}\in\tau\}. \end{equation*} Because $\tau$ is a topology, $\zeta$ is; because $f\circ h$ is continuous, $\zeta$ contains the sets $\{\InvBild{f}{H}\mid H\in \tau_{f}\}$ for every $f\in{\mathcal F}$. But this implies that $\zeta$ contains $\tau_{\mathit{in}, {\mathcal F}}$, hence $\InvBild{h}{G}\in\tau$ for every $G\in\tau_{\mathit{in}, {\mathcal F}}$. This establishes the assertion.
\end{proof} There is a dual characterization for the final topology, see Exercise~\ref{ex-char-final-top}. These are the most popular examples for initial and final topologies. \begin{enumerate} \item Given a family $(X_{i}, \tau_{i})_{i\in I}$ of topological spaces, let $X := \prod_{i\in I}X_{i}$ be the Cartesian product of the carrier sets\footnote{This works only if $X \not=\emptyset$, recall that we assume here that the Axiom of Choice is valid}. The\MMP{Product} \emph{product \index{topology!product}topology} $\prod_{i\in I}\tau_{i}$ is the initial topology on $X$ with respect to the projections $\pi_{i}: X\to X_{i}$. The product topology has as a base \begin{equation*}\textstyle \{\prod_{i\in I}A_{i}\mid A_{i}\in \tau_{i}\text{ and }A_{i}\not= X_{i}\text{ only for finitely many indices}\} \end{equation*} \item Let $(X, \tau)$ be a topological space, $A\subseteq X$. The \emph{\index{topology!trace}trace} $(A, \tau\cap A)$ of $\tau$ on $A$ is the initial topology on $A$ with respect to the embedding $i_{A}: A\to X$\MMP{Subspace}. It has the open sets $\{G\cap A\mid G\in \tau\}$; this is sometimes called the \emph{subspace topology}~\SetCite{p. 40}. We do not assume that $A$ is open. \item Given the family of spaces as above, let $X := \sum_{i\in I}X_{i}$ be the direct sum. The \emph{sum \index{topology!sum}topology}\MMP{Sum} $\sum_{i\in I}\tau_{i}$ is the final topology on $X$ with respect to the injections $\iota_{i}: X_{i}\to X$. Its open sets are described through \begin{equation*} \bigl\{\sum_{i\in I}\Bild{\iota_{i}}{G_{i}}\mid G_{i}\in \tau_{i}\text{ for all } i\in I\bigr\}. \end{equation*} \item Let $\rho$ be an equivalence relation on $X$ with $\tau$ a topology on the base space. The factor space $\Faktor{X}{\rho}$ is equipped with the final topology $\Faktor{\tau}{\rho}$ with respect to the factor map $\fMap{\rho}$ which sends each element to its $\rho$-class\MMP{Factor}. 
This topology is called the \emph{\index{topology!quotient}quotient topology} (with respect to $\tau$ and $\rho$). If a set $G\subseteq \Faktor{X}{\rho}$ is open then its inverse image $\InvBild{\fMap{\rho}}{G}=\bigcup G \subseteq X$ is open in $X$. But the converse holds as well: assume that $\bigcup G$ is open in $X$ for some $G\subseteq\Faktor{X}{\rho}$, then $G = \Bild{\fMap{\rho}}{\bigcup G}$, and, because $\bigcup G$ is the union if equivalence classes, one shows that $\InvBild{\fMap{\rho}}{G} = \InvBild{\fMap{\rho}}{\Bild{\fMap{\rho}}{\bigcup G}} = \bigcup G$. But this means that $G$ is open in $\Faktor{X}{\rho}$. \end{enumerate} Just to gain some familiarity with the concepts involved, we deal with an induced map on a product space, and with the subspace coming from the image of a map. The properties we find here will be useful later on as well. The product space first. We will use that a map into a topological product is continuous iff all its projections are; this follows from the characterization of an initial topology. It goes like this. \BeginLemma{into-unitcube} Let $M$ and $N$ be non-empty sets, $f: M\to N$ be a map. Equip both $[0, 1]^{M}$ and $[0, 1]^{N}$ with the product topology. Then \begin{equation*} f^{*}: \begin{cases} [0, 1]^{N}& \to [0, 1]^{M}\\ g & \mapsto g\circ f \end{cases} \end{equation*} is continuous. \end{lemma} \begin{proof} Note the reversed order; we have $f^{*}(g)(m) = (g\circ f)(m) = g(f(m))$ for $g\in [0, 1]^{N}$ and $m\in M$. Because $f^{*}$ maps $[0, 1]^{N}$ into $[0, 1]^{M}$, and the latter space carries the initial topology with respect to the projections $(\pi_{M, m})_{m\in N}$ with $\pi_{M, m}: q \mapsto q(m)$, it is by Proposition~\ref{initial-final} sufficient to show that $\pi_{M, m}\circ f^{*}: [0, 1]^{N}\to [0, 1]$ is continuous for every $m\in M$. But $\pi_{M, m}\circ f^{*} = \pi_{N, f(m)}$; this is a projection, which is continuous by definition. Hence $f^{*}$ is continuous. 
\end{proof} Hence an application of the projection defuses a seemingly complicated map. Note in passing that neither $M$ nor $N$ are assumed to carry a topology, they are simply plain sets. The next observation displays an example of a subspace topology. Each continuous map $f: X\to Y$ of one topological space to another one induces a subspace $\Bild{f}{X}$ of $Y$, which may or may not have interesting properties. In the case considered, it inherits compactness from its source. \BeginProposition{image-is-compact} Let $(X, \tau)$ and $(Y, \vartheta)$ be topological spaces, $f: X\to Y$ be $\tau$-$\vartheta$-continuous. If $(X, \tau)$ is compact, so is $(\Bild{f}{X}, \vartheta\cap\Bild{f}{X})$, the subspace of $(Y, \vartheta)$ induced by $f$. \end{proposition} \begin{proof} We take an open cover of $\Bild{f}{X}$ and show that it contains a finite cover of this space. So let $(H_{i})_{i\in I}$ be an open cover of $\Bild{f}{X}$. There exist open sets $H_{i}'\in\vartheta$ such that $H_{i} = H_{i}'\cap\Bild{f}{X}$, since $(\Bild{f}{X}, \vartheta\cap\Bild{f}{X})$ carries the subspace topology. Then $(\InvBild{f}{H_{i}'})_{i\in I}$ is an open cover of $X$, so there exists a finite subset $J\subseteq I$ such that $X = \bigcup_{i\in J}\InvBild{f}{H_{i}'}$, since $X$ is compact. But then $(H_{i}'\cap\Bild{f}{X})_{i\in J}$ is an open cover of $\Bild{f}{X}$. Hence this space is compact. \end{proof} Before continuing, we introduce the notion of homeomorphism (as an isomorphism in the category of topological spaces with continuous maps). \BeginDefinition{homeomorphism} Let $X$ and $Y$ be topological spaces. A bijection $f: X\to Y$ is called a \emph{\index{homeomorphism}homeomorphism} iff both $f$ and $f^{-1}$ are continuous. \end{definition} It is clear that continuity and bijectivity alone do not make a homeomorphism. Take as a trivial example the identity $(\mathbb{R}, \PowerSet{\mathbb{R}})\to (\mathbb{R}, \tau)$ with $\tau$ as the Euclidean topology.
It is continuos and bijective, but its inverse is not continuous. Let us have a look at some examples, first one for the quotient topology. \BeginExample{example-quotient-top} Let $U := [0, 2\cdot \pi]$, and identify the endpoints of the interval, i.e., consider the equivalence relation \begin{equation*} \rho := \{\langle x, x\rangle\mid x\in U\}\cup\{\langle 0, 2\cdot \pi\rangle, \langle 2\cdot\pi, 0\rangle\}. \end{equation*} Let $K := \Faktor{U}{\rho}$, and endow $K$ with the quotient topology. A set $G\subseteq K$ is open iff $\InvBild{\fMap{\rho}}{G}\subseteq U$ is open, thus iff we can find an open set $H\subseteq\mathbb{R}$ such that $\InvBild{\fMap{\rho}}{G} = H\cap U$, since $U$ carries the trace of $\mathbb{R}$. Consequently, if $\Klasse{0}{\rho}\not\in G$, we find that $\InvBild{\fMap{\rho}}{G} = \{x\in U\mid \{x\}\in G\}$, which is open by construction. If, however, $\Klasse{0}{\rho}\in G$, then $\InvBild{\fMap{\rho}}{G} = \{x\in U\mid \{x\}\in G\}\cup\{0, 2\cdot \pi\}$, which is open in $U$. We claim that $K$ and the unit circle $ S := \{\langle s, t\rangle \mid 0\leq s, t\leq 1, s^{2}+t^{2} = 1\rangle\}$, are homeomorphic under the map $\psi: \Klasse{x}{\rho}\mapsto \langle \sin x, \cos x\rangle$. Because $\langle \sin 0, cos 0\rangle = \langle \sin 2\cdot \pi, \cos 2\cdot \pi\rangle$, the map is well defined. Since we can write $S = \{\langle \sin x, \cos x\rangle\mid 0\leq x \leq 2\cdot \pi\}$, it is clear that $\psi$ is onto. The topology on $S$ is inherited from the Cartesian plane, so open arcs are a subbasis for it. Because the old Romans Sinus and Cosinus both are continuous, we find that $\psi\circ \fMap{\rho}$ is continuous. We infer from Exercise~\ref{ex-char-final-top} that $\psi$ is continous, since $K$ has the quotient topology, which is final. We want to show that $\psi^{-1}$ is continuous. The argumentation is geometrical. Given an open arc on $K$, we may describe it through $(P_{1}, P_{2})$ with a clockwise movement. 
If the arc does not contain the critical point $P := \langle0, 1\rangle$, we find an open interval $I := ]a, b[$ with $0 < a < b < 2\cdot \pi$ such that $\Bild{\psi}{(P_{1}, P_{2})} = \{\Klasse{x}{\rho}\mid x\in I\}$, which is open in $K$. If, however, $P$ is on this arc, we decompose it into two parts $(P_{1}, P)\cup(P, P_{2})$. Then $(P_{1}, P)$ is the image of some interval $]a, 2\cdot \pi]$, and $(P, P_{2})$ is the image of an interval $[0, b[$, so that $\Bild{\psi}{(P_{1}, P_{2})} = \Bild{\fMap{\rho}}{[0, b[\ \cup\ ]a, 2\cdot \pi]}$, which is open in $K$ as well (note that $[0, b[$ as well as $]a, 2\cdot\pi]$ are open in $U$). {\Large\ding{44}} \end{example} While we have described so far direct methods to describe a topology by saying when a set is open, we turn now to an observation due to Kuratowski which yields an indirect way. It describes axiomatically what properties the closure of a set should have. Assume that we have a \emph{\index{closure operator}closure operator}\MMP{Closure operator}, i.e., a map $A\mapsto \closOp{A}$ on the powerset of a set $X$ with these properties: \begin{enumerate} \item $\closOp{\emptyset} = \emptyset$ and $\closOp{X} = X$. \item $A\subseteq \closOp{A}$, and $\closOp{(A\cup B)} = \closOp{A}\cup\closOp{B}$. \item $\closOp{(\closOp{A})} = \closOp{A}$. \end{enumerate} Thus the operator leaves the empty set and the whole set alone, the closure of the union is the union of the closures, and the operator is idempotent. One sees immediately that the operator which assigns to each set its closure with respect to a given topology is such a closure operator. It is also quite evident that the closure operator is monotone. Assume that $A\subseteq B$, then $B = A \cup (B\setminus A)$, so that $\closOp{B} = \closOp{A}\cup\closOp{(B\setminus A)}\supseteq\closOp{A}$. \BeginExample{finite-ordered-for-closure} Let $(D, \leq)$ be a finite partially ordered set. 
We put $\closOp{\emptyset} := \emptyset$ and $\closOp{D} := D$, moreover, \begin{equation*} \closOp{\{x\}} := \{y\in D \mid y \leq x\} \end{equation*} is defined for $x\in D$, and $\closOp{A} := \bigcup_{x\in A}\closOp{\{x\}}$ for subsets $A$ of $D$. Then this is a closure operator. It is enough to check whether $\closOp{(\closOp{\{x\}})} = \closOp{\{x\}}$ holds. In fact, we have \begin{align*} z \in \closOp{(\closOp{\{x\}})} & \Leftrightarrow z\in \closOp{\{y\}}\text{ for some }y\in \closOp{\{x\}}\\ & \Leftrightarrow \text{there exists }y\leq x\text{ with }z\leq y\\ & \Leftrightarrow z\leq x\\ & \Leftrightarrow z\in\closOp{\{x\}}. \end{align*} Thus we associate with each finite partially ordered set a closure operator, which assigns to each $A\subseteq D$ its down set. The map $x\mapsto\closOp{\{x\}}$ embeds $D$ into a distributive lattice, see the discussion in~\SetCite{Example 1.72}. {\Large\ding{44}} \end{example} We will show now that we can obtain a topology by calling open all those sets the complements of which are fixed under the closure operator; in addition, it turns out that the topological closure and the one from the closure operator are the same. \BeginTheorem{kuratowski-closure} Let $\closOp{\cdot}$ be a closure operator. Then \begin{enumerate} \item The set $\tau := \{X\setminus F \mid F\subseteq X, \closOp{F} = F\}$ is a topology. \item For each set $A\subseteq X$ we have $\Closure{A} = \closOp{A}$ with $\Closure{\cdot}$ as the closure in $\tau$. \end{enumerate} \end{theorem} \begin{proof} 1. For establishing that $\tau$ is a topology, it is enough to show that $\tau$ is closed under arbitrary unions, since the other properties are evident. Let ${\mathcal G}\subseteq \tau$, and put $G := \bigcup {\mathcal G}$, so we want to know whether $\closOp{X\setminus G} = X\setminus G$.
If $H\in {\mathcal G}$, then $X\setminus G \subseteq X\setminus H$; since the operator is monotone, $\closOp{(X\setminus G)}\subseteq \closOp{(X\setminus H)} = X\setminus H$. Because this holds for every $H\in{\mathcal G}$, we obtain $\closOp{(X\setminus G)} \subseteq \bigcap_{H\in{\mathcal G}}(X\setminus H) = X\setminus G$. Since, on the other hand, $X\setminus G\subseteq\closOp{(X\setminus G)}$ always holds, it follows that $\closOp{(X\setminus G)} = X\setminus G$, hence $\tau$ is in fact closed under arbitrary unions, hence it is a topology. 2. Given $A\subseteq X$, \begin{equation*}\textstyle \Closure{A} = \bigcap\{F\subseteq X \mid F\text{ is closed, and }A\subseteq F\}, \end{equation*} and $\closOp{A}$ takes part in the intersection, so that $\Closure{A}\subseteq \closOp{A}$. On the other hand, $A\subseteq \Closure{A}$, thus $\closOp{A}\subseteq \closOp{(\Closure{A})} = \Closure{A}$ by part 1. Consequently, $\Closure{A}$ and $\closOp{A}$ are the same. \end{proof} It is on first sight a bit surprising that a topology can be described by finitary means, although arbitrary unions are involved. But we should not forget that we have also the subset relation at our disposal. Nevertheless, a residue of surprise remains. \subsubsection{Neighborhood Filters} \label{sec:nghb-filters} The last method for describing a topology we are discussing here deals also with some order properties. Assume that we assign to each $x\in X$, where $X$ is a given carrier set, a filter $\ensuremath{{\mathfrak U}}(x)\subseteq \PowerSet{X}$ with the property that $x\in U$ holds for each $U\in \ensuremath{{\mathfrak U}}(x)$. Thus $\ensuremath{{\mathfrak U}}(x)$ has these properties: \begin{enumerate} \item $x\in U$ for all $U\in \ensuremath{{\mathfrak U}}(x)$. \item If $U, V\in \ensuremath{{\mathfrak U}}(x)$, then $U\cap V\in\ensuremath{{\mathfrak U}}(x)$. \item If $U\in\ensuremath{{\mathfrak U}}(x)$ and $U\subseteq V$, then $V\in\ensuremath{{\mathfrak U}}(x)$.
\end{enumerate} It is fairly clear that, given a topology $\tau$ on $X$, the \emph{neighborhood \index{filter!neighborhood}filter}\MMP{$\ensuremath{{\mathfrak U}}_{\tau}(x)$} \begin{equation*} \ensuremath{{\mathfrak U}}_{\tau}(x) := \{V\subseteq X \mid \text{ there exists $U\in\tau$ with $x\in U$ and $U\subseteq V$}\} \end{equation*} for $x$ has these properties. It has also an additional property, which we will discuss shortly~---~for dramaturgical reasons. Such a system of special filters defines a topology. We declare all those sets as open which belong to the neighborhoods of their elements. So if we take all balls in Euclidean $\mathbb{R}^{3}$ as the basis for a filter and assign each point the balls which it centers, then the sphere of radius $1$ around the origin would not be open (intuitively, it does not contain an open ball). So this appears to be an appealing idea. In fact: \BeginProposition{def-through-nbh-filters} Let $\{\ensuremath{{\mathfrak U}}(x) \mid x\in X\}$ be a family of filters such that $x\in U$ for all $U\in \ensuremath{{\mathfrak U}}(x)$. Then \begin{equation*} \tau := \{U\subseteq X \mid U\in\ensuremath{{\mathfrak U}}(x)\text{ whenever }x\in U\} \end{equation*} defines a topology on $X$. \end{proposition} \begin{proof} We have to establish that $\tau$ is closed under finite intersections, since the other properties are fairly straightforward. Now, let $U$ and $V$ be open, and take $x\in U\cap V$. We know that $U\in \ensuremath{{\mathfrak U}}(x)$, since $U$ is open, and we have $V\in\ensuremath{{\mathfrak U}}(x)$ for the same reason. Since $\ensuremath{{\mathfrak U}}(x)$ is a filter, it is closed under finite intersections, hence $U\cap V\in\ensuremath{{\mathfrak U}}(x)$, thus $U\cap V$ is open. \end{proof} We cannot, however, be sure that the neighborhood filter $\ensuremath{{\mathfrak U}}_{\tau}(x)$ for this new topology is the same as the given one. 
Intuitively, the reason is that we do not know if we can find for $U\in\ensuremath{{\mathfrak U}}(x)$ an open $V\in\ensuremath{{\mathfrak U}}(x)$ with $V\subseteq U$ such that $V\in\ensuremath{{\mathfrak U}}(y)$ for all $y\in V$. To illustrate, look at $\mathbb{R}^{3}$, and take the neighborhood filter for, say, $0$ in the Euclidean topology. Put for simplicity \begin{equation*} \|x\| := \sqrt{x_{1}^{2}+x_{2}^{2}+x_{3}^{2}}. \end{equation*} Let $U\in\ensuremath{{\mathfrak U}}(0)$, then we can find an open ball $V\in\ensuremath{{\mathfrak U}}(0)$ with $V\subseteq U$. In fact, assume $U = \{a\mid \|a\|< q\}.$ Take $z\in U$ with $\|z\| < q/2$, then we can find $r>0$ such that the ball $V := \{y\mid \|y-z\|<r\}$ contains $0$ and is entirely contained in $U$ (select $\|z\|< r < q - \|z\|$; then $0\in V$, and $y\in V$ implies $\|y\|\leq\|y-z\|+\|z\|<q$), thus $V\in\ensuremath{{\mathfrak U}}(0)$. Now let $y\in V$, let $0 < t < r-\|z-y\|$, then $\{a\mid \|a-y\|<t\}\subseteq V$, since $\|a-z\|\leq \|a-y\|+\|z-y\|<r$. Hence $U\in\ensuremath{{\mathfrak U}}(y)$ for all $y\in V$. We obtain now as a simple corollary \BeginCorollary{cor-def-through-nbh-filters} Let $\{\ensuremath{{\mathfrak U}}(x)\mid x\in X\}$ be a family of filters such that $x\in U$ for all $U\in \ensuremath{{\mathfrak U}}(x)$, and assume that for any $U\in\ensuremath{{\mathfrak U}}(x)$ there exists $V\in\ensuremath{{\mathfrak U}}(x)$ with $V\subseteq U$ and $U\in\ensuremath{{\mathfrak U}}(y)$ for all $y\in V$. Then $\{\ensuremath{{\mathfrak U}}(x)\mid x\in X\}$ coincides with the neighborhood filter for the topology defined by this family. \QED \end{corollary} In what follows, unless otherwise stated, \index{$\ensuremath{{\mathfrak U}}(x)$}$\ensuremath{{\mathfrak U}}(x)$ will denote the neighborhood filter of a point $x$ in a topological space $X$.
\BeginExample{simple-ordered-set} Let $L := \{1, 2, 3, 6\}$ be the set of all divisors of $6$, and define $\isEquiv{x}{y}{\leq}$ iff $x$ divides $y$, so that we obtain \begin{equation*} \xymatrix{ &6\ar[dl]\ar[dr]\\ 2\ar[dr]&&3\ar[dl]\\ &1 } \end{equation*} Let us compute ---~just for fun~--- the topology associated with this partial order, and a basis for the neighborhood filters for each element. The topology can be seen from the table below (we have used that $\Interior{A} = X\setminus \Closure{(X\setminus A)}$, see~\SetCite{Definition 1.92}): \begin{center} \begin{tabular}{|l|l|l|}\hline set & closure & interior\\\hline\hline $\{1\}$& $\{1\}$& $\emptyset$\\\hline $\{2\}$& $\{1, 2\}$&$\emptyset$\\\hline $\{3\}$&$\{1, 3\}$&$\emptyset$\\\hline $\{6\}$&$\{1, 2, 3, 6\}$&$\{6\}$\\\hline $\{1, 2\}$&$\{1, 2\}$&$\emptyset$\\\hline $\{1, 3\}$&$\{1, 3\}$&$\emptyset$\\\hline $\{1, 6\}$&$\{1, 2, 3, 6\}$&$\{6\}$\\\hline $\{2, 3\}$&$\{1, 2, 3\}$&$\emptyset$\\\hline $\{2, 6\}$&$\{1, 2, 3, 6\}$&$\{2, 6\}$\\\hline $\{3, 6\}$&$\{1, 2, 3, 6\}$&$\{3, 6\}$\\\hline $\{1, 2, 3\}$&$\{1, 2, 3\}$&$\emptyset$\\\hline $\{1, 2, 6\}$&$\{1, 2, 3, 6\}$&$\{2, 6\}$\\\hline $\{1, 3, 6\}$&$\{1, 2, 3, 6\}$&$\{3, 6\}$\\\hline $\{2, 3, 6\}$&$\{1, 2, 3, 6\}$&$\{2, 3, 6\}$\\\hline $\{1, 2, 3, 6\}$&$\{1, 2, 3, 6\}$&$\{1, 2, 3, 6\}$\\\hline \end{tabular} \end{center} This is the topology: \begin{equation*} \tau = \bigl\{\emptyset, \{6\}, \{2, 6\}, \{3, 6\}, \{2, 3, 6\}, \{1, 2, 3, 6\}\bigr\}.
\end{equation*} A basis for the respective neighborhood filters is given in this table: \begin{center} \begin{tabular}{|c|l|}\hline element & basis\\\hline\hline 1 & $\bigl\{\{1, 2, 3, 6\}\bigr\}$\\\hline 2 & $\bigl\{\{2, 6\}, \{1, 2, 3, 6\}\bigr\}$\\\hline 3 & $\bigl\{\{3, 6\}, \{2, 3, 6\}, \{1, 2, 3, 6\}\bigr\}$\\\hline 6 & $\bigl\{\{6\}, \{2, 6\}, \{3, 6\}, \{2, 3, 6\}, \{1, 2, 3, 6\}\bigr\}$\\\hline \end{tabular} \end{center} {\Large\ding{44}} \end{example} The next example deals with topological groups, i.e., topological spaces which have also a group structure rendering multiplication continuous. Here the neighborhood structure is fairly uniform --- if you know the neighborhood filter of the neutral element, you know the neighborhood filter of each element, because you can obtain them by a left shift or a right shift. \BeginExample{top-group} Let $(G, \cdot)$ be a group, and $\tau$ be a topology on $G$ such that the map $\langle x, y\rangle\mapsto xy^{-1}$ is continuous. Then $(G, \cdot, \tau)$ is called a \emph{\index{topology!topological group}topological \index{group!topological}group}. We will write down a topological group as $G$, the group operations and the topology will not be mentioned. The neutral element is denoted by $e$, multiplication will usually be omitted. Given a subset $U$ of $G$, define $gU := \{gh\mid h\in U\}$ and $Ug := \{hg\mid h\in U\}$ for $g\in G$. Let us look at the algebraic operations in a group. Put $\zeta(x, y) := xy^{-1}$, then the map $\xi: g \mapsto g^{-1}$ which maps each group element to its inverse is just $\zeta(e,g)$, hence the cut of a continuous map, so it is continuous as well. $\xi$ is a bijection with $\xi\circ \xi = id_{G}$, so it is in fact a homeomorphism. We obtain multiplication as $xy = \zeta(x, \xi(y))$, so multiplication is also continuous. Fix $g\in G$, then multiplication $\lambda_{g}: x\mapsto gx$ from the left and $\rho_{g}: x\mapsto xg$ from the right are continuous.
Now both $\lambda_{g}$ and $\rho_{g}$ are bijections, and $\lambda_{g}\circ \lambda_{g^{-1}} = \lambda_{g^{-1}}\circ \lambda_{g} = id_{G}$, also $\rho_{g}\circ \rho_{g^{-1}} = \rho_{g^{-1}}\circ \rho_{g} = id_{G}$, thus $\lambda_{g}$ and $\rho_{g}$ are homeomorphisms for every $g\in G$. Thus we have in a topological group this characterization of the neighborhood filter for every $g\in G$: \begin{equation*} \ensuremath{{\mathfrak U}}(g) = \{gU\mid U\in \ensuremath{{\mathfrak U}}(e)\} = \{Ug\mid U\in\ensuremath{{\mathfrak U}}(e)\}. \end{equation*} In fact, let $U$ be a neighborhood of $g$, then $\InvBild{\lambda_{g}}{U} = g^{-1}U$ is a neighborhood of $e$, so is $\InvBild{\rho_{g}}{U} = Ug^{-1}$. Conversely, a neighborhood $V$ of $e$ determines a neighborhood $\InvBild{\lambda_{g^{-1}}}{V} = gV$ resp. $\InvBild{\rho_{g^{-1}}}{V} = Vg$ of $g$. {\Large\ding{44}} \end{example} \subsection{Filters and Convergence} \label{sec:filters-and-convergence} The relationship between topologies and filters turns out to be fairly tight, as we saw when discussing the neighborhood filter of a point. We saw also that we can actually grow a topology from a suitable family of neighborhood filters. This relationship is even closer, as we will discuss now when having a look at convergence. Let $(x_{n})_{n\in \mathbb{N}}$ be a sequence in $\mathbb{R}$ which converges to $x\in\mathbb{R}$. This means that for any given open neighborhood $U$ of $x$ there exists an index $n\in \mathbb{N}$ such that $\{x_{m}\mid m\geq n\}\subseteq U$, so all members of the sequence having an index larger than $n$ are members of $U$. Now consider the filter $\ensuremath{{\filterFont F}}$ generated by the set $\{\{x_{m}\mid m\geq n\}\mid n\in \mathbb{N}\}$ of tails. The condition above says exactly that $\ensuremath{{\mathfrak U}}(x)\subseteq \ensuremath{{\filterFont F}}$, if you think a bit about it. This leads to the definition of convergence in terms of filters.
\BeginDefinition{def-convergence-filter} Let $X$ be a topological space, $\ensuremath{{\filterFont F}}$ a filter on $X$. Then $\ensuremath{{\filterFont F}}$ \emph{\index{convergence!filter}converges} to a limit $x\in X$ iff $\ensuremath{{\mathfrak U}}(x)\subseteq \ensuremath{{\filterFont F}}$\MMP{$\ensuremath{{\filterFont F}}\to x$}. This is denoted by \index{${\mathcal F}\to x$}$\ensuremath{{\filterFont F}}\to x$. \end{definition} Plainly, $\ensuremath{{\mathfrak U}}(x)\to x$ for every $x$. Note that the definition above does not force the limit to be uniquely determined. If two different points $x, y$ share their neighborhood filter, then $\ensuremath{{\filterFont F}}\to x$ iff $\ensuremath{{\filterFont F}}\to y$. Look again at Example~\ref{simple-ordered-set}. There all neighborhood filters are contained in $\ensuremath{{\mathfrak U}}(6)$, so that we have $\ensuremath{{\mathfrak U}}(6)\to t$ for $t\in\{1, 2, 3, 6\}$. It may seem that the definition of convergence through a filter is too involved (after all, being a filter should not be taken on a light shoulder!). In fact, sometimes convergence is defined through a \emph{\index{net}net}\MMP{Net}\label{def-net} as follows. Let $(I, \leq)$ be a directed set, i.e., $\leq$ is a partial order such that, given $i, j\in I$ there exists $k$ with $i\leq k$ and $j\leq k$. An $I$-indexed family $(x_{i})_{i\in I}$ is said to converge\index{convergence!net}\index{net!convergence} to a point $x$ iff, given a neighborhood $U\in\ensuremath{{\mathfrak U}}(x)$ there exists $k\in I$ such that $x_{i}\in U$ for all $i\geq k$. This generalizes the concept of convergence from sequences to index sets of arbitrary size. But look at this. The sets $\bigl\{\{x_{j}\mid j\geq i\}\mid i\in I\bigr\}$ form a filter base, because $(I, \leq)$ is directed. The corresponding filter converges to $x$ iff the net converges to $x$. But what about the converse?
Take a filter $\ensuremath{{\filterFont F}}$ on $X$, then $F_{1}\leq F_{2}$ iff $F_{2}\subseteq F_{1}$ renders $(\ensuremath{{\filterFont F}}, \leq)$ a directed set. In fact, given $F_{1}, F_{2}\in\ensuremath{{\filterFont F}}$, we have $F_{1}\leq F_{1}\cap F_{2}$ and $F_{2}\leq F_{1}\cap F_{2}$. Now pick $x_{F}\in F$ for each $F\in\ensuremath{{\filterFont F}}$. Then the net $(x_{F})_{F\in\ensuremath{{\filterFont F}}}$ converges to $x$ iff $\ensuremath{{\filterFont F}}\to x$. Assume that $\ensuremath{{\filterFont F}}\to x$; take $U\in\ensuremath{{\mathfrak U}}(x)$, then $U\in\ensuremath{{\filterFont F}}$, thus if $F\in\ensuremath{{\filterFont F}}$ with $F\geq U$, then $F\subseteq U$, hence $x_{F}\in U$ for all such $x_{F}$. Conversely, if each net $(x_{F})_{F\in\ensuremath{{\filterFont F}}}$ derived from $\ensuremath{{\filterFont F}}$ converges to $x$, then for a given $U\in\ensuremath{{\mathfrak U}}(x)$ there exists $F_{0}$ such that $x_{F}\in U$ for $F\subseteq F_{0}$. Since $x_{F}$ has been chosen arbitrarily from $F$, this can only hold if $F\subseteq U$ for $F\subseteq F_{0}$, so that $U\in\ensuremath{{\filterFont F}}$. Because $U\in\ensuremath{{\mathfrak U}}(x)$ was arbitrary, we conclude $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$. Hence we find that filters offer a uniform generalization. The argument above shows that we may select the elements $x_{F}$ from a base for $\ensuremath{{\filterFont F}}$. If the filter has a countable base, we construct in this way a sequence; conversely, the filter constructed from a sequence has a countable base. Thus the convergence of sequences and the convergence of filters with a countable base are equivalent concepts. We investigate the characterization of the topological closure in terms of filters.
In order to do this, we need to be able to restrict a filter to a set\MMP{Trace}, i.e., looking at the footstep the filter leaves on the set, hence at $$\ensuremath{{\filterFont F}}\cap A := \{F\cap A\mid F\in\ensuremath{{\filterFont F}}\}.$$ This is what we will do now. \BeginLemma{localize-filter} Let $X$ be a set, and $\ensuremath{{\filterFont F}}$ be a filter on $X$. Then $\ensuremath{{\filterFont F}}\cap A$ is a filter on $A$ iff $F\cap A\not=\emptyset$ for all $F\in\ensuremath{{\filterFont F}}$. \end{lemma} \begin{proof} Since a filter must not contain the empty set, the condition is necessary. But it is also sufficient, because it makes sure that the laws of a filter are satisfied. \end{proof} Looking at $\ensuremath{{\filterFont F}}\cap A$ for an ultrafilter $\ensuremath{{\filterFont F}}$, we know that either $A\in\ensuremath{{\filterFont F}}$ or $X\setminus A\in \ensuremath{{\filterFont F}}$, so if $F\cap A\not=\emptyset$ holds for all $F\in \ensuremath{{\filterFont F}}$, then this implies that $A\in\ensuremath{{\filterFont F}}$. Thus we obtain \BeginCorollary{localize-ultrafilter} Let $X$ be a set, and $\ensuremath{{\filterFont F}}$ be an ultrafilter on $X$. Then $\ensuremath{{\filterFont F}}\cap A$ is a filter iff $A\in\ensuremath{{\filterFont F}}$. Moreover, in this case $\ensuremath{{\filterFont F}}\cap A$ is an ultrafilter on $A$. \end{corollary} \begin{proof} It remains to show that $\ensuremath{{\filterFont F}}\cap A$ is an ultrafilter on $A$, provided, $\ensuremath{{\filterFont F}}\cap A$ is a filter. Let $B\not\in\ensuremath{{\filterFont F}}\cap A$ for some subset $B\subseteq A$. Since $A\in\ensuremath{{\filterFont F}}$, we conclude $B\not\in\ensuremath{{\filterFont F}}$, thus $X\setminus B\in\ensuremath{{\filterFont F}}$, since $\ensuremath{{\filterFont F}}$ is an ultrafilter. Thus $(X\setminus B)\cap A = A\setminus B\in\ensuremath{{\filterFont F}}\cap A$, so $\ensuremath{{\filterFont F}}\cap A$ is an ultrafilter by~\SetCite{Lemma 1.63}. 
\end{proof} From Lemma~\ref{localize-filter} we obtain a simple and elegant characterization of the topological closure of a set. \BeginProposition{char-top-closure} Let $X$ be a topological space, $A\subseteq X$. Then $x\in\Closure{A}$ iff $\ensuremath{{\mathfrak U}}(x)\cap A$ is a filter on $A$. Thus $x\in \Closure{A}$ iff there exists a filter $\ensuremath{{\filterFont F}}$ on $A$ with $\ensuremath{{\filterFont F}}\to x$. \end{proposition} \begin{proof} We know from the definition of $\Closure{A}$ that $x\in \Closure{A}$ iff $U\cap A\not=\emptyset$ for all $U\in\ensuremath{{\mathfrak U}}(x)$. This is by Lemma~\ref{localize-filter} equivalent to $\ensuremath{{\mathfrak U}}(x)\cap A$ being a filter on $A$. \end{proof} We know from Calculus that continuous functions preserve convergence, i.e., if $x_{n}\to x$ and $f$ is continuous, then $f(x_{n})\to f(x)$. We want to carry this over to the world of filters. For this, we have to define the image of a filter. Let $\ensuremath{{\filterFont F}}$ be a filter on a set $X$, and $f: X\to Y$ a map, then $$f(\ensuremath{{\filterFont F}}) := \{B\subseteq Y\mid \InvBild{f}{B}\in \ensuremath{{\filterFont F}}\}$$ is a filter on $Y$. In fact,\MMP[b]{Image of a filter} $\emptyset\not\in f(\ensuremath{{\filterFont F}})$, and, since $f^{-1}$ preserves the Boolean operations, $f(\ensuremath{{\filterFont F}})$ is closed under finite intersections. Let $B\in f(\ensuremath{{\filterFont F}})$ and $B\subseteq B'$. Since $\InvBild{f}{B}\in\ensuremath{{\filterFont F}}$, and $\InvBild{f}{B}\subseteq \InvBild{f}{B'}$, we conclude $\InvBild{f}{B'}\in \ensuremath{{\filterFont F}}$, so that $B'\in f(\ensuremath{{\filterFont F}})$. Hence $f(\ensuremath{{\filterFont F}})$ is also upper closed, so that it is in fact a filter. This is an easy representation through the direct image. 
\BeginLemma{direct-image-filter} Let $f: X\to Y$ be a map, $\ensuremath{{\filterFont F}}$ a filter on $X$, then $f(\ensuremath{{\filterFont F}})$ equals the filter generated by $\{\Bild{f}{A}\mid A\in\ensuremath{{\filterFont F}}\}$. \end{lemma} \begin{proof} Because $\Bild{f}{A_{1}\cap A_{2}}\subseteq \Bild{f}{A_{1}}\cap \Bild{f}{A_{2}}$, the set ${\mathcal G}_{0} := \{\Bild{f}{A}\mid A\in\ensuremath{{\filterFont F}}\}$ is a filter base. Denote by ${\mathcal G}$ the filter generated by ${\mathcal G}_{0}$. We claim that $f(\ensuremath{{\filterFont F}}) = {\mathcal G}$. ``$\subseteq$'': Assume that $B\in f(\ensuremath{{\filterFont F}})$, hence $\InvBild{f}{B}\in \ensuremath{{\filterFont F}}$. Since $\Bild{f}{\InvBild{f}{B}} \subseteq B$, we conclude that $B$ is contained in the filter generated by ${\mathcal G}_{0}$, hence in ${\mathcal G}$. ``$\supseteq$'': If $B\in{\mathcal G}_{0}$, we find $A\in\ensuremath{{\filterFont F}}$ with $B = \Bild{f}{A}$, hence $A \subseteq \InvBild{f}{\Bild{f}{A}} = \InvBild{f}{B}\in\ensuremath{{\filterFont F}}$, so that $B\in f(\ensuremath{{\filterFont F}})$. This implies the desired inclusion, since $f(\ensuremath{{\filterFont F}})$ is a filter. \end{proof} We will see now that not only the filter property is transported through maps, but also the property of being an ultrafilter. \BeginLemma{ultrafilter-remains-under-map} Let $f: X\to Y$ be a map, $\ensuremath{{\filterFont F}}$ an ultrafilter on $X$. Then $f(\ensuremath{{\filterFont F}})$ is an ultrafilter on $Y$. \end{lemma} \begin{proof} It is enough to show that if $f(\ensuremath{{\filterFont F}})$ does not contain a set, it will contain its complement~\SetCite{Lemma 1.63}. In fact, assume that $H\not\in f(\ensuremath{{\filterFont F}})$, so that $\InvBild{f}{H}\not\in\ensuremath{{\filterFont F}}$.
Since $\ensuremath{{\filterFont F}}$ is an ultrafilter, we know that $X\setminus\InvBild{f}{H}\in\ensuremath{{\filterFont F}}$; but $X\setminus\InvBild{f}{H}=\InvBild{f}{Y\setminus H}$, so that $Y\setminus H\in f(\ensuremath{{\filterFont F}})$. \end{proof} \BeginExample{filter-for-product} Let $X$ be the product of the topological spaces $(X_{i})_{i\in I}$ with projections $\pi_{i}: X\to X_{i}$. For a filter $\ensuremath{{\filterFont F}}$ on $X$, we have $\pi_{j}(\ensuremath{{\filterFont F}}) = \{A_{j}\subseteq X_{j}\mid A_{j}\times\prod_{i\not=j}X_{i}\in\ensuremath{{\filterFont F}}\}.$ {\Large\ding{44}} \end{example} Continuity preserves convergence: \BeginProposition{cont-pres-conv} Let $X$ and $Y$ be topological spaces, and $f: X\to Y$ a map. \begin{enumerate} \item If $f$ is continuous, and $\ensuremath{{\filterFont F}}$ a filter on $X$, then $\ensuremath{{\filterFont F}}\to x$ implies $f(\ensuremath{{\filterFont F}})\to f(x)$ for all $x\in X$. \item If $\ensuremath{{\filterFont F}}\to x$ implies $f(\ensuremath{{\filterFont F}})\to f(x)$ for all $x\in X$ and all filters $\ensuremath{{\filterFont F}}$ on $X$, then $f$ is continuous. \end{enumerate} \end{proposition} \begin{proof} Let $V\in \ensuremath{{\mathfrak U}}(f(x))$, then there exists $U\in\ensuremath{{\mathfrak U}}(f(x))$ open with $U\subseteq V$. Since $\InvBild{f}{U}\in\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$, we conclude $U\in f(\ensuremath{{\filterFont F}})$, hence $V\in f(\ensuremath{{\filterFont F}})$. Thus $\ensuremath{{\mathfrak U}}(f(x))\subseteq f(\ensuremath{{\filterFont F}})$, which means that $f(\ensuremath{{\filterFont F}})\to f(x)$ indeed. This establishes the first part. Now assume that $\ensuremath{{\filterFont F}}\to x$ implies $f(\ensuremath{{\filterFont F}})\to f(x)$ for all $x\in X$ and an arbitrary filter $\ensuremath{{\filterFont F}}$ on $X$. Let $V\subseteq Y$ be open. 
Given $x\in \InvBild{f}{V}$, we find an open set $U$ with $x\in U\subseteq\InvBild{f}{V}$ in the following way. Because $x\in \InvBild{f}{V}$, we know $f(x)\in V$. Since $\ensuremath{{\mathfrak U}}(x)\to x$, we obtain from the assumption that $f(\ensuremath{{\mathfrak U}}(x))\to f(x)$, thus $\ensuremath{{\mathfrak U}}(f(x))\subseteq f(\ensuremath{{\mathfrak U}}(x))$. Because $V\in\ensuremath{{\mathfrak U}}(f(x))$, it follows $\InvBild{f}{V}\in\ensuremath{{\mathfrak U}}(x)$, hence we find an open set $U$ with $x\in U\subseteq \InvBild{f}{V}$. Consequently, $\InvBild{f}{V}$ is open in $X$. \end{proof} Thus continuity and filters cooperate in a friendly manner. \BeginProposition{initial-topology} Assume that $X$ carries the initial topology with respect to a family $(f_{i}: X\to X_{i})_{i\in I}$ of functions. Then $\ensuremath{{\filterFont F}}\to x$ iff $f_{i}(\ensuremath{{\filterFont F}})\to f_{i}(x)$ for all $i\in I$. \end{proposition} \begin{proof} Proposition~\ref{cont-pres-conv} shows that the condition is necessary. Assume that $f_{i}(\ensuremath{{\filterFont F}})\to f_{i}(x)$ for every $i\in I$, let $\tau_{i}$ be the topology on $X_{i}$. The sets $$\bigl\{\{\InvBild{f_{i_{1}}}{G_{i_{1}}}\cap\ldots\cap\InvBild{f_{i_{k}}}{G_{i_{k}}}\}\mid i_{1}, \ldots, i_{k}\in I, f_{i_{1}}(x)\in G_{i_{1}}\in\tau_{i_{1}}, \ldots, f_{i_{k}}(x)\in G_{i_{k}}\in \tau_{i_{k}}, k\in \mathbb{N}\bigr\}$$ form a base for the neighborhood filter for $x$ in the initial topology. Thus, given an open neighborhood $U$ of $x$, we have $\InvBild{f_{i_{1}}}{G_{i_{1}}}\cap\ldots\cap\InvBild{f_{i_{k}}}{G_{i_{k}}}\subseteq U$ for some suitable finite set of indices. Since $f_{i_{j}}(\ensuremath{{\filterFont F}})\to f_{i_{j}}(x)$, we infer $G_{i_{j}}\in f_{i_{j}}(\ensuremath{{\filterFont F}})$, hence $\InvBild{f_{i_{j}}}{G_{i_{j}}}\in\ensuremath{{\filterFont F}}$ for $1 \leq j\leq k$, thus $U\in\ensuremath{{\filterFont F}}$. 
This means $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$. Hence $\ensuremath{{\filterFont F}}\to x$, as asserted. \end{proof} We know that in a product a sequence converges iff its components converge. This is the counterpart for filters: \BeginCorollary{conv-in-a-product} Let $X = \prod_{i\in I} X_{i}$ be the product of the topological spaces. Then $\ensuremath{{\filterFont F}}\to (x_{i})_{i\in I}$ in $X$ iff $\ensuremath{{\filterFont F}}_{i}\to x_{i}$ in $X_{i}$ for all $i\in I$, where $\ensuremath{{\filterFont F}}_{i}$ is the $i$-th projection $\pi_{i}(\ensuremath{{\filterFont F}})$ of $\ensuremath{{\filterFont F}}$. \QED \end{corollary} The next observation further tightens the connection between topological properties and filters. It requires the existence of ultrafilters, so recall that we assume that the Axiom of Choice holds. \BeginTheorem{conv-vs-ultrafilter} Let $X$ be a topological space. Then $X$ is compact iff each ultrafilter converges. \end{theorem} Thus we tie compactness, i.e., the possibility to extract from each cover a finite subcover, to the convergence of ultrafilters. Hence an ultrafilter in a compact space cannot but converge. The proof of Alexander's Subbase Theorem~\SetCite{Theorem 1.99} indicates already that there is a fairly close connection between the Axiom of Choice and topological compactness. This connection is tightened here. \begin{proof} 1. Assume that $X$ is compact, but that we find an ultrafilter $\ensuremath{{\filterFont F}}$ which fails to converge. Hence we can find for each $x\in X$ an open neighborhood $U_{x}$ of $x$ which is not contained in $\ensuremath{{\filterFont F}}$. Since $\ensuremath{{\filterFont F}}$ is an ultrafilter, $X\setminus U_{x}\in \ensuremath{{\filterFont F}}$. Thus $\{X\setminus U_{x}\mid x\in X\}\subseteq\ensuremath{{\filterFont F}}$ is a collection of closed sets with $\bigcap_{x\in X}(X\setminus U_{x}) = \emptyset$.
Since $X$ is compact, we find a finite subset $F\subseteq X$ such that $\bigcap_{x\in F}(X\setminus U_{x}) = \emptyset$. But $X\setminus U_{x}\in \ensuremath{{\filterFont F}}$, and $\ensuremath{{\filterFont F}}$ is closed under finite intersections, hence $\emptyset\in\ensuremath{{\filterFont F}}$. This is a contradiction. 2. Assume that each ultrafilter converges. It is sufficient to show that each family ${\mathcal H}$ of closed sets for which every finite subfamily has a non-empty intersection has a non-empty intersection itself. Now, the set $\{\bigcap{\mathcal H}_{0}\mid {\mathcal H}_{0}\subseteq{\mathcal H}\text{ finite}\}$ of all finite intersections forms the base for a filter $\ensuremath{{\filterFont F}}_{0}$, which may be extended to an ultrafilter $\ensuremath{{\filterFont F}}$~\SetCite{Theorem 1.80}. By assumption $\ensuremath{{\filterFont F}}\to x$ for some $x$, hence $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$. The point $x$ is a candidate for being a member in the intersection. Assume the contrary. Then there exists $H\in {\mathcal H}$ with $x\not\in H$, so that $x\in X\setminus H$, which is open. Thus $X\setminus H\in\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$. On the other hand, $H = \bigcap\{H\}\in\ensuremath{{\filterFont F}}_{0}\subseteq\ensuremath{{\filterFont F}}$, so that $\emptyset\in\ensuremath{{\filterFont F}}$. Thus we arrive at a contradiction, and $x\in \bigcap{\mathcal H}$. Hence $\bigcap{\mathcal H}\not=\emptyset$. \end{proof} From Theorem~\ref{conv-vs-ultrafilter} we obtain Tihonov's celebrated theorem\footnote{``The Tychonoff Product Theorem concerning the stability of compactness under formation of topological products may well be regarded as the single most important theorem of general topology'' according to H. Herrlich and G. E. Strecker, quoted from~\cite[p. 85]{Herrlich-Choice}}\index{theorem!Tihonov} as an easy consequence. 
\BeginTheorem{tihonov}(Tihonov's Theorem) The product $\prod_{i\in I}X_{i}$ of topological spaces with $X_{i}\not= \emptyset$ for all $i\in I$ is compact iff each space $X_{i}$ is compact. \end{theorem} \begin{proof} If the product $X := \prod_{i\in I}X_{i}$ is compact, then $\Bild{\pi_{i}}{X} = X_{i}$ is compact by Proposition~\ref{image-is-compact}. Conversely, let $\ensuremath{{\filterFont F}}$ be an ultrafilter on $X$, and assume all $X_{i}$ are compact. Then $\pi_{i}(\ensuremath{{\filterFont F}})$ is by Lemma~\ref{ultrafilter-remains-under-map} an ultrafilter on $X_{i}$ for all $i\in I$, which converges to some $x_{i}$ by Theorem~\ref{conv-vs-ultrafilter}. Hence $\ensuremath{{\filterFont F}}\to (x_{i})_{i\in I}$ by Corollary~\ref{conv-in-a-product}. This implies the compactness of $X$ by another application of Theorem~\ref{conv-vs-ultrafilter}. \end{proof} According to~\cite[p. 146]{Engelking}, Tihonov established the theorem for a product of an arbitrary number of closed and bounded intervals of the real line (we know from the Heine-Borel Theorem~\SetCite{Theorem 1.88} that these intervals are compact). Kelley~\cite[p. 143]{Kelley} gives a proof of the non-trivial implication of the theorem which relies on Alexander's Subbase Theorem~\SetCite{Theorem 1.99}. It goes like this. It is sufficient to establish that, whenever we have a family of subbase elements each finite family of which fails to cover $X$, then the whole family will not cover $X$. The sets $\bigl\{\InvBild{\pi_{i}}{U}\mid U\subseteq X_{i}\text{ open}, i\in I\bigr\}$ form a subbase for the product topology of $X$. Let ${\mathcal S}$ be a family of sets taken from this subbase such that no finite family of elements of ${\mathcal S}$ covers $X$. Put ${\mathcal S}_{i} := \{U\subseteq X_{i}\mid \InvBild{\pi_{i}}{U}\in {\mathcal S}\}$, then ${\mathcal S}_{i}$ is a family of open sets in $X_{i}$.
Suppose ${\mathcal S}_{i}$ contains sets $U_{1}, \ldots, U_{k}$ which cover $X_{i}$, then $\InvBild{\pi_{i}}{U_{1}}, \ldots, \InvBild{\pi_{i}}{U_{k}}$ are elements of ${\mathcal S}$ which cover $X$; this is impossible, hence ${\mathcal S}_{i}$ fails to contain a finite family which covers $X_{i}$. Since $X_{i}$ is compact, there exists a point $x_{i}\in X_{i}$ with $x_{i}\not\in\bigcup{\mathcal S}_{i}$. But then $x := (x_{i})_{i\in I}$ cannot be a member of $\bigcup{\mathcal S}$. Hence ${\mathcal S}$ does not cover $X$. This completes the proof. Both proofs rely heavily on the Axiom of Choice\MMP[t]{Axiom of Choice}, the first one through the existence of an ultrafilter extending a given filter, the second one through Alexander's Subbase Theorem. The relationship of Tihonov's Theorem to the Axiom of Choice is even closer: It can actually be shown that the theorem and the Axiom of Choice are equivalent~\cite[Theorem~4.68]{Herrlich-Choice}; this requires, however, establishing the existence of topological products without any recourse to the infinite Cartesian product as a carrier. We have defined above the concept of a limit point of a filter. A weaker concept is that of an accumulation point. Speaking in terms of sequences, an accumulation point of a sequence has the property that each neighborhood of the point contains infinitely many elements of the sequence. This carries over to filters in the following way. \BeginDefinition{acc-point-filter} Given a topological space $X$, the point $x\in X$ is called an \emph{\index{accumulation point}\index{filter!accumulation point}accumulation point} of filter $\ensuremath{{\filterFont F}}$ iff $U\cap F\not=\emptyset$ for every $U\in\ensuremath{{\mathfrak U}}(x)$ and every $F\in\ensuremath{{\filterFont F}}$. \end{definition} Since $\ensuremath{{\filterFont F}}\to x$ iff $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$, it is clear that each limit $x$ of $\ensuremath{{\filterFont F}}$ is also an accumulation point of $\ensuremath{{\filterFont F}}$.
But a filter may fail to have an accumulation point at all. Consider the filter $\ensuremath{{\filterFont F}}$ over $\mathbb{R}$ which is generated by the filter base $\bigl\{]a, \infty[\mid a\in \mathbb{R}\bigr\}$; it is immediate that $\ensuremath{{\filterFont F}}$ does not have an accumulation point. Let us have a look at a sequence $(x_{n})_{n\in\mathbb{N}}$, and the filter $\ensuremath{{\filterFont F}}$ generated by the infinite tails $\bigl\{\{x_{m}\mid m\geq n\}\mid n\in\mathbb{N}\bigr\}$. If $x$ is an accumulation point of the sequence, $U\cap \{x_{m}\mid m\geq n\}\not=\emptyset$ for every neighborhood $U$ of $x$, thus $U\cap F\not=\emptyset$ for all $F\in \ensuremath{{\filterFont F}}$ and all such $U$. Conversely, if $x$ is an accumulation point for filter $\ensuremath{{\filterFont F}}$, it is clear that the defining property holds also for the elements of the base for the filter, thus $x$ is an accumulation point for the sequence. Hence we have found the ``right'' generalization from sequences to filters. An easy characterization of the set of all accumulation points goes like this. \BeginLemma{all-acc-points} The set of all accumulation points of filter $\ensuremath{{\filterFont F}}$ is exactly $\bigcap_{F\in\ensuremath{{\filterFont F}}}\Closure{F}$. \end{lemma} \begin{proof} This follows immediately from the observation that $x\in\Closure{A}$ iff $U\cap A\not=\emptyset$ for each neighborhood $U\in\ensuremath{{\mathfrak U}}(x)$. \end{proof} The lemma has an interesting consequence for the characterization of compact spaces through filters. \BeginCorollary{char-acc-point-ultra} $X$ is compact iff each filter on $X$ has an accumulation point. \end{corollary} \begin{proof} Let $\ensuremath{{\filterFont F}}$ be a filter in a compact space $X$, and assume that $\ensuremath{{\filterFont F}}$ does not have an accumulation point. Lemma~\ref{all-acc-points} implies that $\bigcap_{F\in\ensuremath{{\filterFont F}}}\Closure{F} = \emptyset$. 
Since $X$ is compact, we find $F_{1}, \ldots, F_{n}\in\ensuremath{{\filterFont F}}$ with $\bigcap_{i=1}^{n}\Closure{F_{i}} = \emptyset$. Thus $\bigcap_{i=1}^{n}F_{i} = \emptyset$. But this set is a member of $\ensuremath{{\filterFont F}}$, a contradiction. Now assume that each filter has an accumulation point. It is by Theorem~\ref{conv-vs-ultrafilter} enough to show that every ultrafilter $\ensuremath{{\filterFont F}}$ converges. An accumulation point $x$ for $\ensuremath{{\filterFont F}}$ is a limit: assume that $\ensuremath{{\filterFont F}}\not\to x$, then there exists $V\in\ensuremath{{\mathfrak U}}(x)$ with $V\not\in\ensuremath{{\filterFont F}}$, hence $X\setminus V\in\ensuremath{{\filterFont F}}$. But $V\cap F\not=\emptyset$ for all $F\in \ensuremath{{\filterFont F}}$, since $x$ is an accumulation point. This is a contradiction. \end{proof} This is a characterization of accumulation points in terms of converging filters. \BeginLemma{acc-iff-conv-finer-filter} In a topological space $X$, the point $x \in X$ is an accumulation point of filter $\ensuremath{{\filterFont F}}$ iff there exists a filter $\ensuremath{{\filterFont F}}_{0}$ with $\ensuremath{{\filterFont F}}\subseteq\ensuremath{{\filterFont F}}_{0}$ and $\ensuremath{{\filterFont F}}_{0}\to x$. \end{lemma} \begin{proof} Let $x$ be an accumulation point of $\ensuremath{{\filterFont F}}$, then $\{U\cap F\mid U\in\ensuremath{{\mathfrak U}}(x), F\in\ensuremath{{\filterFont F}}\}$ is a filter base. Let $\ensuremath{{\filterFont F}}_{0}$ be the filter generated by this base, then $\ensuremath{{\filterFont F}}\subseteq\ensuremath{{\filterFont F}}_{0}$, and certainly $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}_{0}$, thus $\ensuremath{{\filterFont F}}_{0}\to x$. Conversely, let $\ensuremath{{\filterFont F}}\subseteq\ensuremath{{\filterFont F}}_{0}\to x$. 
Since $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}_{0}$ holds as well, we conclude $U\cap F\not=\emptyset$ for all neighborhoods $U$ and all elements $F\in\ensuremath{{\filterFont F}}$, for otherwise we would have $\emptyset = U\cap F\in \ensuremath{{\filterFont F}}_{0}$ for some $U\in\ensuremath{{\mathfrak U}}(x)$ and some $F\in \ensuremath{{\filterFont F}}$, which contradicts $\emptyset\not\in\ensuremath{{\filterFont F}}_{0}$. Thus $x$ is indeed an accumulation point of $\ensuremath{{\filterFont F}}$. \end{proof} \subsection{Separation Properties} \label{sec:separation-props} We see from Example~\ref{simple-ordered-set} that a filter may converge to more than one point. This may be undesirable. Think of a filter which is based on a sequence, and each element of the sequence indicates an approximation step. Then you want the approximation to converge, but the result of this approximation process should be unique. We will have a look at this question, and we will see that this is actually a special case of separation properties. \BeginProposition{limits-are-unique} Given a topological space $X$, the following properties are equivalent \begin{enumerate} \item\label{limits-are-unique-1} If $x\not=y$ are different points in $X$, there exists $U\in\ensuremath{{\mathfrak U}}(x)$ and $V\in\ensuremath{{\mathfrak U}}(y)$ with $U\cap V=\emptyset$. \item\label{limits-are-unique-2} The limit of a converging filter is uniquely determined. \item\label{limits-are-unique-3} $\{x\} = \bigcap\{U\mid U\in\ensuremath{{\mathfrak U}}(x)\text{ is closed}\}$ for all points $x$. \item\label{limits-are-unique-4} The diagonal $\Delta := \{\langle x, x\rangle\mid x\in X\}$ is closed in $X\times X$.
\end{enumerate} \end{proposition} \begin{proof} \labelImpl{limits-are-unique-1}{limits-are-unique-2}: If $\ensuremath{{\filterFont F}}\to x$ and $\ensuremath{{\filterFont F}}\to y$ with $x\not= y$, we have $U\cap V\in\ensuremath{{\filterFont F}}$ for all $U\in\ensuremath{{\mathfrak U}}(x)$ and $V\in\ensuremath{{\mathfrak U}}(y)$, hence $\emptyset\in\ensuremath{{\filterFont F}}$. This is a contradiction. \labelImpl{limits-are-unique-2}{limits-are-unique-3}: Let $y\in \bigcap\{U\mid U\in\ensuremath{{\mathfrak U}}(x)\text{ is closed}\}$, thus $y$ is an accumulation point of $\ensuremath{{\mathfrak U}}(x)$. Hence there exists a filter $\ensuremath{{\filterFont F}}$ with $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}\to y$ by Lemma~\ref{acc-iff-conv-finer-filter}. Thus $x=y$. \labelImpl{limits-are-unique-3}{limits-are-unique-4}: Let $\langle x, y\rangle\not\in\Delta$, then there exists a closed neighborhood $W$ of $x$ with $y\not\in W$. Let $U\in\ensuremath{{\mathfrak U}}(x)$ open with $U\subseteq W$, and put $V := X\setminus W$, then $\langle x, y\rangle\in U\times V$ and $(U\times V)\cap\Delta=\emptyset$, and $U\times V$ is open in $X\times X$. \labelImpl{limits-are-unique-4}{limits-are-unique-1}: If $\langle x, y\rangle\in(X\times X)\setminus\Delta$, there exist open sets $U\in\ensuremath{{\mathfrak U}}(x)$ and $V\in\ensuremath{{\mathfrak U}}(y)$ with $(U\times V)\cap\Delta=\emptyset$, hence $U\cap V=\emptyset$. \end{proof} Looking at the proposition, we see that having a unique limit for a filter is tantamount to being able to separate two different points through disjoint open neighborhoods. Because these spaces are important, they deserve a special name. \BeginDefinition{def-hausdorff} A topological space is called a \emph{Hausdorff \index{space!Hausdorff, $T_{2}$}space} iff any two different points in $X$ can be separated by disjoint open neighborhoods, i.e., iff condition~(\ref{limits-are-unique-1}) in Proposition~\ref{limits-are-unique} holds.
Hausdorff spaces are also called \emph{$T_{2}$-spaces}. \end{definition} \BeginExample{sorgenfrey-line} Let $X := \mathbb{R}$, and define a topology through the base $\bigl\{[a, b[\mid a, b\in\mathbb{R}, a < b\bigr\}$. Then this is a Hausdorff space. This space is sometimes called the \emph{\index{Sorgenfrey line}Sorgenfrey line}. {\Large\ding{44}} \end{example} Being Hausdorff can be discerned from neighborhood filters: \BeginLemma{neighborhood-filters-hausdorff} Let $X$ be a topological space. Then $X$ is a Hausdorff space iff each $x\in X$ has a base $\ensuremath{{\mathfrak U}}_{0}(x)$ for its neighborhood filter such that for any $x\not= y$ there exist $U\in\ensuremath{{\mathfrak U}}_{0}(x)$ and $V\in\ensuremath{{\mathfrak U}}_{0}(y)$ with $U\cap V=\emptyset$. \QED \end{lemma} A first and easy consequence for maps into a Hausdorff space follows, viz., the set of arguments on which they coincide is closed. \BeginCorollary{eqset-is closed} Let $X$, $Y$ be topological spaces, and $f, g: X\to Y$ continuous maps. If $Y$ is a Hausdorff space, then $\{x\in X\mid f(x) = g(x)\}$ is closed. \end{corollary} \begin{proof} The map $t: x\mapsto \langle f(x), g(x)\rangle$ is a continuous map $X\to Y\times Y$. Since $\Delta\subseteq Y\times Y$ is closed by Proposition~\ref{limits-are-unique}, the set $\InvBild{t}{\Delta}$ is closed. But this is just the set in question. \end{proof} The reason for calling a Hausdorff space a $T_{2}$ space\footnote{\emph{T} stands for German \emph{Trennung}, i.e., separation} will become clear once we have discussed other ways of separating points and sets; then $T_{2}$ will be a point in a spectrum denoting separation properties. For the moment, we introduce two other separation properties which deal with the possibility of distinguishing two different points through open sets. Let for this $X$ be a topological space.
\begin{description} \item[$T_0$-space:] $X$ is called a \emph{$T_{0}$-space}\MMP[h]{$T_{0}, T_{1}$} iff, given two different points $x$ and $y$, there exists an open set $U$ which contains exactly one of them. \item[$T_1$-space:] $X$ is called a \emph{$T_{1}$-space\index{space!$T_{0}, T_{1}$}} iff, given two different points $x$ and $y$, there exist open neighborhoods $U$ of $x$ and $V$ of $y$ with $y\not\in U$ and $x\not\in V$. \end{description} The following examples demonstrate these spaces. \BeginExample{ex-t0-space} Let $X := \mathbb{R}$, and define the topologies on the real numbers through \begin{align*} \tau_{<} & := \{\emptyset, \mathbb{R}\}\cup\bigl\{]-\infty, a[\mid a\in\mathbb{R}\bigr\},\\ \tau_{\leq} & := \{\emptyset, \mathbb{R}\}\cup\bigl\{]-\infty, a]\mid a\in\mathbb{R}\bigr\}. \end{align*} Then both $\tau_{<}$ and $\tau_{\leq}$ are $T_{0}$-topologies, but neither is a $T_{1}$-topology: given $x < y$, every non-empty open set containing $y$ also contains $x$. (Note that every $T_{1}$-space is in particular a $T_{0}$-space.) {\Large\ding{44}} \end{example} This is an easy characterization of $T_{1}$-spaces. \BeginProposition{char-t1-spaces} A topological space $X$ is a $T_{1}$-space iff $\{x\}$ is closed for all $x\in X$. \end{proposition} \begin{proof} Let $y\in\Closure{\{x\}}$, then $y$ is in every open neighborhood $U$ of $x$. But this can happen in a $T_{1}$-space only if $x=y$. Conversely, if all singletons are closed and $y\not= x$, then $X\setminus\{y\}$ is an open neighborhood of $x$ which does not contain $y$, and $X\setminus\{x\}$ is an open neighborhood of $y$ which does not contain $x$. \end{proof} \BeginExample{ex-not-t1-space} Let $X$ be a set with at least two points, $x_{0}\in X$ be fixed. Put $\closOp{\emptyset} := \emptyset$ and $\closOp{A} := A\cup\{x_{0}\}$ for $A\not=\emptyset$. Then $\closOp{\cdot}$ is a closure operator, we look at the associated topology. Since $\{x\}$ is open for $x\not= x_{0}$, $X$ is a $T_{0}$ space, and since $\{x\}$ is not closed for $x\not= x_{0}$, $X$ is not $T_{1}$. {\Large\ding{44}} \end{example} \BeginExample{finite-ordered-t1} Let $(D, \leq)$ be a partially ordered set.
The topology associated with the closure operator for this order according to Example~\ref{finite-ordered-for-closure} is $T_{1}$ iff $y\leq x \Leftrightarrow x=y$, because this is what $\closOp{\{x\}} = \{x\}$ says. {\Large\ding{44}} \end{example} \BeginExample{cofinite-not-t2} Let $X := \mathbb{N}$, and $\tau := \{A\subseteq \mathbb{N}\mid A\text{ is cofinite}\}\cup\{\emptyset\}$. Recall that a cofinite set is defined as having a finite complement. Then $\tau$ is a topology on $X$ such that $X\setminus\{x\}$ is open for each $x\in X$. Hence $X$ is a $T_{1}$-space. But $X$ is not Hausdorff. If $x\not= y$ and $U$ is an open neighborhood of $x$, then $X\setminus U$ is finite. Thus if $V$ is disjoint from $U$, we have $V\subseteq X\setminus U$. But then $V$ cannot be an open set with $y\in V$. {\Large\ding{44}} \end{example} While the properties discussed so far deal with the relationship of two different points to each other, the next group of axioms looks at closed sets; given a closed set $F$, we call an open set $U$ with $F\subseteq U$ a neighborhood of $F$. Let again $X$ be a topological space. \begin{description} \item[$T_3$-space:] $X$ is a \emph{$T_{3}$-space}\MMP{$T_{3}, T_{3\ensuremath{\frac{1}{2}}}, T_{4}$} iff given a point $x$ and a closed set $F$, which does not contain $x$, there exist disjoint open neighborhoods of $x$ and of $F$. \item[$T_{3\ensuremath{\frac{1}{2}}}$-space:] $X$ is a \emph{$T_{3\ensuremath{\frac{1}{2}}}$-space} iff given a point $x$ and a closed set $F$ with $x\not\in F$ there exists a continuous function $f: X\to \mathbb{R}$ with $f(x) = 1$ and $f(y) = 0$ for all $y\in F$. \item[$T_4$-space:] $X$ is a \emph{$T_{4}$-space} \index{space!$T_{3}, T_{3\ensuremath{\frac{1}{2}}}, T_{4}$} iff two disjoint closed sets have disjoint open neighborhoods. \end{description} $T_{3}$ and $T_{4}$ deal with the possibility of separating a closed set from a point resp. another closed set. 
$T_{3\ensuremath{\frac{1}{2}}}$ is squeezed-in between these axioms. Because $\{x\in X\mid f(x) < 1/2\}$ and $\{x\in X\mid f(x) > 1/2\}$ are disjoint open sets, it is clear that each $T_{3\ensuremath{\frac{1}{2}}}$-space is a $T_{3}$-space. It is also clear that the defining property of $T_{3}$ is a special case of $T_{4}$, provided singletons are closed. The relationship and further properties will be explored now. It might be noted that continuous functions play an important r\^ole here in separating objects. $T_{3\ensuremath{\frac{1}{2}}}$ entails among others that there are ``enough'' continuous functions. Engel\-king~\cite[p. 29 and 2.7.17]{Engelking} mentions that there are spaces which satisfy $T_{3}$ but have only constant continuous functions, and comments ``they are, however, fairly complicated ...'' (p. 29), Kuratowski~\cite[p. 121]{Kuratowski} makes a similar remark. So we will leave it at that and direct the reader, who wants to know more, to these sources and the papers quoted there. We look at some examples. \BeginExample{t3-nott2-nott1} Let $X := \{1, 2, 3, 4\}$. \begin{enumerate} \item With the indiscrete topology $\{\emptyset, X\}$, $X$ is a $T_{3}$ space, but it is neither $T_{2}$ nor $T_{1}$. \item Take the topology $\bigl\{\{1\}, \{1, 2\}, \{1, 3\}, \{1, 2, 3\}, X, \emptyset\bigr\}$, then two closed sets are only disjoint when one of them is empty, because all of them contain the point $4$ (with the exception of $\emptyset$, of course). Thus the space is $T_{4}$. The point $1$ and the closed set $\{4\}$ cannot be separated by open sets, thus the space is not $T_{3}$. \end{enumerate} {\Large\ding{44}} \end{example} The next example displays a space which is $T_{2}$ but not $T_{3}$. \BeginExample{t2-but-not-t3} Let $X := \mathbb{R}$, and put $Z := \{1/n\mid n\in \mathbb{N}\}$. Define in addition for $x\in \mathbb{R}$ and $i\in\mathbb{N}$ the sets $ B_{i}(x) := ]x-1/i, x+1/i[.
$ Then $\ensuremath{{\mathfrak U}}_{0}(x) := \{B_{i}(x) \mid i\in \mathbb{N}\}$ for $x\not=0$, and $\ensuremath{{\mathfrak U}}_{0}(0) := \{B_{i}(0)\setminus Z\mid i\in \mathbb{N}\}$ define neighborhood filters for a Hausdorff space by Lemma~\ref{neighborhood-filters-hausdorff}. But this is not a $T_{3}$-space. One notes first that $Z$ is closed: if $x\not\in Z$ and $x\not\in[0, 1]$, one certainly finds $i\in \mathbb{N}$ with $B_{i}(x)\cap Z=\emptyset$, and if $0< x \leq 1$, there exists $k$ with $1/(k+1) < x < 1/k$, so taking $1/i$ less than the minimal distance of $x$ to $1/k$ and $1/(k+1)$, one has $B_{i}(x)\cap Z = \emptyset$. If $x=0$, each neighborhood contains an open set which is disjoint from $Z$. Now each open set $U$ which contains $Z$ contains also $0$, so we cannot separate $0$ from $Z$. {\Large\ding{44}} \end{example} Just one positive message: the reals satisfy $T_{3\ensuremath{\frac{1}{2}}}$. \BeginExample{reals-are-t3half} Let $F\subseteq \mathbb{R}$ be closed and non-empty, then \begin{equation*} f(t) := \inf_{y\in F}\frac{|t-y|}{1+|t-y|} \end{equation*} defines a continuous function $f: \mathbb{R}\to [0, 1]$ with $z\in F\Leftrightarrow f(z) = 0$. Thus, if $x\not\in F$, we have $f(x)>0$, so that $ y \mapsto f(y)/f(x) $ is a continuous function with the desired properties. Thus the reals with the usual topology are a $T_{3\ensuremath{\frac{1}{2}}}$-space. {\Large\ding{44}} \end{example} The next proposition is a characterization of $T_{3}$-spaces in terms of open neighborhoods, motivated by the following observation. Take a point $x\in \mathbb{R}$ and an open set $G\subseteq\mathbb{R}$ with $x\in G$. Then there exists $r>0$ such that the open interval $]x-r, x+r[$ is entirely contained in $G$. But we can say more: by making this open interval a little bit smaller, we can actually fit a closed interval around $x$ into the given neighborhood as well, so, for example, $x\in\ ]x-r/2, x+r/2[\ \subseteq\ [x-r/2, x+r/2]\ \subseteq\ ]x-r, x+r[\ \subseteq G$.
Thus we find for the given neighborhood another neighborhood the closure of which is entirely contained in it. \BeginProposition{char-t3-ngbh} Let $X$ be a topological space. Then the following are equivalent. \begin{enumerate} \item\label{char-t3-ngbh-1} $X$ is a $T_{3}$-space. \item\label{char-t3-ngbh-2} For every point $x$ and every open neighborhood $U$ of $x$ there exists an open neighborhood $V$ of $x$ with $\Closure{V}\subseteq U$. \end{enumerate} \end{proposition} \begin{proof} \labelImpl{char-t3-ngbh-1}{char-t3-ngbh-2}: Let $U$ be an open neighborhood of $x$, then $x$ is not contained in the closed set $X\setminus U$, so by $T_{3}$ we find disjoint open sets $U_{1}, U_{2}$ with $x\in U_{1}$ and $X\setminus U\subseteq U_{2}$, hence $X\setminus U_{2}\subseteq U$. Because $U_{1}\subseteq X\setminus U_{2}\subseteq U$, and $X\setminus U_{2}$ is closed, we conclude $\Closure{U}_{1}\subseteq U$. \labelImpl{char-t3-ngbh-2}{char-t3-ngbh-1}: Assume that we have a point $x$ and a closed set $F$ with $x\not\in F$. Then $x\in X\setminus F$, so that $X\setminus F$ is an open neighborhood of $x$. By assumption, there exists an open neighborhood $V$ of $x$ with $x\in \Closure{V}\subseteq X\setminus F$, then $V$ and $X\setminus(\Closure{V})$ are disjoint open neighborhoods of $x$ resp. $F$. \end{proof} This characterization can be generalized to $T_{4}$-spaces (roughly, by replacing the point through a closed set) in the following way. \BeginProposition{char-t4-ngbh} Let $X$ be a topological space. Then the following are equivalent. \begin{enumerate} \item\label{char-t4-ngbh-1} $X$ is a $T_{4}$-space. \item\label{char-t4-ngbh-2} For every closed set $F$ and every open neighborhood $U$ of $F$ there exists an open neighborhood $V$ of $F$ with $F\subseteq V\subseteq\Closure{V}\subseteq U$. \end{enumerate} \end{proposition} The proof of this proposition is actually nearly a copy of the preceding one, \emph{mutatis mutandis}. 
\begin{proof} \labelImpl{char-t4-ngbh-1}{char-t4-ngbh-2}: Let $U$ be an open neighborhood of the closed set $F$, then the closed set $F' := X\setminus U$ is disjoint to $F$, so that we can find disjoint open neighborhoods $U_{1}$ of $F$ and $U_{2}$ of $F'$, thus $U_{1}\subseteq X\setminus U_{2} \subseteq X\setminus F' = U$; since $X\setminus U_{2}$ is closed, this yields $\Closure{U}_{1}\subseteq U$, so $V := U_{1}$ is the open neighborhood we are looking for. \labelImpl{char-t4-ngbh-2}{char-t4-ngbh-1}: Let $F$ and $F'$ be disjoint closed sets, then $X\setminus F'$ is an open neighborhood for $F$. Let $V$ be an open neighborhood for $F$ with $F\subseteq V\subseteq \Closure{V}\subseteq X\setminus F'$, then $V$ and $U := X\setminus(\Closure{V})$ are disjoint open neighborhoods of $F$ and $F'$. \end{proof} We mentioned above that the separation axiom $T_{3\ensuremath{\frac{1}{2}}}$ makes sure that there are enough continuous functions on the space. Actually, the continuous functions even determine the topology in this case, as the following characterization shows. \BeginProposition{char-t_3half} Let $X$ be a topological space, then the following statements are equivalent. \begin{enumerate} \item\label{char-t_3half-1} $X$ is a $T_{3\ensuremath{\frac{1}{2}}}$-space. \item\label{char-t_3half-2} $\beta := \bigl\{\InvBild{f}{U}\mid f: X\to \mathbb{R} \text{ is continuous}, U\subseteq \mathbb{R}\text{ is open}\bigr\}$ constitutes a basis for the topology of $X$. \end{enumerate} \end{proposition} \begin{proof} The elements of $\beta$ are open sets, since they are comprised of inverse images of open sets under continuous functions. \labelImpl{char-t_3half-1}{char-t_3half-2}: Let $G\subseteq X$ be an open set with $x\in G$. We show that we can find $B\in \beta$ with $x\in B\subseteq G$. In fact, since $X$ is $T_{3\ensuremath{\frac{1}{2}}}$, there exists a continuous function $f: X\to \mathbb{R}$ with $f(x) = 1$ and $f(y) = 0$ for $y\in X\setminus G$.
Then $B := \{y\in X\mid f(y) > 1/2\} = \InvBild{f}{]1/2, +\infty[}$ is a suitable element of $\beta$: we have $x\in B$ because $f(x) = 1$, and $B\subseteq G$ because $f$ vanishes outside $G$. \labelImpl{char-t_3half-2}{char-t_3half-1}: Take $x\in X$ and a closed set $F$ with $x\not\in F$. Then $U := X\setminus F$ is an open neighborhood of $x$. Then we can find $G\subseteq \mathbb{R}$ open and $f: X\to \mathbb{R}$ continuous with $x\in \InvBild{f}{G}\subseteq U$. Since $G$ is the union of open intervals, we find an open interval $I := ]a, b[\ \subseteq G$ with $f(x)\in I$. Let $g: \mathbb{R}\to \mathbb{R}$ be continuous with $g(f(x)) = 1$ and $g(t) = 0$, if $t\not\in I$; such a function exists since $\mathbb{R}$ is a $T_{3\ensuremath{\frac{1}{2}}}$-space (Example~\ref{reals-are-t3half}). Then $g\circ f$ is a continuous function with the desired properties. Consequently, $X$ is a $T_{3\ensuremath{\frac{1}{2}}}$-space. \end{proof} The separation axioms give rise to names for classes of spaces. We will introduce these traditional names now. \BeginDefinition{traditional names} Let $X$ be a topological space, then $X$ is called \begin{itemize} \item \emph{\index{space!regular}regular} iff $X$ satisfies $T_{1}$ and $T_{3}$, \item \emph{\index{space!completely regular}completely regular}, iff $X$ satisfies $T_{1}$ and $T_{3\ensuremath{\frac{1}{2}}}$, \item \emph{\index{space!normal}normal}, iff $X$ satisfies $T_{1}$ and $T_{4}$. \end{itemize} \end{definition} The reason $T_{1}$ is always included is that one wants to have every singleton as a closed set, which, as the examples show, is not always the case. Each regular space is a Hausdorff space, each completely regular space is regular, and each normal space is regular. We will obtain as a consequence of Urysohn's Lemma that each normal space is completely regular as well (Corollary~\ref{normal-arecompletely-reg}). In a completely regular space we can separate a point $x$ from a closed set not containing $x$ through a continuous function.
It turns out that normal spaces have an analogous property: Given two disjoint closed sets, we can separate these sets through a continuous function. This is what \emph{\index{Urysohn's Lemma}Urysohn's Lemma} says, a famous result from the beginnings of set-theoretic topology. To be precise: \BeginTheorem{urysohns-lemma}(Urysohn) Let $X$ be a normal space. Given disjoint closed sets $F_{0}$ and $F_{1}$, there exists a continuous function $f: X\to \mathbb{R}$ such that $f(x) = 0$ for $x\in F_{0}$ and $f(x) = 1$ for $x\in F_{1}$. \end{theorem} We need some technical preparations for proving Theorem~\ref{urysohns-lemma}; this gives also the opportunity to introduce the concept of a dense set. \BeginDefinition{set-is-dense} A subset $D\subseteq X$ of a topological space $X$ is called \emph{\index{dense set}dense} iff $\Closure{D} = X$. \end{definition} Dense sets are fairly practical when it comes to comparing continuous functions for equality: it suffices that the functions coincide on a dense set; then they will be equal. Just for the record: \BeginLemma{equal-on-dense} Let $f, g: X\to Y$ be continuous maps with $Y$ Hausdorff, and assume that $D\subseteq X$ is dense. Then $f = g$ iff $f(x) = g(x)$ for all $x\in D$. \end{lemma} \begin{proof} Clearly, if $f = g$, then $f(x) = g(x)$ for all $x\in D$. So we have to establish the other direction. Because $Y$ is a Hausdorff space, $\Delta_{Y}:= \{\langle y, y\rangle\mid y\in Y\}$ is closed (Proposition~\ref{limits-are-unique}), and because $f\times g: X\times X\to Y\times Y$ is continuous, $\InvBild{(f\times g)}{\Delta_{Y}}\subseteq X\times X$ is closed as well. The latter set contains $\Delta_{D}$, hence its closure $\Delta_{X}$. \end{proof} It is immediate that if $D$ is dense, then $U\cap D\not=\emptyset$ for each non-empty open set $U$, so in particular each neighborhood of a point meets the dense set $D$.
To provide an easy example, both $\mathbb{Q}$ and $\mathbb{R}\setminus\mathbb{Q}$ are dense subsets of $\mathbb{R}$ in the usual topology. Note that $\mathbb{Q}$ is countable, so $\mathbb{R}$ has even a countable dense set. The first lemma lets a family of subsets indexed by a dense subset of $\mathbb{R}$ exhaust a given set, and provides a useful real function. \BeginLemma{exhaust-family-1} Let $M$ be a set, $D\subseteq\mathbb{R}_{+}$ be dense, and $(E_{t})_{t\in D}$ be a family of subsets of $M$ with these properties: \begin{itemize} \item if $t < s$, then $E_{t}\subseteq E_{s}$, \item $M = \bigcup_{t\in D}E_{t}$. \end{itemize} Put $f(m) := \inf\{t\in D\mid m\in E_{t}\}$, then we have for all $s\in \mathbb{R}$ \begin{enumerate} \item $\{m\mid f(m) < s\} = \bigcup\{E_{t}\mid t\in D, t < s\}$, \item $\{m\mid f(m)\leq s\} = \bigcap\{E_{t}\mid t\in D, t > s\}$. \end{enumerate} \end{lemma} \begin{proof} 1. Let us work on the first equality. If $f(m) < s$, there exists $t < s$ with $m\in E_{t}$. Conversely, if $m\in E_{t}$ for some $t < s$, then $f(m) = \inf\{r\in D\mid m\in E_{r}\} \leq t < s$. 2. For the second equality, assume $f(m) \leq s$, then we can find for each $r\in D$ with $r > s$ some $t\in D$ with $t < r$ and $m\in E_{t}\subseteq E_{r}$. To establish the other inclusion, assume that $f(m) \leq t$ for all $t > s$. If $f(m) = r > s$, we can find some $t'\in D$ with $r > t' > s$, hence $f(m) \leq t'$. This is a contradiction, hence $f(m) \leq s$. \end{proof} This lemma, which does not assume a topology on $M$, but requires only a plain set, is extended now for the topological scenario in which we will use it. We assume that each set $E_{t}$ is open, and we assume that $E_{t}$ contains the closures of its predecessors.
Then it will turn out that the function we just have defined is continuous, specifically: \BeginLemma{exhaust-family-2} Let $X$ be a topological space, $D\subseteq\mathbb{R}_{+}$ a dense subset, and assume that $(E_{t})_{t\in D}$ is a family of open sets with these properties \begin{itemize} \item if $t < s$, then $\Closure{E}_{t}\subseteq E_{s}$, \item $X = \bigcup_{t\in D}E_{t}$. \end{itemize} Then $f: x \mapsto \inf\{t\in D\mid x\in E_{t}\}$ defines a continuous function on $X$. \end{lemma} \begin{proof} 0. Because a subbase for the topology on $\mathbb{R}$ is comprised of the intervals $]-\infty, x[$ resp. $]x, +\infty[$, we see from Lemma~\ref{cont-for-subbase} that it is sufficient to show that for any $s\in \mathbb{R}$ the sets $\{x\in X \mid f(x) < s\}$ and $\{x\in X\mid f(x) > s\}$ are open, since they are the corresponding inverse images under $f$. For the latter set we show that its complement $\{x\in X\mid f(x) \leq s\}$ is closed. Fix $s\in \mathbb{R}$. 1. We obtain from Lemma~\ref{exhaust-family-1} that $\{x\in X \mid f(x) < s\}$ equals $\bigcup \{E_{t}\mid t\in D, t < s\}$; since all sets $E_{t}$ are open, their union is. Hence $\{x\in X \mid f(x) < s\}$ is open. 2. We obtain again from Lemma~\ref{exhaust-family-1} that $\{x\in X\mid f(x) \leq s\}$ equals $\bigcap\{E_{t}\mid t\in D, t > s\}$, so if we can show that $ \bigcap\{E_{t}\mid t\in D, t > s\} = \bigcap\{\Closure{E}_{t}\mid t\in D, t > s\}$, we are done. In fact, the left hand side is contained in the right hand side, so assume that $x$ is an element of the right hand side. If $x$ is not contained in the left hand side, we find $t'>s$ with $t'\in D$ such that $x\not\in E_{t'}$. Because $D$ is dense, we find some $r\in D$ with $s < r < t'$, and for this $r$ we have $\Closure{E}_{r}\subseteq E_{t'}$. But then $x\not\in \Closure{E}_{r}$, hence $x\not\in\bigcap \{\Closure{E}_{t}\mid t\in D, t > s\}$, a contradiction. Thus both sets are equal, so that $\{x\in X \mid f(x)\leq s\}$ is closed.
\end{proof} We are now in a position to establish Urysohn's Lemma. The idea of the proof rests on this observation for a $T_{4}$-space $X$: suppose that we have open sets $A$ and $B$ with $A\subseteq\Closure{A}\subseteq B$. Then we can find an open set $C$ such that $\Closure{A}\subseteq C \subseteq \Closure{C}\subseteq B$, see Proposition~\ref{char-t4-ngbh}. { Denote, just for this proof, for open sets $A, B$ the fact that $\Closure{A}\subseteq B$ by $A\sqsubseteq^* B$. Then we may express the idea above by saying that $A\sqsubseteq^* B$ implies the existence of an open set $C$ with $A\sqsubseteq^* C \sqsubseteq^* B$, so $C$ may be squeezed in. But now we have $A\sqsubseteq^* C$ and $C\sqsubseteq^* B$, so we find open sets $E$ and $F$ with $A\sqsubseteq^* E \sqsubseteq^* C$ and $C \sqsubseteq^* F \sqsubseteq^* B$, arriving at the chain $A \sqsubseteq^* E \sqsubseteq^* C \sqsubseteq^* F \sqsubseteq^* B$. But why stop here? The proof makes this argument systematic and constructs in this way a continuous function. \begin{proof} 1. Let $D := \{p/2^{q}\mid p, q\text{ non-negative integers}\}$. These are all dyadic numbers, which are dense in $\mathbb{R}_{+}$. We are about to construct a family $(E_{t})_{t\in D}$ of open sets $E_{t}$ indexed by $D$ in the following way. 2. Put $E_{t} := X$ for $t>1$, and let $E_{1} := X\setminus F_{1}$, moreover let $E_{0}$ be an open set containing $F_{0}$ with $E_{0}\sqsubseteq^* E_{1}$; such a set exists by Proposition~\ref{char-t4-ngbh}, since $E_{1}$ is an open neighborhood of the closed set $F_{0}$. We now construct open sets $E_{p/2^{n}}$ by induction on $n$ in the following way. Assume that we have already constructed open sets $$E_{0}\sqsubseteq^* E_{\frac{1}{2^{n-1}}}\sqsubseteq^* E_{\frac{2}{2^{n-1}}}\ldots\sqsubseteq^* E_{\frac{2^{n-1}-1}{2^{n-1}}} \sqsubseteq^* E_{1}.$$ Let $t = \frac{2m+1}{2^{n}}$, then we find an open set $E_{t}$ with $E_{\frac{2m}{2^{n}}}\sqsubseteq^* E_{t}\sqsubseteq^* E_{\frac{2m+2}{2^{n}}}$; we do this for all $m$ with $0 \leq m \leq 2^{n-1}-1$. 3. Look as an illustration at the case $n=3$.
We have found already the open sets $E_{0}\sqsubseteq^* E_{1/4}\sqsubseteq^* E_{1/2}\sqsubseteq^* E_{3/4}\sqsubseteq^* E_{1}$. Then the construction goes on with finding open sets $E_{1/8}, E_{3/8}, E_{5/8}$ and $E_{7/8}$ such that after the step is completed, we obtain this chain. \begin{equation*} E_{0}\sqsubseteq^* E_{1/8}\sqsubseteq^* E_{1/4}\sqsubseteq^* E_{3/8}\sqsubseteq^* E_{1/2}\sqsubseteq^* E_{5/8}\sqsubseteq^* E_{3/4}\sqsubseteq^* E_{7/8}\sqsubseteq^* E_{1}. \end{equation*} 4. In this way we construct a family $(E_{t})_{t\in D}$ with the properties requested by Lemma~\ref{exhaust-family-2}. It yields a continuous function $f: X\to \mathbb{R}$ with $f(x) = 0$ for all $x\in F_{0}$ and $f(x) = 1$ for all $x\in F_{1}$. \end{proof} } Urysohn's Lemma is used to prove the Tietze Extension Theorem, which we will only state, but not prove. \BeginTheorem{tietze-extension} Let $X$ be a $T_{4}$-space, and $f: A\to \mathbb{R}$ be a function which is continuous on a closed subset $A$ of $X$. Then $f$ can be extended to a continuous function $f^{*}$ on all of $X$. \QED \end{theorem} We obtain as an immediate consequence of Urysohn's Lemma \BeginCorollary{normal-arecompletely-reg} A normal space is completely regular. \end{corollary} We have obtained a hierarchy of spaces through gradually tightening the separation properties, and found that continuous functions help with the separation. The question arises, how compactness fits into this hierarchy. It turns out that a compact Hausdorff space is normal; the converse obviously does not hold: the reals with the Euclidean topology are normal, but by no means compact. We call a subset $K$ in a topological space $X$ compact iff it is compact as a subspace, i.e., a compact topological space in its own right. This is a first and fairly straightforward observation. \BeginLemma{closed-is-compact} A closed subset $F$ of a compact space $X$ is compact.
\end{lemma} \begin{proof} Let $(G_{i}\cap F)_{i\in I}$ be an open cover of $F$ with $G_{i}\subseteq X$ open, then $\{X\setminus F\}\cup\{G_{i}\mid i\in I\}$ is an open cover of $X$ (note that $X\setminus F$ is open, since $F$ is closed), so we can find a finite subset $J\subseteq I$ such that $\{X\setminus F\}\cup\{G_{j}\mid j\in J\}$ covers $X$, hence $\{G_{j}\cap F\mid j\in J\}$ covers $F$. \end{proof} In a Hausdorff space, the converse holds as well: \BeginLemma{compact-is-closed} Let $X$ be a Hausdorff space, and $K\subseteq X$ compact, then \begin{enumerate} \item Given $x\not\in K$, there exist disjoint open neighborhoods $U$ of $x$ and $V$ of $K$. \item $K$ is closed. \end{enumerate} \end{lemma} \begin{proof} Given $x\not\in K$, we want to find $U\in\ensuremath{{\mathfrak U}}(x)$ with $U\cap K=\emptyset$ and $V\supseteq K$ open with $U\cap V = \emptyset$. Let's see, how to do that. There exist for $x$ and any element $y\in K$ disjoint open neighborhoods $U_{y}\in\ensuremath{{\mathfrak U}}(x)$ and $W_{y}\in\ensuremath{{\mathfrak U}}(y)$, because $X$ is Hausdorff. Then $(W_{y})_{y\in K}$ is an open cover of $K$, hence by compactness there exists a finite subset $K_{0}\subseteq K$ such that $\{W_{y}\mid y\in K_{0}\}$ covers $K$. But then $U := \bigcap_{y\in K_{0}} U_{y}$ is an open neighborhood of $x$ which is disjoint from $V := \bigcup_{y\in K_{0}} W_{y}$, hence from $K$. $V$ is the open neighborhood of $K$ we are looking for. This establishes the first part, the second follows as an immediate consequence. \end{proof} Look at the reals as an illustrative example. \BeginCorollary{compact-subset-real} $A\subseteq\mathbb{R}$ is compact iff it is closed and bounded. \end{corollary} \begin{proof} If $A \subseteq \mathbb{R}$ is compact, then it is closed by Lemma~\ref{compact-is-closed}, since $\mathbb{R}$ is a Hausdorff space. Since $A$ is compact, it is also bounded. If, conversely, $A\subseteq\mathbb{R}$ is closed and bounded, then we can find a closed interval $[a, b]$ such that $A\subseteq [a, b]$.
We know from the Heine-Borel Theorem~\SetCite{Theorem 1.88} that this interval is compact, and a closed subset of a compact space is compact by Lemma~\ref{closed-is-compact}. \end{proof} This has yet another, frequently used consequence, viz., that a continuous real valued function on a compact space assumes its minimal and its maximal value. Just for the record: \BeginCorollary{compact-yields-extrema} Let $X$ be a compact Hausdorff space, $f: X\to \mathbb{R}$ a continuous map. Then there exist $x_{*}, x^{*}\in X$ with $f(x_{*}) = \min\Bild{f}{X}$ and $f(x^{*}) = \max \Bild{f}{X}$. \QED \end{corollary} But ---~after travelling an interesting side path~--- let us return to the problem of establishing that a compact Hausdorff space is normal. We know now that we can separate a point from a compact subset through disjoint open neighborhoods. This is but a small step from establishing the solution to the above problem. \BeginProposition{compact-is-normal} A compact Hausdorff space is normal. \end{proposition} \begin{proof} Let $X$ be compact, $A$ and $B$ disjoint closed subsets. Since $X$ is compact, $A$ and $B$ are compact as well by Lemma~\ref{closed-is-compact}. Now the rest is an easy application of Lemma~\ref{compact-is-closed}. Given $x\in B$, there exist disjoint open neighborhoods $U_{x}\in\ensuremath{{\mathfrak U}}(x)$ of $x$ and $V_{x}$ of $A$. Let $B_{0}$ be a finite subset of $B$ such that $U := \bigcup\{U_{x}\mid x\in B_{0}\}$ covers $B$ and $V := \bigcap\{V_{x}\mid x\in B_{0}\}$ is an open neighborhood of $A$. $U$ and $V$ are disjoint. \end{proof} From the point of view of separation, to be compact is for a topological space a stronger property than being normal. The example $\mathbb{R}$ shows that this is a strictly stronger property. We will show now that $\mathbb{R}$ is just one point apart from being compact by investigating locally compact spaces.
\subsection{Local Compactness and Compactification} \label{sec:local-comp-comp} We restrict ourselves in this section to Hausdorff spaces. Sometimes a space is not compact but has enough compact subsets, because each point has a compact neighborhood. These spaces are called locally compact, and we investigate properties they share with compact spaces as well as properties which distinguish them from compact spaces. We show also that a locally compact space misses being compact by just one point. Adding this point will make it compact, so we have an example here where we embed a space into one with a desired property. While we are compactifying spaces, we also provide another one, named after Stone and $\check{\mathrm{C}}$ech, which requires the basic space to be completely regular. We establish also another classic, the Baire Theorem, which states that in a locally compact $T_{3}$ space the intersection of a countable number of open dense sets is dense again; applications will later on capitalize on this observation. \BeginDefinition{local-compact} Let $X$ be a Hausdorff space. $X$ is called \emph{\index{space!locally compact}\index{compact!locally compact}locally compact} iff for each $x\in X$ and each open neighborhood $U\in \ensuremath{{\mathfrak U}}(x)$ there exists a neighborhood $V\in\ensuremath{{\mathfrak U}}(x)$ such that $\Closure{V}$ is compact and $\Closure{V}\subseteq U$. \end{definition} Thus the compact neighborhoods form a basis for the neighborhood filter for each point. This implies that we can find for each compact subset an open neighborhood with compact closure. The proof of this property gives an indication of how to argue in locally compact spaces. \BeginProposition{compact-closure-for-open} Let $X$ be a locally compact space, $K$ a compact subset. Then there exists an open neighborhood $U$ of $K$ and a compact set $K'$ with $K\subseteq U\subseteq K'$.
\end{proposition} \begin{proof} Let $x\in K$, then we find an open neighborhood $U_{x}\in\ensuremath{{\mathfrak U}}(x)$ with $\Closure{U}_{x}$ compact. Then $(U_{x})_{x\in K}$ is a cover for $K$, and there exists a finite subset $K_{0}\subseteq K$ such that $(U_{x})_{x\in K_{0}}$ covers $K$. Put $U := \bigcup_{x\in K_{0}}U_{x}$, and note that this open set has a compact closure. \end{proof} So this is not too bad: We have plenty of compact sets in a locally compact space. Such a space is very nearly compact. We add to $X$ just one point, traditionally called $\infty$ and define the neighborhoods for $\infty$ in such a way that the resulting space is compact. The obvious way to do that is to make all complements of compact sets a neighborhood of $\infty$, because it will then be fairly easy to construct from a cover of the new space a finite subcover. This is what the compactification which we discuss now will do for you. We carry out the construction in a sequence of lemmas, just in order to render the process a bit more transparent. \BeginLemma{l1-alexandrov-one-point} Let $X$ be a Hausdorff space with topology $\tau$, $\infty\not\in X$ be a distinguished new point. Put $X^{*} := X\cup\{\infty\}$, and define\MMP{One point extension} \begin{equation*} \tau^{*} := \{U\subseteq X^{*}\mid U\cap X\in\tau\}\cup\{U\subseteq X^{*}\mid \infty\in U, X\setminus U\text{ is compact}\}. \end{equation*} Then $\tau^{*}$ is a topology on $X^{*}$, and the identity $i_{X}: X\to X^{*}$ is $\tau$-$\tau^{*}$-continuous. \end{lemma} \begin{proof} $\emptyset$ and $X^{*}$ are obviously members of $\tau^{*}$; note that $X\setminus U$ being compact entails $U\cap X$ being open. Let $U_{1}, U_{2}\in\tau^{*}$. If $\infty\in U_{1}\cap U_{2}$, then $X\setminus (U_{1}\cap U_{2})$ is the union of two compact sets in $X$, hence is compact. If $\infty\not\in U_{1}\cap U_{2}$, $X\cap (U_{1}\cap U_{2})$ is open in $X$. Thus $\tau^{*}$ is closed under finite intersections.
Let $(U_{i})_{i\in I}$ be a family of elements of $\tau^{*}$. The critical case is that $\infty\in U := \bigcup_{i\in I}U_{i}$, say, $\infty\in U_{j}$. But then $X\setminus U \subseteq X\setminus U_{j}$; being a closed subset of the compact set $X\setminus U_{j}$, the set $X\setminus U$ is compact, so that $U\in\tau^{*}$. Continuity of $i_{X}$ is now immediate. \end{proof} We find $X$ in this new construction as a subspace. \BeginCorollary{x-is-a-subspace} $(X, \tau)$ is a dense subspace of $(X^{*}, \tau^{*})$. \end{corollary} \begin{proof} We have to show that $\tau =\tau^{*}\cap X$. But this is obvious from the definition of $\tau^{*}$. \end{proof} Now we can state and prove the result which has been announced above. \BeginTheorem{alexandrov-one-point} Given a Hausdorff space $X$, the one point extension $X^{*}$ is a compact space, in which $X$ is dense. If $X$ is locally compact, $X^{*}$ is a Hausdorff space. \end{theorem} \begin{proof} It remains to show that $X^{*}$ is compact, and that it is a Hausdorff space, whenever $X$ is locally compact. Let $(U_{i})_{i\in I}$ be an open cover of $X^{*}$, then $\infty\in U_{j}$ for some $j\in I$, thus $X\setminus U_{j}$ is compact and is covered by $(U_{i})_{i\in I, i\not= j}$. Select a finite subset $J\subseteq I$ such that $(U_{i})_{i\in J}$ covers $X\setminus U_{j}$, then ---~voilà~--- we have found a finite cover $(U_{i})_{i\in J\cup\{j\}}$ of $X^{*}$. Since the given space is Hausdorff, it remains to separate the new point $\infty$ from a given point $x\in X$, provided $X$ is locally compact. But take a compact neighborhood $U$ of $x$, then $X^{*}\setminus U$ is an open neighborhood of $\infty$. \end{proof} $X^{*}$ is called the \emph{\index{compactification!Alexandrov one point}Alexandrov one point compactification} of $X$. The new point is sometimes called the \emph{infinite point}. It is not difficult to show that two different one point compactifications are homeomorphic, so we may talk about \emph{the} (rather than \emph{a}) one-point compactification.
Looking at the map $i_{X}: X\to X^{*}$, which permits looking at elements of $X$ as elements of $X^{*}$, we see that $i_{X}$ is injective and has the property that $\Bild{i_{X}}{G}$ is an open set in the image $\Bild{i_{X}}{X}$ of $X$ in $X^{*}$, whenever $G\subseteq X$ is open. These properties will be used for characterizing compactifications. Let us first define embeddings, which are of interest independently of compactifications. \BeginDefinition{embedding-map} The continuous map $f: X\to Y$ between the topological spaces $X$ and $Y$ is called an \emph{\index{embedding}embedding} iff \begin{itemize} \item $f$ is injective, \item $\Bild{f}{G}$ is open in $\Bild{f}{X}$, whenever $G\subseteq X$ is open. \end{itemize} \end{definition} So if $f: X\to Y$ is an embedding, we may recover a true image of $X$ from its image $\Bild{f}{X}$, so that $f: X\to \Bild{f}{X}$ is a homeomorphism. Let us have a look at the map $[0, 1]^{N}\to [0, 1]^{M}$, which is induced by a map $f: M\to N$ for sets $M$ and $N$, and which we dealt with in Lemma~\ref{into-unitcube}. We will put this map to good use in a moment, so it is helpful to analyze it a bit more closely. \BeginExample{onto-cube-is-embedding} Let $f: M\to N$ be a surjective map. Then $f^{*}: [0, 1]^{N}\to [0, 1]^{M}$, which sends $g: N\to [0, 1]$ to $g\circ f: M\to [0, 1]$ is an embedding. We have to show that $f^{*}$ is injective, and that it maps open sets into open sets in the image. This is done in two steps: \begin{description} \item[$f^{*}$ is injective:] In fact, if $g_{1}\not= g_{2}$, we find $n\in N$ with $g_{1}(n)\not=g_{2}(n)$, and because $f$ is onto, we find $m$ with $n = f(m)$, hence $f^{*}(g_{1})(m) = g_{1}(f(m)) \not=g_{2}(f(m)) = f^{*}(g_{2})(m)$. Thus $f^{*}(g_{1})\not=f^{*}(g_{2})$ (an alternative and more general proof is proposed in~\CategCite{Proposition 1.23}).
\item[Open sets are mapped to open sets:] We know already from Lemma~\ref{into-unitcube} that $f^{*}$ is continuous, so we have to show that the image $\Bild{f^{*}}{G}$ of an open set $G\subseteq [0, 1]^{N}$ is open in the subspace $\Bild{f^{*}}{[0, 1]^{N}}$. Let $h\in \Bild{f^{*}}{G}$, hence $h = f^{*}(g)$ for some $g\in G$. $G$ is open, thus we can find a base element $H$ of the product topology with $g\in H\subseteq G$, say, $H = \bigcap_{i=1}^{k}\InvBild{\pi_{N, n_{i}}}{H_{i}}$ for some $n_{1}, \ldots, n_{k}\in N$ and some open subsets $H_{1}, \ldots, H_{k}$ in $[0, 1]$. Since $f$ is onto, $n_{1} = f(m_{1}), \ldots, n_{k} = f(m_{k})$ for some $m_{1}, \ldots, m_{k}\in M$. Since $g\in \InvBild{\pi_{N, n_{i}}}{H_{i}}$ iff $f^{*}(g)\in\InvBild{\pi_{M, m_{i}}}{H_{i}}$, we obtain \begin{equation*} h = f^{*}(g) \in \Bild{f^{*}}{\bigcap_{i=1}^{k}\InvBild{\pi_{N, n_{i}}}{H_{i}}} = \bigl(\bigcap_{i=1}^{k}\InvBild{\pi_{M, m_{i}}}{H_{i}}\bigr)\cap\Bild{f^{*}}{[0, 1]^{N}} \end{equation*} The latter set is open in the image of $[0, 1]^{N}$ under $f^{*}$, so we have shown that the image of an open set is open relative to the subset topology of the image. \end{description} These proofs will serve as patterns later on. {\Large\ding{44}} \end{example} Given an embedding, we define the compactification of a space. \BeginDefinition{compfact-space} A pair $(e, Y)$ is said to be a \emph{\index{topology!compactification}\index{compactification}compactification} of a topological space $X$ iff $Y$ is a compact topological space, and if $e: X\to Y$ is an embedding. \end{definition} The pair $(i_{X}, X^{*})$ constructed as the Alexandrov one-point compactification is a compactification in the sense of Definition~\ref{compfact-space}, provided the space $X$ is locally compact. We are about to construct another important compactification for a completely regular space $X$.
Define for $X$ the space $\beta X$ as follows\footnote{It is a bit unfortunate that there appears to be an ambiguity in notation, since we denote the basis of a topological space by $\beta$ as well. But tradition demands this compactification to be called $\beta X$, and from the context it should be clear what we have in mind.}: Let $F(X)$ be all continuous maps $X\to [0, 1]$, and map $x$ to its evaluations from $F(X)$, so construct $e_{X}: X\ni x\mapsto (f(x))_{f\in F(X)}\in[0, 1]^{F(X)}$. Then $\beta X := \Closure{(\Bild{e_{X}}{X})}$, the closure being taken in the compact space $[0, 1]^{F(X)}$. We claim that $(e_{X}, \beta X)$ is a compactification of $X$. Before delving into the proof, we note that we want to have a completely regular space, since there we have enough continuous functions, e.g., to separate points, as will become clear shortly. We will first show that this is a compactification indeed, and then investigate an interesting property of it. \BeginProposition{beta-x-is-a-compactification} $(e_{X}, \beta X)$ is a compactification of the completely regular space $X$. \end{proposition} \begin{proof} 1. We take the closure in the Hausdorff space $[0, 1]^{F(X)}$, which is compact by Tihonov's Theorem~\ref{tihonov}. Hence $\beta X$ is a compact Hausdorff space by Lemma~\ref{closed-is-compact}. 2. $e_{X}$ is continuous, because we have $\pi_{f}\circ e_{X} = f$ for $f\in F(X)$, and each $f$ is continuous. $e_{X}$ is also injective, because we can find for $x\not=x'$ a map $f\in F(X)$ such that $f(x) \not= f(x')$; this translates into $e_{X}(x) (f) \not= e_{X}(x')(f)$, hence $e_{X}(x) \not= e_{X}(x')$. 3. The image of an open set in $X$ is open in the image. In fact, let $G\subseteq X$ be open, and take $x\in G$. 
Since $X$ is completely regular, we find $f\in F(X)$ and an open set $U\subseteq[0, 1]$ with $x\in \InvBild{f}{U}\subseteq G$; this is so because the inverse images of the open sets in $[0, 1]$ under continuous functions form a basis for the topology (Proposition~\ref{char-t_3half}). But $x\in \InvBild{f}{U}\subseteq G$ is equivalent to $x\in \InvBild{(\pi_{f}\circ e_{X})}{U}\subseteq G$. Because $e_{X}: X\to \Bild{e_{X}}{X}$ is a bijection, this implies $e_{X}(x)\in\InvBild{\pi_{f}}{U}\cap \Bild{e_{X}}{X}\subseteq \Bild{e_{X}}{G}$. Hence $\Bild{e_{X}}{G}$ is open in the subspace $\Bild{e_{X}}{X}$ of $\beta X$. \end{proof} If the space we started from is already compact, then we obtain nothing new: \BeginCorollary{compact-homeom-to-beta} If $X$ is a compact Hausdorff space, $e_{X}: X\to \beta X$ is a homeomorphism. \end{corollary} \begin{proof} A compact Hausdorff space is normal, hence completely regular by Proposition~\ref{compact-is-normal} and Corollary~\ref{normal-arecompletely-reg}, so we can construct the space $\beta X$ for $X$ compact. The assertion then follows from Exercise~\ref{ex-compact-homeom}. \end{proof} This kind of compactification is important, so it deserves a distinguishing name. \BeginDefinition{stone-cech} The compactification $(e_{X}, \beta X)$ is called the \emph{Stone-$\check{\mathrm{C}}$ech \index{compactification!Stone-$\check{\mathrm{C}}$ech}compactification} of the completely regular space $X$. \end{definition} This compactification permits the extension of continuous maps in the following sense: suppose that $f: X\to Y$ is continuous with $Y$ compact, then there exists a continuous extension $\beta X\to Y$. This statement is slightly imprecise, because $f$ is not defined on $\beta X$, so we want really to extend $f\circ e_{X}^{-1}: \Bild{e_{X}}{X}\to Y$ ~---~since $e_{X}$ is a homeomorphism from $X$ onto its image, one tends to identify both spaces.
\BeginTheorem{extension-of-stone-cech} Let $(e_{X}, \beta X)$ be the Stone-$\check{\mathrm{C}}$ech compactification of the completely regular space $X$. Then, given a continuous map $f: X\to Y$ with $Y$ compact, there exists a continuous extension $f_{!}: \beta X\to Y$ of $f\circ e_{X}^{-1}$. \end{theorem} The idea of the proof is to capitalize on the compactness of the target space $Y$, because $Y$ and $\beta Y$ are homeomorphic. This means that $Y$ has a topologically identical copy in $[0, 1]^{F(Y)}$, which may be used in a suitable fashion. The proof is adapted from~\cite[p. 153]{Kelley}; Kelley calls it a ``mildly intricate calculation''. \begin{proof} 1. Define $\varphi_{f}: F(Y)\to F(X)$ through $h\mapsto h\circ f$, then this map induces a map $\varphi^{*}_{f}: [0, 1]^{F(X)}\to [0, 1]^{F(Y)}$ by sending $t: F(X)\to [0, 1]$ to $t\circ \varphi_{f}$. Then $\varphi^{*}_{f}$ is continuous according to Lemma~\ref{into-unitcube}. 2. Consider this diagram \begin{equation*} \xymatrix{ \Bild{e_{X}}{X}\ar[rr]^{\subseteq}&&[0, 1]^{F(X)}\ar[rr]^{\varphi^{*}_{f}}&&[0, 1]^{F(Y)}&& \beta Y\ar[ll]_{\supseteq}\\ X\ar[u]_{e_{X}} \ar[rrrrrr]_{f}&&&&&&Y\ar[u]^{e_{Y}} } \end{equation*} We claim that $\varphi^{*}_{f}\circ e_{X} = e_{Y}\circ f$. In fact, take $x\in X$ and $h\in F(Y)$, then \begin{align*} (\varphi_{f}^{*}\circ e_{X})(x)(h) & = (e_{X}(x)\circ \varphi_{f})(h) \\ & = e_{X}(x)(h\circ f) \\ & = (h\circ f)(x) \\ & = e_{Y}(f(x))(h)\\ & = (e_{Y}\circ f)(x)(h). \end{align*} 3. Because $Y$ is compact, $e_{Y}$ is a homeomorphism by Exercise~\ref{ex-compact-homeom}, and since $\varphi^{*}_{f}$ is continuous, we have \begin{equation*} \Bild{\varphi_{f}^{*}}{\beta X} = \Bild{\varphi_{f}^{*}}{\Closure{\Bild{e_{X}}{X}}} \subseteq \Closure{\bigl(\Bild{\varphi_{f}^{*}}{\Bild{e_{X}}{X}}\bigr)} \subseteq \beta Y. \end{equation*} Thus $e_{Y}^{-1}\circ \varphi_{f}^{*}$ is a continuous extension of $f\circ e_{X}^{-1}$.
\end{proof} It is immediate from Theorem~\ref{extension-of-stone-cech} that a Stone-$\check{\mathrm{C}}$ech compactification is uniquely determined, up to homeomorphism. This justifies the probably a bit prematurely used characterization as \emph{the} Stone-$\check{\mathrm{C}}$ech compactification above. Baire's Theorem, which we will establish now, states a property of locally compact spaces which has a surprising range of applications~---~it states that the intersection of a sequence of dense open sets in a locally compact $T_{3}$-space is dense again. This applies of course to compact Hausdorff spaces as well. The theorem has a counterpart for complete pseudometric spaces, as we will see below. For stating and proving the theorem we lift the assumption of working in a Hausdorff space, because it is really not necessary here. \BeginTheorem{baire-locally-compact} Let $X$ be a locally compact $T_{3}$-space. Then the intersection of a sequence of dense open sets is dense.\index{theorem!Baire!locally compact} \end{theorem} \begin{proof} Let $\Folge{D}$ be a sequence of dense open sets. Fix a non-empty open set $G$, then we have to show that $G\cap\bigcap_{n\in\mathbb{N}}D_{n}\not=\emptyset$. Now $D_{1}$ is dense and open, hence we find an open set $V_{1}$ such that $\Closure{V}_{1}$ is compact and $\Closure{V}_{1}\subseteq D_{1}\cap G$ by Proposition~\ref{char-t3-ngbh}, since $X$ is a $T_{3}$-space. We select inductively in this way a sequence of open sets $\Folge{V}$ with compact closure such that $\Closure{V}_{n+1}\subseteq D_{n}\cap V_{n}$. This is possible since $D_{n}$ is open and dense for each $n\in\mathbb{N}$. Hence we have a decreasing sequence $\Closure{V}_{2} \supseteq \ldots \supseteq \Closure{V}_{n} \supseteq \ldots$ of closed sets in the compact set $\Closure{V}_{1}$, thus $\bigcap_{n\in\mathbb{N}}\Closure{V}_{n} = \bigcap_{n\in\mathbb{N}}V_{n}$ is not empty, which entails $G\cap\bigcap_{n\in\mathbb{N}}D_{n}$ not being empty.
\end{proof} Just for the record: \BeginCorollary{baire-compact} The intersection of a sequence of dense open sets in a compact Hausdorff space is dense. \end{corollary} \begin{proof} A compact Hausdorff space is normal by Proposition~\ref{compact-is-normal}, hence regular by Proposition~\ref{char-t4-ngbh}, thus the assertion follows from Theorem~\ref{baire-locally-compact}. \end{proof} We give an example from Boolean algebras. \BeginExample{bool-alg-dense} Let $B$ be a Boolean algebra with $\wp_{B}$ as the set of all prime ideals. Let $X_{a} := \{I\in \wp_{B}\mid a\not\in I\}$ be all prime ideals which do not contain a given element $a\in B$, then $\{X_{a}\mid a\in B\}$ is the basis for a compact Hausdorff topology on $\wp_{B}$, and $a\mapsto X_{a}$ is a Boolean algebra isomorphism, see~\SetCite{Example 1.98}. Assume that we have a countable family $S = \{a_{k}\mid k\in K\}$ of elements of $B$ with $a = \sup\ S\in B$, then we say that the prime ideal $I$ \emph{preserves the supremum} of $S$ iff $\Klasse{a}{\sim_{I}} = \sup_{s\in S}\ \Klasse{s}{\sim_{I}}$ holds. Here $\sim_{I}$ is the equivalence relation induced by $I$, i.e., $\isEquiv{b}{b'}{\sim_{I}} \Leftrightarrow b\mathbin{\Delta} b'\in I$ with $\Delta$ as the symmetric difference in $B$ (\SetCite{Sect. 1.5.7}). We claim that the set $R$ of all prime ideals, which do \emph{not} preserve the supremum of this family, is closed and has an empty interior. Well, $R = X_{a}\setminus \bigcup_{k\in K}X_{a_{k}}$. Because the sets $X_{a}$ and $X_{a_{k}}$ are clopen, $R$ is closed. Assume that the interior of $R$ is not empty, then we find $b\in B$ with $X_{b}\subseteq R$, so that $X_{a_{k}}\subseteq X_{a}\setminus X_{b} = X_{a\wedge-b}$ for all $k\in K$. Since $a\mapsto X_{a}$ is an isomorphism, this means $a_{k}\leq a\wedge-b$ for all $k\in K$, hence $\sup_{k\in K}\ a_{k}\leq a\wedge-b$, thus $a = a\wedge -b$, hence $a\leq -b$. But then $X_{b} \subseteq X_{a}\subseteq X_{-b}$, which is certainly a contradiction.
Consequently, the set of all prime ideals preserving this particular supremum is open and dense in $\wp_{B}$. Suppose we are given for each $n\in\mathbb{N}$ a family $S_{n}\subseteq B$ and $a_{0}\in B$ such that \begin{itemize} \item $a_{0}\not=\top$, the maximal element of $B$, \item $a_{n} := \sup_{s\in S_{n}} s$ is an element of $B$ for each $n\in\mathbb{N}$. \end{itemize} Then we claim that there exists a prime ideal $I$ which contains $a_{0}$ and which preserves all the suprema of $S_{n}$ for $n\in\mathbb{N}$. Let $P$ be the set of all prime ideals which preserve all the suprema of the families above, then \begin{equation*} P = \bigcap_{n\in\mathbb{N}}P_{n}, \end{equation*} where $P_{n}$ is the set of all prime ideals which preserve the supremum $a_{n}$, which is dense and open by the discussion above. Hence $P$ is dense by Baire's Theorem (Corollary~\ref{baire-compact}). Since $X_{-a_{0}} = \wp_{B}\setminus X_{a_{0}}$ is open and not empty, we infer that $P\cap X_{-a_{0}}$ is not empty, because $P$ is dense. Thus we can select an arbitrary prime ideal from this set. {\Large\ding{44}} \end{example} This example, which is taken from~\cite[Sect. 5]{Rasiowa-Sikorski-I}, will help in establishing Gödel's Completeness Theorem, see Section~\ref{sec:goedel}. The approach is typical for an application of Baire's Theorem~---~it is used to show that a set $P$, which is obtained from an intersection of countably many open and dense sets in a compact space, is dense, and that the object of one's desire is a member of $P$ intersecting an open set, hence this object must exist. Having been carried away by Baire's Theorem, let us make some general remarks. We have seen that local compactness is a somewhat weaker property than compactness.
Other notions of compactness have been studied; an incomplete list for Hausdorff space $X$ includes \begin{description} \item[countably compact:] $X$ is called \emph{\index{compact!countably compact}countably compact} iff each countable open cover contains a finite subcover. \item[Lindelöf space:] $X$ is a \emph{\index{compact!Lindelöf space}Lindelöf space} iff each open cover contains a countable subcover. \item[paracompactness:] $X$ is said to be \emph{\index{compact!paracompact}paracompact} iff each open cover has a locally finite refinement. This explains it: \begin{itemize} \item An open cover ${\mathcal B}$ is a \emph{\index{compact!paracompact!refinement}refinement} of an open cover ${\mathcal A}$ iff each member of ${\mathcal B}$ is the subset of a member of ${\mathcal A}$. \item An open cover ${\mathcal A}$ is called \emph{\index{compact!paracompact!locally finite}locally finite} iff each point has a neighborhood which intersects a finite number of elements of ${\mathcal A}$. \end{itemize} \item[sequentially compact:] $X$ is called \emph{\index{compact!sequentially compact}sequentially compact} iff each sequence has a convergent subsequence (we will deal with this when discussing compact pseudometric spaces, see Proposition~\ref{seq-comp-equiv-comp}). \end{description} The reader is referred to~\cite[Chapter 3]{Engelking} for a penetrating study. \subsection{Pseudometric and Metric Spaces} \label{sec:metric-and-pseudometric} We turn to a class of spaces now in which we can determine the distance between any two points. This gives rise to a topology, declaring a set as open iff we can construct for each of its points an open ball which is entirely contained in this set. It is clear that this defines a topology, and it is also clear that having such a metric gives the space some special properties, which are not shared by general topological spaces. 
It also adds a sense of visual clearness, since an open ball is conceptually easier to visualize that an abstract open set. We will study the topological properties of these spaces now, starting with pseudometrics, with which we may measure the distance between two objects, but if the distance is zero, we cannot necessarily conclude that the objects are identical. This is a situation which occurs quite frequently when modelling an application, so it is sometimes more adequate to deal with pseudometric rather than metric spaces. \BeginDefinition{def-pseudometric} A map $d: X\times X\to \mathbb{R}_{+}$ is called a \emph{\index{pseudometric}pseudometric on $X$} iff these conditions hold \begin{description} \item[identity:] $d(x, x) = 0$ for all $x\in X$. \item[symmetry:] $d(x, y) = d(y, x)$ for all $x, y\in X$, \item[triangle inequality:] $d(x, y) \leq d(x, z) + d(z, y)$ for all $x, y, z\in X$. \end{description} Then $(X, d)$ is called a \emph{\index{space!pseudometric}pseudometric space}. If, in addition, we have \begin{equation*} d(x, y) = 0 \Leftrightarrow x = y, \end{equation*} then $d$ is called a \emph{metric on $X$}; accordingly, $(X, d)$ is called a \emph{\index{space!metric}metric space}. \end{definition} The non-negative real number $d(x, y)$ is called the distance of the elements $x$ and $y$ in a pseudometric space $(X, d)$. It is clear that one wants to have that each point does have distance $0$ to itself, and that the distance between two points is determined in a symmetric fashion. The triangle inequality is intuitively clear as well: \begin{equation*} \xymatrix{ x\ar[drr]\ar[rrrr] &&&& y\\ &&z\ar[rru] } \end{equation*} Before proceeding, let us have a look at some examples. Some of them will be discussed later on in greater detail. \BeginExample{for-metric-spaces} \begin{enumerate} \item Define for $x, y\in \mathbb{R}$ the distance as $|x - y|$, hence as the absolute value of their difference. Then this defines a metric. 
Define, similarly, \begin{equation*} d(x, y) := \frac{|x-y|}{1+|x-y|}, \end{equation*} then $d$ defines also a metric on $\mathbb{R}$ (the triangle inequality follows from the observation that $a\leq b \Leftrightarrow a/(1+a)\leq b/(1+b)$ holds for non-negative numbers $a$ and $b$). \item Given $x, y\in \mathbb{R}^{n}$ for $n\in \mathbb{N}$, then \begin{align*} d_{1}(x, y) & := \max_{1\leq i \leq n}|x_{i}-y_{i}|,\\ d_{2}(x, y) & := \sum_{i=1}^{n}|x_{i}-y_{i}|,\\ d_{3}(x, y) & := \sqrt{\sum_{i=1}^{n}(x_{i}-y_{i})^{2}} \end{align*} all define metrics on $\mathbb{R}^{n}$. Metric $d_{1}$ measures the maximal distance between the components, $d_{2}$ gives the sum of the distances, and $d_{3}$ yields the Euclidean, i.e., the geometric, distance of the given points. The crucial property to be established is in each case the triangle inequality. It follows for $d_{1}$ and $d_{2}$ from the triangle inequality for the absolute value, and for $d_{3}$ by direct computation. \item Given a set $X$, define \begin{equation*} d(x, y) := \begin{cases} 0, & \text{ if } x = y\\ 1, & \text{ otherwise} \end{cases} \end{equation*} Then $(X, d)$ is a metric space, $d$ is called the \emph{\index{metric!discrete}discrete metric}. Different points are assigned the distance $1$, while each point has distance $0$ to itself. \item Let $X$ be a set, ${\mathcal B}(X)$ be the set of all bounded maps $X\to \mathbb{R}$. Define \begin{equation*} d(f, g) := \sup_{x\in X}|f(x) - g(x)|. \end{equation*} Then $({\mathcal B}(X), d)$ is a metric space; the distance between functions $f$ and $g$ is just their maximal difference. \item Similarly, given a set $X$, take a set ${\mathcal F}\subseteq{\mathcal B}(X)$ of bounded real valued functions as a set of evaluations and determine the distance of two points in terms of their evaluations: \begin{equation*} e(x, y) := \sup_{f\in {\mathcal F}}|f(x) - f(y)|.
\end{equation*} So two points are similar if their evaluations in terms of all elements of ${\mathcal F}$ are close. This is a pseudometric on $X$, which is not a metric if ${\mathcal F}$ does not separate points. \item Denote by $\Cont[[0, 1]]$ the set of all continuous real valued functions $[0, 1]\to \mathbb{R}$, and measure the distance between $f, g\in \Cont[[0, 1]]$ through \begin{equation*} d(f, g) := \sup_{0\leq x \leq 1}\ |f(x) - g(x)|. \end{equation*} Because a continuous function on a compact space is bounded, $d(f, g)$ is always finite, and since for each $x\in[0, 1]$ the inequality $|f(x) -g(x)| \leq |f(x) - h(x)| + |h(x)-g(x)|$ holds, the triangle inequality is satisfied. Then $(\Cont[[0, 1]], d)$ is a metric space, because $\Cont[[0, 1]]$ separates points. \item Define for the Borel sets $\Borel{[0, 1]}$ on the unit interval this distance: \begin{equation*} d(A, B) := \lambda(A\Delta B) \end{equation*} with $\lambda$ as Lebesgue measure. Then $\lambda(A\Delta B) = \lambda((A\Delta C)\Delta(C\Delta B)) \leq \lambda(A\Delta C) + \lambda(C\Delta B)$ implies the triangle inequality, so that $(\Borel{[0, 1]}, d)$ is a pseudometric space. It is not a metric space, however, because $\lambda(\mathbb{Q}\cap[0, 1]) = 0$, hence $d(\emptyset, \mathbb{Q}\cap[0, 1]) = 0$, but the latter set is not empty. \item Given a non-empty set $X$ and a ranking function $r: X\to \mathbb{N}$, define the closeness $c(A, B)$ of two subsets $A, B$ of $X$ as \begin{equation*} c(A, B) := \begin{cases} +\infty, & \text{ if } A = B,\\ \inf\ \{r(w)\mid w\in A\Delta B\}, & \text{ otherwise} \end{cases} \end{equation*} If $w\in A\Delta B$, then $w$ can be interpreted as a witness that $A$ and $B$ are different, and the closeness of $A$ and $B$ is just the minimal rank of a witness. We observe these properties: \begin{itemize} \item $c(A, A) = +\infty$, and $c(A, B) = +\infty$ iff $A = B$ (because $A = B$ iff $A\Delta B =\emptyset$).
\item $c(A, B) = c(B, A)$, \item $c(A, C) \geq \min\ \{c(A, B), c(B, C)\}$. If $A = C$, this is obvious; assume otherwise that $b\in A\Delta C$ is a witness of minimal rank. Since $A\Delta C = (A\Delta B)\Delta (B\Delta C)$, $b$ must be either in $A\Delta B$ or $B\Delta C$, so that $r(b) \geq c(A, B)$ or $r(b) \geq c(B, C)$. \end{itemize} Now put $d(A, B) := 2^{-c(A, B)}$ (with $2^{-\infty} := 0$). Then $d$ is a metric on $\PowerSet{X}$. This metric satisfies even $d(A, B) \leq \max\ \{d(A, C), d(B, C)\}$ for an arbitrary $C$, hence $d$ is an \emph{\index{metric!ultrametric}\index{ultrametric}ultrametric}. \item A similar construction is possible with a decreasing sequence of equivalence relations on a set $X$. In fact, let $(\rho_{n})_{n\in\mathbb{N}}$ be such a sequence, and put $\rho_{0} := X\times X$. Define \begin{equation*} c(x, y) := \begin{cases} +\infty, &\text{ if }\langle x, y\rangle\in\bigcap_{n\in\mathbb{N}}\rho_{n}\\ \max\ \{n\in\mathbb{N}\mid \langle x, y\rangle\in\rho_{n}\}, & \text{ otherwise} \end{cases} \end{equation*} Then it is immediate that $c(x, y) \geq \min\ \{c(x, z), c(z, y)\}$. Intuitively, $c(x, y)$ gives the degree of similarity of $x$ and $y$ --- the larger this value, the more similar $x$ and $y$ are. Then \begin{equation*} d(x, y) := \begin{cases} 0, & \text{ if }c(x, y) = \infty\\ 2^{-c(x, y)}, & \text{ otherwise} \end{cases} \end{equation*} defines a pseudometric. $d$ is a metric iff $\bigcap_{n\in\mathbb{N}}\rho_{n} = \{\langle x, x\rangle\mid x \in X\}$. \end{enumerate} {\Large\ding{44}} \end{example} Given a pseudometric space $(X, d)$, define for $x\in X$ and $r>0$ the \emph{\index{$B(x, r)$}open ball $B(x, r)$}\MMP{$B(x, r)$} with center $x$ and radius $r$ as \begin{equation*} B(x, r) := \{y\in X\mid d(x, y) < r\}. \end{equation*} The \emph{\index{$S(x, r)$}closed ball $S(x, r)$} is defined similarly as \begin{equation*} S(x, r) := \{ y\in X \mid d(x, y)\leq r\}.
\end{equation*} If necessary, we indicate the pseudometric explicitly with $B$ and $S$. Note that $B(x, r)$ is open, and $S(x, r)$ is closed, but that the closure $\Closure{B(x, r)}$ of $B(x, r)$ may be properly contained in the closed ball $S(x, r)$ (let $d$ be the discrete metric, then $B(x, 1) = \{x\} = \Closure{B(x, 1)}$, but $S(x, 1) = X$, so both closed sets do not coincide if $X$ has more than one point). Call $G\subseteq X$ open iff we can find for each $x\in G$ some $r>0$ such that $B(x, r)\subseteq G$. Then this defines the \emph{pseudometric topology} on $X$. It has the set $\beta := \{B(x, r) \mid x\in X, r > 0\}$ of open balls as a basis. Let us have a look at the properties a base is supposed to have. Assume that $x\in B(x_{1}, r_{1})\cap B(x_{2}, r_{2})$, and select $r$ with $0 < r < \min\{r_{1}-d(x, x_{1}), r_{2}-d(x, x_{2})\}$. Then $B(x, r)\subseteq B(x_{1}, r_{1})\cap B(x_{2}, r_{2})$, because we have for $z\in B(x, r)$ \begin{equation} \label{triang-equ} d(z, x_{1}) \leq d(z, x) + d(x, x_{1}) < r + d(x, x_{1}) \leq (r_{1}-d(x, x_{1})) + d(x, x_{1}) = r_{1}, \end{equation} by the triangle inequality; similarly, $d(z, x_{2}) < r_{2}$. Thus it follows from Proposition~\ref{when-is-a-base} that $\beta$ is in fact a base. Call two pseudometrics on $X$ \emph{\index{pseudometrics!equivalent}equivalent} iff they generate the same topology. An equivalent formulation goes like this. Let $\tau_{i}$ be the topologies generated from pseudometrics $d_{i}$ for $i = 1, 2$, then $d_{1}$ and $d_{2}$ are equivalent iff the identity $(X, \tau_{1})\to (X, \tau_{2})$ is a homeomorphism. These are two common methods to construct equivalent pseudometrics. \BeginLemma{gen-equiv-pseudo-metrics} Let $(X, d)$ be a pseudometric space. Then \begin{align*} d_{1}(x, y) & := \min\{d(x, y), 1\},\\ d_{2}(x, y) & := \frac{d(x, y)}{1 + d(x, y)} \end{align*} both define pseudometrics which are equivalent to $d$.
\end{lemma} \begin{proof} It is clear that both $d_{1}$ and $d_{2}$ are pseudometrics (for $d_{2}$, compare Example~\ref{for-metric-spaces}). Let $\tau, \tau_{1}, \tau_{2}$ be the respective topologies, then it is immediate that $(X, \tau)$ and $(X, \tau_{1})$ are homeomorphic. Since $d_{2}(x, y) < r$ iff $d(x, y) < r/(1-r)$, provided $0 < r < 1$, we obtain also that $(X, \tau)$ and $(X, \tau_{2})$ are homeomorphic. \end{proof} These pseudometrics have the advantage that they are bounded, which is sometimes quite practical for establishing topological properties. Just as a point in case: \BeginProposition{countable-product-is pseudo} Let $(X_{n}, d_{n})$ be a pseudometric space with associated topology $\tau_{n}$. Then the topological product $\prod_{n\in\mathbb{N}}(X_{n}, \tau_{n})$ is a pseudometric space again. \end{proposition} \begin{proof} 1. We may assume that each $d_{n}$ is bounded by $1$, otherwise we select an equivalent pseudometric with this property (Lemma~\ref{gen-equiv-pseudo-metrics}). Put \begin{equation*} d\bigl((x_{n})_{n\in\mathbb{N}}, (y_{n})_{n\in\mathbb{N}}\bigr) := \sum_{n\in\mathbb{N}}2^{-n}\cdot d_{n}(x_{n}, y_{n}). \end{equation*} We claim that the product topology is the topology induced by the pseudometric $d$ (it is obvious that $d$ is one). 2. Let $G_{i}\subseteq X_{i}$ open for $1\leq i\leq k$, and assume that $x\in G := G_{1}\times \ldots\times G_{k}\times\prod_{n>k}X_{n}$. We can find for $x_{i}\in G_{i}$ some positive $r_{i}$ with $B_{d_{i}}(x_{i}, r_{i})\subseteq G_{i}$. Put $r := \min\{2^{-1}\cdot r_{1}, \ldots, 2^{-k}\cdot r_{k}\}$; then $B_{d}(x, r) \subseteq G$, because $d(x, y) < r$ implies $d_{i}(x_{i}, y_{i}) \leq 2^{i}\cdot d(x, y) < r_{i}$ for $1\leq i\leq k$. This implies that each element of the base for the product topology is open with respect to $d$. 3. Given the sequence $x$ and $r> 0$, take $y\in B_{d}(x, r)$. Put $t := r - d(x, y) > 0$. Select $m \in \mathbb{N}$ with $\sum_{n>m}2^{-n}< t/2$, and let $G_{n} := B_{d_{n}}(y_{n}, t/2)$ for $n\leq m$.
If $z\in U := G_{1}\times \ldots\times G_{m}\times\prod_{k>m}X_{k}$, then \begin{align*} d(x, z) & \leq d(x, y) + d(y, z)\\ & \leq r - t + \sum_{n=1}^{m}2^{-n}d_{n}(y_{n}, z_{n}) + \sum_{n>m}2^{-n}\\ & < r - t + t/2 + t/2\\ & = r, \end{align*} so that $U\subseteq B_{d}(x, r)$. Thus each open ball is open in the product topology. \end{proof} One sees immediately that the pseudometric $d$ constructed above is a metric, provided each $d_{n}$ is one. Thus \BeginCorollary{countable-metric-is metric} The countable product of metric spaces is a metric space in the product topology. \QED \end{corollary} One expects that each pseudometric space can be made a metric space by identifying those elements which cannot be separated by the pseudometric. Let's try: \BeginProposition{pseudo-to-metric} Let $(X, d)$ be a pseudometric space, and define $\isEquiv{x}{y}{\sim}$ iff $d(x, y) = 0$ for $x, y\in X$. Then the factor space $\Faktor{X}{\sim}$ is a metric space with metric $D(\Klasse{x}{\sim}, \Klasse{y}{\sim}) := d(x, y)$. \end{proposition} \begin{proof} 1. Because $d(x, x') = 0$ and $d(y, y') = 0$ implies $d(x, y) = d(x', y')$, $D$ is well-defined, and it is clear that it has all the properties of a pseudometric. $D$ is also a metric, since $D(\Klasse{x}{\sim}, \Klasse{y}{\sim}) = 0$ is equivalent to $d(x, y) = 0$, hence to $\isEquiv{x}{y}{\sim}$, thus to $\Klasse{x}{\sim} = \Klasse{y}{\sim}$. 2. The metric topology is the final topology with respect to the factor map $\fMap{\sim}$. To establish this, take a map $f: \Faktor{X}{\sim}\to Y$ with a topological space $Y$. Assume that $\InvBild{(f\circ \fMap{\sim})}{G}$ is open for $G\subseteq Y$ open. If $\Klasse{x}{\sim}\in \InvBild{f}{G}$, we have $x\in\InvBild{(f\circ \fMap{\sim})}{G}$, thus there exists $r>0$ with $B_{d}(x, r)\subseteq \InvBild{\fMap{\sim}}{\InvBild{f}{G}}$. But this means that $B_{D}(\Klasse{x}{\sim}, r)\subseteq \InvBild{f}{G}$, so that the latter set is open.
Thus if $f\circ \fMap{\sim}$ is continuous, $f$ is. The converse is established in the same way. This implies that the metric topology is final with respect to the factor map $\fMap{\sim}$, cp. Proposition~\ref{initial-final}. \end{proof} We want to show that a pseudometric space satisfies the $T_{4}$-axiom (hence that a metric space is normal). So we take two disjoint closed sets and need to produce two disjoint open sets, each containing one of the closed sets. The following construction is helpful. \BeginLemma{distance-is-continuous} Let $(X, d)$ be a pseudometric space\MMP{$d(x, A)$}. Define the distance of point $x\in X$ to $\emptyset\not=A\subseteq X$ through \begin{equation*} d(x, A) := \inf_{y\in A}d(x, y). \end{equation*} Then $d(\cdot, A)$ is continuous. \end{lemma} \begin{proof} Let $x, z\in X$, and $y\in A$, then $d(x, y) \leq d(x, z) + d(z, y)$. Now take lower bounds, then $d(x, A) \leq d(x, z) + d(z, A)$. This yields $d(x, A) - d(z, A) \leq d(x, z)$. Interchanging the r\^oles of $x$ and $z$ yields $d(z, A) - d(x, A) \leq d(z, x)$, thus $|d(x, A) - d(z, A)|\leq d(x, z)$. This implies continuity of $d(\cdot, A)$. \end{proof} Given a closed set $A\subseteq X$, we find that $A = \{x\in X\mid d(x, A) = 0\}$; we can say a bit more: \BeginCorollary{closure-through-distance} Let $X, A$ be as above, then $\Closure{A} = \{x\in X\mid d(x, A) = 0\}$. \end{corollary} \begin{proof} Since $\{x\in X\mid d(x, A) = 0\}$ is closed, we infer that $\Closure{A}$ is contained in this set. If, on the other hand, $x\not\in \Closure{A}$, we find $r>0$ such that $B(x, r)\cap A=\emptyset$, hence $d(x, A) \geq r$. Thus the other inclusion holds as well. \end{proof} Armed with this observation, we can establish now \BeginProposition{pseudo-metric-is t4} A pseudometric space $(X, d)$ is a $T_{4}$-space. \end{proposition} \begin{proof} Let $F_{1}$ and $F_{2}$ be disjoint closed subsets of $X$.
Define \begin{equation*} f(x) := \frac{d(x, F_{1})}{d(x, F_{1}) + d(x, F_{2})}, \end{equation*} then Lemma~\ref{distance-is-continuous} shows that $f$ is continuous, and Corollary~\ref{closure-through-distance} indicates that the denominator will not vanish, since $F_{1}$ and $F_{2}$ are disjoint. It is immediate that $F_{1}$ is contained in the open set $\{x\mid f(x) < 1/2\}$, that $F_{2}\subseteq \{x\mid f(x) > 1/2\}$, and that these open sets are disjoint. \end{proof} Note that a pseudometric $T_{1}$-space is already a metric space (Exercise~\ref{ex-pseudo-t1-is-metric}). Define for $r>0$ the $r$-neighborhood $A^{r}$ of set $A\subseteq X$ as\MMP{$A^{r}$} \begin{equation*} A^{r} := \{x\in X\mid d(x, A) < r\}. \end{equation*} This makes of course only sense if $d(x, A)$ is finite. Using the triangle inequality, one calculates $(A^{r})^{s}\subseteq A^{r+s}$. This observation will be helpful when we look at the next example. \BeginExample{vietoris-pseudometric} Let $(X, d)$ be a pseudometric space, and let \begin{equation*} \mathfrak{C}(X) := \{C\subseteq X\mid C\text{ is compact and not empty}\} \end{equation*} be the set of all compact and not empty subsets of $X$. Define \begin{equation*} \delta_{H}(C, D) := \max\ \{\max_{x\in C}\ d(x, D), \max_{x\in D}\ d(x, C)\} \end{equation*} for $C, D\in\mathfrak{C}(X)$\MMP{$\delta_{H}$}. We claim that $\delta_{H}$ is a pseudometric on $\mathfrak{C}(X)$, which is a metric if $d$ is a metric on $X$. One notes first that \begin{equation*} \delta_{H}(C, D) = \inf\ \{r > 0 \mid C\subseteq D^{r}, D\subseteq C^{r}\}. \end{equation*} This follows easily from $C\subseteq D^{r}$ iff $\max_{x\in C}\ d(x, D) < r$. Hence we obtain that $\delta_{H}(C, D) \leq r$ and $\delta_{H}(D, E) \leq s$ together imply $\delta_{H}(C, E) \leq r + s$, which implies the triangle inequality. The other laws for a pseudometric are obvious. $\delta_{H}$ is called the \emph{\index{metric!Hausdorff}Hausdorff pseudometric}. 
Now assume that $d$ is a metric, and assume $\delta_{H}(C, D) = 0$. Thus $C\subseteq \bigcap_{n\in\mathbb{N}}D^{1/n}$ and $D\subseteq \bigcap_{n\in\mathbb{N}}C^{1/n}$. Because $C$ and $D$ are closed, and $d$ is a metric, we obtain $C = D$, thus $\delta_{H}$ is a metric, which is accordingly called the \emph{Hausdorff metric}. {\Large\ding{44}} \end{example} Let us take a magnifying glass and have a look at what happens locally in a point of a pseudometric space. Given $U\in\ensuremath{{\mathfrak U}}(x)$, we find an open ball $B(x, r)$ which is contained in $U$, hence we find even a rational number $q$ with $B(x, q)\subseteq B(x, r)$. But this means that the open balls with rational radii form a basis for the neighborhood filter of $x$. This is sometimes also the case in more general topological spaces, so we define this and two related properties for topological rather than pseudometric spaces. \BeginDefinition{space-separable} A topological space \begin{enumerate} \item satisfies the \emph{first axiom of countability} (and the space is called in this case \emph{\index{topology!first countable}first countable}) iff the neighborhood filter of each point has a countable base of open sets, \item satisfies the \emph{second axiom of countability} (the space is called in this case \emph{\index{topology!second countable}second countable}) iff the topology has a countable base, \item is \emph{\index{topology!separable}separable} iff it has a countable dense subset. \end{enumerate} \end{definition} The standard example for a separable topological space is of course $\mathbb{R}$, where the rational numbers $\mathbb{Q}$ form a countable dense subset. This is a trivial consequence of the observation just made. \BeginProposition{pseudo-is-first-count} A pseudometric space is first countable. \QED \end{proposition} In a pseudometric space separability and satisfying the second axiom of countability coincide, as the following observation shows. 
\BeginProposition{separable-iff-2ndcountable} A pseudometric space $(X, d)$ is second countable iff it has a countable dense subset. \end{proposition} \begin{proof} 1. Let $D$ be a countable dense subset, then \begin{equation*} \beta := \{B(x, r)\mid x\in D, 0<r\in\mathbb{Q}\} \end{equation*} is a countable base for the topology. For, given $x\in U$ with $U\subseteq X$ open, there exists $s>0$ with $B(x, s)\subseteq U$; since $D$ is dense, we find $y\in D$ with $d(x, y) < s/2$, and a rational $r$ with $d(x, y) < r < s/2$, so that $x\in B(y, r)\subseteq B(x, s)\subseteq U$. On the other hand, one shows exactly as in the argumentation leading to Eq.~(\ref{triang-equ}) on page~\pageref{triang-equ} that $\beta$ is a base. 2. Assume that $\beta$ is a countable base for the topology, pick from each $B\in\beta$ an element $x_{B}$. Then $\{x_{B}\mid B\in\beta\}$ is dense: given an open $U$, we find $B\in \beta$ with $B\subseteq U$, hence $x_{B}\in U$. This argument does not require $X$ being a pseudometric space (but the Axiom of Choice). \end{proof} We know from Exercise~\ref{ex-closure-filter} that a point $x$ in a topological space is in the closure of a set $A$ iff there exists a filter $\ensuremath{{\filterFont F}}$ with $i_{A}(\ensuremath{{\filterFont F}})\to x$ with $i_{A}$ as the injection $A\to X$. In a first countable space, in particular in a pseudometric space, we can work with sequences rather than filters, which is sometimes more convenient. \BeginProposition{sequences-are-enough} Let $X$ be a first countable topological space, $A\subseteq X$. Then $x\in\Closure{A}$ iff there exists a sequence $(x_{n})_{n\in\mathbb{N}}$ in $A$ with $x_{n}\to x$. \end{proposition} \begin{proof} If there exists a sequence $(x_{n})_{n\in\mathbb{N}}$ which converges to $x$ such that $x_{n}\in A$ for all $n\in\mathbb{N}$, then the corresponding filter converges to $x$, so we have to establish the converse statement.
Now let $(U_{n})_{n\in\mathbb{N}}$ be the basis of the neighborhood filter of $x\in\Closure{A}$, and $\ensuremath{{\filterFont F}}$ be a filter with $i_{A}(\ensuremath{{\filterFont F}})\to x$. Put $V_{n} := U_{1}\cap\ldots\cap U_{n}$, then $V_{n}\cap A \in i_{A}(\ensuremath{{\filterFont F}})$. The sequence $(V_{n})_{n\in\mathbb{N}}$ decreases, and forms a basis for the neighborhood filter of $x$. Pick from each $V_{n}$ an element $x_{n}\in A$, and take a neighborhood $U\in\ensuremath{{\mathfrak U}}(x)$. Since there exists $n$ with $V_{n}\subseteq U$, we infer that $x_{m}\in U$ for all $m\geq n$, hence $x_{n}\to x$. \end{proof} A second countable normal space $X$ permits the following remarkable construction. Let $\beta$ be a countable base for $X$, and define ${\mathcal A} := \{\langle U, V\rangle\mid U, V\in \beta, \Closure{U}\subseteq V\}$. Then ${\mathcal A}$ is countable as well, and we can find for each pair $\langle U, V\rangle\in {\mathcal A}$ a continuous map $f: X\to [0, 1]$ with $f(x) = 0$ for all $x\in U$ and $f(x) = 1$ for all $x\in X\setminus V$. This is a consequence of Urysohn's Lemma (Theorem~\ref{urysohns-lemma}). The collection ${\mathcal F}$ of all these functions is countable, because ${\mathcal A}$ is countable. Now define the embedding map \begin{equation*} e: \begin{cases} X & \to [0, 1]^{{\mathcal F}}\\ x & \mapsto (f(x))_{f\in {\mathcal F}} \end{cases} \end{equation*} We endow the space $[0, 1]^{{\mathcal F}}$ with the product topology, i.e., with the initial topology with respect to all projections $\pi_{f}: x \mapsto f(x)$. Then we observe these properties \begin{enumerate} \item The map $e$ is continuous. This is so because $\pi_{f}\circ e = f$, and $f$ is continuous, hence we may infer continuity from Proposition~\ref{initial-final}. \item The map $e$ is injective. This follows from Urysohn's Lemma (Theorem~\ref{urysohns-lemma}), since two distinct points constitute two disjoint closed sets.
\item If $G\subseteq X$ is open, $\Bild{e}{G}$ is open in $\Bild{e}{X}$. In fact, let $e(x)\in \Bild{e}{G}$. We find an open neighborhood $H$ of $e(x)$ in $[0, 1]^{{\mathcal F}}$ such that $\Bild{e}{X}\cap H\subseteq \Bild{e}{G}$ in the following way: we infer from the construction that we can find a map $f\in {\mathcal F}$ such that $f(x) = 0$ and $f(y) = 1$ for all $y\in X\setminus G$, hence $f(x)\not\in\Closure{\Bild{f}{X\setminus G}}$; hence the set $H := \{y\in [0, 1]^{{\mathcal F}}\mid y_{f}\not\in\Bild{f}{X\setminus G}\}$ is open in $[0, 1]^{{\mathcal F}}$, and $H\cap \Bild{e}{X}$ is contained in $\Bild{e}{G}$. \item $[0, 1]^{{\mathcal F}}$ is a metric space by Corollary~\ref{countable-metric-is metric}, because the unit interval $[0, 1]$ is a metric space, and because ${\mathcal F}$ is countable. \end{enumerate} Summarizing, $X$ is homeomorphic to a subspace of $[0, 1]^{{\mathcal F}}$. This is what \emph{\index{theorem!Urysohn's Metrization}Urysohn's Metrization Theorem} says. \BeginProposition{normal-embed-metric} A second countable normal topological space is metrizable. \QED. \end{proposition} The problem of metrization of topological spaces is non-trivial, as one can see from Proposition~\ref{normal-embed-metric}. The reader who wants to learn more about it may wish to consult Kelley's textbook~\cite[p. 124 f]{Kelley} or Engelking's treatise~\cite[4.5, 5.4]{Engelking}. \subsubsection{Completeness} \label{sec:completeness} Fix in this section a pseudometric space $(X, d)$. A \emph{\index{Cauchy sequence}Cauchy \index{sequence!Cauchy}sequence} $\Folge{x}$ is defined in $X$ just as in $\mathbb{R}$: Given $\epsilon>0$, there exists an index $n\in \mathbb{N}$ such that $d(x_{m}, x_{m'}) < \epsilon$ holds for all $m, m'\geq n$. Thus we have a Cauchy sequence, when we know that eventually the members of the sequence will be arbitrarily close; a converging sequence is evidently a Cauchy sequence. 
But a sequence which converges requires the knowledge of its limit; this is sometimes a disadvantage in applications. It would be helpful if we could conclude from the fact that we have a Cauchy sequence that we also have a point to which it converges. Spaces for which this is always guaranteed are called complete; they will be introduced next, examples show that there are spaces which are not complete; note, however, that we can complete each pseudometric space. This will be considered in some detail later on. \BeginDefinition{pseudo-is-complete} The pseudometric space is said to be \emph{\index{space!pseudometric!complete}complete} iff each Cauchy sequence has a limit. \end{definition} Compare in a pseudometric space the statement $\lim_{n\to \infty} x_{n} = x$ with the statement that $\Folge{x}$ is a Cauchy sequence. The former requires the knowledge of the limit point, while the latter is derived from observing the members of the sequence, but without knowing a limit. Hence we know in a complete space that a limit will exist, without being obliged to identify it. This suggests that complete pseudometric spaces are important. It is well known that the rational numbers are not complete, which is usually shown by showing that $\sqrt{2}$ is not rational. Another instructive example proposed by Bourbaki~\cite[II.3.3]{Bourbaki} is the following. \BeginExample{rationals-are-not-complete} The rational numbers $\mathbb{Q}$ are not complete in the usual metric. Take \begin{equation*} x_{n} := \sum_{i=0}^{n}2^{-i\cdot (i+1)/2}. \end{equation*} Then $\Folge{x}$ is a Cauchy sequence in $\mathbb{Q}$: if $m> n$, then $ |x_{m}-x_{n}| \leq 2^{-n\cdot (n+3)/2} $ (this is shown easily through the well known identity $\sum_{i=0}^{p}i = p\cdot (p+1)/2$).
Now assume that the sequence converges to $a/b\in\mathbb{Q}$, then we can find an integer $h_{n}$ such that \begin{equation*} \bigl|\frac{a}{b} - \frac{h_{n}}{2^{n\cdot (n+1)/2}}\bigr| \leq \frac{1}{2^{n\cdot (n+3)/2}}, \end{equation*} yielding \begin{equation*} |a\cdot 2^{n\cdot (n+1)/2} - b\cdot h_{n}| \leq \frac{b}{2^{n}} \end{equation*} for all $n\in\mathbb{N}$. The left hand side of this inequality is a whole number, the right side is not, once $n>n_{0}$ with $n_{0}$ so large that $b<2^{n}$. This means that the left hand side must be zero, so that $a/b = x_{n}$ for $n>n_{0}$. This is a contradiction. {\Large\ding{44}} \end{example} We know that $\mathbb{R}$ is complete with the usual metric, the rationals are not. But there is a catch: if we change the metric, completeness may be lost. \BeginExample{loose-completeness} The half open interval $]0, 1]$ is not complete under the usual metric $d(x, y) := |x - y|$. But take the metric \begin{equation*} d'(x, y) := \bigl|\frac{1}{x}-\frac{1}{y}\bigr| \end{equation*} Because $a < x < b$ iff $1/b < 1/x < 1/a$ holds for $0< a \leq b \leq 1$, the metrics $d$ and $d'$ are equivalent on $]0, 1]$. Let $\Folge{x}$ be a $d'$-Cauchy sequence, then $(1/x_{n})_{n\in\mathbb{N}}$ is a Cauchy sequence in $(\mathbb{R}, |\cdot |)$, hence it converges, so that $\Folge{x}$ is $d'$-convergent in $]0, 1]$. The trick here is to make sure that a Cauchy sequence avoids the region around the critical value $0$. {\Large\ding{44}} \end{example} Thus we have to carefully stick to the given metric, and changing the metric always entails checking completeness properties, if they are relevant. \BeginExample{cont-is-complete} Endow the set $\Cont[[0, 1]]$ of continuous functions on the unit interval with the metric $d(f, g) := \sup_{0\leq x\leq 1}\ |f(x)-g(x)|$, see Example~\ref{for-metric-spaces}. We claim that this metric space is complete. In fact, let $\Folge{f}$ be a $d$-Cauchy sequence in $\Cont[[0, 1]]$. 
Because we have for each $x\in [0, 1]$ the inequality $ |f_{n}(x) - f_{m}(x)| \leq d(f_{n}, f_{m}), $ we conclude that $\bigl(f_{n}(x)\bigr)_{n\in\mathbb{N}}$ is a Cauchy sequence for each $x\in[0, 1]$, which converges to some $f(x)$, since $\mathbb{R}$ is complete. We have to show that $f$ is continuous, and that $d(f, f_{n})\to 0$. Let $\epsilon>0$ be given, then there exists $n\in\mathbb{N}$ such that $d(f_{m}, f_{m'})< \epsilon/2$ for $m, m'\geq n$. Fixing $m\geq n$ and letting $m'\to\infty$ in $|f_{m}(x) - f_{m'}(x)| \leq d(f_{m}, f_{m'})$ yields $|f_{m}(x) - f(x)| \leq \epsilon/2$ for every $x\in[0, 1]$, hence $d(f, f_{m})\leq \epsilon/2$ for all $m\geq n$; since $\epsilon>0$ was arbitrary, this shows $d(f, f_{n})\to 0$. For continuity, let $x'\in[0, 1]$, and choose $\delta>0$ so that $|x-x'|<\delta$ implies $|f_{n}(x)-f_{n}(x')| < \epsilon/2$. Then $|x-x'|<\delta$ implies $|f(x)-f(x')| \leq |f(x)-f_{n}(x)| + |f_{n}(x)-f_{n}(x')| + |f_{n}(x')-f(x')| \leq 3\epsilon/2$. Since $\epsilon>0$ was arbitrary, $f$ is continuous. {\Large\ding{44}} \end{example} The next example is inspired by an observation in~\cite{MacQueen+Plotkin+Sethi}. \BeginExample{ranking-complete} Let $r: X\to \mathbb{N}$ be a ranking function, and denote the (ultra-) metric on $\PowerSet{X}$ constructed from it by $d$, see Example~\ref{for-metric-spaces}. Then $(\PowerSet{X}, d)$ is complete. In fact, let $\Folge{A}$ be a Cauchy sequence, thus we find for each $m\in \mathbb{N}$ an index $n\in\mathbb{N}$ such that $c(A_{k}, A_{\ell}) > m$, whenever $k, \ell\geq n$. We claim that the sequence converges to \begin{equation*} A := \bigcup_{n\in\mathbb{N}}\bigcap_{k\geq n}A_{k}, \end{equation*} which is the set of all elements in $X$ which are contained in all but a finite number of sequence elements. Given $m$, fix $n$ as above; we show that $c(A, A_{k}) > m$, whenever $k>n$.
Take an element $x\in A\Delta A_{k}$ of minimal rank. \begin{itemize} \item If $x\in A$, then there exists $\ell$ such that $x\in A_{t}$ for all $t\geq \ell$, so take $t\geq \max\ \{\ell, n\}$, then $x\in A_{t}\Delta A_{k}$, hence $c(A, A_{k}) = r(x) \geq c(A_{t}, A_{k}) > m$. \item If, however, $x\not\in A$, we conclude that $x\not\in A_{t}$ for infinitely many $t$, so $x\not\in A_{t}$ for some $t>n$. But since $x\in A\Delta A_{k}$, we conclude $x\in A_{k}$, hence $x\in A_{k}\Delta A_{t}$, thus $c(A, A_{k}) = r(x) \geq c(A_{k}, A_{t}) > m$. \end{itemize} Hence $A_{n}\to A$ in $(\PowerSet{X}, d)$. {\Large\ding{44}} \end{example} This observation is trivial, but sometimes helpful. \BeginLemma{closed-is-complete} A closed subset of a complete pseudometric space is complete. \QED \end{lemma} If we encounter a pseudometric space which is not complete, we may complete it through the following construction. Before discussing it, we need a simple auxiliary statement, which says that we can check completeness already on a dense subset. \BeginLemma{check-on-dense} Let $D\subseteq X$ be dense. Then the space is complete iff each Cauchy sequence on $D$ converges. \end{lemma} \begin{proof} If each Cauchy sequence from $X$ converges, so does each such sequence from $D$, so we have to establish the converse. Let $\Folge{x}$ be a Cauchy sequence on $X$. Given $n\in\mathbb{N}$, there exists for $x_{n}$ an element $y_{n}\in D$ such that $d(x_{n}, y_{n})< 1/n$. Because $\Folge{x}$ is a Cauchy sequence, $\Folge{y}$ is one as well, which converges by assumption to some $x\in X$; the triangle inequality shows that $\Folge{x}$ converges to $x$ as well. \end{proof} This helps in establishing that each pseudometric space can be embedded into a complete pseudometric space. The approach may be described as \index{Charly Brown's device}Charly Brown's device --- ``If you can't beat them, join them''.
So we take all Cauchy sequences as our space into which we embed $X$, and ---~intuitively~--- we flesh out from a Cauchy sequence of these sequences the diagonal sequence, which then will be a Cauchy sequence as well, and which will be a limit of the given one. This sounds more complicated than it is, however, because fortunately Lemma~\ref{check-on-dense} makes life easier, when it comes to establish completeness. Here we go. \BeginProposition{embed-into-complete-space} There exists a complete pseudometric space $(X^{*}, d^{*})$ into which $(X, d)$ may be embedded isometrically as a dense subset. \end{proposition} \begin{proof} 0. This is the line of attack: We define $X^{*}$ and $d^{*}$, show that we can embed $X$ isometrically into it as a dense subset, and then we establish completeness with the help of Lemma~\ref{check-on-dense}\MMP{Fairly direct approach}. 1. Define \begin{equation*} X^{*} := \{\Folge{x}\mid \Folge{x} \text{ is a $d$-Cauchy sequence in }X\}, \end{equation*} and put \begin{equation*} d^{*}\bigl(\Folge{x}, \Folge{y}\bigr) := \lim_{n\to \infty}d(x_{n}, y_{n}) \end{equation*} Before proceeding, we should make sure that the limit in question exists. In fact, given $\epsilon>0$, there exists $n\in\mathbb{N}$ such that $d(x_{m'}, x_{m}) < \epsilon/2$ and $d(y_{m'}, y_{m}) < \epsilon/2$ for $m, m'\geq n$, thus, if $m, m'\geq n$, we obtain $$ d(x_{m}, y_{m}) \leq d(x_{m}, x_{m'}) + d(x_{m'}, y_{m'}) + d(y_{m'}, y_{m}) < d(x_{m'}, y_{m'}) + \epsilon, $$ interchanging the r\^oles of $m$ and $m'$ yields $$ |d(x_{m}, y_{m}) - d(x_{m'}, y_{m'})| < \epsilon $$ for $m, m'\geq n$. Hence $(d(x_{n}, y_{n}))_{n\in\mathbb{N}}$ is a Cauchy sequence in $\mathbb{R}$, which converges by completeness of $\mathbb{R}$. 2. Given $x\in X$, the sequence $(x)_{n\in\mathbb{N}}$ is a Cauchy sequence, so it offers itself as the image of $x$; let $e:X\to X^{*}$ be the corresponding map, which is injective, and it preserves the pseudometric. Hence $e$ is continuous. 
We show that $\Bild{e}{X}$ is dense in $X^{*}$: take a Cauchy sequence $\Folge{x}$ and $\epsilon>0$. Let $n\in\mathbb{N}$ be selected for $\epsilon$, and assume $m\geq n$. Then \begin{equation*} d^{*}(\Folge{x}, e(x_{m})) = \lim_{k\to \infty}d(x_{k}, x_{m}) \leq \epsilon. \end{equation*} 3. The crucial point is completeness. An appeal to Lemma~\ref{check-on-dense} shows that it is sufficient to show that a Cauchy sequence in $\Bild{e}{X}$ converges in $(X^{*}, d^{*})$, because $\Bild{e}{X}$ is dense. But this is trivial. \end{proof} Having the completion $X^{*}$ of a pseudometric space $X$ at one's disposal, one might be tempted to extend a continuous map $X\to Y$ to a continuous map $X^{*}\to Y$ for example in the case that $Y$ is complete. This is usually not possible, for example, not every continuous function $\mathbb{Q}\to \mathbb{R}$ has a continuous extension. We will deal with this problem when discussing uniform continuity below, but we will state and prove here a condition which is sometimes helpful when one wants to extend a function not to the whole completion, but to a domain which is somewhat larger than the given one. Define the \index{diameter}\emph{diameter} \index{$\mathsf{diam}(A)$}$\mathsf{diam}(A)$ of a set $A$ as\MMP{$\mathsf{diam}(A)$} \begin{equation*} \mathsf{diam}(A) := \sup\ \{d(x, y)\mid x, y\in A\} \end{equation*} (note that the diameter may be infinite). It is easy to see that $\mathsf{diam}(A) = \mathsf{diam}(\Closure{A})$ using Proposition~\ref{sequences-are-enough}. Now assume that $f: A\to Y$ is given, then we measure the discontinuity of $f$ at point $x$ through the \label{ref:oscillation}\emph{oscillation} ${\varnothing}_f(x)$\index{oscillation}\MMP{Oscillation} of $f$ at $x \in \Closure{A}$, which is defined as the smallest diameter of the image of an open neighborhood of $x$, formally, \begin{equation*} {\varnothing}_f(x) := \inf\{\mathsf{diam}(\Bild{f}{A \cap V}) \mid x \in V, V \text{ open}\}.
\end{equation*} If $f$ is continuous on $A$, we have ${\varnothing}_f(x) = 0$ for each element $x$ of $A$. In fact, let $\epsilon>0$ be given, then there exists $\delta>0$ such that $\mathsf{diam}(\Bild{f}{A \cap V}) < \epsilon$, whenever $V$ is a neighborhood of $x$ of diameter less than $\delta$. Thus ${\varnothing}_{f}(x) < \epsilon$; since $\epsilon>0$ was chosen to be arbitrary, the claim follows. \BeginLemma{Kuratowski} Let $Y$ be a complete metric space, $X$ a pseudometric space, then a continuous map $f: A \rightarrow Y$ can be extended to a continuous map $ f_*: G \rightarrow Y, $ where $ G := \{x \in \Closure{A} \mid {\varnothing}_f(x) = 0\} $ has these properties:\MMP{Extension} \begin{enumerate} \item $A \subseteq G \subseteq \Closure{A}$, \item $G$ can be written as the intersection of countably many open sets. \end{enumerate} \end{lemma} The basic idea for the proof is rather straightforward. Take an element in the closure of $A$, then there exists a sequence in $A$ converging to this point\MMP{Idea for the proof}. If the oscillation at that point is zero, the images of the sequence elements must form a Cauchy sequence, so we extend the map by forming the limit of this sequence. Now we have to show that this map is well defined and continuous. \begin{proof} 1. We may and do assume that the complete metric $d$ for $Y$ is bounded by $1$. Define $G$ as above, then $A \subseteq G \subseteq \Closure{A}$, and $G$ can be written as the intersection of a sequence of open sets. In fact, represent $G$ as \begin{equation*} G = \bigcap_{n \in \mathbb{N}} \{x \in \Closure{A} \mid {\varnothing}_f(x) < \frac{1}{n}\}, \end{equation*} so we have to show that $\{x \in \Closure{A} \mid {\varnothing}_f(x) < q\}$ is open in $\Closure{A}$ for any $q>0$. But we have \begin{equation*} \{x \in \Closure{A} \mid {\varnothing}_f(x) < q\} = \bigcup \{V \cap \Closure{A} \mid \mathsf{diam}(\Bild{f}{V \cap A}) < q\}. 
\end{equation*} This is the union of sets open in $\Closure{A}$, hence is an open set itself. 2. Now take an element $x \in G \subseteq \Closure{A}$. Then there exists a sequence $\Folge{x}$ of elements $x_n \in A$ with $x_n \rightarrow x$. Given $\epsilon > 0$, we find a neighborhood $V$ of $x$ with $\mathsf{diam}(\Bild{f}{A \cap V}) < \epsilon$, since the oscillation of $f$ at $x$ is $0$. Because $x_n \rightarrow x$, we know that we can find an index $n_{\epsilon}\in\mathbb{N}$ such that $x_m \in V \cap A$ for all $m > n_\epsilon$. This implies that the sequence $(f(x_n))_{n \in \mathbb{N}}$ is a Cauchy sequence in $Y$. It converges because $Y$ is complete. Put \begin{equation*} f_*(x) := \lim_{n \rightarrow \infty} f(x_n). \end{equation*} 3. We have to show now that \begin{itemize} \item $f_{*}$ is well-defined. \item $f_{*}$ extends $f$. \item $f_{*}$ is continuous. \end{itemize} Assume that we can find $x\in G$ such that $\Folge{x}$ and $\Folge{x'}$ are sequences in $A$ with $x_{n}\to x$ and $x'_{n}\to x$, but $\lim_{n\to \infty}f(x_{n}) \not= \lim_{n\to \infty}f(x'_{n})$. Thus we find some $\eta>0$ such that $d(f(x_{n}), f(x'_{n})) \geq \eta$ infinitely often. Then the oscillation of $f$ at $x$ is at least $\eta>0$, a contradiction. This implies that $f_{*}$ is well-defined, and it implies also that $f_{*}$ extends $f$. Now let $x\in G$. If $\epsilon>0$ is given, we find a neighborhood $V$ of $x$ with $\mathsf{diam}(\Bild{f}{A\cap V})<\epsilon$. Thus, if $x'\in G\cap V$, then $d(f_{*}(x), f_{*}(x')) < \epsilon$. Hence $f_{*}$ is continuous. \end{proof} A characterization of complete spaces in terms of sequences of closed sets with decreasing diameters is given below. \BeginProposition{diam-to-zero-compl} These statements are equivalent \begin{enumerate} \item\label{diam-to-zero-compl-1} $X$ is complete. 
\item\label{diam-to-zero-compl-2} For each decreasing sequence $\Folge{A}$ of non-empty closed sets the diameter of which tends to zero there exists $x\in X$ such that $\bigcap_{n\in\mathbb{N}}A_{n} = \Closure{\{x\}}$. \end{enumerate} In particular, if $X$ is a metric space, then $X$ is complete iff each decreasing sequence of non-empty closed sets the diameter of which tends to zero has exactly one point in common. \end{proposition} \begin{proof} The assertion for the metric case follows immediately from the general case, because $\Closure{\{x\}} = \{x\}$, and because there can be not more than one element in the intersection. \labelImpl{diam-to-zero-compl-1}{diam-to-zero-compl-2}: Let $\Folge{A}$ be a decreasing sequence of non-empty closed sets with $\mathsf{diam}(A_{n})\to 0$, then we have to show that $\bigcap_{n\in\mathbb{N}}A_{n} = \Closure{\{x\}}$ for some $x\in X$. Pick from each $A_{n}$ an element $x_{n}$, then $\Folge{x}$ is a Cauchy sequence which converges to some $x$, since $X$ is complete. Because the intersection of closed sets is closed again, we conclude $\bigcap_{n\in\mathbb{N}}A_{n} = \Closure{\{x\}}$. \labelImpl{diam-to-zero-compl-2}{diam-to-zero-compl-1}: Take a Cauchy sequence $\Folge{x}$, then $A_{n} :=\Closure{\{x_{m}\mid m\geq n\}}$ is a decreasing sequence of closed sets the diameter of which tends to zero. In fact, given $\epsilon>0$ there exists $n\in \mathbb{N}$ such that $d(x_{m}, x_{m'}) < \epsilon$ for all $m, m'\geq n$, hence $\mathsf{diam}(A_{n}) < \epsilon$, and it follows that this holds also for all $k\geq n$. Then it is obvious that $x_{n}\to x$ whenever $x\in\bigcap_{n\in\mathbb{N}}A_{n}$. \end{proof} We mention all too briefly a property of complete spaces which renders them most attractive, viz., Banach's Fixpoint Theorem.
\BeginDefinition{def-contraction} Call $f: X\to X$ a \emph{\index{contraction}contraction} iff there exists $\gamma$ with $0<\gamma<1$ such that $d(f(x), f(y)) \leq \gamma\cdot d(x, y)$ holds for all $x, y\in X$. \end{definition} Then one shows \BeginTheorem{banach-fixed-point} Let $f: X\to X$ be a contraction with $X$ complete. Then there exists $x\in X$ with $f(x) = x$. If $f(y) = y$ holds as well, then $d(x, y) = 0$. In particular, if $X$ is a metric space, then there exists a unique fixed point for $f$.\MMP{Banach's Fixpoint Theorem} \end{theorem} The idea is just to start with an arbitrary element of $X$, and to iterate $f$ on it. This yields a sequence of elements of $X$. Because the elements become closer and closer, completeness kicks in and makes sure that there exists a limit. This limit is independent of the starting point. \begin{proof} Define the $n$-th iteration $f^{n}$ of $f$ through $f^{1} := f$ and $f^{n+1} := f^{n} \circ f$. Now let $x_{0}$ be an arbitrary element of $X$, and define $x_{n} := f^{n}(x_{0})$. Then $d(x_{n}, x_{n+m})\leq \gamma^{n}\cdot d(x_{0}, x_{m})$, so that $\Folge{x}$ is a Cauchy sequence which converges to some $x\in X$, and $f(x) = x$. If $f(y) = y$, we have $d(x, y) = d(f(x), f(y))\leq \gamma\cdot d(x, y)$, thus $d(x, y) = 0$. This implies uniqueness of the fixed point as well. \end{proof} The Banach Fixed Point Theorem has a wide range of applications, and it used for iteratively approximating the solution of equations, e.g., for implicit functions. The following example permits a glance at Google's\MMP{Google} page rank algorithm, it follows~\cite{Rousseau} (the linear algebra behind it is explored in, e.g.,~\cite{Langville+Meyer, Keener}). 
\BeginExample{how-google-works} Let $ S := \{\langle x_{1}, \ldots, x_{n}\rangle\mid x_{i}\geq 0, x_{1}+\ldots+x_{n}=1\} $ be the set of all discrete probability distributions over $n$ objects, and $P: \mathbb{R}^{n}\to \mathbb{R}^{n}$ be a stochastic matrix; this means that $P$ has non-negative entries and the rows all add up to $1$. The set $\{1, \ldots, n\}$ is usually interpreted as the state space for some random experiment, entry $p_{i, j}$ is then interpreted as the probability for the change of state $i$ to state $j$. We have in particular $P: S\to S$, so a probability distribution is transformed into another probability distribution. We assume that $P$ has an eigenvector $v_{1}\in S$ for the eigenvalue $1$, and that the other eigenvalues are in absolute value not greater than 1 (this is what the classic Perron-Frobenius Theorem says, see~\cite{Langville+Meyer, Keener}); moreover we assume that we can find a base $\{v_{1}, \ldots, v_{n}\}$ of eigenvectors, all of which may be assumed to be in $S$; let $\lambda_{i}$ be the eigenvalue for $v_{i}$, then $\lambda_{1}=1$, and $|\lambda_{i}|\leq 1$ for $i\geq 2$. Such a matrix is called a \emph{regular transition matrix}; these matrices are investigated in the context of stability of finite Markov transition chains. Define for the distributions $p = \sum_{i=1}^{n}p_{i}\cdot v_{i}$ and $q = \sum_{i=1}^{n}q_{i}\cdot v_{i}$ their distance through \begin{equation*} d(p, q) := \ensuremath{\frac{1}{2}}\cdot \sum_{i=1}^{n}|p_{i}-q_{i}|. \end{equation*} Because $\{v_{1}, \ldots, v_{n}\}$ are linearly independent, $d$ is a metric. Because this set forms a basis, hence is given through a bijective linear map from the base given by the unit vectors, and because the Euclidean metric is complete, $d$ is complete as well.
Now define $f(x) := P\cdot x$, then this is a contraction $S\to S$: \begin{equation*} d(P\cdot x, P\cdot y) = \ensuremath{\frac{1}{2}}\cdot \sum_{i=1}^{n}|x_{i}\cdot P(v_{i})- y_{i}\cdot P(v_{i})| \leq \ensuremath{\frac{1}{2}}\sum_{i=1}^{n}|\lambda_{i}\cdot (x_{i}-y_{i})| \leq \ensuremath{\frac{1}{2}}\cdot d(x, y). \end{equation*} Thus $f$ has a fixed point, which must be $v_{1}$ by uniqueness. Now assume that we have a (very little) Web universe with only five pages. The links are given as in the diagram. \begin{equation*} \xymatrix{ 1\ar@<.5ex>[dd] && 2\ar[ll]\\ &&3\ar[ull]\ar@<.5ex>[rr]\ar@<.5ex>[dll] && 5\ar@<.5ex>[ll]\ar@/_1pc/[ull]\ar@/^1.5pc/[dllll]\\ 4\ar@<.5ex>[uu]\ar@<.5ex>[urr] } \end{equation*} The transitions between pages are at random, the matrix below describes such a random walk \begin{equation*} P := \left( \begin{matrix} 0 & 1 & 0 & 0& 0\\ \ensuremath{\frac{1}{2}} & 0 & \ensuremath{\frac{1}{2}} & 0 & 0\\ \frac{1}{3} & \frac{1}{3} & 0 & 0 & \frac{1}{3}\\ 1 & 0 & 0 & 0 & 0\\ 0 & \frac{1}{3} & \frac{1}{3} & \frac{1}{3} & 0 \end{matrix} \right) \end{equation*} It says that we make a transition from state $2$ to state $1$ with $p_{2, 1} = \ensuremath{\frac{1}{2}}$, also $p_{2, 3} = \ensuremath{\frac{1}{2}}$, the transition from state $2$ to state $3$. From state $1$ one goes with probability one to state $2$, because $p_{1, 2} = 1$. Iterating $P$ quite a few times will yield a solution which does not change much after $32$ steps, one obtains \begin{equation*} P^{32} = \left( \begin{matrix} .293 & .390 & .220 & .024 & .073\\ .293 & .390 & .220 & .024 & .073\\ .293 & .390 & .220 & .024 & .073\\ .293 & .390 & .220 & .024 & .073\\ .293 & .390 & .220 & .024 & .073\\ \end{matrix} \right) \end{equation*} The eigenvector $p$ for the eigenvalue $1$ looks like this: $p = \langle.293, .390, .220, .024, .073\rangle$, so this yields a stationary distribution.
In terms of web searches\MMP{Web search}, the importance of the pages is ordered according to this stationary distribution as $2, 1, 3, 5, 4$, so this is the ranking one would associate with these pages. This is the basic idea behind \index{Google}Google's page ranking algorithm. Of course, there are many practical considerations which have been eliminated from this toy example. It may be that the matrix does not follow the assumptions above, so that it has to be modified accordingly in a preprocessing step; size is a problem, of course, since one has to handle the extremely large matrices occurring in web searches. {\Large\ding{44}} \end{example} Compact pseudometric spaces are complete. This will be a byproduct of a more general characterization of compact spaces. We show first that compactness and sequential compactness are the same for these spaces. This is sometimes helpful in those situations in which a sequence is easier to handle than an open cover, or an ultrafilter. Before discussing this, we introduce\MMP{$\epsilon$-net} \emph{\index{$\epsilon$-net}$\epsilon$-nets} as a cover of $X$ through a \emph{finite} family $\{B(x, \epsilon)\mid x\in A\}$ of open balls of radius $\epsilon$. $X$ may or may not have an $\epsilon$-net for any given $\epsilon>0$. For example, $\mathbb{R}$ does not have an $\epsilon$-net for any $\epsilon > 0$, in contrast to $[0, 1]$ or $]0, 1]$. \BeginDefinition{totally-bounded} The pseudometric space $X$ is \emph{\index{totally bounded}totally bounded} iff there exists for each $\epsilon>0$ an $\epsilon$-net for $X$. A subset of a pseudometric space is totally bounded iff it is a totally bounded subspace. \end{definition} Thus $A\subseteq X$ is totally bounded iff $\Closure{A}\subseteq X$ is totally bounded. We see immediately \BeginLemma{compact-tot-bounded} A compact pseudometric space is totally bounded. \QED \end{lemma} Now we are in a position to establish this equivalence, which will help characterize compact pseudometric spaces.
\BeginProposition{seq-comp-equiv-comp} The following properties are equivalent for the pseudometric space $X$: \begin{enumerate} \item\label{seq-comp-equiv-comp-1} $X$ is compact. \item\label{seq-comp-equiv-comp-2} $X$ is sequentially compact. \end{enumerate} \end{proposition} \begin{proof} \labelImpl{seq-comp-equiv-comp-1}{seq-comp-equiv-comp-2}: Assume that the sequence $\Folge{x}$ does not have a convergent subsequence, and consider the set $F := \{x_{n}\mid n\in\mathbb{N}\}$. This set is closed, since, if $y_{n}\to y$ and $y_{n}\in F$ for all $n\in\mathbb{N}$, then $y\in F$, since the sequence $\Folge{y}$ is eventually constant. $F$ is also discrete, since, if we could find for some $z\in F$ for each $n\in\mathbb{N}$ an element in $F\cap B(z, 1/n)$ different from $z$, we would have a convergent subsequence. Hence $F$ is a closed discrete subspace of $X$ which contains infinitely many elements, which is impossible. This contradiction shows that each sequence has a convergent subsequence. \labelImpl{seq-comp-equiv-comp-2}{seq-comp-equiv-comp-1}\MMP{Plan of attack}: Before we enter into the second and harder part of the proof, we have a look at its plan. Given an open cover for the sequentially compact space $X$, we have to construct a finite cover from it. If we succeed in constructing for each $\epsilon>0$ a finite net so that we can fit each ball into some element of the cover, we are done, because in this case we may take just these elements of the cover, obtaining a finite cover. That this fitting in is possible is shown in the first part of the proof. We construct under the assumption that it is not possible a sequence, which has a converging subsequence, and the limit of this subsequence will be used as kind of a flyswatter\index{flyswatter}. The second part of the proof is then just a simple application of the net so constructed. Let $(U_{i})_{i\in I}$ be an open cover of $X$.
We claim that we can find for this cover some $\epsilon>0$ such that, whenever $\mathsf{diam}(A)< \epsilon$, there exists $i\in I$ with $A\subseteq U_{i}$. Assume that this is wrong, then we find for each $n\in\mathbb{N}$ some $A_{n}\subseteq X$ with $\mathsf{diam}(A_{n}) < 1/n$ which is not contained in one single $U_{i}$. Pick from each $A_{n}$ an element $x_{n}$, then $\Folge{x}$ has a convergent subsequence, say $\Folge{y}$, with $y_{n}\to y$. There exists a member $U$ of the cover with $y\in U$, and there exists $r>0$ with $B(y, r)\subseteq U$. Now we catch the fly. Choose $\ell\in\mathbb{N}$ with $1/\ell < r/2$, then $y_{m}\in B(y, r/2)$ for $m\geq n_{0}$ for some suitable chosen $n_{0}\in\mathbb{N}$, hence, because $\Folge{y}$ is a subsequence of $\Folge{x}$, there are infinitely many $x_{k}$ contained in $B(y, r/2)$. But since $\mathsf{diam}(A_{\ell})<1/\ell$, this implies $A_{\ell}\subseteq B(y, r)\subseteq U$, which is a contradiction. Now select for the cover $\epsilon>0$ as above, and let the finite set $A$ be the set of centers for an $\epsilon/2$-net, say, $A = \{a_{1}, \ldots, a_{k}\}$. Then we can find for each $a_{j}\in A$ some member $U_{i_{j}}$ of this cover with $B(a_{j}, \epsilon/2)\subseteq U_{i_{j}}$ (note that $\mathsf{diam}(B(x, r)) < 2\cdot r$). This yields a finite cover $\{U_{i_{j}}\mid 1 \leq j \leq k\}$ of $X$. \end{proof} This proof was conceptually a little complicated, since we had to make the step from a sequence (with a converging subsequence) to a cover (with the goal of finding a finite cover). Both are not immediately related. The missing link turned out to be measuring the size of a set through its diameter, and capturing limits through suitable sets. Using the last equivalence, we are in a position to characterize compact pseudometric spaces. \BeginTheorem{compact-is-tot-bound-compl} A pseudometric space is compact iff it is totally bounded and complete. \end{theorem} \begin{proof} 1. Let $X$ be compact.
We know already from Lemma~\ref{compact-tot-bounded} that a compact pseudometric space is totally bounded. Let $\Folge{x}$ be a Cauchy sequence, then we know that it has a converging subsequence, which, being a Cauchy sequence, implies that it converges itself. 2. Assume that $X$ is totally bounded and complete. In view of Proposition~\ref{seq-comp-equiv-comp} it is enough to show that $X$ is sequentially compact. Let $\Folge{x}$ be a sequence in $X$. Since $X$ is totally bounded, we find a subsequence $(x_{n_{1}})$ which is entirely contained in an open ball of radius less that $1$. Then we may extract from this sequence a subsequence $(x_{n_{2}})$ which is contained in an open ball of radius less than $1/2$. Continuing inductively we find a subsequence $(x_{n_{k+1}})$ of $(x_{n_{k}})$ the members of which are completely contained in an open ball of radius less than $2^{-(k+1)}$. Now define $y_{n} := x_{n_{n}}$, hence $\Folge{y}$ is the diagonal sequence in this scheme. We claim that $\Folge{y}$ is a Cauchy sequence. In fact, let $\epsilon>0$ be given, then there exists $n\in\mathbb{N}$ such that $\sum_{\ell>n}2^{-\ell}<\epsilon/2$. Then we have for $m>n$ \begin{equation*} d(y_{n}, y_{m}) \leq 2\cdot \sum_{\ell=n}^{m}2^{-\ell} < \epsilon. \end{equation*} By completeness, $y_{n}\to y$ for some $y\in X$. Hence we have found a converging subsequence of the given sequence $\Folge{x}$, so that $X$ is sequentially compact. \end{proof} It\MMP[t]{Shift of emphasis} might be noteworthy to observe the shift of emphasis between finding a finite cover for a given cover, and admitting an $\epsilon$-net for each $\epsilon>0$. While we have to select a finite cover from an arbitrarily given cover beyond our control, in the case of a totally bounded space we can construct for each $\epsilon>0$ a cover of a certain size, hence we may be in a position to influence the shape of this special cover. 
Consequently, the characterization of compact spaces in Theorem~\ref{compact-is-tot-bound-compl} is very helpful and handy, but, alas, it works only in the restricted class of pseudometric spaces. We apply this characterization to $(\mathfrak{C}(X), \delta_{H})$, the space of all non-empty compact subsets of $(X, d)$ with the Hausdorff metric $\delta_{H}$, see Example~\ref{vietoris-pseudometric}. \BeginProposition{vietoris-is-complete} $(\mathfrak{C}(X), \delta_{H})$ is complete, if $X$ is a complete pseudometric space. \end{proposition} \begin{proof} We fix for the proof a Cauchy sequence $\Folge{C}$ of elements of $\mathfrak{C}(X)$. 0. Let us pause a moment and discuss the approach to the proof\MMP{Plan} first. We show in a first step that $\Closure{(\bigcup_{n\in\mathbb{N}}C_{n})}$ is compact by showing that it is totally bounded and complete. Completeness is trivial, since the space is complete, and we are dealing with a closed subset, so we focus on showing that the set is totally bounded. Actually, it is sufficient to show that $\bigcup_{n\in\mathbb{N}}C_{n}$ is totally bounded, because a set is totally bounded iff its closure is. Then compactness of $\Closure{(\bigcup_{n\in\mathbb{N}}C_{n})}$ implies that $C := \bigcap_{n\in\mathbb{N}}\Closure{(\bigcup_{k\geq n}C_{k})}$ is compact as well, moreover, we will argue that $C$ must be non-empty. Then it is shown that $C_{n}\to C$ in the Hausdorff metric. 1. Let $D := \bigcup_{n\in\mathbb{N}}C_{n}$, and let $\epsilon>0$ be given. We will construct an $\epsilon$-net for $D$. Because $\Folge{C}$ is Cauchy, we find for $\epsilon$ an index $\ell$ so that $\delta_{H}(C_{n}, C_{m}) < \epsilon/2$ for $n, m\geq \ell$. When $n\geq \ell$ is fixed, this means in particular that $C_{m}\subseteq C_{n}^{\epsilon/2}$ for all $m\geq \ell$, thus $d(x, C_{n}) < \epsilon/2$ for all $x\in C_{m}$ and all $m\geq \ell$. We will use this observation in a moment.
Let $\{x_{1}, \ldots, x_{t}\}$ be an $\epsilon/2$-net for $\bigcup_{j=1}^{n}C_{j}$, we claim that this is an $\epsilon$-net for $D$. In fact, let $x\in D$. If $x\in\bigcup_{j=1}^{n}C_{j}$, then there exists some $k$ with $d(x, x_{k})<\epsilon/2$. If $x\in C_{m}$ for some $m>n$, $x\in C_{n}^{\epsilon/2}$, so that we find $x'\in C_{n}$ with $d(x, x')<\epsilon/2$, and for $x'$ we find $k$ such that $d(x_{k}, x')<\epsilon/2$. Hence we have $d(x, x_{k}) < \epsilon$, so that we have shown that $\{x_{1}, \ldots, x_{t}\}$ is an $\epsilon$-net for $D$. Thus $\Closure{D}$ is totally bounded, hence compact. 2. From the first part it follows that $\Closure{(\bigcup_{k\geq n}C_{k})}$ is compact for each $n\in\mathbb{N}$. Since these sets form a decreasing sequence of non-empty closed subsets to the compact set given by $n=1$, their intersection cannot be empty, hence $ C := \bigcap_{n\in\mathbb{N}}\Closure{(\bigcup_{k\geq n}C_{k})} $ is compact and non-empty, hence a member of $\mathfrak{C}(X)$. We claim that $\delta_{H}(C_{n}, C)\to 0$, as $n\to \infty$. Let $\epsilon>0$ be given, then we find $\ell\in\mathbb{N}$ such that $\delta_{H}(C_{m}, C_{n})< \epsilon/2$, whenever $n, m\geq \ell$. We show that $\delta_{H}(C_{n}, C)< \epsilon$ for all $n\geq \ell$. Let $n\geq \ell$. The proof is subdivided into showing that $C\subseteq C_{n}^{\epsilon}$ and $C_{n}\subseteq C^{\epsilon}$. Let us work on the first inclusion. Because $D := \Closure{(\bigcup_{i\geq n}C_{i})}$ is totally bounded, there exists an $\epsilon/2$-net, say, $\{x_{1}, \ldots, x_{t}\}$, for $D$. If $x\in C\subseteq D$, then there exists $j$ such that $d(x, x_{j})< \epsilon/2$, so that we can find $y\in C_{n}$ with $d(y, x_{j})<\epsilon/2$. Consequently, we find for $x\in C$ some $y\in C_{n}$ with $d(x, y)<\epsilon$. Hence $C\subseteq C_{n}^{\epsilon}$. Now for the second inclusion. Take $x\in C_{n}$.
Since $\delta_{H}(C_{m}, C_{n})<\epsilon/2$ for $m\geq \ell$, we have $C_{n}\subseteq C_{m}^{\epsilon/2}$, hence find $x_{m}\in C_{m}$ with $d(x, x_{m})<\epsilon/2$. The sequence $(x_{k})_{k\geq m}$ consists of members of the compact set $D$, so it has converging subsequence which converges to some $y\in D$. But it actually follows from the construction that $y\in C$, and $d(x, y) \leq d(x, x_{m}) + d(x_{m}, y) < \epsilon$ for $m$ taken sufficiently large from the subsequence. This yields $x\in C^{\epsilon}$. Taking these inclusions together, they imply $\delta_{H}(C_{n}, C) < \epsilon$ for $n>\ell$. This shows that $(\mathfrak{C}(X), \delta_{H})$ is a complete pseudometric space, if $(X, d)$ is one. \end{proof} The topology induced by the Hausdorff metric can be defined in a way which permits a generalization to arbitrary topological spaces, where it is called the \emph{\index{topology!Vietoris}Vietoris topology}. It has been studied with respect to finding continuous selections, e.g., by Michael~\cite{Michael}, see also~\cite{Jayne+Rogers, Castaing-Valadier}. The reader is also referred to~\cite[§33]{Kuratowski}, and to~\cite[p. 120]{Engelking} for a study of topologies on subsets. We will introduce uniform continuity now and discuss this concept briefly here. Uniform spaces will turn out to be the proper scenario for the more extended discussion in Section~\ref{sec:uniform-spaces}. As a motivating example, assume that the pseudometric $d$ on $X$ is bounded, take a subset $A\subseteq X$ and look at the function $x\mapsto d(x, A)$. Since \begin{equation*} |d(x, A) - d(y, A) | \leq d(x, y), \end{equation*} we know that this map is continuous. This means that, given $x\in X$, there exists $\delta>0$ such that $d(x, x')<\delta$ implies $|d(x, A) - d(x', A) |< \epsilon$. We see from the inequality above that the choice of $\delta$ does only depend on $\epsilon$, but not on $x$. Compare this with the function $x\mapsto 1/x$ on $]0, 1]$. 
This function is continuous as well, but the choice of $\delta$ depends on the point $x$ you are considering: whenever $0<\delta<\epsilon\cdot x^{2}/(1+\epsilon\cdot x)$, we may conclude that $|x'-x|\leq\delta$ implies $|1/x'-1/x|\leq\epsilon$. In fact, we may easily infer from the graph of the function that a uniform choice of $\delta$ for a given $\epsilon$ is not possible. This leads to the definition of uniform continuity in a pseudometric space: the choice of $\delta$ for a given $\epsilon$ does not depend on a particular point, but is rather, well, uniform. \BeginDefinition{unif-continuity} The map $f: X\to Y$ into the pseudometric space $(Y, d')$ is called \emph{uniformly \index{continuous!uniformly}continuous} iff given $\epsilon>0$ there exists $\delta>0$ such that $d'(f(x), f(x'))<\epsilon$ whenever $d(x, x')<\delta$. \end{definition} Doing\MMP[t]{Continuity vs. uniform continuity} a game of quantifiers, let us just point out the difference between uniform continuity and continuity. \begin{enumerate} \item Continuity says \begin{equation*} \forall \epsilon>0\underline{\forall x\in X\exists\delta>0}\forall x'\in X: d(x, x')< \delta \Rightarrow d'(f(x), f(x'))<\epsilon. \end{equation*} \item Uniform continuity says \begin{equation*} \forall \epsilon>0\underline{\exists\delta>0\forall x\in X}\forall x'\in X: d(x, x')< \delta \Rightarrow d'(f(x), f(x'))<\epsilon. \end{equation*} \end{enumerate} The formulation suggests that uniform continuity depends on the chosen metric. In contrast to continuity, which is a property depending on the topology of the underlying spaces, uniform continuity is a property of the underlying uniform space, which will be discussed below. We note that the composition of uniformly continuous maps is uniformly continuous again. A uniformly continuous map is continuous. The converse is not true, however. \BeginExample{cont-not-unif-cont} Consider the map $f: x\mapsto x^{2}$, which is certainly continuous on $\mathbb{R}$. 
Assume that $f$ is uniformly continuous, and fix $\epsilon>0$, then there exists $\delta>0$ such that $|x-y|<\delta$ always implies $|x^{2}-y^{2}|<\epsilon$. Thus we have for all $x$, and for all $r$ with $0<r\leq\delta$ that $|x^{2}-(x+r)^{2}| = |2\cdot x\cdot r+r^{2}|<\epsilon$ after Binomi's celebrated theorem. But this would mean $|2\cdot x+r| < \epsilon/r$ for all $x$, which is not possible. In general, a very similar argument shows that polynomials $\sum_{i=1}^{n}a_{i}\cdot x^{i}$ with $n>1$ and $a_{n}\not=0$ are not uniformly continuous. {\Large\ding{44}} \end{example} A continuous function on a compact pseudometric space, however, is uniformly continuous. This is established through an argument constructing a cover of the space, compactness will then permit us to extract a finite cover, from which we will infer uniform continuity. \BeginProposition{compact-cont-unif-cont} Let $f: X\to Y$ be a continuous map from the compact pseudometric space $X$ to the pseudometric space $(Y, d')$. Then $f$ is uniformly continuous. \end{proposition} \begin{proof} Given $\epsilon>0$, there exists for each $x\in X$ a positive $\delta_{x}$ such that $\Bild{f}{B(x, \delta_{x})} \subseteq B_{d'}(f(x), \epsilon/3)$. Since $\{B(x, \delta_{x}/3)\mid x\in X\}$ is an open cover of $X$, and since $X$ is compact, we find $x_{1}, \ldots, x_{n}\in X$ such that $B(x_{1}, \delta_{x_{1}}/3), \ldots, B(x_{n}, \delta_{x_{n}}/3)$ cover $X$. Let $\delta$ be the smallest among $\delta_{x_{1}}, \ldots, \delta_{x_{n}}$. 
If $d(x, x')< \delta/3$, then there exist $x_{i}, x_{j}$ with $d(x, x_{i})< \delta/3$ and $d(x', x_{j})< \delta/3$, so that $d(x_{i}, x_{j})\leq d(x_{i}, x) + d(x, x') + d(x', x_{j}) < \delta$, hence $d'(f(x_{i}), f(x_{j})) < \epsilon/3$, thus $d'(f(x), f(x')) \leq d'(f(x), f(x_{i})) + d'(f(x_{i}), f(x_{j})) + d'(f(x_{j}), f(x')) < 3\cdot \epsilon/3 = \epsilon.$ \end{proof} One of the most attractive features of uniform continuity is that it permits extending a function --- given a uniform continuous map $f: D\to Y$ with $D\subseteq X$ dense and $Y$ complete metric, we can extend $f$ to a uniformly continuous map $F$ on the whole space\MMP{Idea for a proof}. This extension is necessarily unique (see Lemma~\ref{equal-on-dense}). The basic idea is to define $F(x) := \lim_{n\to \infty}f(x_{n})$, whenever $x_{n}\to x$ is a sequence in $D$ which converges to $x$. This requires that the limit exists, and that it is in this case unique, hence it demands the range to be a metric space which is complete. \BeginProposition{extend-unif-cont-maps} Let $D\subseteq X$ be a dense subset, and assume that $f: D\to Y$ is uniformly continuous, where $(Y, d')$ is a complete metric space. Then there exists a unique uniformly continuous map $F: X\to Y$ which extends $f$. \end{proposition} \begin{proof} 0. We have already argued that an extension must be unique, if it exists. So we have to construct it, and to show that it is uniformly continuous. We will generalize the argument from above referring to a limit by considering the oscillation at each point\MMP{Outline~---~use the oscillation}. A glimpse at the proof of Lemma~\ref{Kuratowski} shows indeed that we argue with a limit here, but are able to look at the whole set of points which makes this possible. 1. Let us have a look at the oscillation ${\varnothing}_f(x)$ of $f$ at a point $x\in X$ (see page~\pageref{ref:oscillation}), and we may assume that $x\not\in D$. We claim that ${\varnothing}_f(x) = 0$. 
In fact, given $\epsilon>0$, there exists $\delta>0$ such that $d(x', x'') < \delta$ implies $d'(f(x'), f(x'')) < \epsilon/3$, whenever $x', x''\in D$. Thus, if $y', y''\in \Bild{f}{D\cap B(x, \delta/2)}$, we find $x', x''\in D$ with $f(x') = y', f(x'') = y''$ and $d(x', x'') \leq d(x, x') + d(x'', x) < \delta$, hence $d'(y', y'') = d'(f(x'), f(x'')) < \epsilon$. This means that $\mathsf{diam}(\Bild{f}{D\cap B(x, \delta/2)}) < \epsilon$. 2. Lemma~\ref{Kuratowski} tells us that there exists a continuous extension $F$ of $f$ to the set $\{x\in X \mid {\varnothing}_f(x) = 0\} = X$. Hence it remains to show that $F$ is \emph{uniformly} continuous. Given $\epsilon>0$, we choose the same $\delta$ as above, which did not depend on the choice of the points we were considering above. Let $x_{1}, x_{2}\in X$ with $d(x_{1}, x_{2})< \delta/2$, then there exist $v_{1}, v_{2}\in D$ such that $d(x_{1}, v_{1}) <\delta/4$ with $d'(F(x_{1}), f(v_{1}))\leq \epsilon/3$ and $d(x_{2}, v_{2}) < \delta/4$ with $d'(F(x_{2}), f(v_{2}))\leq \epsilon/3$. We see as above that $d(v_{1}, v_{2}) < \delta$, thus $d'(f(v_{1}), f(v_{2})) < \epsilon/3$, consequently, \begin{equation*} d'(F(x_{1}), F(x_{2})) \leq d'(F(x_{1}), f(v_{1})) + d'(f(v_{1}), f(v_{2})) + d'(f(v_{2}), F(x_{2})) < 3\cdot \epsilon/3 = \epsilon. \end{equation*} But this means that $F$ is uniformly continuous. \end{proof} Looking at $x\mapsto 1/x$ on $]0, 1]$ shows that uniform continuity is indeed necessary to obtain a continuous extension. \subsubsection{Baire's Theorem and a Game} \label{sec:baire+game} The technique of constructing a shrinking sequence of closed sets with a diameter tending to zero used for establishing Proposition~\ref{diam-to-zero-compl} is helpful in establishing Baire's Theorem~\ref{baire-locally-compact} also for complete pseudometric spaces;\index{theorem!Baire!complete pseudometric} completeness then makes sure that the intersection is not empty.
The proof is essentially a blend of this idea with the proof given above (page~\pageref{baire-locally-compact}). We will then give an interpretation of Baire's Theorem in terms of the game \emph{Angel vs. Demon} introduced in~\SetCite{Section 1.7}. We show that Demon has a winning strategy iff the space is the countable union of nowhere dense sets (the space is then called to be of the \emph{first category}). This is done for a subset of the real line, but can easily generalized. This is the version of Baire's Theorem in a complete pseudometric space. \BeginTheorem{baire-complete} Let $X$ be a complete pseudometric space, then the intersection of a sequence of dense open sets is dense again\MMP{Baire's Theorem}. \end{theorem} \begin{proof} Let $\Folge{D}$ be a sequence of dense open sets. Fix a non-empty open set $G$, then we have to show that $G\cap\bigcap_{n\in\mathbb{N}}D_{n}\not=\emptyset$. Now $D_{1}$ is dense and open, hence we find an open set $V_{1}$ and $r > 0$ such that $\mathsf{diam}(\Closure{V}_{1}) \leq r$ and $\Closure{V}_{1}\subseteq D_{1}\cap G$. We select inductively in this way a sequence of open sets $\Folge{V}$ with $\mathsf{diam}(\Closure{V}_{n}) < r/n$ such that $\Closure{V}_{n+1}\subseteq D_{n}\cap V_{n}$. This is possible since $D_{n}$ is open and dense for each $n\in\mathbb{N}$. Hence we have a decreasing sequence $\Closure{V}_{1} \supseteq \ldots \Closure{V}_{n} \supseteq \ldots$ of closed sets with diameters tending to $0$ in the complete space $X$. Thus $\bigcap_{n\in\mathbb{N}}\Closure{V}_{n} = \bigcap_{n\in\mathbb{N}}V_{n}$ is not empty by Proposition~\ref{diam-to-zero-compl}, which entails $G\cap\bigcap_{n\in\mathbb{N}}D_{n}$ not being empty. \end{proof} Kelley~\cite[p. 
201]{Kelley} remarks that there is a slight incongruence with this theorem, since the assumption of completeness is non-topological in nature (hence a property which may get lost when switching to another pseudometric, see Example~\ref{loose-completeness}), but we draw a topological conclusion. He suggests that the assumption on space $X$ should be reworded to $X$ being a topological space for which there exists a complete pseudometric. But, alas, the formulation above is the usual one, because it is pretty suggestive after all. \BeginDefinition{def-nowhere-dense} Call a set $A\subseteq X$ \emph{\index{nowhere dense}nowhere dense} iff $\Interior{\Closure{A}} = \emptyset$, i.e., the interior of the closure is empty, equivalently, iff the open set $X\setminus\Closure{A}$ is dense. The space $X$ is said to be of the \emph{\index{space!first category}first category} iff it can be written as the countable union of nowhere dense sets. \end{definition} Then Baire's Theorem can be reworded that the countable union of nowhere dense sets in a complete pseudometric space has empty interior; equivalently, its complement is dense. This is an important example for a nowhere dense set: \BeginExample{cantor-ternary-nowhere-dense} Cantor's ternary set $C$ (see~\SetCite{Example 1.104}) can be written as\MMP[t]{Cantor's ternary set} \begin{equation*} C = \bigl\{\sum_{i=1}^{\infty}a_{i}3^{-i}\mid a_{i}\in\{0, 2\}\text{ for all }i\in \mathbb{N}\bigr\}. \end{equation*} This is seen as follows: Define $ [a, b]' := [a, a + (b-a)/3] \cup [a+2\cdot (b-a)/3, b] $ for an interval $[a, b]$, and $(A_{1}\cup\ldots\cup A_{\ell})' := A_{1}'\cup\ldots\cup A_{\ell}'$, then $ C = \bigcap_{n\in\mathbb{N}}C_{n} $ with the inductive definition $ C_{1}:= [0, 1]' $ and $ C_{n+1} := C_{n}'. $ It is shown easily by induction that \begin{equation*} C_{n} = \{\sum_{i=1}^{\infty}a_{i}\cdot 3^{-i}\mid a_{i}\in\{0, 2\}\text{ for $i \leq n$ and }a_{i}\in\{0, 1, 2\}\text{ for }i>n\}.
\end{equation*} The representation above implies that the interior of $C$ is empty, so that $C$ is in fact nowhere dense in the unit interval. {\Large\ding{44}} \end{example} Cantor's ternary set is a helpful device in investigating the structure of complete metric spaces which have a countable dense subset, i.e., in Polish spaces. We will give now a game theoretic interpretation of spaces of the first category through a game which is attributed to Banach and Mazur\index{Banach-Mazur game}, tying the existence of a winning strategy for Demon to spaces of the first category. For simplicity, we discuss it for a closed interval of the real line. We do not assume that the game is determined; determinacy is not necessary here (and its assumption would bring us into serious difficulties with the assumption of the validity of the Axiom of Choice, see~\SetCite{Prop. 1.7.6}). Let a subset $S$ of a closed interval $L_{0}\subseteq \mathbb{R}$ be given; this set is assigned to Angel, its adversary Demon is assigned its complement $T := L_{0}\setminus S$. The game is played in this way\MMP[t]{Rules of the Banach-Mazur game}: \begin{itemize} \item Angel chooses a closed interval $L_{1}\subseteq L_{0}$, \item Demon reacts with choosing a closed interval $L_{2}\subseteq L_{1}$, \item Angel then chooses ---~knowing the moves $L_{1}$ and $L_{2}$~--- a closed interval $L_{3}\subseteq L_{2}$, \item and so on: Demon chooses the intervals with even numbers, Angel selects the intervals with the odd numbers, each interval is closed and contained in the previous one, both Angel and Demon have complete information about the game's history, when making a move. \end{itemize} Angel wins iff $\bigcap_{n\in\mathbb{N}}L_{n}\cap S \not=\emptyset$, otherwise Demon wins. We focus on Demon's behavior.
Its strategy for the $n$-th move is modelled as a map $f_{n}$ which is defined on $2\cdot n$-tuples $\langle L_{0}, \ldots, L_{2\cdot n-1}\rangle$ of closed intervals with $L_{0}\supseteq L_{1}\supseteq\ldots\supseteq L_{2\cdot n-1}$, taking a closed interval $L_{2\cdot n}$ as a value with \begin{equation*} L_{2\cdot n} = f_{n}(L_{0}, \ldots, L_{2\cdot n-1})\subseteq L_{2\cdot n-1}. \end{equation*} The sequence $\Folge{f}$ will be a \emph{winning strategy} for Demon iff $\bigcap_{n\in\mathbb{N}}L_{n}\subseteq T$, when $\Folge{L}$ is chosen according to these rules. The following theorem relates the existence of a winning strategy for Demon with $S$ being of first category. \BeginTheorem{baire-game} There exists a strategy for Demon to win iff $S$ is of the first category. \end{theorem} We divide the proof into two parts~---~we show first that we can find a strategy for Demon, if $S$ is of the first category. The converse is technically somewhat more complicated, so we delay it and do the necessary constructions first. \begin{proof} (First part) Assume that $S$ is of the first category, so that we can write $S = \bigcup_{n\in\mathbb{N}} S_{n}$ with $S_{n}$ nowhere dense for each $n\in\mathbb{N}$. Angel starts with a closed interval $L_{1}$, then Demon has to choose a closed interval $L_{2}$; the choice will be so that $L_{2}\subseteq L_{1}\setminus S_{1}$. We have to be sure that such a choice is possible; our assumption implies that $L_{1}\setminus \Closure{S}_{1}$ is open and dense in $L_{1}$, thus contains an open interval. In the inductive step, assume that Angel has chosen the closed interval $L_{2\cdot n-1}$ such that $L_{2\cdot n-1}\subseteq\ldots\subseteq L_{2}\subseteq L_{1}\subseteq L_{0}$. Then Demon will select an interval $L_{2\cdot n}\subseteq L_{2\cdot n-1}\setminus(S_{1}\cup \ldots \cup S_{n})$. For the same reason as above, the latter set contains an open interval. 
This constitutes Demon's strategy, and evidently $\bigcap_{n\in\mathbb{N}}L_{n}\cap S = \emptyset$, so Demon wins. \end{proof} The proof for the second part requires some technical constructions. We assume that $f_{n}$ assigns to each $2\cdot n$-tuple of closed intervals $I_{1}\supseteq I_{2}\supseteq\ldots\supseteq I_{2\cdot n}$ a closed interval $f_{n}(I_{1}, \ldots, I_{2\cdot n})\subseteq I_{2\cdot n}$, but do not make any further assumptions, for the time being, that is. We are given a closed interval $L_{0}$ and a subset $S\subseteq L_{0}$. In a first step we define a sequence $\Folge{J}$ of closed intervals with these properties: \begin{itemize} \item $J_{n}\subseteq L_{0}$ for all $n\in\mathbb{N}$, \item $K_{n} := f_{1}(L_{0}, J_{n})$ defines a sequence $\Folge{K}$ of mutually disjoint closed intervals, \item $\bigcup_{n\in\mathbb{N}}\Interior{K}_{n}$ is dense in $L_{0}$. \end{itemize} Let's see how to do this. Define ${\mathcal F}$ as the sequence of all closed intervals with rational endpoints that are contained in $\Interior{L}_{0}$. Take $J_{1}$ as the first element of ${\mathcal F}$. Put $K_{1} := f_{1}(L_{0}, J_{1})$, then $K_{1}$ is a closed interval with $K_{1}\subseteq J_{1}$ by assumption on $f_{1}$. Let $J_{2}$ be the first element in ${\mathcal F}$ which is contained in $L_{0}\setminus K_{1}$, put $K_{2} := f_{1}(L_{0}, J_{2})$. Inductively, select $J_{i+1}$ as the first element of ${\mathcal F}$ which is contained in $L_{0}\setminus\bigcup_{t=1}^{i}K_{t}$, and set $K_{i+1} := f_{1}(L_{0}, J_{i+1})$. It is clear from the construction that $\Folge{K}$ forms a sequence of mutually disjoint closed intervals with $K_{n}\subseteq J_{n}\subseteq L_{0}$ for each $n\in\mathbb{N}$. 
Assume that $\bigcup_{n\in\mathbb{N}}\Interior{K}_{n}$ is not dense in $L_{0}$, then we find $x\in L_{0}$ which is not contained in this union, hence we find an interval $T$ with rational endpoints which contains $x$ but $T\cap\bigcup_{n\in\mathbb{N}}\Interior{K}_{n}=\emptyset$. So $T$ occurs somewhere in ${\mathcal F}$, but it is never the first interval to be considered in the selection process. Since this is impossible, we arrive at a contradiction. We repeat this process for $\Interior{K}_{i}$ rather than $L_{0}$ for some $i$, hence we will define a sequence $(J_{i, n})_{n\in\mathbb{N}}$ of closed intervals $J_{i, n}$ with these properties: \begin{itemize} \item $J_{i, n}\subseteq \Interior{K}_{i}$ for all $n\in\mathbb{N}$, \item $K_{i, n} := f_{2}(L_{0}, J_{i}, K_{i}, J_{i, n})$ defines a sequence $(K_{i, n})_{n\in\mathbb{N}}$ of mutually disjoint closed intervals, \item $\bigcup_{n\in\mathbb{N}}\Interior{K}_{i, n}$ is dense in $K_{i}$. \end{itemize} It is immediate that $\bigcup_{i, j}\Interior{K}_{i, j}$ is dense in $L_{0}$. Continuing inductively, we find for each $\ell\in \mathbb{N}$ two families $J_{i_{1}, \ldots, i_{\ell}}$ and $K_{i_{1}, \ldots, i_{\ell}}$ of closed intervals with these properties \begin{itemize} \item $K_{i_{1}, \ldots, i_{\ell}} = f_{\ell}(L_{0}, J_{i_{1}}, K_{i_{1}}, J_{i_{1}, i_{2}}, K_{i_{1}, i_{2}}, \ldots, J_{i_{1}, \ldots, i_{\ell}})$, \item $J_{i_{1}, \ldots, i_{\ell+1}}\subseteq \Interior{K}_{i_{1}, \ldots, i_{\ell}}$, \item the intervals $(K_{i_{1}, \ldots, i_{\ell-1}, i_{\ell}})_{i_{\ell}\in \mathbb{N}}$ are mutually disjoint for each $i_{1}, \ldots, i_{\ell-1}$, \item $\bigcup\{\Interior{K}_{i_{1}, \ldots, i_{\ell-1}, i_{\ell}} \mid \langle i_{1}, \ldots, i_{\ell-1}, i_{\ell}\rangle\in\mathbb{N}^{\ell}\}$ is dense in $L_{0}$. \end{itemize} Note\MMP[t]{Relax NOW!} that this sequence depends on the chosen sequence $\Folge{f}$ of functions that represents the strategy for Demon. 
\begin{proof} (Second part) Now assume that Demon has a winning strategy $\Folge{f}$; hence no matter how Angel plays, Demon will win. For proving the assertion, we have to construct a sequence of nowhere dense subsets the union of which is $S$. In the first move, Angel chooses a closed interval $L_{1} := J_{i_{1}}\subseteq L_{0}$ (we refer here to the enumeration given by ${\mathcal F}$ above, so the interval chosen by Angel has index $i_{1}$). Demon's answer is then $$L_{2} := K_{i_{1}} := f_{1}(L_{0}, L_{1}) = f_{1}(L_{0}, J_{i_{1}}),$$ as constructed above. In the next step, Angel selects $L_{3} := J_{i_{1}, i_{2}}$ among those closed intervals which are eligible, i.e., which are contained in $\Interior{K}_{i_{1}}$ and have rational endpoints, Demon's countermove is $$L_{4} := K_{i_{1}, i_{2}} := f_{2}(L_{0}, L_{1}, L_{2}, L_{3}) = f_{2}(L_{0}, J_{i_{1}}, K_{i_{1}}, J_{i_{1}, i_{2}}).$$ In the $n$-th step, Angel selects $L_{2\cdot n-1} := J_{i_{1}, \ldots, i_{n}}$ and Demon selects $L_{2\cdot n} := K_{i_{1}, \ldots, i_{n}}$. Then we see that the sequence $L_{0}\supseteq L_{1}\ldots\supseteq L_{2\cdot n-1}\supseteq L_{2\cdot n} \ldots$ decreases and $L_{2\cdot n} = f_{n}(L_{0}, L_{1}, \ldots, L_{2\cdot n-1})$ holds, as required. Put $T := L_{0}\setminus S$ for convenience, then $\bigcap_{n\in\mathbb{N}} L_{n}\subseteq T$ by assumption (after all, we assume that Demon wins), put \begin{equation*} G_{n} := \bigcup_{\langle i_{1}, \ldots, i_{n}\rangle\in \mathbb{N}^{n}}\Interior{K}_{i_{1}, \ldots, i_{n}}. \end{equation*} Then $G_{n}$ is open. Let $E := \bigcap_{n\in\mathbb{N}}G_{n}$. Given $x\in E$, there exists a unique sequence $\Folge{i}$ such that $x\in K_{i_{1}, \ldots, i_{n}}$ for each $n\in\mathbb{N}$. Hence $x\in \bigcap_{n\in\mathbb{N}}L_{n}\subseteq T$, so that $E\subseteq T$. 
But then we can write $$S = L_{0}\setminus T\subseteq L_{0}\setminus E = \bigcup_{n\in\mathbb{N}}(L_{0}\setminus G_{n}).$$ Because $\bigcup\{\Interior{K}_{i_{1}, \ldots, i_{n-1}, i_{n}} \mid \langle i_{1}, \ldots, i_{n-1}, i_{n}\rangle\in\mathbb{N}^{n}\}$ is dense in $L_{0}$ for each $n\in\mathbb{N}$ by construction, we conclude that $L_{0}\setminus G_{n}$ is nowhere dense, so $S$ is of the first category. \end{proof} Games are an interesting tool for proofs, as we can see in this example; we have shown already that games may be used for other purposes, e.g., demonstrating that each subset of $[0, 1]$ is Lebesgue measurable under the Axiom of Determinacy~\SetCite{Section 1}. Further examples for using games to derive properties in a metric space can be found, e.g., in Kechris' book~\cite{Kechris}. \subsection{A Gallery of Spaces and Techniques} \label{sec:top-gallery} The discussion of the basic properties and techniques suggests that we now have a powerful collection of methods at our disposal. Indeed, we set up a small gallery of show cases, in which we demonstrate some approaches and methods. We first look at the use of topologies in logics from two different angles. The more conventional one is a direct application of the important Baire Theorem, which permits the construction of a model in a countable language of first order logic. Here the application of the theorem lies at the heart of the application, which is a proof of Gödel's Completeness Theorem. The other vantage point starts from a calculus of observations and develops the concept of topological systems from it, stressing an order theoretic point of view by perceiving topologies as complete Heyting algebras, when considering them as partially ordered subset of the power set of their carrier. Since partial orders may generate topologies on the set they are based on, this yields an interesting interplay between order and topology, which is reflected here in the Hofmann-Mislove Theorem. 
Then we return to the green pastures of classic applications and give a proof of the Stone-Weierstraß Theorem, one of the true classics. It states that a subring of the space of continuous functions on a compact Hausdorff space, which contains the constants, and which separates points is dense in the topology of uniform convergence. We actually give two proofs for this. One is based on a covering argument in a general space; it has a wide range of applications, of course. The second proof is no less interesting. It is essentially based on Weierstraß' original proof and deals with polynomials over $[0, 1]$ only; here concepts like elementary integration and uniform continuity are applied in a very concise and beautiful way. Finally, we deal with uniform spaces; they are a generalization of pseudometric spaces, but more specific than topological spaces. We argue that the central concept is closeness of points, which is, however, formulated in conceptual rather than quantitative terms. It is shown that many concepts which appear specific to the metric approach like uniform continuity or completeness may be carried into this context. Nevertheless, uniform spaces are topological spaces, but the assumption on having a uniformity available has some consequences for the associated topology. The reader probably misses Polish spaces in this little gallery. We deal with these spaces in depth, but since most of our applications of them are measure theoretic in nature, we deal with them in the context of a discussion of measures as a kind of natural habitat~\cite{EED-Meas}. \subsubsection{Gödel's Completeness Theorem} \label{sec:goedel} Gödel's Completeness Theorem states that a set of sentences of first order logic is consistent iff it has a model. The crucial part is the construction of a model for a consistent set of sentences. This is usually done through Henkin's approach, see, e.g.,~\cite[4.2]{Shoenfield}, \cite[Chapter 2]{Chang+Keisler} or~\cite[5.1]{Srivastava-Logic}. 
Rasiowa and Sikorski~\cite{Rasiowa-Sikorski-I} followed a completely different path in their topological proof by making use of Baire's Category Theorem and using the observation that in a compact topological space the intersection of a sequence of open and dense sets is dense again. The compact space is provided by the clopen sets of a Boolean algebra which in turn is constructed from the formulas of the first order language upon factoring. The equivalence relation is induced by the consistent set under consideration. We present the fundamental ideas of their proof in this section, since it is an unexpected application of a combination of the topological version of Stone's Representation Theorem for Boolean algebras and Baire's Theorem, hinted at already in Example~\ref{bool-alg-dense}. Since we assume that the reader is familiar with the semantics of first order languages, we do not want to motivate every definition for this area in detail, but we sketch the definitions, indicate the deduction rules, say what a model is, and rather focus on the construction of the model. The references given above may be used to fill in any gaps. A slightly informal description of the first order language $\ensuremath{{\filterFont L}}$ with identity with which we will be working is given first. For this, we assume that we have a countable set $\{x_{n}\mid n\in \mathbb{N}\}$ of variables and countably many constants. Moreover, we assume countably many function symbols and countably many predicate symbols. In particular, we have a binary relation $==$, the identity. Each function and each predicate symbol has a positive arity. These are the components of our language $\ensuremath{{\filterFont L}}$. \begin{description} \item[Terms.] A variable is a term and a constant symbol is a term. If $f$ is a function symbol of arity $n$, and $t_{1}, \ldots, t_{n}$ are terms, then $f(t_{1}, \ldots, t_{n})$ is a term. Nothing else is a term. \item[Atomic Formulas.] 
If $t_{1}$ and $t_{2}$ are terms, then $t_{1} == t_{2}$ is an atomic formula. If $p$ is a predicate symbol of arity $n$, and $t_{1}, \ldots, t_{n}$ are terms, then $p(t_{1}, \ldots, t_{n})$ is an atomic formula. \item[Formulas.] An atomic formula is a formula. If $\varphi$ and $\psi$ are formulas, then $\varphi\wedge\psi$ and $\neg\varphi$ are formulas. If $x$ is a variable and $\varphi$ is a formula, then $\forall x.\varphi$ is a formula. Nothing else is a formula. \end{description} Because there are countably many variables resp. constants, the language has countably many formulas. One usually adds parentheses to the logical symbols, but we do without, using them, however, freely, when necessary. We will use also disjunction [$\varphi\vee\psi$ abbreviates $\neg(\neg\varphi\wedge\neg\psi)$], implication [$\varphi\to \psi$ for $\neg\varphi\vee\psi$], logical equivalence [$\varphi\leftrightarrow\psi$ for $(\varphi\to \psi)\wedge(\psi\to \varphi)$] and existential quantification [$\exists x.\varphi$ for $\neg(\forall x.\neg\varphi)$]. Conjunction and disjunction are associative. We need logical axioms and inference rules as well. We have four groups of axioms: \begin{description} \item[Propositional Axioms.] Each propositional tautology is an axiom. \item[Identity Axioms.] $x==x$, when $x$ is a variable. \item[Equality Axioms.] $y_{1}==z_{1} \to \ldots\to y_{n}==z_{n}\to f(y_{1}, \ldots, y_{n}) == f(z_{1}, \ldots, z_{n})$, whenever $f$ is a function symbol of arity $n$, and $y_{1}==z_{1} \to \ldots\to y_{n}==z_{n}\to p(y_{1}, \ldots, y_{n}) \to p(z_{1}, \ldots, z_{n})$ for a predicate symbol of arity $n$. \item[Substitution Axiom.] If $\varphi$ is a formula, $\varphi_{x}[t]$ is obtained from $\varphi$ by freely substituting all free occurrences of variable $x$ by term $t$, then $\varphi_{x}[t]\to \exists x.\varphi$ is an axiom. \end{description} These are the inference rules. \begin{description} \item[Modus Ponens.] From $\varphi$ and $\varphi\to \psi$ infer $\psi$. 
\item[Generalization Rule.] From $\varphi$ infer $\forall x.\varphi$. \end{description} A \emph{\index{sentence}sentence} is a formula without free variables. Let $\Sigma$ be a set of sentences, $\varphi$ a formula, then we denote that $\varphi$ is deducible from $\Sigma$ by $\Sigma\vdash \varphi$\MMP{$\Sigma\vdash \varphi$}, i.e., iff there is a proof for $\varphi$ in $\Sigma$. $\Sigma$ is called \emph{inconsistent} iff $\Sigma\vdash \bot$, or, equivalently, iff each formula can be deduced from $\Sigma$. If $\Sigma$ is not inconsistent, then $\Sigma$ is called \emph{consistent} or a \emph{theory}. Fix a theory $T$, and define \begin{equation*} \isEquiv{\varphi}{\psi}{\sim} \text{ iff } T \vdash \varphi\leftrightarrow \psi \end{equation*} for formulas $\varphi$ and $\psi$, then this defines an equivalence relation on the set of all formulas. Let $\mathbf{B}_{T}$ be the set of all equivalence classes $[\varphi]$\MMP{$\textbf{B}_T$}, and define \begin{align*} [\varphi]\wedge[\psi] & := [\varphi\wedge\psi]\\ [\varphi]\vee[\psi] & := [\varphi\vee\psi]\\ -[\varphi] & := [\neg\varphi]. \end{align*} This defines a Boolean algebra structure on $\mathbf{B}_{T}$, the \emph{Lindenbaum \index{algebra!Lindenbaum}algebra} of $T$. The maximal element $\top$ of $\mathbf{B}_{T}$\MMP{Lindenbaum algebra} is $\{\varphi\mid T\vdash \varphi\}$, its minimal element $\bot$ is $\{\varphi\mid T\vdash \neg \varphi\}$. The proof that $\mathbf{B}_{T}$ is a Boolean algebra follows the lines of~\SetCite{1.5.7} closely, hence it can be safely omitted. It might be noted, however, that the individual steps in the proof require additional properties of $\vdash$, for example, one has to show that $T\vdash \varphi$ and $T\vdash \psi$ together imply $T\vdash \varphi\wedge\psi$. We trust that the reader is in a position to recognize and accomplish this; \cite[Chapter 4]{Srivastava-Logic} provides a comprehensive catalog of useful derivation rules with their proofs. 
Let $\varphi$ be a formula, then denote by $\varphi(k/p)$ the formula obtained in this way: \begin{itemize} \item all bound occurrences of $x_{p}$ are replaced by $x_{\ell}$, where $x_{\ell}$ is the first variable among $x_{1}, x_{2}, \ldots$ which does not occur in $\varphi$, \item all free occurrences of $x_{k}$ are replaced by $x_{p}$. \end{itemize} This construction is dependent on the integer $\ell$, so the formula $\varphi(k/p)$ is not uniquely determined, but its class is. We have these representations in the Lindenbaum algebra for existentially resp. universally quantified formulas. \BeginLemma{repr-sup-in-lindenbaum} Let $\varphi$ be a formula in $\ensuremath{{\filterFont L}}$, then we have for every $k\in \mathbb{N}$ \begin{enumerate} \item\label{repr-sup-in-1} $\sup_{p\in\mathbb{N}}[\varphi(k/p)] = [\exists x_{k}.\varphi]$, \item\label{repr-sup-in-2} $\inf_{p\in\mathbb{N}}[\varphi(k/p)] = [\forall x_{k}.\varphi]$. \end{enumerate} \end{lemma} \begin{proof} 1. Fix $k\in\mathbb{N}$, then we have $T\vdash \varphi(k/p)\to \exists x_{k}.\varphi$ for each $p\in \mathbb{N}$ by the $\exists$ introduction rule. This implies $[\varphi(k/p)]\leq [\exists x_{k}.\varphi]$ for all $p\in \mathbb{N}$, hence $\sup_{p\in\mathbb{N}}[\varphi(k/p)] \leq [\exists x_{k}.\varphi]$, thus $[\exists x_{k}.\varphi]$ is an upper bound to $\{[\varphi(k/p)]\mid p\in \mathbb{N}\}$ in the Lindenbaum algebra. We have to show that it is also the least upper bound, so take a formula $\psi$ such that $[\varphi(k/p)]\leq[\psi]$ for all $p\in \mathbb{N}$. Let $q$ be an index such that $x_{q}$ does not occur free in $\psi$, then we conclude from $T\vdash \varphi(k/p)\to \psi$ for all $p$ that $T\vdash \exists x_{q}.\varphi(k/q)\to \psi$. But $T\vdash \exists x_{k}.\varphi \leftrightarrow \exists x_{q}.\varphi(k/q)$, hence $T\vdash \exists x_{k}.\varphi \to \psi$. This means that $[\exists x_{k}.\varphi]$ is the least upper bound to $\{[\varphi(k/p)]\mid p\in \mathbb{N}\}$, proving the first equality. 
2. The second equality is established in a very similar way. \end{proof} These representations motivate \BeginDefinition{preserve-sups-and-infs} Let $\ensuremath{{\filterFont F}}$ be an ultrafilter on the Lindenbaum algebra $\mathbf{B}_{T}$, $S\subseteq \mathbf{B}_{T}$. \begin{enumerate} \item $\ensuremath{{\filterFont F}}$ \emph{preserves the supremum of $S$} iff $\sup S\in \ensuremath{{\filterFont F}} \Leftrightarrow s\in \ensuremath{{\filterFont F}}$ for some $s\in S$. \item $\ensuremath{{\filterFont F}}$ \emph{preserves the infimum of $S$} iff $\inf S\in \ensuremath{{\filterFont F}} \Leftrightarrow s\in \ensuremath{{\filterFont F}}$ for all $s\in S$. \end{enumerate} \end{definition} Preserving the supremum of a set is similar to being inaccessible by joins (see Definition~\ref{scott-open-gen}), but inaccessibility refers to directed sets, while we are not making any assumption on $S$, except, of course, that its supremum exists in the Boolean algebra. Note also that one of the characteristic properties of an ultrafilters is that the join of two elements is in the ultrafilter iff it contains at least one of them. Preserving the supremum of a set strengthens this property \emph{for this particular set only}. The de Morgan laws and $\ensuremath{{\filterFont F}}$ being an ultrafilter make it clear that $\ensuremath{{\filterFont F}}$ preserves $\inf S$ iff it preserves $\sup \{-s\mid s\in S\}$, resp. that $\ensuremath{{\filterFont F}}$ preserves $\sup S$ iff it preserves $\inf\{-s\mid s\in S\}$. This cuts our work in half. \BeginProposition{preserving-ultrafilter-exists} Let $\Folge{S}$ be a sequence of subsets $S_{n}\subseteq \mathbf{B}_{T}$ such that $\sup S_{n}$ exists in $\mathbf{B}_{T}$. Then there exists an ultrafilter $\ensuremath{{\filterFont F}}$ such that $\ensuremath{{\filterFont F}}$ preserves the supremum of $S_{n}$ for all $n\in\mathbb{N}$. 
\end{proposition} \begin{proof} This is an application of Baire's Category Theorem~\ref{baire-locally-compact} and is discussed in Example~\ref{bool-alg-dense}. We find there a prime ideal which does not preserve the supremum for $S_{n}$ for all $n\in\mathbb{N}$. Since the complement of a prime ideal in a Boolean algebra is an ultrafilter~\SetCite{Lemma 1.5.38, Lemma 1.5.37}, the assertion follows. \end{proof} So much for the syntactic side of our language $\ensuremath{{\filterFont L}}$. We will leave the ultrafilter $\ensuremath{{\filterFont F}}$ alone for a little while and turn to the semantics of the logic. An \emph{interpretation} of $\ensuremath{{\filterFont L}}$ is given by a carrier set $A$, each constant $c$ is interpreted through an element $c_{A}$ of $A$, each function symbol $f$ with arity $n$ is assigned a map $f_{A}: A^{n}\to A$, and each $n$-ary predicate $p$ is interpreted through an $n$-ary relation $p_{A}\subseteq A^{n}$; finally, the binary predicate $==$ is interpreted through equality on $A$. We also fix a sequence $\{w_{n}\mid n\in\mathbb{N}\}$ of elements of $A$ for the interpretation of variables, set ${\mathcal A} := (A, \{w_{n}\mid n\in\mathbb{N}\})$, and call ${\mathcal A}$ a \emph{\index{model}model} for the first order language. We then proceed inductively: \begin{description} \item[Terms.] Variable $x_{i}$ is interpreted by $w_{i}$. Assume that the term $f(t_{1}, \ldots, t_{n})$ is given. If the terms $t_{1}, \ldots, t_{n}$\MMP{${\mathcal A}\models \varphi$} are interpreted, through the respective elements $t_{A, 1}, \ldots, t_{A, n}$ of $A$, then $f(t_{1}, \ldots, t_{n})$ is interpreted through $f_{A}(t_{A, 1}, \ldots, t_{A, n})$. \item[Atomic Formulas.] The atomic formula $t_{1} == t_{2}$ is interpreted through $t_{A, 1} = t_{A, 2}$. If the $n$-ary predicate $p$ is assigned $p_{A}\subseteq A^{n}$, then $p(t_{1}, \ldots, t_{n})$ is interpreted as $\langle t_{A, 1}, \ldots, t_{A, n}\rangle\in p_{A}$. 
We denote by ${\mathcal A}\models \varphi$ that the interpretation of the atomic formula $\varphi$ yields the value true. We say that $\varphi$ holds in ${\mathcal A}$. \item[Formulas.] Let $\varphi$ and $\psi$ be formulas, then ${\mathcal A}\models \varphi\wedge\psi$ iff ${\mathcal A}\models \varphi$ and ${\mathcal A}\models \psi$, and ${\mathcal A}\models \neg\varphi$ iff ${\mathcal A}\models \varphi$ is false. Let $\varphi$ be the formula $\forall x_{i}. \psi$, then ${\mathcal A}\models \varphi$ iff ${\mathcal A}\models \psi_{x_{i}\mid a}$ for every $a\in A$, where $\psi_{x\mid a}$ is the formula $\psi$ with each free occurrence of $x$ replaced by $a$. \end{description} Construct the ultrafilter $\ensuremath{{\filterFont F}}$ constructed in Proposition~\ref{preserving-ultrafilter-exists} for all possible suprema arising from existentially quantified formulas according to Lemma~\ref{repr-sup-in-lindenbaum}. There are countably many suprema, because the number of formulas is countable. This ultrafilter and the Lindenbaum algebra $\mathbf{B}_{T}$ will be used now for the construction of a \emph{\index{model}model\MMP{Model}} ${\mathcal A}$ for $T$ (so that ${\mathcal A}\models \varphi$ holds for all $\varphi\in T$). We will first need to define the carrier set $A$. Define for the variables $x_{i}$ and $x_{j}$ the equivalence relation $\approx$ through $\isEquiv{x_{i}}{x_{j}}{\approx}$ iff $[x_{i}==x_{j}]\in \ensuremath{{\filterFont F}}$; denote by $\hat{x}_{i}$ the $\approx$-equivalence class of $x_{i}$. The carrier set $A$ is defined as $\{\hat{x}_{n}\mid n\in\mathbb{N}\}$. Let us take care of the constants now. Given a constant $c$, we know that $\vdash \exists x_{i}.c==x_{i}$ by substitution. Thus $[\exists x_{i}.c==x_{i}] = \top\in \ensuremath{{\filterFont F}}$. 
But $[\exists x_{i}.c==x_{i}] = \sup_{i\in \mathbb{N}} [c==x_{i}]$, and $\ensuremath{{\filterFont F}}$ preserves suprema, so we conclude that there exists $i$ with $[c==x_{i}]\in \ensuremath{{\filterFont F}}$. We pick this $i$ and define $c_{A} := \hat{x}_{i}$. Note that it does not matter which $i$ to choose. Assume that there is more than one. Since $[c==x_{i}]\in \ensuremath{{\filterFont F}}$ and $[c==x_{j}]\in \ensuremath{{\filterFont F}}$ implies $[c==x_{i}\wedge c==x_{j}]\in \ensuremath{{\filterFont F}}$, we obtain $[x_{i}==x_{j}]\in \ensuremath{{\filterFont F}}$, so the class is well defined. Coming to terms, let $t$ be a variable or a constant, so that it has an interpretation already, and assume that $f$ is a unary function. Then $\vdash \exists x_{i}.f(t) == x_{i}$, so that $[\exists x_{i}.f(t) == x_{i}]\in \ensuremath{{\filterFont F}}$, hence there exists $i$ such that $[f(t)==x_{i}]\in \ensuremath{{\filterFont F}}$, then put $f_{A}(t_{A}) := \hat{x}_{i}$. Again, if $[f(t) == x_{i}]\in \ensuremath{{\filterFont F}}$ and $[f(t) == x_{j}]\in\ensuremath{{\filterFont F}}$, then $[x_{i}==x_{j}]\in \ensuremath{{\filterFont F}}$, so that $f_{A}(t_{A})$ is well defined. The argument for the general case is very similar. Assume that terms $t_{1}, \ldots, t_{n}$ have their interpretations already, and $f$ is a function with arity $n$, then $\vdash \exists x_{i}.f(t_{1}, \ldots, t_{n}) == x_{i}$, hence we find $j$ with $[f(t_{1}, \ldots, t_{n}) == x_{j}]\in \ensuremath{{\filterFont F}}$, so put $f_{A}(t_{A, 1}, \ldots, t_{A, n}) := \hat{x}_{j}$. The same argument as above shows that this is well defined. Having defined the interpretation $t_{A}$ for each term $t$, we define for the $n$-ary relation symbol $p$ the relation $p_{A}\subseteq A^{n}$ by \begin{equation*} \langle t_{A, 1}, \ldots, t_{A, n}\rangle \in p_{A}\Leftrightarrow [p(t_{1}, \ldots, t_{n})]\in\ensuremath{{\filterFont F}}. \end{equation*} Then $p_{A}$ is well defined by the equality axioms. 
Thus ${\mathcal A}\models \varphi$ is defined for each formula $\varphi$, hence we know how to interpret each formula in terms of the Lindenbaum algebra of $T$ (and the ultrafilter $\ensuremath{{\filterFont F}}$). We can show now that a formula is valid in this model iff its class is contained in ultrafilter $\ensuremath{{\filterFont F}}$. \BeginProposition{valid-iff-in-filter} ${\mathcal A}\models \varphi$ iff $[\varphi]\in \ensuremath{{\filterFont F}}$ holds for each formula $\varphi$ of $\ensuremath{{\filterFont L}}$. \end{proposition} \begin{proof} The proof is done by induction on the structure of formula $\varphi$ and is straightforward, using the properties of an ultrafilter. For example, \begin{align*} {\mathcal A}\models \varphi\wedge\psi & \Leftrightarrow {\mathcal A}\models \varphi \text{ and } {\mathcal A}\models \psi&\text{ (definition)} \\ & \Leftrightarrow [\varphi]\in\ensuremath{{\filterFont F}}\text{ and }[\psi]\in\ensuremath{{\filterFont F}}&\text{ (induction hypothesis)}\\ &\Leftrightarrow [\varphi\wedge\psi]\in\ensuremath{{\filterFont F}}&\text{ ($\ensuremath{{\filterFont F}}$ is an ultrafilter)} \end{align*} For establishing the equivalence for universally quantified formulas $\forall x_{i}.\psi$, assume that $x_{i}$ is a free variable in $\psi$ such that ${\mathcal A}\models \psi_{x_{i}\mid a} \Leftrightarrow [\psi_{x_{i}\mid a}]\in\ensuremath{{\filterFont F}}$ has been established for all $a\in A$. 
Then \begin{align*} {\mathcal A}\models \forall x_{i}.\psi &\Leftrightarrow {\mathcal A}\models \psi_{x_{i}\mid a}\text{ for all }a\in A&\text{ (definition)}\\ &\Leftrightarrow [\psi_{x_{i}\mid a}]\in\ensuremath{{\filterFont F}}\text{ for all }a\in A&\text{ (induction hypothesis)}\\ &\Leftrightarrow \inf_{a\in A}[\psi_{x_{i}\mid a}]\in\ensuremath{{\filterFont F}}&\text{ ($\ensuremath{{\filterFont F}}$ preserves the infimum)}\\ &\Leftrightarrow [\forall x_{i}.\psi]\in\ensuremath{{\filterFont F}}&\text{ (by Lemma~\ref{repr-sup-in-lindenbaum})} \end{align*} This completes the proof. \end{proof} As a consequence, we have established this version of Gödel's Completeness Theorem: \BeginCorollary{a-is-a-model-for} ${\mathcal A}$ is a model for the consistent set $T$ of formulas. \QED \end{corollary} This approach demonstrates how a topological argument is used at the center of a construction in logic. It should be noted, however, that the argument is only effective since the universe in which we work is countable. This is so because the Baire Theorem, which enables the construction of the ultrafilter, works for a countable family of open and dense sets. If, however, we work in an uncountable language $\ensuremath{{\filterFont L}}$, this instrument is no longer available (\cite[Exercise 2.1.24]{Chang+Keisler} points to a possible generalization). But even in the countable case one cannot help but note that the construction above depends on the Axiom of Choice, because we require an ultrafilter. The approach in~\cite[Exercise 2.1.22]{Chang+Keisler} resp.~\cite[Theorem 2.21]{Koppelberg} points to the construction of a filter without the help of a topology, but, alas, this filter is extended to an ultrafilter, and here the dreaded axiom is needed again. \subsubsection{Topological Systems or: Topology Via Logic} \label{sec:topology-via-logic} This section investigates topological systems. 
They abstract from topologies being sets of subsets and focus on the order structure imposed by a topology instead. We focus on the interplay between a topology and the base space by considering these objects separately. A topology is considered a complete Heyting algebra, the carrier set is, well, a set of points, both are related through a validity relation $\models $ which mimics the $\in$-relation between a set and its elements. This leads to the definition of a topological system, and the question is whether this separation really bears fruit. It does; for example we may replace the point set by the morphisms from the Heyting algebra to the two element algebras $2\!\!2$, giving sober spaces, and we show that, e.g., a Hausdorff space is isomorphic to such a structure. The interplay of the order structure of a topology and its topological obligations will be investigated through the Scott topology on a dcpo, a directed complete partial order, leading to the Hofmann-Mislove Theorem which characterizes compact sets that are represented as the intersection of the open sets containing them in terms of Scott open filters. Before we enter into a technical discussion, however, we put the following definitions on record. \BeginDefinition{complete-heyting-algebra} A partially ordered set $P$ is called a \emph{complete Heyting algebra}\index{algebra!Heyting} iff \begin{enumerate} \item each finite subset $S$ has a meet $\bigwedge S$, \item each subset $S$ has a join $\bigvee S$, \item finite meets distribute over arbitrary joins, i.e., \begin{equation*} a\wedge\bigvee S = \bigvee\{a\wedge s \mid s\in S\} \end{equation*} holds for $a\in P, S\subseteq P$. 
\end{enumerate} A \emph{\index{algebra!Heyting!morphism}morphism} $f$ between the complete Heyting algebras $P$ and $Q$ is a map $f: P\to Q$ such that \begin{enumerate} \item $f(\bigwedge S) = \bigwedge \Bild{f}{S}$ holds for finite $S\subseteq P$, \item $f(\bigvee S) = \bigvee \Bild{f}{S}$ holds for arbitrary $S\subseteq P$. \end{enumerate} $\|P, Q\|$\MMP{$\|P, Q\|$} denotes the set of all morphisms $P\to Q$. \end{definition} The definition of a complete Heyting algebra is a bit redundant, but never mind. Because the join and the meet of the empty set is a member of such an algebra, it contains a smallest element $\bot$ and a largest element $\top$, and $f(\bot) = \bot$ and $f(\top) = \top$ follows. A topology is a complete Heyting algebra with inclusion as the partial order~\SetCite{Exercise 29}. Sometimes, complete Heyting algebras are called \emph{\index{frame}frames}; but since the structure underlying the interpretation of modal logics are also called frames, we stick here to the longer name. \BeginExample{compl-heyting-alg} Call a lattice $V$ pseudo-complemented iff given $a, b\in V$, there exists $c\in V$ such that $x\leq c$ iff $x\wedge a \leq b$; $c$ is usually denoted by $a\to b$. A complete Heyting algebra is pseudo-complemented. In fact, let $c := \bigvee\{x \in V\mid x\wedge a \leq b\}$, then \begin{equation*} c\wedge a = \bigvee\{x\in V\mid x\wedge a \leq b\}\wedge a = \bigvee\{x\wedge a\in V\mid x\wedge a \leq b\}\leq b \end{equation*} by the general distributive law, hence $x \leq c$ implies $x\wedge a \leq b$. Conversely, if $x\wedge a \leq b$, then $x\leq c$ follows. {\Large\ding{44}} \end{example} \BeginExample{compl-heyting-alg-foll} Assume that we have a complete lattice $V$ which is pseudo-complemented. Then the lattice satisfies the general distributive law. 
In fact, given $a\in V$ and $S\subseteq V$, we have $s\wedge a \leq \bigvee\{a\wedge b\mid b\in S\}$, thus $s \leq a\to \bigvee\{a\wedge b\mid b\in S\}$ for all $s\in S$, from which we obtain $\bigvee S \leq a\to\bigvee\{a\wedge b\mid b\in S\}$, which in turn gives $a\wedge \bigvee S \leq \bigvee\{a\wedge b\mid b\in S\}$. On the other hand, $\bigvee \{a\wedge b \mid b\in S\}\leq \bigvee S$, and $\bigvee \{a\wedge b \mid b\in S\}\leq a$, so that we obtain $\bigvee\{a\wedge b \mid b\in S\}\leq a\wedge \bigvee S$. {\Large\ding{44}} \end{example} We note \BeginCorollary{cha-is-lattice} A complete Heyting algebra is a complete distributive lattice. \QED \end{corollary} Quite apart from investigating what can be said if open sets are replaced by an element of a complete Heyting algebra, and thus focussing on the order structure, one can argue as follows. Suppose we have observers and events, say, $X$ is the set of observers, and $A$ is the set of events. The observers are not assumed to have any structure, the events have a partial order making them a distributive lattice; an observation may be incomplete, so $a\leq b$ indicates that observing event $b$ contains more information than observing event $a$. If observer $x\in X$ observes event $a\in A$, we denote this as $x\models a$. The lattice structure should be compatible with the observations, that is, we want to have for $S\subseteq A$ that \begin{align*} x\models \bigwedge S & \text{ iff }x\models a\text{ for all }a\in S, S\text{ finite},\\ x \models \bigvee S & \text{ iff }x\models a\text{ for some }a\in S, S\text{ arbitrary}. \end{align*} (recall $\bigwedge \emptyset = \top$ and $\bigvee\emptyset = \bot$). Thus our observations should be closed under finite conjunctions and arbitrary disjunctions; replacing disjunctions by unions and conjunctions by intersections, this shows a somewhat topological face. 
We define accordingly \BeginDefinition{topol-syst} A \emph{\index{topological system}topological system} $(\pTX, \varnothingMX, \models)$ has a set $\pTX$ of points, a complete Heyting algebra $\varnothingMX$ of observations, and a satisfaction relation $\models\ \subseteq\ \pTX\times\varnothingMX$ (written as $x\models a$ for $\langle x, a\rangle\in\ \models$) such that we have for all $x\in \pTX$ \begin{itemize} \item If $S\subseteq \varnothingMX$ is finite, then $x\models \bigwedge S$ iff $x\models a$ for all $a\in S$. \item For $S\subseteq \varnothingMX$ arbitrary, $x\models \bigvee S$ iff $x\models a$ for some $a\in S$. \end{itemize} The elements of $\pTX$ are called \emph{\index{topological system!points}points}, the elements of $\varnothingMX$\MMP{$\pTX, \varnothingMX$} are called \emph{\index{topological system!opens}opens}. \end{definition} We will denote a topological system $X = (\pTX,\varnothingMX)$ usually without writing down the satisfaction relation, which is either explicitly defined or understood from the context. \BeginExample{top-system-example} \begin{enumerate} \item\label{top-system-example-1} The obvious example for a topological system $D$ is a topological space $(X, \tau)$ with $\pTX[D] := X$ and $\varnothingMX[D] := \tau$, ordered through inclusion. The satisfaction relation $\models $ is given by the containment relation $\in$, so that we have $x\models G$ iff $x\in G$ for $x\in \pTX[D]$ and $G\in\varnothingMX[D]$. \item\label{top-system-example-2} But it works the other way around as well. Given a topological system $X$, define for the open $a\in\varnothingMX$ its \emph{\index{topological system!opens!extension}extension}\MMP{Extension $\eXT[\cdot]$} \begin{equation*} \eXT := \{x\in \pTX\mid x\models a\}. \end{equation*} Then $\tau := \{\eXT\mid a\in \varnothingMX\}$ is a topology on $\pTX$. 
In fact, $\emptyset = \eXT[\bot]$, $\pTX =\eXT[\top]$, and if $S\subseteq \tau$ is finite, say, $S = \{\eXT[a_{1}], \ldots, \eXT[a_{n}]\}$, then $\bigcap S = \eXT[\bigwedge_{i=1}^{n}a_{i}]$. Similarly, if $S = \{\eXT[a_{i}]\mid i\in I\} \subseteq \tau$ is an arbitrary subset of $\tau$, then $\bigcup S = \eXT[\bigvee_{i\in I}a_{i}]$. This follows directly from the laws of a topological system. \item\label{top-system-example-3} Put $2\!\!2\index{2\negthickspace2} := \{\bot, \top\}$, then this is a complete Heyting algebra\MMP{$2\!\!2$}. Let $\varnothingMX := A$ be another complete Heyting algebra, and put $\pTX := \|\varnothingMX, 2\!\!2\|$ defining $x\models a$ iff $x(a) = \top$ then yields a topological system. Thus a point in this topological system is a morphism $\varnothingMX\to 2\!\!2$, and a point satisfies the open $a$ iff it assigns $\top$ to it. \end{enumerate} {\Large\ding{44}} \end{example} Next, we want to define morphisms between topological systems. Before we do that, we have another look at topological spaces and continuous maps. Recall that a map $f: X\to Y$ between topological spaces $(X, \tau)$ and $(Y, \vartheta)$ is $\tau$-$\vartheta$-continuous iff $\InvBild{f}{H}\in\tau$ for all $H\in\vartheta$. Thus $f$ spawns a map $f^{-1}: \vartheta\to \tau$~---~note the opposite direction. We have $x\in \InvBild{f}{H}$ iff $f(x)\in H$, accounting for containment. This leads to the definition of a morphism as a pair of maps, one working in the opposite direction of the other one, such that the satisfaction relation is maintained, formally: \BeginDefinition{def-morph-top-syst} Let $X$ and $Y$ be topological systems. Then $f: X\to Y$ is a \emph{\index{topological system!c-morphism}c-morphism} iff \begin{enumerate} \item $f$ is a pair of maps $f = (\pTf, \varnothingMf)$ with $\pTf: \pTX\to \pTX[Y]$, and $\varnothingMf\in \|\varnothingMX[Y], \varnothingMX\|$ is a morphism for the underlying algebras. 
\item $\pTf(x)\models_{Y} b$ iff $x\models_{X} \varnothingMf(b)$ for all $x\in \pTX$ and all $b\in \varnothingMX[Y]$.\MMP{$\pTf, \varnothingMf$} \end{enumerate} \end{definition} We have indicated above for the reader's convenience in which system the satisfaction relation is considered. It is evident that the notion of continuity is copied from topological spaces, taking the slightly different scenario into account. \BeginExample{cont-morph-example} Let $X$ and $Y$ be topological systems with $f: X\to Y$ a c-morphism. Let $(\pTX, \tau_{\pTX})$ and $(\pTX[Y], \tau_{\pTX[Y]})$ be the topological spaces generated from these systems through the extent of the respective opens, as in Example~\ref{top-system-example}, part~\ref{top-system-example-2}. Then $\pTf: \pTX\to \pTX[Y]$ is $\tau_{\pTX}$-$\tau_{\pTX[Y]}$-continuous. In fact, let $b\in\varnothingMX[Y]$, then \begin{align*} x\in \InvBild{(\pTf)}{\eXT[b]} & \Leftrightarrow \pTf(x)\in\eXT[b]\\ & \Leftrightarrow \pTf(x)\models b\\ & \Leftrightarrow x\models \varnothingMf(b), \end{align*} thus $\InvBild{(\pTf)}{\eXT[b]} = \eXT[\varnothingMf(b)]\in\tau_{\pTX}$. {\Large\ding{44}} \end{example} This shows that continuity of topological spaces is a special case of c-morphisms between topological systems, in the same way as topological spaces are special cases of topological systems. Let $f: X\to Y$ and $g: Y\to Z$ be c-morphisms, then their composition is defined as $g\circ f := (\pTf[g]\circ \pTf, \varnothingMf\circ \varnothingMf[g])$. The identity $id_{X}:X\to X$ is defined through $id_{X} := (id_{\pTX}, id_{\varnothingMX})$. If, given the c-morphism $f: X\to Y$, there is a c-morphism $g: Y\to X$ with $g\circ f = id_{X}$ and $f\circ g = id_{Y}$, then $f$ is called a \emph{\index{topological system!homeomorphism}homeomorphism}. \BeginCorollary{top-syst-form-category} Topological systems form a category $\Category{TS}$, the objects of which are topological systems, with c-morphisms as morphisms. 
\QED \end{corollary} Given a topological system $X$, the topological space $(\pTX, \tau_{\pTX})$ with $\tau_{\pTX} := \bigl\{\eXT\mid a\in \varnothingMX\bigr\}$ is called the \emph{\index{topological system!spatialization}spatialization} of $X$ and denoted by $\FunctorSenza{S}P(X)$. We want to make $\FunctorSenza{S}P$ a (covariant) functor $\Category{TS}\to \Category{Top}$, the latter one denoting the category of topological spaces with continuous maps as morphisms\MMP{$\Category{TS}, \Category{Top}, \FunctorSenza{S}P$}. Thus we have to define the image $\FunctorSenza{S}P(f)$ of a c-morphism $f: X\to Y$. But this is fairly straightforward, since we have shown in Example~\ref{cont-morph-example} that $f$ induces a continuous map $(\pTX, \tau_{\pTX})\to (\pTX[Y], \tau_{\pTX[Y]})$. It is clear now that $\FunctorSenza{S}P: \Category{TS}\to \Category{Top}$ is a covariant functor. On the other hand, part~\ref{top-system-example-1} of Example~\ref{top-system-example} shows that we have a forgetful functor $\FunctorSenza{V}: \Category{Top}\to \Category{TS}$ with $\FunctorSenza{V}(X, \tau) := (\pTX, \varnothingMX)$ with $\pTX := X$ and $\varnothingMX := \tau$, and $\FunctorSenza{V}(f) := (f, f^{-1})$. These functors are related. \BeginProposition{sp-is-right-adjoint-to-forgetful} $\FunctorSenza{S}P$ is right adjoint to $\FunctorSenza{V}$. \end{proposition} \begin{proof} 0. 
Given a topological space $X$ and a topological system $A$ we have to find a bijection $\varphi_{X, A}: \hom{\Category{TS}}(\FunctorSenza{V}(X), A)\to \hom{\Category{Top}}(X, \FunctorSenza{S}P(A))$ rendering these diagrams commutative: \begin{equation*} \xymatrix{ \hom{\Category{TS}}(\FunctorSenza{V}(X), A)\ar[d]_{F_{*}}\ar[rr]^{\varphi_{X, A}} &&\hom{\Category{Top}}(X, \FunctorSenza{S}P(A))\ar[d]^{(\FunctorSenza{S}P(F))_{*}} \\ \hom{\Category{TS}}(\FunctorSenza{V}(X), B)\ar[rr]_{\varphi_{X, B}} &&\hom{\Category{Top}}(X, \FunctorSenza{S}P(B)) } \end{equation*} and \begin{equation*} \xymatrix{ \hom{\Category{TS}}(\FunctorSenza{V}(X), A)\ar[d]_{(\FunctorSenza{V}(G))^{*}}\ar[rr]^{\varphi_{X, A}} &&\hom{\Category{Top}}(X, \FunctorSenza{S}P(A))\ar[d]^{G^{*}} \\ \hom{\Category{TS}}(\FunctorSenza{V}(Y), A)\ar[rr]_{\varphi_{Y, A}} &&\hom{\Category{Top}}(Y, \FunctorSenza{S}P(A)) } \end{equation*} where $F_{*} := \hom{\Category{TS}}(\FunctorSenza{V}(X), F): f\mapsto F\circ f$ for $F: A\to B$ in $\Category{TS}$, and $G^{*} := \hom{\Category{Top}}(G, \FunctorSenza{S}P(A)): g\mapsto g\circ G$ for $G: Y\to X$ in $\Category{Top}$, see~\CategCite{Section 1.5}. We define $\varphi_{X, A}(\pTf, \varnothingMf) := \pTf$, hence we focus on the component of a c-morphism which maps points to points. 1. Let us work on the first diagram. Take $f = (\pTf, \varnothingMf): \FunctorSenza{V}(X)\to A$ as a morphism in $\Category{TS}$, and let $F: A\to B$ be a c-morphism, $F = (\pTf[F], \varnothingMf[F])$, then $\varphi_{X, B}(F_{*}(f)) = \varphi_{X, B}(F\circ f) = \pTf[F]\circ \pTf$, and $(\FunctorSenza{S}P(F))_{*}(\varphi_{X, A}(f)) = \FunctorSenza{S}P(F) \circ \pTf = \pTf[F]\circ \pTf$. 2. Similarly, chasing $f$ through the second diagram for some continuous map $G: Y\to X$ yields \begin{align*} \varphi_{Y, A}((\FunctorSenza{V}(G))^{*}(f)) & = \varphi_{Y, A}((\pTf, \varnothingMf)\circ (G, G^{-1}))\\ & = \pTf\circ G \\ & = G^{*}(\pTf)\\ & = G^{*}(\varphi_{X, A}(f)). 
\end{align*} This completes the proof. \end{proof} Constructing $\FunctorSenza{S}P$, we went from a topological space to its associated topological system by exploiting the observation that a topology $\tau$ is a complete Heyting algebra. But we can travel in the other direction as well, as we will show now. Given a complete Heyting algebra $A$, we take the elements of $A$ as opens, and take all morphisms in $\|A, 2\!\!2\|$ as points, defining the relation $\models $ which connects the components through \begin{equation*} x\models a\Leftrightarrow x(a) = \top. \end{equation*} This construction was announced already in Example~\ref{top-system-example}, part~\ref{top-system-example-3}. In order to extract a functor from this construction, we have to cater for morphisms. In fact, let $\psi\in\|B, A\|$ be a morphism $B\to A$ of the complete Heyting algebras $B$ and $A$, and $p\in\|A, 2\!\!2\|$ a point of $A$, then $p\circ \psi\in\|B, 2\!\!2\|$ is a point in $B$. Let $\Category{C}ha$ be the category of all complete Heyting algebras with $\hom{\Category{C}ha}(A, B) := \|B, A\|$, then we define the functor $\FunctorSenza{Loc}: \Category{C}ha\to \Category{TS}$ through $\FunctorSenza{Loc}(A) := (\|A, 2\!\!2\|, A)$, and $\FunctorSenza{Loc}(\psi) := (\psi_{*}, \psi)$ for $\psi\in\hom{\Category{C}ha}(A, B)$ with $\psi_{*}(p) := p\circ \psi$. Thus $\FunctorSenza{Loc}(\psi): \FunctorSenza{Loc}(A)\to \FunctorSenza{Loc}(B)$, if $\psi: A\to B$ in $\Category{C}ha$. In fact, let $f := \FunctorSenza{Loc}(\psi)$, and $p\in\|A, 2\!\!2\|$ a point in $\FunctorSenza{Loc}(A)$, then we obtain for $b\in B$\MMP{$\Category{C}ha, \FunctorSenza{Loc}$} \begin{align*} \pTf(p) \models b & \Leftrightarrow \pTf(p)(b) = \top\\ & \Leftrightarrow (p\circ \psi)(b) = \top&\text{ (since }\pTf(p)=p\circ \psi)\\ & \Leftrightarrow p\models \psi(b)\\ & \Leftrightarrow p\models \varnothingMf(b)&\text{ (since }\varnothingMf=\psi). \end{align*} This shows that $\FunctorSenza{Loc}(\psi)$ is a morphism in $\Category{TS}$. 
$\FunctorSenza{Loc}(A)$ is called the \emph{\index{topological system!localization}localization}\MMP{Localization} of the complete Heyting algebra $A$. The topological system is called \emph{\index{topological system!localic}localic} iff it is homeomorphic to the localization of a complete Heyting algebra. We have also here a forgetful functor $\FunctorSenza{V}: \Category{TS}\to \Category{C}ha$, and with a proof very similar to the one for Proposition~\ref{sp-is-right-adjoint-to-forgetful} one shows \BeginProposition{loc-is-left-adjoint-to-forgetful} $\FunctorSenza{Loc}$ is left adjoint to the forgetful functor $\FunctorSenza{V}$. \QED \end{proposition} In a localic system, the points enjoy as morphisms evidently much more structure than just being flat points without a face, in an abstract set. Before exploiting this wondrous remark, recall these notations, where $P$ is a set and $\leq$ is a reflexive and transitive relation on $P$: \begin{align*} \uparrow\! p & := \{q\in P\mid q\geq p\},\\ \downarrow\!p & := \{q\in P\mid q \leq p\}. \end{align*} The following properties are stated just for the record. \BeginLemma{upper-is-prime} Let $a\in A$ with $A$ a complete Heyting algebra. Then $\uparrow\!a$ is a filter, and $\downarrow\!a$ is an ideal in $A$. \QED \end{lemma} \BeginDefinition{} Let $A$ be a complete Heyting algebra. \begin{enumerate} \item $a\in A$ is called a \emph{\index{prime!element}prime element} iff $\downarrow\!a$ is a prime ideal. \item The filter $F\subseteq A$ is called \emph{completely \index{prime!completely}prime} iff $\bigvee S\in F$ implies $s\in F$ for some $s\in S$, where $S\subseteq A$. \end{enumerate} \end{definition} Thus $a\in A$ is a prime element iff we may conclude from $\bigwedge S\leq a$ that there exists $s\in S$ with $s\leq a$, provided $S\subseteq A$ is finite. Note that a prime filter has the stipulated property for finite $S\subseteq A$, so a completely prime filter is a prime filter by implication. 
\BeginExample{compl-prime-open} Let $(X, \tau)$ be a topological space, $x\in X$, then \begin{equation*} {\mathcal G}_{x} := \{G\in \tau\mid x\in G\} \end{equation*} is a completely prime filter in $\tau$. It is clear that ${\mathcal G}_{x}$ is a filter in $\tau$, since it is closed under finite intersections, and $G\in {\mathcal G}_{x}$ and $G\subseteq H$ implies $H\in {\mathcal G}_{x}$ for $H\in \tau$. Now let $\bigcup_{i\in I}S_{i}\in {\mathcal G}_{x}$ with $S_{i}\in\tau$ for all $i\in I$, then there exists $j\in I$ such that $x\in S_{j}$, hence $S_{j}\in {\mathcal G}_{x}$. {\Large\ding{44}} \end{example} Completely prime filters in a complete Heyting algebra have this useful property: if we have an element which is not in the filter, then we can find a prime element not in the filter dominating the given one. The proof of this property requires Zorn's Lemma, hence a variant of the Axiom of Choice. \BeginProposition{prime-elements-not-in} Let $F\subseteq A$ be a completely prime filter in the complete Heyting algebra $A$. Let $a\not\in F$, then there exists a prime element $p\in A$ with $a\leq p$ and $p\not\in F$. \end{proposition} \begin{proof} Let $Z := \{b\in A\mid a\leq b \text{ and }b\not\in F\}$, then $Z\not=\emptyset$, since $a\in Z$. We want to show that $Z$ is inductively ordered, hence take a chain $C\subseteq Z$, then $c := \sup\ C\in A$, since $A$ is a complete lattice. Clearly, $a\leq c$; suppose $c\in F$, then, since $F$ is completely prime, we find $c'\in C$ with $c'\in F$, which contradicts the assumption that $C\subseteq Z$. Thus $c\in Z$, so that $Z$ is indeed inductively ordered, and $Z$ contains a maximal element $p$ by Zorn's Lemma. Since $p\in Z$, we have $a\leq p$ and $p\not\in F$, so we have to show that $p$ is a prime element. Assume that $x\wedge y\leq p$, then either of $x\vee p$ or $y\vee p$ is not in $F$: if both are in $F$, we have by distributivity $(x\vee p)\wedge (y\vee p) = (x\wedge y)\vee p = p$, so $p\in F$, since $F$ is a filter; this is a contradiction. 
Assume that $x\vee p\not\in F$, then $a\leq x\vee p$, since $a\leq p$, hence even $x\vee p\in Z$. Since $p$ is maximal, we conclude $x\vee p \leq p$, which entails $x\leq p$. Thus $p$ is a prime element. \end{proof} The reader might wish to compare this statement to an argument used in the proof of Stone's Representation Theorem, see~\SetCite{Section 1.5.7}. There it is used that we may find in a Boolean algebra for each ideal a prime ideal which contains it. The argumentation is fairly similar, but, alas, one works there in a Boolean algebra, and not in a complete Heyting algebra, as we do presently. This is a characterization of completely prime filters and prime elements in a complete Heyting algebra in terms of morphisms into $2\!\!2$. We will use this characterization later on. \BeginLemma{prime-things} Let $A$ be a complete Heyting algebra, then \begin{enumerate} \item\label{prime-things-1} $F\subseteq A$ is a completely prime filter iff $F = f^{-1}(\top) := \InvBild{f}{\{\top\}}$ for some $f\in\|A, 2\!\!2\|$. \item\label{prime-things-2} $I = f^{-1}(\bot)$ for some $f\in\|A, 2\!\!2\|$ iff $I=\downarrow\!p$ for some prime element $p\in A$. \end{enumerate} \end{lemma} \begin{proof} 1. Let $F\subseteq A$ be a completely prime filter, and define \begin{equation*} f(a) := \begin{cases} \top, & \text{ if } a\in F\\ \bot, & \text{ if }a\not\in F \end{cases} \end{equation*} Then $f: A\to 2\!\!2$ is a morphism for the complete Heyting algebras $A$ and $2\!\!2$. Since $F$ is a filter, we have $f(\bigwedge S) = \bigwedge_{s\in S} f(s)$ for $S\subseteq A$ finite. Let $S\subseteq A$, then \begin{equation*} \bigvee_{s\in S}f(s) = \top \Leftrightarrow f(s) = \top \text{ for some }s\in S \Leftrightarrow f(\bigvee S) = \top, \end{equation*} since $F$ is completely prime. Thus $f\in \|A, 2\!\!2\|$ and $F = f^{-1}(\top)$. Conversely, given $f\in\|A, 2\!\!2\|$, the filter $f^{-1}(\top)$ is certainly completely prime. 2. 
Assume that $I = f^{-1}(\bot)$ for some $f\in \|A, 2\!\!2\|$, and put \begin{equation*} p := \bigvee\{a\in A\mid f(a) = \bot\}. \end{equation*} Since $A$ is complete, we have $p\in A$, and if $a\leq p$, then $f(a) = \bot$. Conversely, if $f(a) = \bot$, then $a\leq p$, so that $I=\downarrow\!p$; moreover, $I$ is a prime ideal, for $f(a)\wedge f(b) = \bot \Leftrightarrow f(a) = \bot\text{ or } f(b) = \bot$, thus $a\wedge b\in I$ implies $a\in I$ or $b\in I$. Thus $p$ is a prime element. Let, conversely, the prime element $p$ be given, then one shows as in part 1. that \begin{equation*} f(a) := \begin{cases} \bot, & \text{ if } a\leq p\\ \top, & \text{ otherwise} \end{cases} \end{equation*} defines a member of $\|A, 2\!\!2\|$ with $\downarrow\!p = f^{-1}(\bot)$. \end{proof} Continuing Example~\ref{compl-prime-open}, we see that there exists for a topological space $X := (X, \tau)$ for each $x\in X$ an element $f_{x}\in\|\tau, 2\!\!2\|$ such that $f_{x}(G) = \top$ iff $x\in G$. Define the map $\Phi_{X}: X\to \|\tau, 2\!\!2\|$ through $\Phi_{X}(x) := f_{x}$ (so that $\Phi_{X}(x) = f_{x}$ iff ${\mathcal G}_{x} = f^{-1}_{x}(\top)$). We will investigate $\Phi_{X}$ now in a little greater detail\MMP{$\Phi_{X}$}. \BeginLemma{phi-is-injective} $\Phi_{X}$ is injective iff $X$ is a $T_{0}$-space. \end{lemma} \begin{proof} Let $\Phi_{X}$ be injective, $x\not= y$, then ${\mathcal G}_{x}\not={\mathcal G}_{y}$. Hence there exists an open set $G$ which contains one of $x, y$, but not the other. If, conversely, $X$ is a $T_{0}$-space, then we have by the same argumentation ${\mathcal G}_{x}\not={\mathcal G}_{y}$ for all $x, y$ with $x\not=y$, so that $\Phi_{X}$ is injective. \end{proof} Well, that's not too bad, because the representation of elements into $\|\tau, 2\!\!2\|$ is reflected by a (very basic) separation axiom. Let us turn to surjectivity. 
For this, we need to transfer irreducibility to the level of open or closed sets; since this is formulated most concisely for closed sets, we use this alternative. A closed set is called irreducible iff each of its covers with closed sets implies its being covered already by one of them. \BeginDefinition{irred-closed-set} A closed set $F\subseteq X$ is called \emph{\index{irreducible}irreducible} iff $F\subseteq \bigcup_{i\in I}F_{i}$ implies $F\subseteq F_{i}$ for some $i\in I$ for any family $(F_{i})_{i\in I}$ of closed sets. \end{definition} Thus a closed set $F$ is irreducible iff the open set $X\setminus F$ is a prime element in $\tau$. Let's see: Assume that $F$ is irreducible, and let $\bigcap_{i\in I} G_{i}\subseteq X\setminus F$ for some open sets $(G_{i})_{i\in I}$. Then $F\subseteq \bigcup_{i\in I}X\setminus G_{i}$ with $X\setminus G_{i}$ closed, thus there exists $j\in I$ with $F\subseteq X\setminus G_{j}$, hence $G_{j}\subseteq X\setminus F$. Thus $\downarrow\!(X\setminus F)$ is a prime ideal in $\tau$. One argues in exactly the same way for showing that if $\downarrow\!(X\setminus F)$ is a prime ideal in $\tau$, then $F$ is irreducible. Now we have this characterization of surjectivity of our map $\Phi_{X}$ through irreducible closed sets. \BeginLemma{phi-is-onto} $\Phi_{X}$ is onto iff for each irreducible closed set $F$ there exists $x\in X$ such that $F=\Closure{\{x\}}$. \end{lemma} \begin{proof} 1. Let $\Phi_{X}$ be onto, $F\subseteq X$ be irreducible. By the argumentation above, $X\setminus F$ is a prime element in $\tau$, thus we find $f\in\|\tau, 2\!\!2\|$ with $\downarrow\!(X\setminus F) = f^{-1}(\bot)$. Since $\Phi_{X}$ is onto, we find $x\in X$ such that $f = \Phi_{X}(x)$, hence we have $x\not\in G \Leftrightarrow f(G)=\bot$ for all open $G\subseteq X$. It is then elementary to show that $F=\Closure{\{x\}}$. 2. Let $f\in \|\tau, 2\!\!2\|$, then we know that $f^{-1}(\bot) = \downarrow\!G$ for some prime open $G$. 
Put $F := X\setminus G$, then $F$ is irreducible and closed, hence $F = \Closure{\{x\}}$ for some $x\in X$. Then we infer $f(H) = \top \Leftrightarrow x\in H$ for each open set $H$, so we have indeed $f = \Phi_{X}(x)$. Hence $\Phi_{X}$ is onto. \end{proof} Thus, if $\Phi_{X}$ is a bijection, we can recover (the topology on) $X$ from the set $\|\tau, 2\!\!2\|$ of morphisms on the complete Heyting algebra $\tau$. \BeginDefinition{def-sober} A topological space $(X, \tau)$ is called \emph{\index{topology!sober}sober}\footnote{The rumors in the domain theory community that a certain \emph{Johann Heinrich-Wilhelm Sober} was a skat partner of Hilbert's gardener at Göttingen could not be confirmed~---~anyway, what about the third man?} iff $\Phi_{X}: X\to \|\tau, 2\!\!2\|$ is a bijection. \end{definition} Thus we obtain as a consequence this characterization. \BeginCorollary{cor-def-sober} Given a topological space $X$, the following conditions are equivalent \begin{itemize} \item $X$ is sober. \item $X$ is a $T_{0}$-space and for each irreducible closed set $F$ there exists $x\in X$ with $F = \Closure{\{x\}}$. \end{itemize} \end{corollary} Exercise~\ref{ex-hausdorff-sober} shows that each Hausdorff space is sober. This property is, however, seldom made use of in the context of classic applications of Hausdorff spaces in, say, analysis. Before continuing, we generalize the Scott topology, which has been defined in Example~\ref{scott-open} for inductively ordered sets. The crucial property is closedness under joins, and we stated this property in a linearly ordered set by saying that, if the supremum of a set $S$ is in a Scott open set $G$, then we should find an element $s\in S$ with $s\in G$. This will have to be relaxed somewhat. Let us analyze the argument why the intersection $G_{1}\cap G_{2}$ of two Scott open sets (old version) $G_{1}$ and $G_{2}$ is open by taking a set $S$ such that $\bigvee S\in G_{1}\cap G_{2}$. 
Because $G_{i}$ is Scott open, we find $s_{i}\in S$ with $s_{i}\in G_{i}$ ($i = 1, 2$), and because we work in a linearly ordered set, we know that either $s_{1}\leq s_{2}$ or $s_{2}\leq s_{1}$. Assuming $s_{1}\leq s_{2}$, we conclude that $s_{2}\in G_{1}$, because open sets are upward closed, so that $G_{1}\cap G_{2}$ is indeed open. The crucial ingredient here is that we can find for two elements of $S$ an element which dominates both, and this is the key to the generalization. We want to be sure that each directed set has a supremum; this is the case, e.g., when we are working in a complete Heyting algebra. The structure we are defining now, however, is considerably weaker, but makes sure that we can do what we have in mind. \BeginDefinition{def-dcpo} A partially ordered set in which every directed subset has a least upper bound is called a \emph{directed complete partially ordered set}, abbreviated as \emph{\index{dcpo}dcpo}\MMP{dcpo}. \end{definition} Evidently, complete Heyting algebras are dcpos, in particular topologies are under inclusion. Sober topological spaces with the specialization order induced by the open sets furnish another example for a dcpo. \BeginExample{sober-tops-are-dcpos} Let $X = (X, \tau)$ be a sober topological space. Hence the points in $X$ and the morphisms in $\|\tau, 2\!\!2\|$ are in a bijective correspondence. Define for $x, x'\in X$ the relation $x\sqsubseteq x'$ iff we have for all open sets $G$ that $x\models G \Rightarrow x'\models G$ (thus $x\in G$ implies $x'\in G$). If we think that being contained in more open sets means having better information, $x\sqsubseteq x'$ is then interpreted as $x'$ being better informed than $x$; $\sqsubseteq$ is sometimes called the \emph{specialization order}. Then $(X, \sqsubseteq)$ is a partially ordered set, antisymmetry following from the observation that a sober space is a $T_{0}$-space. But $(X, \sqsubseteq)$ is also a dcpo. 
Let $S\subseteq X$ be a directed set, then $L := \Bild{\Phi_{X}}{S}$ is directed in $\|\tau, 2\!\!2\|$. Define \begin{equation*} p(G) := \begin{cases} \top, & \text{ if there exists $\ell\in L$ with }\ell(G) = \top\\ \bot, & \text{ otherwise} \end{cases} \end{equation*} We claim that $p\in \|\tau, 2\!\!2\|$. It is clear that $p(\bigvee W) = \bigvee_{w\in W} p(w)$ for $W\subseteq \tau$. Now let $W\subseteq \tau$ be finite, and assume that $\bigwedge \Bild{p}{W} = \top$, hence $p(w) = \top$ for all $w\in W$. Thus we find for each $w\in W$ some $\ell_{w}\in L$ with $\ell_{w}(w)=\top$. Because $L$ is directed, and $W$ is finite, we find an upper bound $\ell \in L$ to $\{\ell_{w}\mid w\in W\}$, hence $\ell(w) = \top$ for all $w\in W$, so that $\ell(\bigwedge W) = \top$, hence $p(\bigwedge W) = \top$. This implies $\bigwedge \Bild{p}{W} = p(\bigwedge W)$. Thus $p\in \|\tau, 2\!\!2\|$, so that there exists $x\in X$ with $\Phi_{X}(x)=p$. Clearly, $x$ is an upper bound to $S$; moreover, if $y$ is any upper bound to $S$, then $x\in G$ implies $\ell(G)=\top$ for some $\ell=\Phi_{X}(s)$ with $s\in S$, hence $s\in G$, and thus $y\in G$, so that $x\sqsubseteq y$. Hence $x = \sup\ S$. {\Large\ding{44}} \end{example} \BeginDefinition{scott-open-gen} Let $(P, \leq)$ be a dcpo, then $U\subseteq P$ is called \emph{Scott open\index{open!Scott}} iff \begin{enumerate} \item $U$ is upward closed. \item If $\sup\ S\in U$ for some directed set $S$, then there exists $s\in S$ with $s\in U$. \end{enumerate} \end{definition} The second property can be described as \emph{inaccessibility through directed joins}: If $U$ contains the directed join of a set, it must contain already one of its elements. The following example is taken from~\cite[p. 136]{Cont-Latt}. \BeginExample{scott-open-example} The powerset $\PowerSet{X}$ of a set $X$ is a dcpo under inclusion. The sets ${\mathcal F}\subseteq \PowerSet{X}$ which are of finite character are Scott open (${\mathcal F}\subseteq\PowerSet{X}$ is of \emph{finite character} iff this condition holds: $F\in {\mathcal F}$ iff some finite subset of $F$ is in ${\mathcal F}$). Let ${\mathcal F}$ be of finite character. 
Then ${\mathcal F}$ is certainly upward closed. Now let $S := \bigcup{\mathcal S}\in{\mathcal F}$ for some directed set ${\mathcal S}\subseteq\PowerSet{X}$, thus there exists a finite subset $F\subseteq S$ with $F\in{\mathcal F}$. Because ${\mathcal S}$ is directed, we find $S_{0}\in {\mathcal S}$ with $F\subseteq S_{0}$, so that $S_{0}\in {\mathcal F}$. {\Large\ding{44}} \end{example} In a topological space, each compact set gives rise to a Scott open filter as a subset of the topology. \BeginLemma{compact-yields-scott-open} Let $(X, \tau)$ be a topological space, and $C\subseteq X$ compact, then \begin{equation*} H(C) := \{U\in \tau\mid C\subseteq U\} \end{equation*} is a Scott open filter. \end{lemma} \begin{proof} Since $H(C)$ is upward closed and a filter, we have to establish that it is not accessible by directed joins. In fact, let ${\mathcal S}$ be a directed subset of $\tau$ such that $\bigcup{\mathcal S}\in H(C)$. Then ${\mathcal S}$ forms a cover of the compact set $C$, hence there exists ${\mathcal S}_{0}\subseteq{\mathcal S}$ finite such that $C\subseteq \bigcup{\mathcal S}_{0}$. But ${\mathcal S}$ is directed, so ${\mathcal S}_{0}$ has an upper bound $S\in{\mathcal S}$, thus $S\in H(C)$. \end{proof} Scott opens form in fact a topology, and continuous functions are characterized in a fashion similar to Example~\ref{scott-continuous-map}. We just state and prove these properties for completeness, before entering into a discussion of the Hofmann-Mislove Theorem. \BeginProposition{scott-open-top-cont} Let $(P, \leq)$ be a dcpo, \begin{enumerate} \item $\{U\subseteq P\mid U\text{ is Scott open}\}$ is a topology on $P$, the Scott topology of $P$\index{topology!Scott}. \item $F\subseteq P$ is Scott closed iff $F$ is downward closed ($x\leq y$ and $y\in F$ imply $x\in F$) and closed with respect to suprema of directed subsets. 
\item Given a dcpo $(Q, \leq)$, a map $f: P\to Q$ is continuous with respect to the corresponding Scott topologies iff $f$ preserves directed joins (i.e., if $S\subseteq P$ is directed, then $\Bild{f}{S}\subseteq Q$ is directed and $\sup\ \Bild{f}{S} = f(\sup\ S)$). \end{enumerate} \end{proposition} \begin{proof} 1. Let $U_{1}, U_{2}$ be Scott open, and $\sup\ S\in U_{1}\cap U_{2}$ for the directed set $S$. Then there exist $s_{i}\in S$ with $s_{i}\in U_{i}$ for $i = 1, 2$. Since $S$ is directed, we find $s\in S$ with $s\geq s_{1}$ and $s\geq s_{2}$, and since $U_{1}$ and $U_{2}$ both are upward closed, we conclude $s\in U_{1}\cap U_{2}$. Because $U_{1}\cap U_{2}$ is plainly upward closed, we conclude that $U_{1}\cap U_{2}$ is Scott open, hence the set of Scott opens is closed under finite intersections. The other properties of a topology are evidently satisfied. This establishes the first part. 2. The characterization of closed sets follows directly from the one for open sets by taking complements. 3. Let $f: P\to Q$ be Scott-continuous. Then $f$ is monotone: if $x\leq x'$, then $x'$ is contained in the closed set $\InvBild{f}{\downarrow\!f(x')}$, thus $x\in \InvBild{f}{\downarrow\!f(x')}$, hence $f(x)\leq f(x')$. Now let $S\subseteq P$ be directed, then $\Bild{f}{S}\subseteq Q$ is directed, since $f$ is monotone, and $S\subseteq \InvBild{f}{\downarrow\!(\sup_{s\in S}f(s))}$. Since the latter set is closed, we conclude that it contains $\sup\ S$, hence $f(\sup\ S) \leq \sup\ \Bild{f}{S}$. On the other hand, since $f$ is monotone, we know that $f(\sup\ S) \geq \sup\ \Bild{f}{S}$. Thus $f$ preserves directed joins. Assume that $f$ preserves directed joins, then, if $x\leq x'$, $f(x') = f(\sup\ \{x, x'\}) = \sup\ \{f(x), f(x')\}$ follows, hence $f$ is monotone. Now let $H\subseteq Q$ be Scott open, then $\InvBild{f}{H}$ is upward closed. 
Let $S\subseteq P$ be directed, and assume that $\sup\ \Bild{f}{S}\in H$, then there exists $s\in S$ with $f(s) \in H$, hence $s\in \InvBild{f}{H}$, which therefore is Scott open. Hence $f$ is Scott continuous. \end{proof} Following~\cite[Chapter II-1]{Cont-Latt}, we show that in a sober space there is an order morphism between Scott open filters and certain compact subsets. Preparing for this, we observe that in a sober space every open subset which contains the intersection of a Scott open filter is already an element of the filter. This will turn out to be a consequence of the existence of prime elements not contained in a prime filter, as stated in Proposition~\ref{prime-elements-not-in}. \BeginLemma{open-is-contained-in-filter} Let ${\mathcal F} \subseteq\tau$ be a Scott open filter of open subsets in a sober topological space $(X, \tau)$. If $\bigcap{\mathcal F}\subseteq U$ for the open set $U$, then $U\in {\mathcal F}$. \end{lemma} \begin{proof} 0. The plan\MMP{Plan} of the proof goes like this: Since ${\mathcal F}$ is Scott open, it is a prime filter in $\tau$. We assume that there exists an open set which contains the intersection, but which is not in ${\mathcal F}$. This is exactly the situation in Proposition~\ref{prime-elements-not-in}, so there exists an open set which is maximal with respect to not being a member of ${\mathcal F}$, and which is prime, hence we may represent this set as $f^{-1}(\bot)$ for some $f\in \|\tau, 2\!\!2\|$. But now sobriety kicks in, and we represent $f$ through an element $x\in X$. This will then lead us to the desired contradiction. 1. Because ${\mathcal F}$ is Scott open, it is a prime filter in $\tau$. Let $G := \bigcap {\mathcal F}$, and assume that $U$ is open with $G\subseteq U$ (note that we do not know whether or not $G$ is empty). 
Assume that $U\not\in{\mathcal F}$, then we obtain from Proposition~\ref{prime-elements-not-in} a prime open set $V$ which is not in ${\mathcal F}$, which contains $U$, and which is maximal. Since $V$ is prime, there exists $f\in\|\tau, 2\!\!2\|$ such that $\{H\in \tau\mid f(H) = \bot\} = \downarrow\!V$ by Lemma~\ref{prime-things}. Since $X$ is sober, we find $x\in X$ such that $\Phi_{X}(x) = f$, hence $X\setminus V = \Closure{\{x\}}$. 2. We claim that $\Closure{\{x\}}\subseteq G$. If this is not the case, we have $z\not\in H$ for some $H\in{\mathcal F}$ and $z\in\Closure{\{x\}}$. Because $H$ is open, this entails $\Closure{\{x\}}\cap H = \emptyset$, thus by maximality of $V$, $H\subseteq V$. Since ${\mathcal F}$ is a filter, this implies $V\in{\mathcal F}$, which is not possible. Thus $\Closure{\{x\}}\subseteq G$, hence $G\not=\emptyset$. On the other hand, $G\subseteq U\subseteq V$, so that $(X\setminus V)\cap G = \emptyset$; but $X\setminus V = \Closure{\{x\}}\subseteq G$ yields $(X\setminus V)\cap G = X\setminus V\not=\emptyset$, since $x\in X\setminus V$. This is a contradiction, hence $U\in{\mathcal F}$. \end{proof} This is a fairly surprising and strong statement, because we usually cannot conclude from $\bigcap {\mathcal F}\subseteq U$ that $U\in {\mathcal F}$ holds, when ${\mathcal F}$ is an arbitrary filter. But we work here under stronger assumptions: the underlying space is sober, so each point is given by a morphism for the underlying complete Heyting algebra \emph{and vice versa}. In addition we deal with Scott open filters. They have the pleasant property that they are inaccessible by directed suprema. But we may even say more, viz., that the intersection of these filters is compact. For, if we have an open cover of the intersection, the union of this cover is open, thus must be an element of the filter by the previous lemma. We may write the union as a union of a directed set of open sets, which then lets us apply the assumption that the filter is inaccessible. \BeginCorollary{cor-is-contained-in-filter} Let $X$ be sober, and ${\mathcal F}$ be a Scott open filter. Then $\bigcap {\mathcal F}$ is compact and nonempty.
\end{corollary} \begin{proof} Let $K := \bigcap {\mathcal F}$, and ${\mathcal S}$ be an open cover of $K$. Thus $U := \bigcup {\mathcal S}$ is open with $K\subseteq U$, hence $U\in {\mathcal F}$ by Lemma~\ref{open-is-contained-in-filter}. But $\bigcup {\mathcal S} = \bigcup\bigl\{\bigcup {\mathcal S}_0\mid {\mathcal S}_0\subseteq{\mathcal S}\text{ finite}\bigr\}$, and the latter collection is directed, so there exists ${\mathcal S}_{0}\subseteq {\mathcal S}$ finite with $\bigcup{\mathcal S}_{0}\in{\mathcal F}$. But this means ${\mathcal S}_{0}$ is a finite subcover of $K$, which consequently is compact. If $K$ is empty, $\emptyset\in{\mathcal F}$ by Lemma~\ref{open-is-contained-in-filter}, which is impossible. \end{proof} This gives a complete characterization of the Scott open filters in a sober space. The characterization involves compact sets which are represented as the intersections of these filters. But we can represent only those compact sets $C$ which are upper sets in the specialization order, i.e., for which holds $x\in C$ and $x\sqsubseteq x'$ implies $x'\in C$. These sets are called \emph{\index{set!saturated}saturated}. Recall that $x\sqsubseteq x'$ means $x\in G \Rightarrow x'\in G$ for all open sets $G$, hence a set is saturated iff it equals the intersection of all open sets containing it. With this in mind, we state the \emph{\index{theorem!Hofmann-Mislove}Hofmann-Mislove Theorem}. \BeginTheorem{hofmann-mislove} Let $X$ be a sober space. Then the Scott open filters are in one-to-one and order preserving correspondence with the non-empty saturated compact subsets of $X$ via ${\mathcal F}\mapsto \bigcap{\mathcal F}$. \end{theorem} \begin{proof} We have shown in Corollary~\ref{cor-is-contained-in-filter} that the intersection of a Scott open filter is compact and nonempty; it is saturated by construction.
Conversely, Lemma~\ref{compact-yields-scott-open} shows that we may obtain from a compact and saturated subset $C$ of $X$ a Scott open filter, the intersection of which must be $C$. It is clear that the correspondence is order preserving. \end{proof} It is quite important for the proof of Lemma~\ref{open-is-contained-in-filter} that the underlying space is sober. Hence it does not come as a surprise that the Theorem of Hofmann-Mislove can be used for a characterization of sober spaces as well~\cite[Theorem II-1.21]{Cont-Latt}. \BeginProposition{char-sober-hofmann-mislove} Let $X$ be a $T_{0}$-space. Then the following statements are equivalent. \begin{enumerate} \item\label{char-sober-hofmann-mislove-1} $X$ is sober. \item\label{char-sober-hofmann-mislove-2} Each Scott open filter ${\mathcal F}$ on $\tau$ consists of all open sets containing $\bigcap{\mathcal F}$. \end{enumerate} \end{proposition} \begin{proof} \labelImpl{char-sober-hofmann-mislove-1}{char-sober-hofmann-mislove-2}: This follows from Lemma~\ref{open-is-contained-in-filter}. \labelImpl{char-sober-hofmann-mislove-2}{char-sober-hofmann-mislove-1}: Corollary~\ref{cor-def-sober} tells us that it is sufficient to show that each irreducible closed set is the closure of one point. Let $A\subseteq X$ be irreducible and closed. Then ${\mathcal F} := \{G \text{ open}\mid G\cap A\not=\emptyset\}$ is closed under finite intersections, since $A$ is irreducible. In fact, let $G$ and $H$ be open sets with $G\cap A\not=\emptyset$ and $H\cap A\not=\emptyset$. If $A\subseteq (X\setminus G)\cup (X\setminus H)$, then $A$ is a subset of one of these closed sets, say, $X\setminus G$, but then $A\cap H=\emptyset$, which is a contradiction. This implies that ${\mathcal F}$ is a filter, and ${\mathcal F}$ is obviously Scott open. Assume that $A$ cannot be represented as $\Closure{\{x\}}$ for any $x$.
Then $X\setminus\Closure{\{x\}}$ is an open set the intersection of which with $A$ is not empty, hence $X\setminus\Closure{\{x\}}\in{\mathcal F}$. We obtain from the assumption that $X\setminus A\in {\mathcal F}$, because with $ K := \bigcap{\mathcal F} \subseteq \bigcap_{x\in X} (X\setminus\Closure{\{x\}}), $ we have $K\subseteq X\setminus A$, and $X\setminus A$ is open. Consequently, $A\cap X\setminus A\not=\emptyset$, which is a contradiction. Thus there exists $x\in X$ such that $A = \Closure{\{x\}}$. Hence $X$ is sober. \end{proof} These are the first and rather elementary discussions of the interplay between topology and order, considered in a systematic fashion in domain theory. The reader is directed to~\cite{Cont-Latt} or to~\cite{Abramsky+Jung} for further information. \subsubsection{The Stone-Weierstraß Theorem} \label{sec:stone-weierstrass} This section will see the classic Stone-Weierstraß Theorem on the approximation of continuous functions on a compact topological space. We need for this a ring of continuous functions, and show that ---~under suitable conditions~--- this ring is dense. This requires some preliminary considerations on the space of continuous functions, because this construction evidently requires a topology. Denote for a topological space $X$ by \index{$\Cont$}$\Cont$\MMP{$\Cont$} the space of all continuous and bounded functions $f: X\to \mathbb{R}$. The structure of $\Cont $ is algebraically fairly rich; just for the record: \BeginProposition{cont-is-vector-lattice} $\Cont$ is a real vector space which is closed under constants, multiplication and under the lattice operations. \QED \end{proposition} This describes the algebraic properties, but we need a topology on this space, which is provided for by the supremum norm. Define for $f\in \Cont$ \begin{equation*} \|f\| := \sup_{x\in X}|f(x)|. \end{equation*} Then $(\Cont, \|\cdot \|)$ is an example for a normed linear (or vector) space. 
\BeginDefinition{vector-space} Let $V$ be a real vector space. A \emph{\index{norm}norm} $\|\cdot \|: V\to \mathbb{R}_{+}$ assigns to each vector $v$ a non-negative real number $\|v\|$ with these properties: \begin{enumerate} \item $\|v\| \geq 0$, and $\|v\| = 0$ iff $v = 0$. \item $\|\alpha\cdot v\| = |\alpha|\cdot \|v\|$ for all $\alpha\in\mathbb{R}$ and all $v\in V$. \item $\|x+y\|\leq \|x\| + \|y\|$ for all $x, y\in V$. \end{enumerate} A vector space with a norm is called a \emph{normed \index{space!normed}space}. \end{definition} It is immediate that a normed space is a metric space, putting $d(v, w) := \|v-w\|$. It is also immediate that $f\mapsto \|f\|$ defines a norm on $\Cont$. But we can say actually a bit more: with this definition of a metric, $\Cont$ is a complete metric space; we have established this for the compact interval $[0, 1]$ in Example~\ref{cont-is-complete} already. Let us have a look at the general case. \BeginLemma{cx-is-complete} $\Cont$ is complete with the metric induced by the supremum norm. \end{lemma} \begin{proof} Let $\Folge{f}$ be a $\|\cdot \|$-Cauchy sequence in $\Cont$, then $(f_{n}(x))_{n\in\mathbb{N}}$ is a Cauchy sequence of reals, and $f(x) := \lim_{n\to \infty}f_{n}(x)$ exists for each $x\in X$. Let $\epsilon>0$ be given, then we find $n_{0}\in \mathbb{N}$ such that $\|f_{n}-f_{m}\|< \epsilon$ for all $n, m\geq n_{0}$, thus, letting $m\to\infty$, $|f(x)-f_{n}(x)|\leq \epsilon$ for $n\geq n_{0}$. This inequality holds for each $x\in X$, so that we obtain $\|f-f_{n}\|\leq \epsilon$ for $n\geq n_{0}$. It implies also that $f\in\Cont$. \end{proof} Normed spaces for which the associated metric space is complete are special, so they deserve their own name. \BeginDefinition{banach-space} A normed space $(V, \|\cdot \|)$ which is complete in the metric associated with $\|\cdot \|$ is called a \emph{Banach \index{space!Banach}space}.
\end{definition} The topology induced by the supremum norm is called the \emph{\index{topology!uniform convergence}topology of uniform convergence}, so that we may restate Lemma~\ref{cx-is-complete} by saying that $\Cont$ is closed under uniform convergence. A helpful example is Dini's \index{theorem!Dini}Theorem for uniform convergence on $\Cont$ for compact $X$. It gives a criterion of uniform convergence, provided we know already that the limit is continuous. \BeginProposition{dini-uniform-convergence} Let $X$ be a compact topological space, and assume that $\Folge{f}$ is a sequence of continuous functions which increases monotonically to a continuous function $f$. Then $\Folge{f}$ converges uniformly to $f$. \end{proposition} \begin{proof} We know that $f_{n}(x) \leq f_{n+1}(x)$ holds for all $n\in\mathbb{N}$ and all $x\in X$, and that $f(x) := \sup_{n\in\mathbb{N}}f_{n}(x)$ is continuous. Let $\epsilon>0$ be given, then $F_{n} := \{x\in X\mid f(x) - f_{n}(x) \geq \epsilon\}$ defines a closed set with $\bigcap_{n\in\mathbb{N}} F_{n} = \emptyset$, moreover, the sequence $\Folge{F}$ decreases. By compactness of $X$ we thus find $n_{0}\in \mathbb{N}$ with $F_{n} = \emptyset$ for $n\geq n_{0}$, hence $\|f-f_{n}\| \leq \epsilon$ for $n\geq n_{0}$. \end{proof} The goal of this section is to show that, given the compact topological space $X$, we can approximate each continuous real function uniformly through elements of a subspace of $\Cont$. It is plain that this subspace has to satisfy some requirements: it should \begin{itemize} \item be a vector space itself, \item contain the constant functions, \item separate points, \item be closed under multiplication. \end{itemize} Hence it is in particular a subring of the ring $\Cont$. Let $A$ be such a subset, then we want to show that the closure $\Closure{A}$ with respect to uniform convergence equals $\Cont$.
We will show first that $\Closure{A}$ is closed under the lattice operations, because we will represent an approximating function as the finite supremum of a finite infimum of simpler approximations. So the first goal will be to establish closure under $\inf$ and $\sup$. Recall that \begin{align*} f\wedge g & = \frac{1}{2}\cdot (f + g - |f-g|),\\ f\vee g & = \frac{1}{2}\cdot (f + g + |f-g|). \end{align*} Now it is easy to see that $\Closure{A}$ is closed under the vector space operations, if $A$ is. Our first step boils down to showing that $|f|\in \Closure{A}$ if $f\in A$. Thus, given $f\in A$, we have to find a sequence $\Folge{f}$ such that $|f|$ is the uniform limit of this sequence. \MMP{But wait!} It is actually enough to show that $t\mapsto \sqrt{t}$ can be approximated uniformly on the unit interval $[0, 1]$, because we know that $|f| = \sqrt{f^{2}}$ holds. It is enough to do this on the unit interval, as we will see below. \BeginLemma{approx-square-root} There exists a sequence $\Folge{f}$ in $\Cont[[0, 1]]$ which converges uniformly to the function $t\mapsto \sqrt{t}$. \end{lemma} \begin{proof} Define inductively for $t\in [0, 1]$ \begin{align*} f_{0}(t) & := 0,\\ f_{n+1}(t) & := f_{n}(t) + \frac{1}{2}\cdot (t - f_{n}^{2}(t)). \end{align*} We show by induction that $f_{n}(t) \leq \sqrt{t}$ holds. This is clear for $n = 0$. If we know already that the assumption holds for $n$, then we write \begin{equation*} \sqrt{t} - f_{n+1}(t) = \sqrt{t} - f_{n}(t) - \frac{1}{2}\cdot (t - f_{n}^{2}(t)) = (\sqrt{t}-f_{n}(t))\cdot \bigl(1-\frac{1}{2}\cdot (\sqrt{t}+f_{n}(t))\bigr). \end{equation*} Because $t\in[0, 1]$ and from the induction hypothesis, we have $\sqrt{t}+f_{n}(t)\leq 2\cdot \sqrt{t}\leq 2$, so that $\sqrt{t} - f_{n+1}(t) \geq 0$. Thus we infer that $f_{n}(t) \leq \sqrt{t}$ for all $t\in[0, 1]$. Since $f_{n}^{2}(t)\leq t$, the sequence $\Folge{f}$ increases, and its pointwise limit $L(t)$ satisfies $L(t) = L(t) + \frac{1}{2}\cdot (t - L^{2}(t))$, hence $\lim_{n\to \infty}f_{n}(t) = \sqrt{t}$. From Dini's Proposition~\ref{dini-uniform-convergence} we now infer that the convergence is uniform.
\end{proof} This is the desired consequence from this construction. \BeginCorollary{ring-is-lattice} Let $X$ be a compact topological space, and let $A\subseteq \Cont$ be a ring of continuous functions which contains the constants, and which is closed under uniform convergence. Then $A$ is a lattice. \end{corollary} \begin{proof} It is enough to show that $A$ is closed under taking absolute values. Let $f\in A$, then we may and do assume that $\|f\|\leq 1$ holds (otherwise consider $f/\|f\|$, which is an element of $A$ as well, and note that $|f| = \|f\|\cdot \bigl|f/\|f\|\bigr|$). Because $|f| = \sqrt{f^{2}}$ with $f^{2}$ taking values in $[0, 1]$, and the latter is a uniform limit of elements of $A$ by Lemma~\ref{approx-square-root}, we conclude $|f|\in A$, which entails $A$ being closed under the lattice operations. \end{proof} We are now in a position to establish the classic Stone-Weierstraß Theorem, which permits to conclude that a ring of bounded continuous functions on a compact topological space $X$ is dense with respect to uniform convergence in $\Cont$, provided it contains the constants and separates points. The latter condition is obviously necessary, but has not been used in the argumentation so far. It is clear, however, that we cannot do without this condition, because $\Cont$ separates points, and it is difficult to see how a function which separates points could be approximated from a collection which does not. The polynomials on a compact interval in the reals are an example for a ring which satisfies all these assumptions. This collection shows also that we cannot extend the result to a non-compact base space like the reals. Take $x\mapsto \sin x$ for example; this function cannot be approximated uniformly over $\mathbb{R}$ by polynomials. For, assume that for each $\epsilon>0$ there exists a polynomial $p$ such that $\sup_{x\in \mathbb{R}}|p(x) - \sin x| < \epsilon$. Then $ -\epsilon - 1 < p(x) < 1 + \epsilon $ for all $x\in \mathbb{R}$, so $p$ is bounded and hence constant; but a constant $c$ satisfies $\sup_{x\in\mathbb{R}}|c - \sin x|\geq 1$, so no such $p$ exists for $\epsilon<1$.
Here, then, is the Stone-Weierstraß \index{theorem!Stone-Weierstraß}Theorem for compact topological spaces. \BeginTheorem{stone-weierstrass} Let $X$ be a compact topological space, and $A\subseteq \Cont$ be a ring of functions which separates points, and which contains all constant functions. Then $A$ is dense in $\Cont$ with respect to the topology of uniform convergence, i.e., $\Closure{A} = \Cont$. \end{theorem} \begin{proof} 0. Our goal\MMP{Approach} is to find for some given $f\in \Cont$ and an arbitrary $\epsilon>0$ a function $F\in \Closure{A}$ such that $\|f-F\|< \epsilon$. Since $X$ is compact, we will find $F$ through a refined covering argument in the following way. If $a, b\in X$ are given, we find a continuous function $f_{a, b}\in A$ with $f_{a, b}(a) = f(a)$ and $f_{a, b}(b) = f(b)$. From this we construct a cover, using sets like $\{x\mid f_{a, b}(x) < f(x) + \epsilon\}$ and $\{x\mid f_{a, b}(x) > f(x) - \epsilon\}$, extract finite subcovers and construct from the corresponding functions the desired function through suitable lattice operations. 1. Fix $f\in \Cont$ and $\epsilon>0$. Given distinct points $a\not= b$, we find a function $h\in A$ with $h(a) \not= h(b)$, thus \begin{equation*} g(x) := \frac{h(x) - h(a)}{h(b) - h(a)} \end{equation*} defines a function $g\in A$ with $g(a) = 0$ and $g(b) = 1$. Then \begin{equation*} f_{a, b}(x) := (f(b) - f(a))\cdot g(x) + f(a) \end{equation*} is also an element of $A$ with $f_{a, b}(a) = f(a)$ and $f_{a, b}(b) = f(b)$. Now define \begin{align*} U_{a, b} & := \{x\in X \mid f_{a, b}(x) < f(x) + \epsilon\},\\ V_{a, b} & := \{x\in X \mid f_{a, b}(x) > f(x) - \epsilon\}, \end{align*} then $U_{a, b}$ and $V_{a, b}$ are open sets containing $a$ and $b$. 2. Fix $b$, then $\{U_{a, b}\mid a\in X\}$ is an open cover of $X$, so we can find points $a_{1}, \ldots, a_{k}$ such that $\{U_{a_{1}, b}, \ldots, U_{a_{k}, b}\}$ is an open cover of $X$ by compactness. Thus \begin{equation*} f_{b} := \bigwedge_{i=1}^{k}f_{a_{i}, b} \end{equation*} defines an element of $\Closure{A}$ by Corollary~\ref{ring-is-lattice}.
We have $f_{b}(x) < f(x) + \epsilon$ for all $x\in X$, and we know that $f_{b}(x) > f(x) - \epsilon$ for all $x\in V_{b} := \bigcap_{i=1}^{k}V_{a_{i}, b}$. The set $V_{b}$ is an open neighborhood of $b$, so from the open cover $\{V_{b}\mid b\in X\}$ we find $b_{1}, \ldots, b_{\ell}$ such that $X$ is covered through $\{V_{b_{1}}, \ldots , V_{b_{\ell}}\}$. Put \begin{equation*} F := \bigvee_{i=1}^{\ell}f_{b_{i}}, \end{equation*} then $F\in\Closure{A}$ and $\|f-F\|<\epsilon$. \end{proof} This is the example already discussed above. \BeginExample{stone-weierstrass-polynomials} Let $X := [0, 1]$ be the closed unit interval, and let $A$ consist of all polynomials $\sum_{i=0}^{n}a_{i}\cdot x^{i}$ for $n\in \mathbb{N}$ and $a_{0}, \ldots, a_{n}\in\mathbb{R}$. Polynomials are continuous, they form a vector space and are closed under multiplication. Moreover, the constants are polynomials, and the polynomials separate points, since the identity $x\mapsto x$ already does. Thus we obtain from the Stone-Weierstraß Theorem~\ref{stone-weierstrass} that every continuous function on $[0, 1]$ can be uniformly approximated through a sequence of polynomials. {\Large\ding{44}} \end{example} It is said that Oscar Wilde\MMP[t]{Oscar Wilde} could resist everything except temptation. The author concurs. Here is a classic proof of the Weierstraß Approximation Theorem, the original form of Theorem~\ref{stone-weierstrass}, which deals with polynomials on $[0, 1]$ only, and establishes the statement given in Example~\ref{stone-weierstrass-polynomials}. We will give this proof now, based on the discussion in the classic~\cite[§ II.4.1]{Hilbert-Courant}. This proof is elegant and based on the manipulation of specific functions (we are all too often focussed on our pretty little garden of beautiful abstract structures, all too often in danger of losing contact with concrete mathematics, and our roots).
As a preliminary consideration, we will show that \begin{equation*} \lim_{n\to \infty}\frac{\int_{\delta}^{1}(1-v^{2})^{n}\ dv}{\int_{0}^{1}(1-v^{2})^{n}\ dv} = 0 \end{equation*} for every $\delta\in]0, 1[$. Define for this \begin{align*} J_{n} & := \int_{0}^{1}(1-v^{2})^{n}\ dv,\\ J_{n}^{*} & := \int_{\delta}^{1}(1-v^{2})^{n}\ dv. \end{align*} (we will keep these notations for later use). We have \begin{equation*} J_{n} > \int_{0}^{1}(1-v)^{n}\ dv = \frac{1}{n+1} \end{equation*} and \begin{equation*} J_{n}^{*} = \int_{\delta}^{1}(1-v^{2})^{n}\ dv < (1-\delta^{2})^{n}\cdot (1-\delta) < (1-\delta^{2})^{n}. \end{equation*} Thus \begin{equation*} \frac{J_{n}^{*}}{J_{n}} < (n+1)\cdot (1-\delta^{2})^{n}\to 0. \end{equation*} This establishes the claim. Let $f: [0, 1]\to \mathbb{R}$ be continuous. Given $\epsilon>0$, there exists $\delta>0$ such that $|x-y|<\delta$ implies $|f(x) - f(y)|< \epsilon$ for all $x\in[0, 1]$, since $f$ is uniformly continuous by Proposition~\ref{compact-cont-unif-cont}. Thus $|v| < \delta$ implies $|f(x+v)-f(x)|< \epsilon$ for all $x$ with $x, x+v\in[0, 1]$. Put \begin{align*} Q_{n}(x) & := \int_{0}^{1}f(u)\cdot \bigl(1-(u-x)^{2}\bigr)^{n}\ du,\\ P_{n}(x) & := \frac{Q_{n}(x)}{2\cdot J_{n}}. \end{align*} We will show that $P_{n}$ converges to $f$ in the topology of uniform convergence. We note first that $Q_{n}$ is a polynomial of degree $2 n$. In fact, put \begin{equation*} A_{j} := \int_{0}^{1}f(u)\cdot u^{j}\ du \end{equation*} for $j \geq 0$, expanding yields the formidable representation \begin{equation*} Q_{n}(x) = \sum_{k=0}^{n}\sum_{j=0}^{2 k}\binom{n}{k}\binom{2 k}{j}(-1)^{n-k+j}A_{j} \cdot x^{2 k-j}. \end{equation*} Let us work on the approximation. We fix $x\in[0, 1]$, and note that the inequalities derived below do not depend on the specific choice of $x$. Hence they provide a uniform approximation.
Substitute $u$ by $v+x$ in $Q_{n}$; this yields \begin{align*} \int_{0}^{1}f(u) \bigl(1-(u-x)^{2}\bigr)^{n}\ du & = \int_{-x}^{1-x}f(v+x)(1-v^{2})^{n}\ dv\\ & = I_{1} + I_{2} + I_{3} \end{align*} with \begin{align*} I_{1} & := \int_{-x}^{-\delta}f(v+x)(1-v^{2})^{n}\ dv,\\ I_{2} & := \int_{-\delta}^{+\delta}f(v+x)(1-v^{2})^{n}\ dv,\\ I_{3} & := \int_{+\delta}^{1-x}f(v+x)(1-v^{2})^{n}\ dv. \end{align*} We work on these integrals separately. Let $M := \max_{0\leq x \leq 1}|f(x)|$, then \begin{equation*} I_{1} \leq M \int_{-1}^{-\delta}(1-v^{2})^{n}\ dv = M\cdot J_{n}^{*}, \end{equation*} and \begin{equation*} I_{3} \leq M \int_{\delta}^{1}(1-v^{2})^{n}\ dv = M\cdot J_{n}^{*}. \end{equation*} We can rewrite $I_{2}$ as follows: \begin{align*} I_{2} & = f(x) \int_{-\delta}^{+\delta}(1-v^{2})^{n}\ dv + \int_{-\delta}^{+\delta}\bigl(f(x+v)-f(x)\bigr) (1-v^{2})^{n}\ dv\\ & = 2 f(x) (J_{n}-J_{n}^{*}) + \int_{-\delta}^{+\delta}\bigl(f(x+v)-f(x)\bigr) (1-v^{2})^{n}\ dv. \end{align*} From the choice of $\delta$ for $\epsilon$ we obtain \begin{equation*} \bigl|\int_{-\delta}^{+\delta}\bigl(f(x+v)-f(x)\bigr) (1-v^{2})^{n}\ dv\bigr| \leq \epsilon \int_{-\delta}^{+\delta}(1-v^{2})^{n}\ dv < \epsilon \int_{-1}^{+1}(1-v^{2})^{n}\ dv = 2 \epsilon\cdot J_{n}. \end{equation*} Combining these inequalities, we obtain \begin{equation*} |P_{n}(x) - f(x)| < 2 M\cdot \frac{J_{n}^{*}}{J_{n}} + \epsilon. \end{equation*} Hence the difference can be made arbitrarily small, which means that $f$ can be approximated uniformly through polynomials. The two approaches presented are structurally very different, it would be difficult to recognize the latter as a precursor of the former. While both make substantial use of uniform continuity, the first one is an existential proof, constructing two covers from which to choose a finite subcover each, and from this deriving the existence of an approximating function.
It is non-constructive because it would be difficult to construct an approximating function from it, even if the ring of approximating functions is given by a base for the underlying vector space. The second one, however, starts also from uniform continuity and uses this property to find a suitable bound for the difference of the approximating polynomial and the function proper through integration. The representation of $Q_{n}$ above shows what the constructing polynomial looks like, and the coefficients of the polynomials may be computed (in principle, at least). And, finally, the abstract situation gives us a greater degree of freedom, since we deal with a ring of continuous functions observing certain properties, while the original proof works for the class of polynomials only. \subsubsection{Uniform Spaces} \label{sec:uniform-spaces} This section will give a brief introduction to uniform spaces. The objective is to demonstrate in what ways the notion of a metric space can be generalized without arriving at the full generality of topological spaces, but retaining useful properties like completeness or uniform continuity. While pseudometric spaces formulate the concept of two points being close to each other through a numeric value, and general topological spaces use the concept of an open neighborhood, uniform spaces formulate neighborhoods on the Cartesian product. This concept is truly in the middle: each pseudometric generates neighborhoods, and from a neighborhood we may obtain the neighborhood filter for a point. For motivation and illustration, we consider a pseudometric space $(X, d)$ and say that two points are neighbors iff their distance is smaller than $r$ for some fixed $r>0$; the degree of neighborhood evidently depends on $r$. The set \begin{equation*} V_{d, r} := V_{r} := \{\langle x, y\rangle\mid d(x, y)< r\} \end{equation*} is then the collection of all neighbors\MMP[h]{$V_{d, r}; V_{r}$}.
We may obtain from $V_{r}$ the neighborhood $B(x, r)$ for some point $x$ upon extracting all $y$ such that $\langle x, y\rangle\in V_{r}$, thus \begin{equation*} B(x, r) = V_{r}[x] := \{y\in X\mid \langle x, y\rangle\in V_{r}\}. \end{equation*} The collection of all these neighborhoods observes these properties. \begin{enumerate} \item The diagonal $\Delta := \Delta_{X}$ is contained in $V_{r}$ for all $r>0$, because $d(x, x) = 0$. \item $V_{r}$ is ---~as a relation on $X$---~symmetric: $\langle x, y\rangle\in V_{r}$ iff $\langle y, x\rangle\in V_{r}$, thus $V_{r}^{-1} = V_{r}$. This property reflects the symmetry of $d$. \item $V_{r}\circ V_{s}\subseteq V_{r+s}$ for $r, s>0$; this property is inherited from the triangle inequality for $d$. \item $V_{r_{1}}\cap V_{r_{2}} = V_{\min\{r_{1}, r_{2}\}}$, hence this collection is closed under finite intersections. \end{enumerate} It is convenient to consider not only these immediate neighborhoods but rather the filter generated by them on $X\times X$ (which is possible because the empty set is not contained in this collection, and the properties above shows that they form the base for a filter indeed). This leads to this definition of a uniformity. It focusses on the properties of the neighborhoods rather than on that of a pseudometric, so we formulate it for a set in general. \BeginDefinition{def-uniformity} Let $X$ be a set. A filter $\mathfrak{u}$ on $\PowerSet{X\times X}$ is called a \emph{\index{uniformity}uniformity on $X$} iff these properties are satisfied \begin{enumerate} \item $\Delta\subseteq U$ for all $U\in\mathfrak{u}$. \item If $U\in\mathfrak{u}$, then $U^{-1}\in\mathfrak{u}$. \item If $U\in\mathfrak{u}$, there exists $V\in\mathfrak{u}$ such that $V\circ V\subseteq U$. \item $\mathfrak{u}$ is closed under finite intersections. \item If $U\in\mathfrak{u}$ and $U\subseteq W$, then $W\in\mathfrak{u}$. \end{enumerate} The pair $(X, \mathfrak{u})$ is called a \emph{\index{space!uniform}uniform space}. 
The elements of $\mathfrak{u}$ are called $\mathfrak{u}$-\emph{neighborhoods}. \end{definition} The first three properties are gleaned from those of the pseudometric neighborhoods above, the last two are properties of a filter, which have been listed here just for completeness. We will omit $\mathfrak{u}$ when talking about a uniform space, if this does not yield ambiguities. The term ``neighborhood'' is used for elements of a uniformity and for the neighborhoods\MMP[t]{Neigh\-borhood, entourage, Nachbarschaft} of a point. There should be no ambiguity, because the point is always attached, when talking about neighborhood in the latter, topological sense. Bourbaki uses the term \emph{\index{entourage}entourage} for a neighborhood in the uniform sense, the German word for this is \emph{\index{Nachbarschaft}Nachbarschaft} (while the term for a neighborhood of a point is \emph{\index{Umgebung}Umgebung}). We will need some relational identities; they are listed in Figure~\ref{tab:some-identities} for the reader's convenience. \begin{figure} \centering \caption{Some relational identities} \label{tab:some-identities} \end{figure} As in the case of topologies, where we do not always specify the entire topology, but occasionally make use of the possibility to define it through a base, we will proceed similarly here, where we deal with filter bases. We have this characterization for the base of a uniformity. \BeginProposition{char-base-unif} A family $\emptyset\not=\mathfrak{b}\subseteq \PowerSet{X\times X}$ is the base\MMP{Base} for a uniformity iff it has the following properties: \begin{enumerate} \item\label{char-base-unif-1} Each member of $\mathfrak{b}$ contains the diagonal of $X$. \item\label{char-base-unif-2} For $U\in\mathfrak{b}$ there exists $V\in\mathfrak{b}$ with $V\subseteq U^{-1}$. \item\label{char-base-unif-3} For $U\in\mathfrak{b}$ there exists $V\in\mathfrak{b}$ with $V\circ V\subseteq U$.
\item\label{char-base-unif-4} For $U, V\in\mathfrak{b}$ there exists $W\in\mathfrak{b}$ with $W\subseteq U\cap V$. \end{enumerate} \end{proposition} \begin{proof} Recall that the filter generated by a filter base $\mathfrak{b}$ is defined through $\{F\mid U\subseteq F\text{ for some }U\in\mathfrak{b}\}$. With this in mind, the proof is straightforward. \end{proof} This permits a description of a uniformity in terms of a base, which is usually easier than giving a uniformity as a whole. Let us look at some examples. \BeginExample{example-uniformities} \begin{enumerate} \item\label{example-uniformities-1} The uniformity $\{X\times X\}$ is called the \emph{\index{uniformity!indiscrete}indiscrete uniformity}, the uniformity $\{A\subseteq X\times X\mid \Delta\subseteq A\}$ is called the \emph{\index{uniformity!discrete}discrete uniformity} on $X$. \item\label{example-uniformities-2} Let $V_{r} := \{\langle x, y\rangle\mid x, y\in \mathbb{R}, |x-y|<r\}$, then $\{V_{r}\mid r>0\}$ is a base for a uniformity on $\mathbb{R}$. Since it makes use of the structure of $(\mathbb{R}, +)$ as an additive group, it is called the \emph{additive \index{uniformity!additive}uniformity} on $\mathbb{R}$. \item\label{example-uniformities-3} Put $V_{E} := \{\langle x, y\rangle\in\mathbb{R}^{2}\mid x/y\in E\}$ for some neighborhood $E$ of $1\in\mathbb{R}\setminus\{0\}$. Then the filter generated by $\{V_{E}\mid E \text{ is a neighborhood of 1}\}$ is a uniformity. This is so because the logarithm function is continuous on $\mathbb{R}_{+}\setminus\{0\}$. This uniformity nourishes itself from the multiplicative group $(\mathbb{R}\setminus\{0\}, \cdot)$, so it is called the \emph{multiplicative \index{uniformity!multiplicative}uniformity} on $\mathbb{R}\setminus\{0\}$. This is discussed in greater generality in part~\ref{example-uniformities-9}.
\item\label{example-uniformities-4} A \emph{\index{partition}partition} $\pi$ on a set $X$ is a collection of non-empty and mutually disjoint subsets of $X$ which covers $X$. It generates an equivalence relation on $X$ by rendering two elements of $X$ equivalent iff they are in the same partition element. Define $ V_{\pi} := \bigcup_{i=1}^{n}(P_{i}\times P_{i}) $ for a finite partition $\pi = \{P_{1}, \ldots, P_{k}\}$. Then $$\mathfrak{b} := \{V_{\pi}\mid \pi\text{ is a finite partition on }X\}$$ is the base for a uniformity. Let $\pi$ be a finite partition, and denote the equivalence relation generated by $\pi$ by $|\pi|$, hence $\isEquiv{x}{y}{|\pi|}$ iff $x$ and $y$ are in the same element of $\pi$. \begin{itemize} \item $\Delta\subseteq V_{\pi}$ is obvious, since $|\pi|$ is reflexive. \item $U^{-1} = U$ for all $U\in V_{\pi}$, since $|\pi|$ is symmetric. \item Because $|\pi|$ is transitive, we have $V_{\pi}\circ V_{\pi}\subseteq V_{\pi}$. \item Let $\pi'$ be another finite partition, then $ \{A\cap B\mid A\in \pi, B\in\pi', A\cap B\not=\emptyset\} $ defines a partition $\pi''$ such that $V_{\pi''}\subseteq V_{\pi}\cap V_{\pi'}$. \end{itemize} Thus $\mathfrak{b}$ is the base for a uniformity, which is, you guessed it, called the \emph{\index{uniformity!finite partitions}uniformity of finite partitions}. \item\label{example-uniformities-5} Let $\emptyset\not={\mathcal I}\subseteq\PowerSet{X}$ be an ideal (\SetCite{Definition 1.5.32}), and define \begin{align*} {\mathcal A}_{E} & := \{\langle A, B\rangle\mid A\Delta B\in E\}\text{ for }E\in{\mathcal I},\\ \mathfrak{b} & := \{{\mathcal A}_{E}\mid E\in{\mathcal I}\}. \end{align*} Then $\mathfrak{b}$ is a base for a uniformity on $\PowerSet{X}$. In fact, it is clear that $\Delta_{\PowerSet{X}} \subseteq {\mathcal A}_{E}$ always holds, and that each member of $\mathfrak{b}$ is symmetric. 
Let $A\Delta B\subseteq E$ and $B\Delta C\subseteq F$, then $ A\Delta C = (A\Delta B)\Delta (B\Delta C) \subseteq (A\Delta B)\cup (A\Delta C)\subseteq E\cup F$, thus ${\mathcal A}_{E}\circ {\mathcal A}_{F} \subseteq {\mathcal A}_{E\cup F}$, and finally ${\mathcal A}_{E}\cap{\mathcal A}_{F} \subseteq {\mathcal A}_{E\cap F}$. Because ${\mathcal I}$ is an ideal, it is closed under finite intersections and finite unions, the assertion follows. \item\label{example-uniformities-7} Let $p$ be a prime, and put $W_{k} := \{\langle x, y\rangle\mid x, y\in\mathbb{Z}, p^{k}\text{ divides } x - y\}$. Then $W_{k}\circ W_{\ell} \subseteq W_{\min\{k, \ell\}} = W_{k}\cap W_{\ell}$, thus $\mathfrak{b} := \{W_{k}\mid k\in\mathbb{N}\}$ is the base for a uniformity $\mathfrak{u}_{p}$ on $\mathbb{Z}$, the \emph{$p$-adic \index{uniformity!$p$-adic}uniformity}. \item\label{example-uniformities-6} Let $A$ be a set, $(X, \mathfrak{u})$ a uniform space, and let $F(A, X)$ be the set of all maps $A\to X$. We will define a uniformity on $F(A, X)$; the approach is similar to Example~\ref{ex-topol-bases-weak}. Define for $U\in\mathfrak{u}$ the set \begin{equation*} U_{F} := \{\langle f, g\rangle\in F(A, X) \mid \langle f(x), g(x)\rangle\in U\text{ for all }x\in X\}. \end{equation*} Thus two maps are close with respect to $U_{F}$ iff all their images are close with respect to $U$. It is immediate that $\{U_{F}\mid U\in\mathfrak{u}\}$ forms a uniformity, and that $\{U_{F}\mid U\in\mathfrak{b}\}$ is a base for a uniformity, provided $\mathfrak{b}$ is a base for uniformity $\mathfrak{u}$. If $X=\mathbb{R}$ is endowed with the additive uniformity, a typical set of the base is given for $\epsilon>0$ through \begin{equation*} \{\langle f, g\rangle\in F(A, \mathbb{R})\mid \sup_{a\in A}|f(a)-g(a)|<\epsilon\}, \end{equation*} hence the images of $f$ and of $g$ have to be uniformly close to each other. 
\item\label{example-uniformities-8} Call a map $f: \mathbb{R}\to \mathbb{R}$ \emph{\index{map!affine}affine} iff it can be written as $f(x) = a\cdot x+b$ with $a\not=0$; let $f_{a, b}$ be the affine map characterized by the parameters $a$ and $b$, and define $X := \{f_{a, b}\mid a, b\in \mathbb{R}, a\not=0\}$ the set of all affine maps. Note that an affine map is bijective, and that its inverse is an affine map again with $f_{a, b}^{-1} = f_{1/a, -b/a}$; the composition of an affine map is an affine map as well, since $f_{a, b}\circ f_{c, d} = f_{ac, ad+b}$. Define for $\epsilon>0, \delta>0$ the $\epsilon, \delta$-neighborhood $U_{\epsilon, \delta}$ by \begin{equation*} U_{\epsilon, \delta} := \{f_{a, b}\in X\mid |a-1|<\epsilon, |b|<\delta\} \end{equation*} Put \begin{align*} U_{\epsilon, \delta}^{L} & := \{\langle f_{x, y}, f_{a, b}\rangle\in X\times X\mid f_{x, y}\circ f_{a, b}^{-1}\in U_{\epsilon, \delta}\},\\ \mathfrak{b}_{L} & := \{U_{\epsilon, \delta}^{L}\mid \epsilon>0, \delta>0\},\\ U_{\epsilon, \delta}^{R} & := \{\langle f_{x, y}, f_{a, b}\rangle\in X\times X \mid f_{x, y}^{-1}\circ f_{a, b}\in U_{\epsilon, \delta}\},\\ \mathfrak{b}_{R} & := \{U_{\epsilon, \delta}^{R}\mid \epsilon>0, \delta>0\}.\\ \end{align*} Then $\mathfrak{b}_{L}$ resp. $\mathfrak{b}_{R}$ is the base for a uniformity $\mathfrak{u}_{L}$ resp. $\mathfrak{u}_{R}$ on $X$. Let us check this for $\mathfrak{b}_{R}$. Given positive $\epsilon, \delta$, we want to find positive $r, s$ with $\langle f_{m, n}, f_{p, q}\rangle\in V_{r, s}^{R}$ implies $\langle f_{p, q}, f_{m, n}\rangle\in U_{\epsilon, \delta}^{R}$. Now we can find for $\epsilon > 0$ and $\delta>0$ some $r > 0$ and $s>0$ so that \begin{align*} |\frac{p}{m} - 1| < r & \Rightarrow |\frac{m}{p} - 1| < \epsilon\\ |\frac{q}{m} - \frac{n}{m} | < s & \Rightarrow |\frac{n}{p} - \frac{q}{p}| < \delta \end{align*} holds, which is just what we want, since it translates into $V_{r, s}^{R}\subseteq \bigl(U_{\epsilon, \delta}^{R}\bigr)^{-1}$. 
The other properties of a base are easily seen to be satisfied. One argues similarly for $\mathfrak{b}_{L}$. Note that $(X, \circ)$ is a topological group with the sets $\{U_{\epsilon, \delta}\mid \epsilon>0, \delta>0\}$ as a base for the neighborhood filter of the neutral element $f_{1, 0}$ (topological groups are introduced in Example~\ref{top-group} on page~\pageref{top-group}). \item\label{example-uniformities-9} Let, in general, $G$ be a topological group with neutral element $e$ . Define for $U\in\ensuremath{{\mathfrak U}}(e)$ the sets \begin{align*} U_{L} & := \{\langle x, y\rangle\mid xy^{-1}\in U\},\\ U_{R} & := \{\langle x, y\rangle\mid x^{-1}y\in U\},\\ U_{B} & := U_{L}\cap U_{R}. \end{align*} Then $\{U_{L}\mid U\in\ensuremath{{\mathfrak U}}(e)\}$, $\{U_{R}\mid U\in\ensuremath{{\mathfrak U}}(e)\}$ and $\{U_{B}\mid U\in\ensuremath{{\mathfrak U}}(e)\}$ define bases for uniformities on $G$; it can be shown that they do not necessarily coincide (of, course, they do, if $G$ is Abelian). \end{enumerate} {\Large\ding{44}} \end{example} Before we show that a uniformity generates a topology, we derive a sufficient criterion for a family of subsets of $X\times X$ is a subbase for a uniformity. \BeginLemma{subbase-for-uniformity} Let $\mathfrak{s}\subseteq\PowerSet{X\times X}$, then $\mathfrak{s}$ is the subbase\MMP{Subbase} for a uniformity on $X$, provided the following conditions hold. \begin{enumerate} \item $\Delta\subseteq S$ for each $S\in\mathfrak{s}$. \item Given $U\in \mathfrak{s}$, there exists $V\in\mathfrak{s}$ such that $V\subseteq U^{-1}$. \item For each $U\in\mathfrak{s}$ there exists $V\in\mathfrak{s}$ such that $V\circ V\subseteq U$. \end{enumerate} \end{lemma} \begin{proof} We have to show that \begin{equation*} \mathfrak{b} := \{U_{1}\cap\ldots\cap U_{n}\mid U_{1}, \ldots, U_{n}\in\mathfrak{s}\text{ for some }n\in\mathbb{N}\} \end{equation*} constitutes a base for a uniformity. 
It is clear that every element of $\mathfrak{b}$ contains the diagonal. Let $U = \bigcap_{i=1}^{n}U_{i}\in\mathfrak{b}$ with $U_{i}\in\mathfrak{s}$ for $i = 1, \ldots, n$, choose $V_{i}\in\mathfrak{s}$ with $V_{i}\subseteq U_{i}^{-1}$ for all $i$, then $V := \bigcap_{i=1}^{n}V_{i}\in\mathfrak{b}$ and $V\subseteq U^{-1}$. If we select $W_{i}\in\mathfrak{s}$ with $W_{i}\circ W_{i}\subseteq U_{i}$, then $W := \bigcap_{i=1}^{n}W_{i}\in\mathfrak{b}$ and $W\circ W\subseteq U$. The last condition of Proposition~\ref{char-base-unif} is trivially satisfied for $\mathfrak{b}$, since $\mathfrak{b}$ is closed under finite intersections. Thus we conclude that $\mathfrak{b}$ is a base for a uniformity on $X$ by Proposition~\ref{char-base-unif}, which in turn entails that $\mathfrak{s}$ is a subbase. \end{proof} \paragraph{The Topology Generated by a Uniformity} A pseudometric space $(X, d)$ generates a topology by declaring a set $G$ open iff there exists for $x\in G$ some $r>0$ with $B(x, r)\subseteq G$; from this we obtained the neighborhood filter $\ensuremath{{\mathfrak U}}(x)$ for a point $x$. Note that in the uniformity associated with the pseudometric the identity \begin{equation*} B(x, r) = V_{r}[x] \end{equation*} holds. Encouraged by this, we approach the topology for a uniform space in the same way. Given a uniform space $(X, \mathfrak{u})$, a subset $G\subseteq X$ is called open iff we can find for each $x\in G$ some neighborhood $U\in\mathfrak{u}$ such that $U[x]\subseteq G$. The following proposition investigates this construction. \BeginProposition{topol-from-unif} Given a uniform space $(X, \mathfrak{u})$, \MMP[t]{From $\mathfrak{u}$ to $\tau_{\mathfrak{u}}$}for each $x\in X$ the family $\mathfrak{u}[x] :=\{U[x]\mid U\in \mathfrak{u}\}$ is the base for the neighborhood filter of $x$ for a topology $\tau_{\mathfrak{u}}$, which is called the \emph{\index{topology!uniform}\index{space!uniform!topology}uniform topology}. 
The neighborhoods for $x$ in $\tau_{\mathfrak{u}}$ are just $\mathfrak{u}[x]$. \end{proposition} \begin{proof} It follows from Proposition~\ref{def-through-nbh-filters} that $\mathfrak{u}[x]$ defines a topology $\tau_{\mathfrak{u}}$, it remains to show that the neighborhoods of this topology are just $\mathfrak{u}[x]$. We have to show that $U\in\mathfrak{u}$ there exists $V\in\mathfrak{u}$ with $V[x]\subseteq U[x]$ and $V[x]\in\mathfrak{u}[y]$ for all $y\in V[x]$, then the assertion will follow from Corollary~\ref{cor-def-through-nbh-filters}. For $U\in\mathfrak{u}$ there exists $V\in\mathfrak{u}$ with $V\circ V\subseteq U$, thus $\langle x, y\rangle\in V$ and $\langle y, z\rangle\in V$ implies $\langle x, z\rangle\in U$. Now let $y\in V[x]$ and $z\in V[y]$, thus $z\in U[x]$, but this means $U[x]\in\mathfrak{u}[y]$ for all $x\in V[y]$. Hence the assertion follows. \end{proof} These are some illustrative example. They indicate also that different uniformities can generate the same topology. \BeginExample{top-for-unif} \begin{enumerate} \item The topology obtained from the additive uniformity on $\mathbb{R}$ is the usual topology. The same holds for the multiplicative uniformity on $\mathbb{R}\setminus\{0\}$. Both can be shown to be distinct~\cite[Ch. 3, §6]{Bourbaki}. \item The topology induced by the discrete uniformity is the discrete topology, in which each singleton $\{x\}$ is open. Since $\bigl\{\{x\}, X\setminus\{x\}\bigr\}$ forms a finite partition of $X$, the discrete topology is induced also by the uniformity defined by the finite partitions. \item Let $F(A, \mathbb{R})$ be endowed with the uniformity defined by the sets $\{\langle f, g\rangle\in F(A, \mathbb{R})\mid \sup_{a\in A}|f(a)-g(a)|<\epsilon\}$, see Example~\ref{example-uniformities}. The corresponding topology yields for each $f\in F(A, \mathbb{R})$ the neighborhood $\{g\in F(A, \mathbb{R})\mid \sup_{a\in A}|f(a)-g(a)|< \epsilon\}$. This is the topology of uniform convergence. 
\item Let $\mathfrak{u}_{p}$ for a prime $p$ be the $p$-adic uniformity on $\mathbb{Z}$, see Example~\ref{example-uniformities}, part~\ref{example-uniformities-7}. The corresponding topology $\tau_{p}$ is called the $p$-adic topology. A basis for the neighborhoods of $0$ is given by the sets $V_{k} := \{x\in \mathbb{Z} \mid p^{k}\text{ divides } x\}$. Because $p^{m}\in V_{k}$ for $m\geq k$, we see that $\lim_{n\to \infty}\ p^{n} = 0$ in $\tau_{p}$, but not in $\tau_{q}$ for $q\not= p$, $q$ prime. Thus the topologies $\tau_{p}$ and $\tau_{q}$ differ, hence also the uniformities $\mathfrak{u}_{p}$ and $\mathfrak{u}_{q}$. \end{enumerate} {\Large\ding{44}} \end{example} Now that we know that each uniformity yields a topology on the same space, some questions are immediate: \begin{itemize} \item Do the open resp. the closed sets play a particular r\^ole in describing the uniformity? \item Does the topology have particular properties, e.g., in terms of separation axioms? \item What about metric spaces --- can we determine from the uniformity that the topology is metrizable? \item Can we find a pseudometric for a given uniformity? \item Is the product topology on $X\times X$ somehow related to $\mathfrak{u}$, which is defined on $X\times X$, after all? \end{itemize} We will give answers to some of these questions, some will be treated only lightly, with an in depth treatment to be found in the vast literature on uniform spaces, see the Bibliographic Notes in Section~\ref{sec:top-bib-notes}. Fix a uniform space $X$ with uniformity $\mathfrak{u}$ and associated topology $\tau$. References to neighborhoods and open sets are always to $\mathfrak{u}$ resp. $\tau$, unless otherwise stated. This is a first characterization of the interior of an arbitrary set. 
Recall that in a pseudometric space $x$ is an interior point of $A$ iff $B(x, r)\subseteq A$ for some $r>0$; the same description applies here as well, \emph{mutatis mutandis} (of course, this ``mutatis mutandis'' part is the interesting one). \BeginLemma{unif-top-descr-interior} Given $A\subseteq X$, $x\in\Interior{A}$ iff there exists a neighborhood $U$ with $U[x]\subseteq A$. \end{lemma} \begin{proof} Assume that $x\in\Interior{A} = \bigcup\{G\mid G\text{ open and }G\subseteq A\}$, then it follows from the definition of an open set that we must be able to find an neighborhood $U$ with $U[x]\subseteq A$. Conversely, we show that the set $ B := \{x\in X\mid U[x]\subseteq A\text{ for some neighborhood }U\} $ is open, then this must be the largest open set which is contained in $A$, hence $B=\Interior{A}$. Let $x\in B$, thus $U[x]\subseteq A$, and we should find now a neighborhood $V$ such that $V[y]\subseteq B$ for $y\in V[x]$. But we find a neighborhood $V$ with $V\circ V\subseteq U$. Let's see whether $V$ is suitable: if $y\in V[x]$, then $V[y]\subseteq (V\circ V)[x]$ (this is so because $\langle x, y\rangle\in V$, and if $z\in V[y]$, then $\langle y, z\rangle\in V$; this implies $\langle x, z\rangle\in V\circ V$, hence $z\in (V\circ V)[x]$). But this implies $V[y]\subseteq U[x]\subseteq B$, hence $y\in B$. But this means $V[x]\subseteq B$, so that $B$ is open. \end{proof} This gives us a handy way of describing the base for a neighborhood filter for a point in $X$. It states that we may restrict our attention to the members of a base or of a subbase, when we want to work with the neighborhood filter for a particular element. \BeginCorollary{descr-nbhd-base} If $\mathfrak{u}$ has base or subbase $\mathfrak{b}$, then $\{U[x]\mid U\in\mathfrak{b}\}$ is a base resp. subbase for the neighborhood filter for $x$. \end{corollary} \begin{proof} This follows immediately from Lemma~\ref{unif-top-descr-interior} together with Proposition~\ref{char-base-unif} resp. 
Lemma~\ref{subbase-for-uniformity}. \end{proof}
We use the characterization of a point $x$ in the closure through its neighborhood filter from \SetCite{Lemma 1.94}: $x\in\Closure{A}$ iff $U[x]\cap A\not=\emptyset$ for all symmetric $U\in\mathfrak{u}$, because the symmetric neighborhoods form a base for $\mathfrak{u}$. Now $z\in U[x]\cap A$ iff $z\in A$ and $\langle x, z\rangle\in U$ iff $z\in U[z]$ and $z\in A$, hence $U[x]\cap A \not=\emptyset$ iff $x\in U[A]$, because $U$ is symmetric. But this means $\Closure{A}= \bigcap\{U[A]\mid U\in\mathfrak{u}\}$. 2. Let $\langle x, y\rangle\in\Closure{M}$, then $U[x]\times U[y]\cap M\not=\emptyset$ for all symmetric neighborhoods $U\in\mathfrak{u}$, so that $\langle x, y\rangle\in U\circ M\circ U$ for all symmetric neighborhoods. This accounts for the inclusion from left to right. If $\langle x, y\rangle\in U\circ M\circ U$ for all neighborhoods $U$, then for every $U\in\mathfrak{u}$ there exists $\langle a, b\rangle\in M$ with $\langle a, b\rangle\in U[x]\times (U^{-1})[y]$, thus $\langle x, y\rangle\in \Closure{M}$. \end{proof} Hence \BeginCorollary{clos-symm-are-base} The closed symmetric neighborhoods form a base for the uniformity. \end{corollary} \begin{proof} Let $U\in\mathfrak{u}$, then there exists a symmetric $V\in\mathfrak{u}$ with $V\circ V\circ V\subseteq U$ with $V\subseteq\Closure{V}\subseteq V\circ V\circ V$ by Proposition~\ref{limits-are-unique}. Hence $W := \Closure{V}\cap(\Closure{V})^{-1})$ is a member of $\mathfrak{u}$ which is contained in $U$. \end{proof} Proposition~\ref{closure-and-square} has also an interesting consequence when looking at the characterization of Hausdorff spaces in Proposition~\ref{limits-are-unique}. Putting $M = \Delta$, we obtain $\Closure{\Delta} = \bigcap\{U\circ U\mid U\in\mathfrak{u}\}$, so that the associated topological space is Hausdorff iff the intersection of all neighborhoods is the diagonal $\Delta$. 
Uniform spaces with $\bigcap\mathfrak{u}=\Delta$ are called \emph{\index{space!uniform!separated}separated}\MMP{Separated}. \paragraph{Pseudometrization} We will see shortly that the topology for a separated uniform space is completely regular. First, however, we will show that we can generate pseudometrics from the uniformity by the following idea: suppose that we have a neighborhood $V$, then there exists a neighborhood $V_{2}$ with $V_{2}\circ V_{2}\circ V_{2}\subseteq V_{1} := V$; continuing in this fashion, we find for the neighborhood $V_{n}$ a neighborhood $V_{n+1}$ with $V_{n+1}\circ V_{n+1}\circ V_{n+1}\subseteq V_{n}$, and finally put $V_{0} := X\times X$. Given a pair $\langle x, y\rangle\in X\times X$, this sequence $\Folge{V}$ is now used as a witness to determine how far apart these points are: put $f_{V}(x, y) := 2^{-n}$, iff $\langle x, y\rangle\in V_{n}\setminus V_{n-1}$, and $d_{V}(x, y) := 0$ iff $\langle x, y\rangle\in \bigcap_{n\in\mathbb{N}}V_{n}$. Then $f_{V}$ will give rise to a pseudometric $d_{V}$\MMP{$d_{V}$}, the pseudometric associated with $V$, as we will show below. This means that many pseudometric spaces are hidden deep inside a uniform space! Moreover, if we need a pseudometric, we construct one from a neighborhood. These observations will turn out to be fairly practical later on. But before we are in a position to make use of them, we have to do some work. \BeginProposition{construct-pseudometric} Assume that $\Folge{V}$ is a sequence of symmetric subsets of $X\times X$ with these properties for all $n\in\mathbb{N}$: \begin{itemize} \item $\Delta\subseteq V_{n}$, \item $V_{n+1}\circ V_{n+1}\circ V_{n+1}\subseteq V_{n}$. \end{itemize} Put $V_{0} := X\times X$. Then there exists a pseudometric $d$ with \begin{equation*} V_{n}\subseteq \{\langle x, y\rangle\mid d(x, y)<2^{-n}\}\subseteq V_{n-1} \end{equation*} for all $n\in\mathbb{N}$. \end{proposition} \begin{proof} 0. The proof uses the idea outlined above. 
The main effort will be showing that we can squeeze $\{\langle x, y\rangle\mid d(x, y)<2^{-n}\}$ between $V_{n}$ and $V_{n-1}$. 1. Put $f(x, y) := 2^{-n}$ iff $\langle x, y\rangle\in V_{n}\setminus V_{n-1}$, and let $f(x, y) := 0$ iff $\langle x, y\rangle\in \bigcap_{n\in\mathbb{N}}V_{n}$. Then $f(x, x) = 0$, and $f(x, y) = f(y, x)$, because each $V_{n}$ is symmetric. Define \begin{equation*} d(x, y) := \inf\bigl\{\sum_{i=0}^{k}f(x_{i}, x_{i+1}) \mid x_{0}, \ldots, x_{k+1}\in X\text{ with }x_{0} = x, x_{k+1} = y, k\in\mathbb{N}\bigr\} \end{equation*} So we look at all paths leading from $x$ to $y$, sum the weight of all their edges, and look at their smallest value. Since we may concatenate a path from $x$ to $y$ with a path from $y$ to $z$ to obtain one from $x$ to $z$, the triangle inequality holds for $d$, and since $d(x, y) \leq f(x, y)$, we know that $V_{n}\subseteq \{\langle x, y\rangle\mid d(x, y)<2^{-n}\}$. The latter set is contained in $V_{n-1}$; to show this is a bit tricky and requires an intermediary step. 2. We show by induction on $n$ that \begin{equation*} f(x_{0}, x_{n+1}) \leq 2\cdot \sum_{i=0}^{n}f(x_{i}, x_{i+1}), \end{equation*} so if we have a path of length $n$, then the weight of the edge connecting their endpoints cannot be greater than twice the weight on an arbitrary path. If $n = 1$, there is nothing to show. So assume the assertion is proved for all path with less that $n$ edges. We take a path from $x_{0}$ to $x_{n+1}$ with $n$ edges $\langle x_{i}, x_{i+1}\rangle$. Let $w$ be the weight of the path from $x_{0}$ to $x_{n+1}$, and let $k$ be the largest integer such that the path from $x_{0}$ to $x_{k}$ is at most $w/2$. Then the path from $x_{k+1}$ to $x_{n+1}$ has a weight at most $w/2$ as well. Now $f(x_{0}, x_{k}) \leq w$ and $f(x_{k+1}, x_{n+1})\leq w$ by induction hypothesis, and $f(x_{k}, x_{k+1})\leq w$. 
Let $m\in\mathbb{N}$ be the smallest integer with $2^{-m}\leq w$,
\BeginExample{partitions-not-metriz} Let $X$ be an uncountable set, and let $\mathfrak{u}$ be the uniformity given by the finite partitions, see Example~\ref{example-uniformities}. Then we have seen in Example~\ref{top-for-unif} that the topology induced by $\mathfrak{u}$ on $X$ is the discrete topology, which is metrizable. Assume that $\mathfrak{u}$ is generated by a pseudometric, then Proposition~\ref{unif-by-pseudometric} implies that $\mathfrak{u}$ has a countable base, thus given a finite partition $\pi$, there exists a finite partition $\pi^{*}$ such that $V_{\pi^{*}}\subseteq V_{\pi}$, and $V_{\pi^{*}}$ is an element of this base. Here $V_{\{P_{1}, \ldots, P_{n}\}} := \bigcup_{i=1}^{n}(P_{i}\times P_{i})$ is the basic neighborhood for $\mathfrak{u}$ associated with partition $\{P_{1}, \ldots, P_{n}\}$. But for any given partition $\pi^{*}$ we can only form a finite number of other partitions $\pi$ with $V_{\pi^{*}}\subseteq V_{\pi}$, so that we have only a countable number of partitions on $X$. {\Large\ding{44}} \end{example} This is another consequence of Proposition~\ref{construct-pseudometric}: each uniform space satisfies the separation axiom $T_{3\ensuremath{\frac{1}{2}}}$. For establishing this claim, we take a closed set $F\subseteq X$ and a point $x_{0}\not\in F$, then we have to produce a continuous function $f: X\to [0, 1]$ with $f(x_{0}) = 0$ and $f(y) = 1$ for $y\in A$. This is how to do it. Since $X\setminus F$ is open, we find a neighborhood $U\in\mathfrak{u}$ with $U[x_{0}]\subseteq X\setminus F$. Let $d_{U}$ be the pseudometric associated with $U$, then $\{\langle x, y\rangle\mid d_{U}(x, y)< 1/2\}\subseteq U$. Clearly, $x\mapsto d_{U}(x, x_{0})$ is a continuous function on $X$, hence \begin{equation*} f(x) := \max\{0, 1-2\cdot d_{U}(x, x_{0})\} \end{equation*} is continuous with $f(x_{0}) = 1$ and $f(y) = 0$ for $y\in F$, thus $f$ has the required properties. 
Thus we have shown \BeginProposition{tdreieinhalb-unif} A uniform space is a $T_{3\ensuremath{\frac{1}{2}}}$-space; a separated uniform space is completely regular. \QED \end{proposition} \paragraph{Cauchy Filters} We generalize the notion of a Cauchy sequence to uniform spaces now. We do this in order to obtain a notion of convergence which includes convergence in topological spaces, and which carries the salient features of a Cauchy sequence with it. First, we note that filters are a generalization for sequences. So let us have a look at what can be said, when we construct the filter $\ensuremath{{\filterFont F}}$ for a Cauchy sequence $\Folge{x}$ in a pseudometric space $(X, d)$. $\ensuremath{{\filterFont F}}$ has the sets $\mathfrak{c} := \{B_{n}\mid n\in\mathbb{N}\}$ with $B_n := \{x_{m}\mid m\geq n\}$ as a base. Being a Cauchy filter says that for each $\epsilon>0$ there exists $n\in\mathbb{N}$ such that $B_{n}\times B_{n}\subseteq V_{d, \epsilon}$; this inclusion holds then for all $B_{m}$ with $m\geq n$ as well. Because $\mathfrak{c}$ is the base for $\ensuremath{{\filterFont F}}$, and the sets $V_{d, r}$ are a base for the uniformity, we may reformulate that $\ensuremath{{\filterFont F}}$ is a Cauchy filter iff for each neighborhood $U$ there exists $B\in \ensuremath{{\filterFont F}}$ such that $B\times B\subseteq U$. Now this looks like a property which may be formulated for general uniform spaces. Fix the uniform space $(X, \mathfrak{u})$. Given $U\in\mathfrak{u}$, the set $M\subseteq X$ is called \emph{\index{set!small}\MMP{Small sets}$U$-small} iff $M\times M\subseteq U$. A collection ${\mathcal F}$ of sets is said to contain \emph{small sets} iff given $U\in\mathfrak{u}$ there exists $A\in{\mathcal F}$ which is $U$-small, or, equivalently, given $U\in\mathfrak{u}$ there exists $x\in X$ with $A\subseteq U[x]$. This helps in formulating the notion of a Cauchy filter. 
\BeginDefinition{def-cauchy-filter} A filter $\ensuremath{{\filterFont F}}$ is called a \emph{\index{Cauchy filter}Cauchy \index{filter!Cauchy}filter} iff it contains small sets. \end{definition} In this sense, a Cauchy sequence induces a Cauchy filter. Convergent filters are Cauchy filters as well: \BeginLemma{convergent-is-cauchy} If $\ensuremath{{\filterFont F}}\to x$ for some $x\in X$, then $\ensuremath{{\filterFont F}}$ is a Cauchy filter. \end{lemma} \begin{proof} Let $U\in\mathfrak{u}$, then there exists a symmetric $V\in\mathfrak{u}$ with $V\circ V\subseteq U$. Because $\ensuremath{{\mathfrak U}}(x)\subseteq \ensuremath{{\filterFont F}}$, we conclude $V[x]\in\ensuremath{{\filterFont F}}$, and $V[x]\times V[x]\subseteq U$, thus $V[x]$ is a $U$-small member of $\ensuremath{{\filterFont F}}$. \end{proof} But the converse does not hold, as the following example shows. \BeginExample{ultra-is-cauchy} Let $\mathfrak{u}$ be the uniformity induced by the finite partitions with $X$ infinite. We claim that each ultrafilter $\ensuremath{{\filterFont F}}$ is a Cauchy filter. In fact, let $\pi = \{A_{1}, \ldots, A_{n}\}$ be a finite partition, then $V_{\pi} = \bigcup_{i=1}^{n} A_{i}\times A_{i}$ is the corresponding neighborhood, then there exists $i^{*}$ with $A_{i^{*}}\in\ensuremath{{\filterFont F}}$. This is so since if an ultrafilter contains the finite union of sets, it must contain one of them, see~\SetCite{Lemma 1.5.36}. $A_{i^{*}}$ is $V$-small. The topology induced by this uniformity is the discrete topology, see Example~\ref{top-for-unif}. This topology is not compact, since $X$ is infinite. By Theorem~\ref{conv-vs-ultrafilter} there are ultrafilters which do not converge. {\Large\ding{44}} \end{example} If $x$ is an accumulation point of a Cauchy sequence in a pseudometric space, then we know that $x_{n}\to x$; this is fairly easy to show. 
A similar observation can be made for Cauchy filters, so that we have a partial converse to Lemma~\ref{convergent-is-cauchy}. \BeginLemma{acc-point-cf-converges} Let $x$ be an accumulation point of the Cauchy filter $\ensuremath{{\filterFont F}}$, then $\ensuremath{{\filterFont F}}\to x$. \end{lemma} \begin{proof} Let $V\in\mathfrak{u}$ be a closed neighborhood; in view of Corollary~\ref{clos-symm-are-base} is is sufficient to show that $V[x]\in \ensuremath{{\filterFont F}}$, then it will follow that $\ensuremath{{\mathfrak U}}(x)\subseteq\ensuremath{{\filterFont F}}$. Because $\ensuremath{{\filterFont F}}$ is a Cauchy filter, we find $F\in\ensuremath{{\filterFont F}}$ with $F\times F\subseteq V$, because $V$ is closed, we may assume that $F$ is closed as well (otherwise we replace it by its closure). Because $F$ is closed and $x$ is an accumulation point of $\ensuremath{{\filterFont F}}$, we know from Lemma~\ref{all-acc-points} that $x\in F$, hence $F\subseteq V[x]$. This implies $\ensuremath{{\mathfrak U}}(x)\subseteq \ensuremath{{\filterFont F}}$. \end{proof} \BeginDefinition{complete-unif-space} The uniform space $(X, \mathfrak{u})$ is called \emph{\index{space!uniform!complete}complete} iff each Cauchy filter converges. \end{definition} Each Cauchy sequence converges in a complete uniform space, because the associated filter is a Cauchy filter. A slight reformulation is given in the following proposition, which is the uniform counterpart to the characterization of complete pseudometric spaces in Proposition~\ref{diam-to-zero-compl}. Recall that a collection of sets is said to have the \emph{finite intersection property} iff each finite subfamily has a non-empty intersection. \BeginProposition{equiv-uni-complete} The uniform space $(X, \mathfrak{u})$ is complete iff each family of closed sets which has the finite intersection property and which contains small sets has a non-void intersection. 
\end{proposition} \begin{proof} This is essentially a reformulation of the definition, but let's see. 1. Assume that $(X, \mathfrak{u})$ is complete, and let ${\mathcal A}$ be a family of closed sets with the finite intersection property, which contains small sets. Hence $\ensuremath{{\filterFont F}}_{0} := \{F_{1}\cap \ldots\cap F_{n}\mid n\in\mathbb{N}, F_{1}, \ldots, F_{n}\in {\mathcal A}\}$ is a filter base. Let $\ensuremath{{\filterFont F}}$ be the corresponding filter, then $\ensuremath{{\filterFont F}}$ is a Cauchy filter, for ${\mathcal A}$, hence $\ensuremath{{\filterFont F}}_{0}$ contains small sets. Thus $\ensuremath{{\filterFont F}}\to x$, so that $\ensuremath{{\mathfrak U}}(x)\subseteq \ensuremath{{\filterFont F}}$, thus $x\in\bigcap_{F\in\ensuremath{{\filterFont F}}}\Closure{F}\subseteq \bigcap_{A\in{\mathcal A}}A$. 2. Conversely, let $\ensuremath{{\filterFont F}}$ be a Cauchy filter. Since $\{\Closure{F}\mid F\in\ensuremath{{\filterFont F}}\}$ is a family of closed sets with the finite intersection property which contains small sets, the assumption says that $\bigcap_{F\in\ensuremath{{\filterFont F}}}\Closure{F}$ is not empty and contains some $x$. But then $x$ is an accumulation point of $\ensuremath{{\filterFont F}}$ by Lemma~\ref{all-acc-points}, so $\ensuremath{{\filterFont F}}\to x$ by Lemma~\ref{acc-point-cf-converges}. \end{proof} As in the case of pseudometric spaces, compact spaces are derived from a complete uniformity. \BeginLemma{compactyields-complete} Let $(X, \mathfrak{u})$ be a uniform space so that the topology associated with the uniformity is compact. Then the uniform space $(X, \mathfrak{u})$ is complete. \end{lemma} \begin{proof} In fact, let $\ensuremath{{\filterFont F}}$ be a Cauchy filter on $X$. Since the topology for $X$ is compact, the filter has an accumulation point $x$ by Corollary~\ref{char-acc-point-ultra}. But Lemma~\ref{acc-point-cf-converges} tells us then that $\ensuremath{{\filterFont F}}\to x$. 
Hence each Cauchy filter converges. \end{proof} The uniform space which is derived from an ideal on the powerset of a set, which has been defined in Example~\ref{example-uniformities} (part~\ref{example-uniformities-5}) is complete. We establish this first for Cauchy nets as the natural generalization of Cauchy sequences, and then translate the proof to Cauchy filters. This will permit an instructive comparison of handling these two concepts. \BeginExample{example-uniformities-5-net} Recall the definition of a net on page~\pageref{def-net}. A net $(x_{i})_{i\in N}$ in the uniform space $X$ is called a \emph{Cauchy \index{net!Cauchy}net}\MMP{Cauchy net} iff, given a neighborhood $U\in\mathfrak{u}$, there exists $i\in N$ such that $\langle x_{j}, x_{\gamma}\rangle\in U$ for all $j, k\in N$ with $j, k\geq i$. The net converges to $x$ iff given a neighborhood $U$ there exists $i\in N$ such that $\langle x_{j}, x\rangle\in U$ for $j\geq i$. Now assume that ${\mathcal I}\subseteq \PowerSet{X}$ is an ideal; part~\ref{example-uniformities-5} of Example~\ref{example-uniformities} defines a uniformity $\mathfrak{u}_{{\mathcal I}}$ on $\PowerSet{X}$ which has the sets $ V_{I} := \{\langle A, B\rangle\mid A, B\in\PowerSet{X}, A\Delta B\subseteq I\} $ as a base, as $I$ runs through ${\mathcal I}$. We claim that each Cauchy net $(F_{i})_{i\in N}$ converges to $F := \bigcup_{i\in N}\bigcap_{j\geq i}F_{j}$. In fact, let a neighborhood $U$ be given; we may assume that $U = V_{I}$ for some ideal $I\in{\mathcal I}$. Thus there exists $i\in N$ such that $\langle F_{j}, F_{k}\rangle\in V_{I}$ for all $j, k\geq i$, hence $F_{j}\Delta F_{k}\subseteq I$ for all these $j, k$. Let $x\in F\Delta F_{j}$ for $j\geq i$. \begin{itemize} \item If $x\in F$, we find $i_{0}\in N$ such that $x\in F_{k}$ for all $k\geq i_{0}$. Fix $k\in N$ so that $k\geq i$ and $k\geq i_{0}$, which is possible since $N$ is directed. 
Then $x\in F_{k}\Delta F_{j}\subseteq I.$ \item If $x\not\in F$, we find for each $i_{0}\in N$ some $k\geq i_{0}$ with $x\not\in F_{k}$. Pick $k\geq i_{0}$, then $x\not\in F_{k}$, hence $x\in F_{\gamma}\Delta F_{j}\subseteq I$ \end{itemize} Thus $\langle F, F_{j}\rangle\in V_{I}$ for $j\geq i$, hence the net converges to $F$. {\Large\ding{44}} \end{example} Now let's investigate convergence of a Cauchy filter. One obvious obstacle in a direct translation seems to be the definition of the limit set, because this appears to be bound to the net's indices. But look at this. If $(x_{i})_{i\in N}$ is a net, then the sets ${\mathcal B}_{i} := \{x_{j}\mid j\geq i\}$ form a filter base $\mathfrak{B}$, as $i$ runs through the directed set $N$ (see the discussion on page~\pageref{def-net}). Thus we have defined $F$ in terms of this base, viz., $F = \bigcup_{{\mathcal B}\in\mathfrak{B}}\bigcap{\mathcal B}$. This gives an idea for the filter based case. \BeginExample{example-uniformities-5-filter} Let $\mathfrak{u}_{{\mathcal I}}$ be the uniformity on $\PowerSet{X}$ discussed in Example~\ref{example-uniformities-5-net}. Then each Cauchy filter $\ensuremath{{\filterFont F}}$ converges. In fact, let $\mathfrak{B}$ be a base for $\ensuremath{{\filterFont F}}$, then $\ensuremath{{\filterFont F}}\to F$ with $F := \bigcup_{{\mathcal B}\in\mathfrak{B}}\bigcap{\mathcal B}$. Let $U$ be a neighborhood in $\mathfrak{u}_{{\mathcal I}}$, and we may assume that $U = V_{I}$ for some $I\in{\mathcal I}$. Since $\ensuremath{{\filterFont F}}$ is a Cauchy filter, we find ${\mathcal F}\in\ensuremath{{\filterFont F}}$ which is $V_{I}$-small, hence $F\Delta F'\subseteq I$ for all $F, F'\in{\mathcal F}$. Let $F_{0}\in{\mathcal F}$, and consider $x\in F\Delta F_{0}$; we show that $x\in I$ by distinguishing these cases: \begin{itemize} \item If $x\in F$, then there exists ${\mathcal B}\in\mathfrak{B}$ such that $x\in\bigcap{\mathcal B}$. 
Because ${\mathcal B}$ is an element of base $\mathfrak{B}$, and because $\ensuremath{{\filterFont F}}$ is a filter, ${\mathcal B}\cap {\mathcal F}\not=\emptyset$, so we find $G\in{\mathcal B}$ with $G\in{\mathcal F}$, in particular $x\in G$. Consequently $x\in G\Delta F_{0}\subseteq I$, since ${\mathcal F}$ is $V_{I}$-small. \item If $x\not\in F$, we find for each ${\mathcal B}\in\mathfrak{B}$ some $G\in{\mathcal B}$ with $x\not\in G$. Since $\mathfrak{B}$ is a base for $\ensuremath{{\filterFont F}}$, there exists ${\mathcal B}\in\mathfrak{B}$ with ${\mathcal B}\subseteq{\mathcal F}$, so there exists $G\in{\mathcal F}$ with $x\not\in G$. Hence $x\in G\Delta F_{0}\subseteq I$. \end{itemize} Thus $F\Delta F_{0}\subseteq I$, hence $\langle F, F_{0}\rangle\in V_{I}$. This means ${\mathcal F}\subseteq V_{I}[F]$, which in turn implies $\ensuremath{{\mathfrak U}}(F)\subseteq \ensuremath{{\filterFont F}}$, or, equivalently, $\ensuremath{{\filterFont F}}\to F$. {\Large\ding{44}} \end{example} For further investigations of uniform spaces, we define uniform continuity as the brand of continuity which is adapted to uniform spaces. \paragraph{Uniform Continuity} Let $f: X\to X'$ be a uniformly continuous map between the pseudometric spaces $(X, d)$ and $(X', d')$. This means that given $\epsilon>0$ there exists $\delta>0$ such that, whenever $d(x, y)<\delta$, $d'(f(x), f(y))<\epsilon$ follows. In terms of neighborhoods, this means $V_{d, \delta}\subseteq\InvBild{(\zZ{f})}{V_{d', \epsilon}}$, or, equivalently, that $\InvBild{(\zZ{f})}{V}$ is a neighborhood in $X$, whenever $V$ is a neighborhood in $X'$. We use this formulation, which is based only on neighborhoods, and not on pseudometrics, for a formulation of uniform continuity. \BeginDefinition{unif-cont-unif-sp} Let $(X, \mathfrak{u})$ and $(Y, \mathfrak{v})$ be uniform spaces. 
Then $f: X\to Y$ is called \emph{uniformly \index{continuous!uniformly}continuous} iff $\InvBild{(\zZ{f})}{V}\in\mathfrak{u}$ for all $V\in\mathfrak{v}$. \end{definition} \BeginProposition{unif-spac-cat} Uniform spaces for a category with uniform continuous maps as morphisms. \end{proposition} \begin{proof} The identity is uniformly continuous, and, since $(\zZ{g})\circ (\zZ{f}) = \zZ{(g\circ f)}$, the composition of uniformly continuous maps is uniformly continuous again. \end{proof} Introducing something new, one checks whether this has some categorical significance, of course. We also want to see what happens in the underlying topological space. But here nothing unexpected will happen: a uniformly continuous map is continuous with respect to the underlying topologies, formally: \BeginProposition{unif-cont-is-cont} If $f: (X, \mathfrak{u})\to (Y, \mathfrak{v})$ is uniformly continuous, then $f: (X, \tau_{\mathfrak{u}})\to (Y, \tau_{\mathfrak{v}})$ is continuous. \end{proposition} \begin{proof} Let $H\subseteq Y$ be open with $f(x)\in H$. If $x\in \InvBild{f}{H}$, there exists a neighborhood $V\in\mathfrak{v}$ such that $V[f(x)]\subseteq H$. Since $U := \InvBild{(\zZ{f})}{V}$ is a neighborhood in $X$, and $U[x]\subseteq \InvBild{f}{H}$, it follows that $\InvBild{f}{H}$ is open in $X$. \end{proof} The converse is not true, however, as Example~\ref{cont-not-unif-cont} shows. Before proceeding, we briefly discuss two uniformities on the same topological group which display quite different behavior, so that the identity is not uniformly continuous. \BeginExample{ex-affine-maps} Let $X := \{f_{a, b}\mid a, b\in \mathbb{R}, a\not=0\}$ be the set of all affine maps $f_{a, b}: \mathbb{R}\to \mathbb{R}$ with the separated uniformities $\mathfrak{u}_{R}$ and $\mathfrak{u}_{L}$, as discussed in Example~\ref{example-uniformities}, part~\ref{example-uniformities-8}. Let $a_{n} := d_{n} := 1/n$, $b_{n} := -1/n$ and $c_{n} := n$. 
Put $g_{n} := f_{a_{n}, b_{n}}$ and $h_{n} := f_{c_{n}, d_{n}}$, $j_{n} := h_{n}^{-1}$. Now $g_{n}\circ h_{n} = f_{1, 1/n^{2}-1/n}\to f_{1, 0}$, $h_{n}\circ g_{n} = f_{1, -1+1/n}\to f_{1, -1}$. Now assume that $\mathfrak{u}_{R} = \mathfrak{u}_{L}$. Given $U\in \ensuremath{{\mathfrak U}}(e)$, there exists $V\in\ensuremath{{\mathfrak U}}(e)$ symmetric such that $V^{R}\subseteq U^{L}$. Since $g_{n}\circ h_{n}\to f_{1, 0}$, there exists for $V$ some $n_{0}$ such that $g_{n}\circ h_{n}\in V$ for $n\geq n_{0}$, hence $\langle g_{n}, j_{n}\rangle\in V^{R}$, thus $\langle j_{n}, g_{n}\rangle\in V^{R}\subseteq U^{L}$, which means that $h_{n}\circ g_{n}\in U$ for $n\geq n_{0}$. Since $U\in\ensuremath{{\mathfrak U}}(e)$ is arbitrary, this means that $h_{n}\circ g_{n}\to e$, which is a contradiction. Thus we find that the left and the right uniformity on a topological group are different, although they are derived from the same topology. In particular, the identity $(X, \mathfrak{u}_{R})\to (X, \mathfrak{u}_{L})$ is not uniformly continuous. {\Large\ding{44}} \end{example} We will construct the initial uniformity for a family of maps now. The approach is similar to the one observed for the initial topology (see Definition~\ref{initial-and-final-tops}), but since a uniformity is in particular a filter with certain properties, we have to make sure that the construction can be carried out as intended. Let ${\mathcal F}$ be a family of functions $f: X\to Y_{f}$, where $(Y_{f}, \mathfrak{v}_{f})$ is a uniform space. We want to construct a uniformity $\mathfrak{u}$ on $X$ rendering all $f$ uniformly continuous, so $\mathfrak{u}$ should contain \begin{equation*} \mathfrak{s} := \bigcup_{f\in {\mathcal F}}\{\InvBild{(\zZ{f})}{V}\mid V\in\mathfrak{v}_{f}\}, \end{equation*} and it should be the smallest uniformity on $X$ with this property. For this to work, it is necessary for $\mathfrak{s}$ to be a subbase. 
We check this along the properties from Lemma~\ref{subbase-for-uniformity}: \begin{enumerate} \item Let $f\in {\mathcal F}$ and $V\in \mathfrak{v}_{f}$, then $\Delta_{Y_{f}}\subseteq V$. Since $\Delta_{X} = \InvBild{f}{\Delta_{Y_{f}}}$, we conclude $\Delta_{X} \subseteq \{\InvBild{(\zZ{f})}{V}$. Thus each element of $\mathfrak{s}$ contains the diagonal of $X$. \item Because $\bigl(\InvBild{(\zZ{f})}{V}\bigl)^{-1} = \InvBild{(\zZ{f})}{V^{-1}}$, we find that, given $U\in\mathfrak{s}$, there exists $V\in\mathfrak{s}$ with $V\subseteq U^{-1}$. \item Let $U\in\mathfrak{b}$, so that $U = \InvBild{(\zZ{f})}{V}$ for some $f\in{\mathcal F}$ and $V\in\mathfrak{v}_{f}$. We find $W\in\mathfrak{v}_{f}$ with $W\circ W\subseteq V$; put $W_{0} := \InvBild{(\zZ{f})}{W}$, then $W_{0}\circ W_{0}\subseteq \InvBild{(\zZ{f})}{W\circ W}\subseteq\InvBild{(\zZ{f})}{V} = U$, so that we find for $U\in\mathfrak{s}$ an element $W_{0}\in\mathfrak{s}$ with $W_{0}\circ W_{0}\subseteq U$. \end{enumerate} Thus $\mathfrak{s}$ is the subbase for a uniformity, and we have established \BeginProposition{initial-unif} Let ${\mathcal F}$ be a family of maps $X\to Y_{f}$ with $(Y_{f}, \mathfrak{v}_{f})$ a uniform space, then there exists a smallest uniformity $\mathfrak{u}_{{\mathcal F}}$ on $X$ rendering all $f\in {\mathcal F}$ uniformly continuous. $\mathfrak{u}_{{\mathcal F}}$ is called the \emph{initial \index{uniformity!initial}uniformity} on $X$ with respect to ${\mathcal F}$. \end{proposition} \begin{proof} We know that $\mathfrak{s} := \bigcup_{f\in {\mathcal F}}\{\InvBild{(\zZ{f})}{V}\mid V\in\mathfrak{v}_{f}\}$ is a subbase for a uniformity $\mathfrak{u}$, which is evidently the smallest uniformity so that each $f\in {\mathcal F}$ is uniformly continuous. So $\mathfrak{u}_{f} := \mathfrak{u}$ is the uniformity we are looking for. 
\end{proof} Having this tool at our disposal, we can now ---~in the same way as we did with topologies~--- define \begin{description} \item[Product] The product \index{uniformity!product}uniformity\MMP{Product} for the uniform spaces $(X_{i}, \mathfrak{u}_{i})_{i\in I}$ is the initial uniformity on $X := \prod_{i\in I}X_{i}$ with respect to the projections $\pi_{i}: X\to X_{i}$. \item[Subspace] The subspace uniformity\index{uniformity!subspace}\MMP{Subspace} $\mathfrak{u}_{A}$ is the initial uniformity on $A\subseteq X$ with respect to the embedding $i_{A}: x \mapsto x$. \end{description} We can construct dually a final uniformity on $Y$ with respect to a family ${\mathcal F}$ of maps $f: X_{f}\to Y$ with uniform spaces $(X_{f}, \mathfrak{u}_{f})$, for example when investigating quotients. The reader is referred to~\cite[II.2]{Bourbaki} or to~\cite[8.2]{Engelking}. This is a little finger exercise for the use of a product uniformity. It takes a pseudometric and shows what you would expect: the pseudometric is uniformly continuous iff it generates neighborhoods. The converse holds as well. We do not assume here that $d$ generates $\mathfrak{u}$, rather, it is just an arbitrary pseudometric, of which there may be many. \BeginProposition{d-unif-cont-iff} Let $(X, \mathfrak{u})$ be a uniform space, $d: X\times X\to\mathbb{R}_{+}$ a pseudometric. Then $d$ is uniformly continuous with respect to the product uniformity on $X\times X$ iff $V_{d, r}\in\mathfrak{u}$ for all $r>0$. \end{proposition} \begin{proof} 1. Assume first that $d$ is uniformly continuous, thus we find for each $r>0$ some neighborhood $W$ on $X\times X$ such that $\bigl\langle\langle x, u\rangle, \langle y, v\rangle\bigr\rangle\in W$ implies $|d(x, y) - d(u, v)|<r$. 
We find a symmetric neighborhood $U$ on $X$ such that $U_{1}\cap U_{2} \subseteq W$, where $U_{i} := \InvBild{(\zZ{\pi_{i}})}{U}$ for $i = 1, 2$, and \begin{align*} U_{1} & = \{\bigl\langle\langle x, u\rangle, \langle y, v\rangle\bigr\rangle\mid \langle x, y\rangle\in U\},\\ U_{2} & = \{\bigl\langle\langle x, u\rangle, \langle y, v\rangle\bigr\rangle\mid \langle u, v\rangle\in U\}. \end{align*} Thus if $\langle x, y\rangle\in U$, we have $\bigl\langle\langle x, y\rangle, \langle y, y\rangle\bigr\rangle \in W$, hence $d(x, y)< r$, so that $U\subseteq V_{d, r}$, thus $V_{d, r}\in\mathfrak{u}$. 2. Assume that $V_{d, r}\in \mathfrak{u}$ for all $r>0$, and we want to show that $d$ is uniformly continuous in the product. If $\langle x, u\rangle, \langle y, v\rangle\in V_{d, r}$, then \begin{align*} d(x, y) & \leq d(x, u) + d(u, v) + d(v, y)\\ d(u, v) & \leq d(x, u) + d(x, y) + d(y, v), \end{align*} hence $|d(x, y) - d(u, v)| < 2\cdot r$. Thus $\InvBild{(\zZ{\pi_{1}})}{V_{d, r}}\cap\InvBild{(\zZ{\pi_{2}})}{V_{d, r}}$ is a neighborhood on $X\times X$ such that $\bigl\langle\langle x, u\rangle, \langle y, v\rangle\bigr\rangle\in W$ implies $|d(x, y) - d(u, v)|<2\cdot r$. \end{proof} Combining Proposition~\ref{construct-pseudometric} with the observation from Proposition~\ref{d-unif-cont-iff}, we have established this characterization of a uniformity through pseudometrics. \BeginProposition{char-thru-pseudometrics} The uniformity $\mathfrak{u}$ is the smallest uniformity which is generated by all pseudometrics which are uniformly continuous on $X\times X$, i.e., $\mathfrak{u}$ is the smallest uniformity containing $V_{d, r}$ for all such $d$ and all $r>0$. \end{proposition} We fix for the rest of this section the uniform spaces $(X, \mathfrak{u})$ and $(Y, \mathfrak{v})$. Note that for checking uniform continuity it is enough to look at a subbase. The proof is straightforward and hence omitted. \BeginLemma{unif-subbase-is-enough} Let $f: X\to Y$ be a map. 
Then $f$ is uniformly continuous iff $\InvBild{(\zZ{f})}{V}\in\mathfrak{u}$ for all elements of a subbase for $\mathfrak{v}$. \QED \end{lemma} Cauchy filters are preserved through uniformly continous maps (the image of a filter is defined on page~\pageref{direct-image-filter}). \BeginProposition{preserve-cauchy-filters} Let $f: X\to Y$ be uniformly continuous and $\ensuremath{{\filterFont F}}$ a Cauchy filter on $X$. Then $f(\ensuremath{{\filterFont F}})$ is a Cauchy filter. \end{proposition} \begin{proof} Let $V\in\mathfrak{v}$ be a neighborhood in $Y$, then $U := \InvBild{(\zZ{f})}{V}$ is a neighborhood in $X$, so that there exists $F\in\ensuremath{{\filterFont F}}$ which is $U$-small, hence $F\times F\subseteq U$, hence $\Bild{(\zZ{f})}{F\times F} = \Bild{f}{F}\times\Bild{f}{F} \subseteq V$. Since $\Bild{f}{F}\in f(\ensuremath{{\filterFont F}})$ by Lemma~\ref{direct-image-filter}, the image filter contains a $V$-small member. \end{proof} A first consequence of Proposition~\ref{preserve-cauchy-filters} is shows that the subspaces induced by closed sets in a complete uniform space are complete again. \BeginProposition{closed-in-compl-unif} If $X$ is separated, then a complete subspace is closed. Let $A\subseteq X$ be closed and $X$ be complete, then the subspace $A$ is complete. \end{proposition} Note that the first part does not assume that $X$ is complete, and that the second part does not assume that $X$ is separated. \begin{proof} 1. Assume that $X$ is a Hausdorff space and $A$ a complete subspace of $X$. We show $\partial A \subseteq A$, from which it will follow that $A$ is closed. Let $b\in \partial A$, then $U\cap A\not=\emptyset$ for all open neighborhoods $U$ of $b$. The trace $\ensuremath{{\mathfrak U}}(b)\cap A$ of the neighborhood filter $\ensuremath{{\mathfrak U}}(b)$ on $A$ is a Cauchy filter. 
In fact, if $W\in\mathfrak{u}$ is a neighborhood for $X$, which we may choose as symmetric, then $\bigl((W[b]\cap A)\times (W[b]\cap A)\bigr) \subseteq W\cap (A\times A)$, which means that $W[b]\cap A)$ is $W\cap (A\times A)$- small. Thus $\ensuremath{{\mathfrak U}}(b)\cap A$ is a Cauchy filter on $A$, hence it converges to, say, $c\in A$. Thus $\ensuremath{{\mathfrak U}}(c)\cap A\subseteq \ensuremath{{\mathfrak U}}(b)\in A$, which means that $b = c$, since $X$, and hence $A$, is Hausdorff as a topological space. Thus $b\in A$, and $A$ is closed by Proposition~\ref{char-top-closure}. 2. Now assume that $A\subseteq X$ is closed, and that $X$ is complete. Let $\ensuremath{{\filterFont F}}$ be a Cauchy filter on $A$, then $i_{A}(\ensuremath{{\filterFont F}})$ is a Cauchy filter on $X$ by Proposition~\ref{preserve-cauchy-filters}. Thus $i_{A}(\ensuremath{{\filterFont F}})\to x$ for some $x\in X$, and since $A$ is closed, $x\in A$ follows. \end{proof} We show that a uniformly continuous map on a dense subset into a complete and separated uniform space can be extended uniquely to a uniformly continuous map on the whole space. This was established in Proposition~\ref{extend-unif-cont-maps} for pseudometric spaces; having a look at the proof displays the heavy use of pseudometric machinery such as the oscillation, and the pseudometric itself. This is not available in the present situation, so we have to restrict ourselves to the tools at our disposal, viz., neighborhoods and filters, in particular Cauchy filters for a complete space. We follow Kelley's elegant proof~\cite[p. 195]{Kelley}. \BeginTheorem{ext-dense-unif-compl} Let $A\subseteq X$ be a dense subsets of the uniform space $(X, \mathfrak{u})$, and $(Y; \mathfrak{v})$ be a complete and separated uniform space. Then a uniformly continuous map $f: A\to Y$ can be extended uniquely to a uniformly continous $F: X\to Y$. \end{theorem} \begin{proof} 0. 
The proof starts from the graph $\{\langle a, f(a)\rangle\mid a\in A\}$ of $f$ and investigates the properties of its closure in $X\times Y$\MMP{Plan of the proof}. It is shown that the closure is a relation which has $\Closure{A} = X$ as its domain, and which is the graph of a map, since the topology of $Y$ is Hausdorff. This map is an extension $F$ to $f$, and it is shown that $F$ is uniformly continuous. We also use the observation that the image of a converging filter under a uniform continuous map is a Cauchy filter, so that completeness of $Y$ kicks in when needed. We do not have to separately establish uniqueness, because this follows directly from Lemma~\ref{equal-on-dense}. 1. Let $G_{f} := \{\langle a, f(a)\rangle\mid a\in A\}$ be the graph of $f$. We claim that the closure of the domain of $f$ is the domain of the closure of $G_{f}$. Let $x$ be in the domain of the closure of $G_{f}$, then there exists $y\in Y$ with $\langle x, y\rangle\in \Closure{G}_{f}$, thus we find a filter $\ensuremath{{\filterFont F}}$ on $G_{f}$ with $\ensuremath{{\filterFont F}}\to \langle x, y\rangle$. Thus $\pi_{1}(\ensuremath{{\filterFont F}})\to x$, so that $x$ is in the closure of the domain of $f$. Conversely, if $x$ is in the closure of the domain of $G_{f}$, we find a filter $\ensuremath{{\filterFont F}}$ on the domain of $G_{f}$ with $\ensuremath{{\filterFont F}}\to x$. Since $f$ is uniformly continuous, we know that $f(\ensuremath{{\filterFont F}})$ generates a Cauchy filter $\mathfrak{G}$ on $Y$, which converges to some $y$. The product filter $\ensuremath{{\filterFont F}}\times\mathfrak{G}$ converges to $\langle x, y\rangle$, thus $x$ is in the domain of the closure of $G_{f}$. 2. Now let $W\in\mathfrak{v}$; we show that there exists a neighborhood $U\in\mathfrak{u}$ with this property: if $\langle x, y\rangle, \langle u, v\rangle\in\Closure{G}_{f}$, then $x\in U[u]$ implies $y\in W[v]$. 
After having established this, we know \begin{itemize} \item $\Closure{G}_{f}$ is the graph of a function $F$. This is so because $Y$ is separated, hence its topology is Hausdorff. For, assume there exists $x\in X$ some $y_{1}, y_{2}\in Y$ with $y_{1}\not= y_{2}$ and $\langle x, y_{1}\rangle, \langle x, y_{2}\rangle\in\Closure{G}_{f}$. Choose $W\in\mathfrak{v}$ with $y_{2}\not\in W[y_{1}]$, and consider $U$ as above. Then $x\in U[x]$, hence $y_{2}\in W[y_{1}]$, contradicting the choice of $W$. \item $F$ is uniformly continuous. The property above translates to finding for $W\in\mathfrak{v}$ a neighborhood $U\in\mathfrak{u}$ with $U\subseteq \InvBild{(\zZ{F})}{W}$. \end{itemize} So we are done after having established the statement above. 3. Assume that $W\in\mathfrak{v}$ is given, and choose $V\in\mathfrak{v}$ closed and symmetric with $V\circ V\subseteq W$. This is possible by Corollary~\ref{clos-symm-are-base}. There exists $U\in\mathfrak{u}$ open and symmetric with $\Bild{f}{U[x]}\subseteq V[f(x)]$ for every $x\in A$, since $f$ is uniformly continuous. If $\langle x, y\rangle, \langle u, v\rangle\in \Closure{G}_{f}$ and $x\in U[u]$, then $U[x]\cap U[u]$ is open (since $U$ is open), and there exists $a\in A$ with $x, u\in U[a]$, since $A$ is dense. We claim $y\in \Closure{\bigl(\Bild{f}{U[a]}\bigr)}$. Let $H$ be an open neighborhood of $y$, then, since $U[a]$ is a neighborhood of $x$, $U[a]\times H$ is a neighborhood of $\langle x, y\rangle$, thus $G_{f}\cap U[a]\times H \not= \emptyset$. Hence we find $y'\in H$ with $\langle x, y'\rangle\in G_{f}$, which entails $H\cap \Bild{f}{U[a]}\not=\emptyset$. Similarly, $z\in \Closure{\bigl(\Bild{f}{U[a]}\bigr)}$; note $\Closure{\bigl(\Bild{f}{U[a]}\bigr)}\subseteq V[f(a)]$. But now $\langle y, v\rangle\in V\circ V\subseteq W$, hence $y\in W[v]$. This establishes the claim above, and finishes the proof. \end{proof} Let us just have a look at the idea lest it gets lost. 
If $x\in X$, we find a filter $\ensuremath{{\filterFont F}}$ on $A$ with $i_{A}(\ensuremath{{\filterFont F}})\to x$. Then $f(i_{a}(\ensuremath{{\filterFont F}}))$ is a Cauchy filter, hence it converges to some $y\in Y$, which we define as $F(x)$. Then it has to be shown that $F$ is well defined, it clearly extends $f$. It finally has to be shown that $F$ is uniformly continuous. So there is a lot technical ground which is covered. We note on closing that also the completion of pseudometric spaces can be translated into the realm of uniform spaces. Here, naturally, the Cauchy filters defined on the space play an important r\^ole, and things get very technical. The interplay between compactness and uniformities yields interesting results as well, here the reader is referred to~\cite[Chapter~II]{Bourbaki} or to~\cite{James-Unif}. \subsection{Bibliographic Notes} \label{sec:top-bib-notes} The towering references in this area are~\cite{Bourbaki, Engelking, Kuratowski, Kelley}; the author had the pleasure of taking a course on topology from one of the authors of~\cite{Querenburg}, so this text has been an important source, too. The delightful Lecture Note~\cite{Herrlich-Choice} by Herrlich has a chapter ``Disasters without Choice'' which discusses among others the relationship of the Axiom of Choice and various topological constructions. The discussion of the game related to Baire's Theorem in Section~\ref{sec:baire+game} in taken from Oxtoby's textbook~\cite[Sec. 6]{Oxtoby} on the duality between measure and category (\emph{category} in the topological sense introduced on page~\pageref{baire-complete} above); he attributes the game to Banach and Mazur. Other instances of proofs by games for metric spaces are given, e.g., in~\cite[8.H, 21]{Kechris}. The uniformity discused in Example~\ref{example-uniformities-5-filter} has been considered in~\cite{EED-Polymorphic} in greater detail. 
The section on topological systems follows fairly closely the textbook~\cite{Vickers} by Vickers, but see also~\cite{Cont-Latt, Abramsky+Jung}, and for the discussion of dualities and the connection to intuitionistic logics,~\cite{Johnstone, Goldblatt-Topoi}. The discussion of Gödel's Completeness Theorem in Section~\ref{sec:goedel} is based on the original paper by Rasiowa and Sikorski~\cite{Rasiowa-Sikorski-I} together with occasional glimpses at~\cite[Chapter 2.1]{Chang+Keisler},\cite[Chapter 4]{Srivastava-Logic} and~\cite[Chapter 1.2]{Koppelberg}. Uniform spaces are discussed in\cite{Bourbaki, Engelking, Kelley, Querenburg}, special treatises include\cite{James-Unif} and \cite{Isbell}, the latter one emphasizing a categorical point of view. \subsection{Exercises} \label{sec:tops-exercises} \Exercise{ex-char-final-top}{ Formulate and prove an analogue of Proposition~\ref{initial-final} for the final topology for a family of maps. } \Exercise{ex-product-euclidean}{ The Euclidean topology on $\mathbb{R}^{n}$ is the same as the product topology on $\prod_{i=1}^{n}\mathbb{R}$. } \Exercise{ex-prod-discrete}{ Recall that the topological space $(X, \tau)$ is called \emph{discrete} iff $\tau=\PowerSet{X}$. Show that the product $\prod_{i\in I} (\{0, 1\}, \PowerSet{\{0, 1\}})$ is discrete iff the index set $I$ is finite. } \Exercise{ex-compare-tops}{ Let $ L := \{(x_{n})_{n\in \mathbb{N}}\} \subseteq \mathbb{R}^{\mathbb{N}}\mid \sum_{n\in \mathbb{N}}|x_{n}| < \infty\} $ be all sequences of real numbers which are absolutely summable. $\tau_{1}$ is defined as the trace of the product topology on $\prod_{n\in\mathbb{N}}\mathbb{R}$ on $L$, $\tau_{2}$ is defined in the following way: A set $G$ is $\tau_{2}$-open iff given $x\in G$, there exists $r>0$ such that $ \{y\in L\mid \sum_{n\in \mathbb{N}}|x_{n}-y_{n}| < r\} \subseteq G. $ Investigate whether the identity maps $(L, \tau_{1})\to (L, \tau_{2})$ and $(L, \tau_{2})\to (L, \tau_{1})$ are continuous. 
} \Exercise{ex-equiv-classes}{ Define for $x, y\in \mathbb{R}$ the equivalence relation $ \isEquiv{x}{y}{\sim} \text{ iff } x - y \in\mathbb{Z}. $ Show that $\Faktor{\mathbb{R}}{\sim}$ is homeomorphic to the unit circle. \textbf{Hint:} Example~\ref{example-quotient-top}. } \Exercise{ex-scott-equiv}{ Let $A$ be a countable set. Show that a map $q: (\partMap{A}{B})\to (\partMap{C}{D})$ is continuous in the topology taken from Example~\ref{partial-maps-top} iff it is continuous, when $\partMap{A}{B}$ as well as $\partMap{C}{D}$ are equipped with the Scott topology. } \Exercise{ex-order-top}{ Let $D_{24}$ be the set of all divisors of $24$, including $1$, and define an order $\sqsubseteq$ on $D_{24}$ through $x\sqsubseteq y$ iff $x \text{ divides } y$. The topology on $D_{24}$ is given through the closure operator as in Example~\ref{finite-ordered-for-closure}. Write a \texttt{Haskell} program listing all closed subsets of $D_{24}$, and determining all filters $\ensuremath{{\filterFont F}}$ with $\ensuremath{{\filterFont F}}\to 1$. \textbf{Hint:} It is helpful to define a type \texttt{Set} with appropriate operations first, see~\cite[4.2.2]{EED-Haskell-Buch}. } \Exercise{ex-closure-filter}{ Let $X$ be a topological space, $A\subseteq X$, and $i_{A}: A\to X$ the injection. Show that $x\in\Closure{A}$ iff there exists a filter $\ensuremath{{\filterFont F}}$ on $A$ such that $i_{A}(\ensuremath{{\filterFont F}})\to x$. } \Exercise{ex-reals-are-normal}{ Show \emph{by expanding Example~\ref{reals-are-t3half}} that $\mathbb{R}$ with its usual topology is a $T_{4}$-space. } \Exercise{ex-compact-homeom}{ Given a continuous bijection $f: X\to Y$ with the Hausdorff spaces $X$ and $Y$, show that $f$ is a homeomorphism, if $X$ is compact. } \Exercise{ex-inherit-separation}{ Let $A$ be a subspace of a topological space $X$. \begin{enumerate} \item If $X$ is a $T_{1}, T_{2}, T_{3}, T_{3\ensuremath{\frac{1}{2}}}$ space, so is $A$. 
\item If $A$ is closed, and $X$ is a $T_{4}$-space, then so is $A$. \end{enumerate} } \Exercise{ex-semicontinuous}{ A function $f: X\to \mathbb{R}$ is called \emph{\index{semicontinuous!lower}lower semicontinuous} iff for each $c\in \mathbb{R}$ the set $\{x\in X \mid f(x) < c\}$ is open. If $\{x\in X\mid f(x) > c\}$ is open, then $f$ is called \emph{\index{semicontinuous!upper}upper semicontinuous}. If $X$ is compact, then a lower semicontinuous map assumes on $X$ its maximum, and an upper semicontinuous map assumes its minimum. } \Exercise{ex-prod-locally-compact}{ Let $X := \prod_{i\in I}X_{i}$ be the product of the Hausdorff space $(X_{i})_{i\in I}$. Show that $X$ is locally compact in the product topology iff $X_{i}$ is locally compact for all $i\in I$, and all but a finite number of $X_{i}$ are compact. } \Exercise{ex-river-metric}{ Given $x, y\in \mathbb{R}^{2}$, define \begin{equation*} D(x, y) := \begin{cases} |x_{2}-y_{2}|, & \text{ if }x_{1} = y_{1}\\ |x_{2}| + |y_{2}| + |x_{1}-y_{1}|,& \text{ otherwise}. \end{cases} \end{equation*} Show that this defines a metric on the plane $\mathbb{R}^{2}$. Draw the open ball $\{y\mid D(y, 0) < 1\}$ of radius $1$ with the origin as center.} \Exercise{ex-pseudo-t1-is-metric}{ Let $(X, d)$ be a pseudometric space such that the induced topology is $T_{1}$. Then $d$ is a metric. } \Exercise{ex-cont-seq-testing}{ Let $X$ and $Y$ be two first countable topological spaces. Show that a map $f: X\to Y$ is continuous iff $x_{n}\to x$ implies always $f(x_{n})\to f(x)$ for each sequence $(x_{n})_{n\in\mathbb{N}}$ in $X$. } \Exercise{ex-contfncts-int}{ Consider the set $\Cont[[0, 1]]$ of all continuous functions on the unit interval, and define \begin{equation*} e(f, g) := \int_{0}^{1}|f(x) - g(x)|\ dx. \end{equation*} Show that \begin{enumerate} \item $e$ is a metric on $C([0, 1])$. \item $\Cont[[0, 1]]$ is not complete with this metric. 
\item The metrics $d$ on $\Cont[[0, 1]]$ from Example~\ref{for-metric-spaces} and $e$ are not equivalent. \end{enumerate} } \Exercise{ex-ultrametric-space}{ Let $(X, d)$ be an ultrametric space, hence $d(x, z) \leq \max\ \{d(x, y), d(y, z)\}$ (see Example~\ref{for-metric-spaces}). Show that \begin{itemize} \item If $d(x, y) \not= d(y, z)$, then $d(x, z) = \max\ \{d(x, y), d(y, z)\}$. \item Any open ball $B(x, r)$ is both open and closed, and $B(x, r) = B(y, r)$, whenever $y\in B(x, r)$. \item Any closed ball $S(x, r)$ is both open and closed, and $S(x, r) = S(y, r)$, whenever $y\in S(x, r)$. \item Assume that $B(x, r)\cap B(x', r')\not=\emptyset$, then $B(x, r)\subseteq B(x', r')$ or $B(x', r')\subseteq B(x, r)$. \end{itemize} } \Exercise{ex-nowhere-dense}{ Show that the set of all nowhere dense sets in a topological space $X$ forms an ideal. Define a set $A\subseteq X$ as \emph{open modulo nowhere dense sets} iff there exists an open set $G$ such that the symmetric difference $A\Delta G$ is nowhere dense (hence both $A\setminus G$ and $G\setminus A$ are nowhere dense). Show that the open sets modulo nowhere dense sets form an $\sigma$-algebra.} \Exercise{ex-oxtoby-game}{ Consider the game formulated in Section~\ref{sec:baire+game}; we use the notation from there. Show that there exists a strategy that Angel can win iff $L_{1}\cap B$ is of first category for some interval $L_{1}\subseteq L_{0}$. } \Exercise{ex-open-not-neighbor}{ Let $\mathfrak{u}$ be the additive uniformity on $\mathbb{R}$ from Example~\ref{example-uniformities}. Show that $\{\langle x, y\rangle\mid |x-y|<1/(1+|y|)\}$ is not a member of $\mathfrak{u}$. } \Exercise{ex-relat-symm-identity}{ Show that $V\circ U\circ V = \bigcup_{\langle x, y\rangle \in U}V[x]\times V[y]$ for symmetric $V\subseteq X\times X$ and arbitrary $U\subseteq X\times X$. 
} \Exercise{ex-unif-base}{ Given a base $\mathfrak{b}$ for a uniformity $\mathfrak{u}$, show that \begin{align*} \mathfrak{b'} & := \{B\cap B^{-1}\mid B\in\mathfrak{b}\},\\ \mathfrak{b''} & := \{B^{n}\mid B\in \mathfrak{b}\} \end{align*} are also bases for $\mathfrak{u}$, when $n\in\mathbb{N}$ (recall $B^{1} := B$ and $B^{n+1} := B\circ B^{n}$). } \Exercise{ex-complete-lattice-unif}{ Show that the uniformities on a set $X$ form a complete lattice with respect to inclusion. Characterize the initial and the final uniformity on $X$ for a family of functions in terms of this lattice. } \Exercise{ex-inters-small}{ If two subsets $A$ and $B$ in a uniform space $(X, \mathfrak{u})$ are $V$-small then $A\cup B$ is $V\circ V$-small, if $A\cap B\not=\emptyset$. } \Exercise{ex-ultra-discr}{ Show that a discrete uniform space is complete. \textbf{Hint}: A Cauchy filter is an ultrafilter based on a point. } \Exercise{ex-product-top-vs-unif}{ Let ${\mathcal F}$ be a family of maps $X\to Y_{f}$ with uniform spaces $(Y_{f}, \mathfrak{v}_{f})$. Show that the initial topology on $X$ with respect to ${\mathcal F}$ is the topology induced by the product uniformity. } \Exercise{ex-proj-unif-cont}{ Equip the product $X := \prod_{i\in I}X_{i}$ with the product uniformity for the uniform spaces $\bigl((X_{i}, \mathfrak{u}_{i})\bigr)_{i\in I}$, and let $(Y, \mathfrak{v})$ be a uniform space. A map $f: Y\to X$ is uniformly continuous iff $\pi_{i}\circ f: Y\to X_{i}$ is uniformly continuous for each $i\in I$.} \Exercise{ex-spatial-equiv}{ Let $X$ be a topological system. Show that the following statements are equivalent \begin{enumerate} \item $X$ is homeomorphic to $\FunctorSenza{S}P(Y)$ for some topological system $Y$. \item For all $a, b\in\varnothingMX$ holds $a = b$, provided we have $x\models a \Leftrightarrow x\models b$ for all $x\in \pTX$. \item For all $a, b\in\varnothingMX$ holds $a \leq b$, provided we have $x\models a \Rightarrow x\models b$ for all $x\in \pTX$. 
\end{enumerate} } \Exercise{ex-hausdorff-sober}{ Show that a Hausdorff space is sober.} \Exercise{ex-stone-weierstrass}{ Let $X$ and $Y$ be compact topological spaces with their Banach spaces $\Cont$ resp. $\Cont[Y]$ of real continuous maps. Let $f: X\to Y$ be a continuous map, then \begin{equation*} f^{*}: \begin{cases} \Cont[Y]&\to \Cont\\ g&\mapsto g\circ f \end{cases} \end{equation*} defines a continuous map (with respect to the respective norm topologies). $f^{*}$ is onto iff $f$ is an injection. $f$ is onto iff $f^{*}$ is an isomorphism of $\Cont[Y]$ onto a ring $A\subseteq \Cont$ which contains constants. } \Exercise{ex-completeness-propositional}{ Let $\ensuremath{{\filterFont L}}$ be a language for propositional logic with constants $C$ and $V$ as the set of propositional variables. Prove that a consistent theory $T$ has a model, hence a map $h: V\to 2\!\!2$ such that each formula in $T$ is assigned the value $\top$. \textbf{Hint:} Fix an ultrafilter on the Lindenbaum algebra of $T$ and consider the corresponding morphism into $2\!\!2$.} \Exercise{ex-top-group}{ Let $G$ be a topological group, see Example~\ref{top-group}. Given $F\subseteq G$ closed, show that \begin{enumerate} \item $gF$ and $Fg$ are closed, \item $F^{-1}$ is closed, \item $MF$ and $FM$ are closed, provided $M$ is finite. \item If $A\subseteq G$, then $ \Closure{A} = \bigcap_{U\in\ensuremath{{\mathfrak U}}(e)}AU = \bigcap_{U\in\ensuremath{{\mathfrak U}}(e)}UA = \bigcap_{U\in\tau}AU = \bigcap_{U\in\tau}UA. 
$ \end{enumerate}} } \newcommand{\etalchar}[1]{$^{#1}$} \addcontentsline{toc}{section}{Index} \begin{theindex} \item $B(x, r)$, 38 \item $S(x, r)$, 38 \item $U[x]$, 84 \item $\Cont$, 76 \item $\epsilon $-net, 50 \item $\mathsf{diam}(A)$, 46 \item $\ensuremath{{\mathfrak U}}(x)$, 13 \item ${\mathcal F}\to x$, 15 \item 2\negthickspace2, 66 \indexspace \item accumulation point, 19 \item algebra \subitem Heyting, 64 \subsubitem morphism, 65 \subitem Lindenbaum, 61 \indexspace \item Banach-Mazur game, 56 \indexspace \item Cauchy filter, 92 \item Cauchy sequence, 43 \item Charly Brown's device, 45 \item closure operator, 11 \item compact \subitem countably compact, 34 \subitem Lindel\IeC {\"o}f space, 35 \subitem locally compact, 29 \subitem paracompact, 35 \subsubitem locally finite, 35 \subsubitem refinement, 35 \subitem sequentially compact, 35 \item compactification, 31 \subitem Alexandrov one point, 30 \subitem Stone-$\mathaccentV {check}014{\mathrm {C}}$ech, 32 \item continuous, 6 \subitem uniformly, 53, 94 \item contraction, 48 \item convergence \subitem filter, 15 \subitem net, 15 \indexspace \item dcpo, 72 \item dense set, 25 \item diameter, 46 \indexspace \item embedding, 30 \item entourage, 83 \indexspace \item filter \subitem accumulation point, 19 \subitem Cauchy, 92 \subitem neighborhood, 13 \item flyswatter, 50 \item frame, 65 \indexspace \item Google, 50 \item group \subitem topological, 15 \indexspace \item homeomorphism, 10 \indexspace \item irreducible, 71 \indexspace \item map \subitem affine, 85 \item metric \subitem discrete, 36 \subitem Hausdorff, 41 \subitem ultrametric, 37 \item model, 62, 63 \indexspace \item Nachbarschaft, 83 \item net, 15 \subitem Cauchy, 93 \subitem convergence, 15 \item norm, 76 \item nowhere dense, 55 \indexspace \item open \subitem Scott, 6, 73 \item oscillation, 46 \indexspace \item partition, 84 \item prime \subitem completely, 69 \subitem element, 69 \item pseudometric, 35 \item pseudometrics \subitem equivalent, 38 
\indexspace \item semicontinuous \subitem lower, 99 \subitem upper, 99 \item sentence, 61 \item sequence \subitem Cauchy, 43 \item set \subitem saturated, 75 \subitem small, 92 \item Sorgenfrey line, 21 \item space \subitem $T_{0}, T_{1}$, 22 \subitem $T_{3}, T_{3\ensuremath{\frac{1}{2}}}, T_{4}$, 22 \subitem Banach, 77 \subitem completely regular, 25 \subitem first category, 55 \subitem Hausdorff, $T_{2}$, 21 \subitem locally compact, 29 \subitem metric, 35 \subitem normal, 25 \subitem normed, 77 \subitem pseudometric, 35 \subsubitem complete, 43 \subitem regular, 25 \subitem uniform, 83 \subsubitem complete, 92 \subsubitem separated, 89 \subsubitem topology, 87 \indexspace \item theorem \subitem Baire \subsubitem complete pseudometric, 55 \subsubitem locally compact, 33 \subitem Dini, 77 \subitem Hofmann-Mislove, 75 \subitem Stone-Weierstra\IeC {\ss }, 79 \subitem Tihonov, 19 \subitem Urysohn's Metrization, 42 \item topological system, 66 \subitem c-morphism, 67 \subitem homeomorphism, 67 \subitem localic, 69 \subitem localization, 69 \subitem opens, 66 \subsubitem extension, 66 \subitem points, 66 \subitem spatialization, 67 \item topology \subitem base, 3 \subitem compactification, 31 \subitem final, 8 \subitem first countable, 41 \subitem initial, 8 \subitem product, 9 \subitem quotient, 9 \subitem Scott, 6, 73 \subitem second countable, 41 \subitem separable, 41 \subitem sober, 72 \subitem sum, 9 \subitem topological group, 15 \subitem trace, 9 \subitem uniform, 87 \subitem uniform convergence, 77 \subitem Vietoris, 53 \subitem weak, 5 \item totally bounded, 50 \indexspace \item ultrametric, 37 \item Umgebung, 83 \item uniformity, 83 \subitem $p$-adic, 85 \subitem additive, 84 \subitem discrete, 84 \subitem finite partitions, 85 \subitem indiscrete, 84 \subitem initial, 95 \subitem multiplicative, 84 \subitem product, 96 \subitem subspace, 96 \item Urysohn's Lemma, 25 \end{theindex} \end{document}
\begin{document} \title{Limiting Distribution of Frobenius Numbers for $n=3$} \setcounter{Zam}{0} \section{Introduction} The purpose of this paper is to give a complete derivation of the limiting distribution of large Frobenius numbers outlined in~\cite{Bourgain2007} and fill some gaps formulated there as hypotheses. We start with the basic definitions and descriptions of some results. Consider $n$ mutually coprime positive integers $a_1$, $a_2$, \dots, $a_n$. This means that there is no $r>1$ such that each $a_j$, $1\le j\le n$, is divisible by $r$. Take $N$ which later will tend to infinity and will be our main large parameter. Introduce the ensemble $Q_N$ of mutually coprime $a=(a_1,\dots,a_n)$, $1\le a_j\le N$, $1\le j\le n$, and let $P_N$ be the uniform probability distribution on $Q_N$. For each $a\in Q_N$ denote by $F(a)$ the largest integer number that is not representable in the form $x=x_1a_1+\cdots+x_na_n$, where $x_j$ are non-negative integers. $F(a)$ can be considered as a random variable defined on $Q_N$. The basic problem which will be discussed in this paper is the existence and the form of the limiting distribution for the normalized Frobenius numbers $f(a)=\dfrac 1{N^{1+\frac{1}{n-1}}}F(a)$. The reason for this normalization will be explained below. The case of $n=2$ is simple in view of the classical result of Sylvester (see~\cite{Sylvester1884}) according to which $F(a_1,a_2)=a_1a_2-a_1-a_2$. It shows that in a typical situation $F$ grows as $N^2$. The first non-trivial case is $n=3$, where $F(a)$ grows as $N^{3/2}$. It is known (see~\cite{Ustinov2009}) that the numbers $F(a_1,a_2,a_3)$ have weak asymptotics: $$ \frac 1{x_1x_2a_3^{7/2}}\sum_{a_1\le x_1a_3}\sum_{a_2\le x_2a_3} \left(F(a_1,a_2,a_3)- \dfrac 8\pi\sqrt{a_1a_2a_3}\right)= O_{x_1,x_2,\varepsilon}\left(a_3^{-1/6+\varepsilon}\right) $$ For arbitrary $n$ the only result known to us is the following theorem proven in~\cite{Bourgain2007}. 
\begin{Th} \label{Th_1} Under some additional technical condition (see~\cite{Bourgain2007}) the family of probability distributions of $f_N(a)=\frac 1{N^{1+\frac 1{n-1}}}F(a)$ is weakly compact. This means that for every $\varepsilon>0$ one can find $\mathcal D=\mathcal D(\varepsilon)$ such that $$ P_N\left\{\dfrac1{N^{1+\frac 1{n-1}}}F(a)\le \mathcal D\right\}\ge 1-\varepsilon. $$ \end{Th} In this theorem $\varepsilon,\mathcal D$ do not depend on $N$. It also implies the existence of the limiting points (in the sense of weak convergence) for the sequence of probability distributions of $f_N(a)$. As was already mentioned, in this paper we shall study the limiting distribution of $f_N(a)=\frac 1{N^{3/2}}F(a)$, $a=(a_1,a_2,a_3)$ as $N\to\infty$. This distribution is not universal and will be described below. Take any $\rho$, $0<\rho <1$, and consider its expansion into continued fraction \begin{equation} \label{eq_1} \rho=[h_1,h_2,\dots,h_s,\dots] \end{equation} where $h_j\ge 1$ are integers. If $\rho$ is rational then the continued fraction~\eqref{eq_1} is finite. The finite continued fractions $\rho=[h_1,\dots,h_s]=\dfrac{p_s}{q_s}$ are called the $s$-approximants of $\rho$. The numbers $q_s$ satisfy the recurrent relations \begin{equation} \label{eq_2} q_s=h_sq_{s-1}+q_{s-2},\;\;s\ge 2 \end{equation} Introduce the Gauss measure on $[0,1]$ given by the density $\pi(x)=\frac1{\ln 2(1+x)}$. Then the elements of the continued fraction~\eqref{eq_1} become random variables. It is well-known that their probability distributions are stationary in the sense that the distributions of any $h_{m-k}$, $h_{m-k+1},\dots,h_m,\dots,h_{m+k}$ do not depend on $m$. We shall need the values of $s =s_1$, such that $q_{s_1}$ is the first $q_s$ greater than $\sqrt N$. It was proven in~\cite{Sinai2008} that $q_{s_1}/\sqrt N$ has a limiting distribution. More precisely, the following theorem is true. 
\begin{Th} \label{Th_2} Let $k$ be fixed and $s(R)$ be the smallest $s$ for which $q_s\ge R$. As $R\to\infty$ there exists the joint limiting probability distribution of $\frac{q_{s(R)}}R$, $h_{s(R)-k}$, \dots, $h_{s(R)+k}$. \end{Th} In the paper~\cite{Ustinov2008b} the analytic form of this distribution was given. Consider the subensemble $Q_N^{(0)}\subset Q_N$ for which $a_1,a_3$ are coprime. Then there exists $a_1^{-1} (\bmod\,a_3)$, $1\le a_1^{-1}<a_3$. Denote $\rho=\frac{a_1^{-1}a_2}{a_3}$. The expansion of $\rho$ into continued fraction will be needed below. Clearly, $\rho$ is a rational number. However, the following theorem is valid. \begin{Th} \label{Th_3} As before, consider $s_1$ such that $q_{s_1-1}<\sqrt N<q_{s_1}$. Then in the ensemble $Q_N^{(0)}$ equipped with the uniform measure, for any $k>0$ and $N\to\infty$ there exists the joint limiting probability distribution of $\frac{q_{s_1}}{\sqrt N}$, $h_{s_1-k}$, \dots, $h_{s_1+k}$ which coincides with the distribution in Theorem~\ref{Th_2}. \end{Th} A stronger version of theorem~\ref{Th_3} is also valid. \begin{Th} \label{Th_4} Let the first elements of the continued fraction for $\rho$ be fixed: $h_1,h_2,\dots,h_l$. Then under this condition and as $N\to\infty$ the conditional distributions of $\frac{q_{s_1}}{\sqrt N}$, $h_{s_1-k}$, \dots, $h_{s_1+k}$ converge to the same limit as in theorems~\ref{Th_2} and~\ref{Th_3}. \end{Th} All these theorems will be proven in section~\ref{Sec_3}. Now we can formulate the main result of this paper. \begin{Th} \label{Th_5}There exists the limiting distribution of $f_N(a)=f_N((a_1,a_2,a_3))$, $(a_1 , a_2 , a_3) \in Q_N$ as $N \rightarrow \infty$.\end{Th} The proof of the main theorem is given in section~\ref{Sec_2}. First we consider the ensemble $Q_N^{(0)}$ and then explain how to extend the proof to $Q_N$. The second author thanks NSF for the financial support, grant DMS No 0600996. 
The research of the third author was supported by the Russian Foundation for Basic Research (grant no. 07-01-00306) and the Russian Science Support Foundation. \section{The limiting Distribution of $f_N(a)$.}\label{Sec_2} Return back to the case of arbitrary $n$. Introduce arithmetic progressions $$ \Pi_r=\{r+ma_n,m\ge 0\},\quad 0\le r<a_n. $$ For non-negative integers $x_1$, \dots, $x_{n-1}$ such that $x_1a_1+x_2a_2+\cdots+x_{n-1}a_{n-1}\in\Pi_r$ we write $$x_1a_1+\cdots+x_{n-1}a_{n-1}=r+m(x_1,\dots,x_{n-1})a_n. $$ Define $\overline m(r)=\underset{x_1,\dots,x_{n-1}}\min m(x_1,\dots,x_{n-1})$ and put $$ F_1(a)=\max_{0\le r<a_n} \quad \min_{{x_1,\dots,x_{n-1}\atop x_1a_1+\cdots+x_{n-1}a_{n-1}\in\Pi_r}} (r+m(x_1,\dots,x_{n-1})a_n) = $$ $$ = \, \max\limits_{0 \le r < a_n} \quad \min\limits_{x_1 a_1 + \ldots + x_{n-1} a_{n-1} \:\equiv \: r (\hspace{-.8em}\mod a_n)} (x_1 a_1 + \ldots + x_{n-1} a_{n-1} ) \, . $$ It was proven in~\cite{Selmer1978} that $F(a)=F_1(a)-a_n$. A slightly weaker statement can be found in~\cite{Bourgain2007}. Since in a typical situation $a_j$ grow as $N$ while $F_1(a)$ grows as $N^{1+\frac 1{n-1}}$ (see also~\cite{Bourgain2007}) the limiting behavior of $\frac {F(a)}{N^{1+\frac 1{n-1}}}$ and $\frac {F_1(a)}{N^{1+\frac 1{n-1}}}$ is the same, but the analysis of $\frac{F_1(a)}{N^{1+\frac 1{n-1}}}$ is slightly simpler. Let us write for $n=3$ $$ x_1a_1+x_2a_2=r+m(x_1,x_2)a_3 $$ or \begin{equation} \label{eq_3} x_1a_1+x_2a_2\equiv r(\bmod\,a_3) \end{equation} We assume that $a_1,a_3$ and $a_2, a_3$ are coprime. Therefore there exists $a_1^{-1}$, $1\le a_1^{-1}<a_3$, such that $a_1\cdot a_1^{-1}\equiv 1(\bmod\,a_3)$. Choose $a_1^{-1}$ so that $1\le a_1^{-1}<a_3$ and rewrite~\eqref{eq_3} as follows \begin{equation} \label{eq_4} x_1+a_{12}x_2\equiv r_1(\bmod\,a_3) \end{equation} where $a_{12}\equiv a_1^{-1}a_2(\bmod\,a_3)$, $0<a_{12}<a_3$ and $r_1\equiv ra_1^{-1}(\bmod\,a_3)$, $0\le r_1<a_3$. 
From~\eqref{eq_4} \begin{equation} \label{eq_5} a_{12}x_2\equiv (r_1-x_1)(\bmod\,a_3) \end{equation} The expression~\eqref{eq_5} has a nice geometric interpretation. Consider $S=[0,1,\dots,a_3-1]$ as a ``discrete circle''. Let $\mathcal R$ be the rotation of this circle by $a_{12},$ i.e.\break $\mathcal Rx=x+a_{12}(\bmod a_3)$. Then $\mathcal R^px=x+pa_{12}(\bmod a_3)$ and~\eqref{eq_5} means that $r_1-x_1$ belongs to the orbit of $0$ under the action of $\mathcal R$. From the definition of $F_1(a)$ \begin{gather}\nonumber F_1(a)=\max_{0\le r<a_3}\min_{{x_1a_1+x_2a_2\equiv r(\bmod\,a_3)\atop0\le x_1,x_2<a_3}}(x_1a_1+x_2a_2)= \\\label{eq_6} =N^{3/2}\max_{0\le r_1<a_3}\min_{x_1+x_2a_{12}\equiv r_1\pmod{a_3}} \left(\dfrac{x_1}{ qrt N}\dfrac{a_1}N+\dfrac{x_2}{ qrt N}\dfrac{a_2}N\right) \end{gather} Choose $h^{(j)}=(h_1^{(j)},\dots,h_m^{(j)})$, $j=1,2,3$ and denote by $Q_{N,h^{(1)},h^{(2)},h^{(3)}}^{(0)}$ the ensemble of $a=(a_1,a_2,a_3)\in Q_N^{(0)}$ such that the first $m$ elements of the continued fractions of $\frac{a_j}N$ are given by $h^{(j)}$, $j=1,2,3$. This step means the localization of the ensemble $Q_N^{(0)}$. It is easy to see that for every $\varepsilon >0$ one can find rational $\alpha_1$, $\alpha_2$, $\alpha_3$ and $m$ such that $\left|\frac{a_j}N-\alpha_j\right|\le \varepsilon,\;1\le j \le 3$. Then in~\eqref{eq_6} one can replace $\frac{a_j}N$ by $\alpha_j$. Since $\frac{x_j}{ qrt N}$ will take the values $O(1)$ the whole expression in~\eqref{eq_6} takes values $O(1)$ and instead of~\eqref{eq_6} we may consider \begin{equation} \label{eq_7} \max_{r_1}\min_{x_1+a_{12}x_2\equiv r_1\pmod{a_3}} \left(\dfrac{x_1}{ qrt N}\alpha_1+\dfrac{x_2}{ qrt N}\alpha_2\right) \end{equation} with the error $O(\varepsilon)$. We assume that in the ensemble $Q_{N,h^{(1)},h^{(2)},h^{(3)}}^{(0)}$ we also have the uniform distribution. We shall need some facts from the theory of rotations of the circle. According to our assumption $a_{12}$ and $a_3$ are coprime. 
Therefore $\mathcal R$ is ergodic in the sense that $\mathcal R^{a_3}=Id$ and $a_3$ is the smallest number with this property. Put $\rho=\frac {a_{12}}{a_3}$ and write down the expansion of $\rho$ into continued fraction: $\rho=[h_1,h_2,\dots , h_{s_0}]$. Let also be $\rho_s=[h_1,h_2,\dots,h_s]=\frac{p_s}{q_s}$ and $s_1$ is such that $q_{s_1-1}< qrt N<q_{s_1}$. It will be more convenient to consider the usual unit circle instead of $S$ and use the same letter $\mathcal R$ for the rotation of the unit circle by $\rho$. Introduce the interval $\varDelta_0^{(p)}$ bounded by $0$ and $\{q_p\rho\}$ and $\varDelta_j^{(p)}=\mathcal R^j\varDelta_0^{(p)}$. Using the induction one can show that $\varDelta_j^{(p)},\;0\le j<q_{p+1}$ and $\varDelta_j^{(p+1)},\;0\le j'<q_p$ are pair-wise disjoint and their union is the whole circle except the boundary points (see~\cite{Sinai2008}). Denote by $\eta^{(p)}$ the partition of the unit circle into $\varDelta_j^{(p)},\;\varDelta_{j'}^{(p+1)}$. Then $\eta^{(p+1)}\ge\eta^{(p)}$ in the sense that each element of $\eta^{(p)}$ consists of several elements of $\eta^{(p+1)}$. More precisely, $\varDelta_0^{(p-1)}$ consists of $h_p$ elements $\varDelta_j^{(p)}$ and one elements $\varDelta_0^{(p+1)}$. The partitions $\eta^{(p)}$ show how the orbit of $0$ fills the circle. Return back to the discrete circle $S$. The partitions $\eta^{(p)}$ can be constructed in the same way as in the continuous case. We have to analyze \begin{equation} \label{eq_8} \underset{0\le r_1<a_3}\max \quad \min_{ ubstack{x_1,x_2 \\ x_1+a_{12}x_2\equiv r_1(\bmod\,a_3)}} \left(\dfrac{x_1}{ qrt N}\alpha_1+\dfrac{x_2}{ qrt N}\alpha_2\right) \end{equation} for given $\alpha_1,\alpha_2,\;0<\alpha_1,\alpha_2<1$. 
\begin{Le} \label{Le_1} There exists some number $C_1 (\alpha_1, \alpha_2) = C_1$ such that for any $r_1$ the point $x_1$ giving $\min \left( \frac{x_1}{\sqrt{N}} \, \alpha_1 + \, \frac{x_2}{\sqrt{N}} \, \alpha_2 \right)$ under the condition $x_1 + a_{12} x_2 \equiv r_1 \pmod{a_3}$ is such that $r_1 - x_1$ is an end-point of some element of the partition $\eta^{(s_1 + m_1)}$. Here $m_1 \geq 0$ is such that $q_{s_1 + m_1}/q_{s_1} \, \leq \, C_1 (\alpha_1, \alpha_2 )$. \end{Le} The proof is simple. In any case $r_1 - x_1$ is an end-point of some element of the partition $\eta^{(s_1 + m_1)}$. If $m_1$ is too big then $\frac{x_2}{\sqrt{N}}$ is too big because it takes too much time to reach an end-point of $\eta^{(s_1 + m_1)}$ which is not an end-point of one of the previous partitions. We can choose $y_1$ so that $r_1 - y_1$ will be an end-point of some element of $\eta^{(s_1)}$ and the linear combination $\frac{y_1}{\sqrt{N}} \alpha_1 + \frac{y_2}{\sqrt{N}} \alpha_2$ is smaller. This completes the proof of the lemma. Its meaning is the following. If $r_1 - x_1$ is an end-point of $\eta^{(s_1 + m_1)}$ with too big $m_1$ then $x_2$ will be also too big. Lemma 2 shows that $x_1$ also cannot be too big. \begin{Le} \label{Le_2} There exists an integer $m_2 > 0$ depending on $\alpha_1 , \alpha_2$, the ratio $q_{s_1}/\sqrt{N}$ and the elements of the continued fraction $h_{s_1}, h_{s_1+1} , \ldots , h_{s_1 + m_2}$ of $\rho$ such that for any $r_1$ the interval $[r_1 - x_1, r_1]$ corresponding to the minimum of \[ \frac{x_1}{\sqrt{N}} \, \alpha_1 \, + \, \frac{x_2}{\sqrt{N}} \, \alpha_2 \] has not more than $m_2$ elements of $\eta^{(s_1)}$. \end{Le} The proof is also simple. If the number in question is too big then $\frac{x_1}{\sqrt{N}}$ will be too big. Therefore for given $r_1$ $\min$ can be attained at a point which is closer to $r_1$. 
The values of $q_{s_1}/ qrt{N}$ and $h_{s_1} , h_{s_1 + 1} \ldots$, $h_{s_1 + m_2}$ determine the structure of the partitions\break $\eta^{(s_1)} , \ldots , \eta^{(s_1 + m_2)}$. The conclusion which follows from both lemmas is that for each $r_1$ we check only finitely many $x_1$ and $x_2$ and find $\min ( x_1 \alpha_1 + x_2 \alpha_2)$ among them. The number of points which have to be checked depends on $\alpha_1, \alpha_2$, $\frac{q_{s_1}}{ qrt{N}}$ and $h_{s_1} , \ldots , h_{s_1 + m_2}$. Now we remark that $r_1$ must be also an end-point of some element of the partition $\eta^{(s_1)}$. Indeed, if $r_1$ increases within some element of $\eta^{(s_1)}$ then the set of values $r_1 - x_1$ which have to be checked remain the same. Then $\max\limits_{r_1}$ is attained at the end-point of this element $\eta^{(s_1)}$ because $r_1 - x_1$ is a monotone increasing function of $r_1$. The last step in the proof is the final choice of $r_1$. As was mentioned above $r_1$ must be an end-point of some element of $\eta^{(s_1)}$ and $\frac{x_1}{ qrt{N}}$ takes finitely many values. Therefore $r_1$ should be chosen so that $x_2/{ qrt{N}}$ takes the largest possible value. Take the last point $r^\prime_1 = \mathcal{R}^{q_{s_1 - 1}} 0$ on the orbit of $0$ of the length $q_{s_1}$. Assume for definiteness that $r^\prime_1$ lies to the left from $0$. Consider $m_2$ elements of $\eta^{(s_1)}$ which start from $r^\prime_1$ and go left. Then $r_1$ must be one of the end-points of these elements. Indeed, if $r_1$ lies more to the left from $0$ then the values $x_1$ take finitely many values and $x_2$ will be significantly smaller. Therefore it cannot give maximum over $r$ of our basic linear form. Thus we take $m_2$ elements of $\eta^{(s_1)}$, consider their end-points. Each end-point is a possible value of $r$. Taking finitely many $x_1$ (see Lemma 1 and Lemma 2) we find minimum of our basic linear form. After that we find $r$ for which this minimum takes maximal value. 
In this way we get the solution of our max-min problem. It is clear that this solution is a function of $\frac{q_{s_1}}{ qrt{N}}$ and elements $h_j , s_1 \leq j \leq s_1 + m_1$ of the continued fraction of $\rho$ near $s_1$. Since $\frac{q_{s_1}}{ qrt{N}}$ and $h_j , s_1 \leq j \leq s_1 + m_1$ have limiting distribution as $N \rightarrow \infty$ the number $f_N ( a ) = \frac{1}{N^{3/2}} \, F_1 ( a )$ also has a limiting distribution. It remains to extend our proof to the case when the pairs from $a_1 , a_2 , a_3$ have non-trivial common divisors, say $k_1$ is $gcd$ of $a_1 , a_3$ and $k_2$ is $gcd$ of $a_2, a_3$. It is easy to show that $k_1 , k_2$ have a joint limiting probability distribution in the whole ensemble $Q_N$. Fixing $k_1 , k_2$ we can write $a_1 = k_1 a^\prime_1$, $a_2 = k_2 a^\prime_2$, $a_3 = k_1 k_2 a^\prime_3$ where $a^\prime_1 , a^\prime_3$ are coprime, $a^\prime_2 , a_3$ are coprime and $k_1 , k_2$ are coprime. This implies that $(a^\prime_1)^{-1}$$(\hspace{-.3em}\mod a^\prime_3)$ exists and we can multiply both sides of (3) by $(a^\prime_1)^{-1}$. This will give \begin{equation} k_1 x_1 + k_2 a^\prime_2 \cdot (a^\prime_1)^{-1} \cdot x_2 \, \equiv \, r_1 (\hspace{-.8em}\mod a_3) \end{equation} where $r_1 = r \cdot ( a^\prime_1)^{-1}$ $( \hspace{-.5em}\mod a_3)$. Denote $b = a^\prime_2 ( a^\prime_1)^{-1}$. Then from (9) we have the linear form \begin{equation} k_1 x_1 + k_2 \, b x_2 \, \equiv \, r_1 \, ( \hspace{-.8em}\mod a_3) \end{equation} which we can treat in the same way as before. ection{Statistical properties of continued fractions}\label{Sec_3} Statistical properties of elements of continued fractions usually are identical for real numbers and for rationales with bounded denominators (see~\cite{Ustinov2005a}--\cite{Ustinov2008a}). 
Let $\mathcal M$ be the set of integer matrices $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)$ with determinant $\det S=\pm 1$ such that $ 1\le Q\le Q'$, $ 0\le P\le Q$, $ 1\le P'\le Q'.$ For real $\alpha\in(0,1)$ the fractions $P/Q$ and $P'/Q'$ with $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$ will be consecutive convergents to $\alpha$ (distinct from $\alpha$) if and only if $$ 0<\frac{Q'\alpha-P'}{-Q\alpha+P}= S^{-1}(\alpha)<1 $$ (see \cite[lemma 1]{Ustinov2005a}). Moreover if $\alpha=[0;h_1,h_2,\ldots]$ then for some $s\ge 1$ \begin{align}\label{MA} \frac PQ=[0;h_1,\dots,h_{s-1}],&\quad \frac {P'}{Q'}=[0;h_1,\dots,h_s],\\\dfrac{Q}{Q'}=[0;h_s, \ldots, h_1],&\nonumber\quad \frac{Q'\alpha-P'}{-Q\alpha+P}=[0;h_{s+1}, h_{s+2}, \ldots]. \end{align} It means that the distribution of partial quotients $h_{s-k}$, \ldots, $h_{s+k}$ depends on Gauss-Kuz'min statistics of fractions $Q/Q'$ and $({Q'\alpha-P'})/({-Q\alpha+P})$. For real $\alpha$, $x_1$, $x_2$, $y_1$, $y_2\in(0,1)$ denote by $N_{x_1,x_2,y_1,y_2}(\alpha,R)$ the number of solutions of the following system of inequalities \begin{gather} 0<S^{-1}(\alpha)\le x_1,\label{31_1}\quad Q\le x_2Q',\quad Q\le y_1R,\quad R\le y_2Q', \end{gather} with variables $P$, $P'$, $Q$, $Q'$ such that $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$. Let $$N(R)=N_{x_1,x_2,y_1,y_2}(R)=\int_0^1 N_{x_1,x_2,y_1,y_2}(\alpha,R)\,d\alpha$$ and $$G(x_1,x_2,y_1,y_2)=\begin{cases}\frac{2}{\zeta(2)}\left(\log(1+x_1x_2)\log\frac{y_1y_2}{x_2}-{\rm Li}_2(-x_1x_2)\right),&\text{if }x_2\le y_1y_2;\\ -\frac{2}{\zeta(2)}{\rm Li}_2(-x_1y_1y_2),&\text{if }x_2> y_1y_2, \end{cases}$$ where ${\rm Li}_2(\cdot)$ is the dilogarithm $${\rm Li}_2(z)= um\limits_{k=1}^{\infty}\dfrac{z^k}{k^2}= -\int_{0}^{z}\dfrac{\log(1-t)}{t}dt.$$ The next statement implies Theorem~\ref{Th_2}. 
\begin{Prop} \label{Le031.1} For $R\ge2$ $$N(R)=G(x_1,x_2,y_1,y_2)+O\left(\dfrac{x_1\log R}{R}\right).$$\end{Prop} \begin{proof} For every number $\alpha=[0;a_1,a_2,\ldots]$ find a unique matrix $S\in\mathcal M$ with elements $P$, $P'$, $Q$, $Q'$ defined by~\eqref{MA} with the additional restriction $Q\le R<Q'$. The inequalities $0<S^{-1}(\alpha)\le x_1$ define the interval $I_{x_1}(S) ubset(0,1)$ of the length $$|I_{x_1}(S)|=\left|\frac{P'+x_1P}{Q'+x_1Q}-\frac{P'}{Q'}\right|= \frac{x_1}{Q'(Q'+x_1Q)}.$$ Hence $$N(R)= um\limits_{\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M}[Q\le x_2Q',Q\le y_1R,R\le y_2Q'] \frac{x_1}{Q'(Q'+x_1Q)},$$ where $[A]$ is $1$ if the statement $A$ is true, and it is $0$ otherwise. Second row $(Q,Q')$ can be complemented to the matrix from $\mathcal M$ in two ways. That is why \begin{equation}\label{Art_31.3} N(R)=2 um\limits_{Q'\ge R/y_2} um\limits_{{(Q,Q')=1}}[Q\le x_2Q',Q\le y_1R]\frac{x_1}{Q'(Q'+x_1Q)}. \end{equation} In the first case $x_2\le y_1y_2$ and the M\"obius inversion formula gives \begin{align*}N(R)=&2 um\limits_{d\le R}\dfrac{\mu(d)}{d^2} um\limits_{R/(y_2d)\le Q'< y_1R/(x_2d)} um\limits_{Q\le x_2Q'}\frac{x_1}{Q'(Q'+x_1Q)}+\\+&2 um\limits_{d\le R}\dfrac{\mu(d)}{d^2} um\limits_{Q'\ge y_1R/(x_2d) } um\limits_{Q\le y_1R/d}\frac{x_1}{Q'(Q'+x_1Q)}=\\=& \frac{2}{\zeta(2)}\left(\log(1+x_1x_2)\log\frac{y_1y_2}{x_2}+ \int_{1/(x_1x_2)}^{\infty}\log\left(1+\frac1t\right)\frac{dt}t\right)+ O\left(\dfrac{x_1\log R}{R}\right)=\\=& \frac{2}{\zeta(2)}\left(\log(1+x_1x_2)\log\frac{y_1y_2}{x_2}-{\rm Li}_2(-x_1x_2)\right)+ O\left(\dfrac{x_1\log R}{R}\right). \end{align*} The second case $x_2> y_1y_2$ can be treated in the same way. \end{proof} Let $$L(R)=L_{x_1,x_2,y_1,y_2}(R)= um\limits_{b\le R^2} um\limits_{{a\le b\atop(a,b)=1}}N_{x_1,x_2,y_1,y_2}\left(\frac ab,R\right).$$ Theorem~\ref{Th_3} will be proved in the following form. 
\begin{Prop} \label{Le031.2} For $R\ge2$ $$\frac{2\zeta(2)}{R^4}L(R)=G(x_1,x_2,y_1,y_2)+O\left(\dfrac{x_1\log^2 R}{R}\right).$$\end{Prop} \begin{proof} Let $\alpha=a/b$ be a given number and $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$ be a solution of the system~\eqref{31_1}. Denote by $m$ and $n$ the integers such that $mP+nP'=a, mQ+nQ'=b$. Then the system~\eqref{31_1} can be written as follows \begin{gather*} mP+nP'=a,\quad mQ+nQ'=b,\\ 0<m/n\le x_1,\quad 0<Q/Q'\le x_2,\quad Q\le y_1R,\quad R\le y_2Q'. \end{gather*} Summing up solutions of this system over $a$ and $b$ we get that the sum $L(R)$ is equal to the number of solutions of the following system \begin{gather*} mQ+nQ'\le R^2,\quad 0<m/n\le x_1,\quad 0<Q/Q'\le x_2,\quad Q/y_1\le R<y_2Q', \end{gather*} where $\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$, $0\le m\le n$, $(m,n)=1$. For given $Q$ and $Q'$ values of $P$ and $P'$ can be found in two ways. The number of solutions of the last system is equal to the area of the corresponding region with the factor $1/\zeta(2)$ (see~\cite[Ch.~II, problems~21--22]{Vinogradov1972}) $$\frac{R^4}{2\zeta(2)}\cdot\frac{x_1}{Q'(Q'+x_1Q)}+O\left(\dfrac{x_1 R^{2}\log R}{Q'}\right).$$ It leads to the sum similar to~\eqref{Art_31.3}: \begin{align*}L(R)= \frac{R^4}{\zeta(2)}\sum\limits_{R/y_2\le Q'\le R^2}\sum\limits_{{Q\le\min\{y_1R,x_2Q'\}\atop(Q,Q')=1}}\frac{x_1}{Q'(Q'+x_1Q)}+ O(x_1R^{3}\log^2R). \end{align*} Therefore \begin{align*}L(R)= \frac{R^4}{2\zeta(2)}N(R)+ O(x_1R^{3}\log^2R), \end{align*} and Proposition~\ref{Le031.2} follows from Proposition~\ref{Le031.1}. 
\end{proof} In order to prove Theorem~\ref{Th_4} we have to use Kloosterman sums $$K_q(m,n)=\sum\limits_{x,y=1}^{q}\delta_q(xy-1)\,e^{2\pi i\frac{mx+ny}{q}},$$ where $\delta_q(a)$ is the characteristic function of divisibility by $q$: $$\delta_q(a)=[a\equiv 0\!\!\pmod{q}]=\begin{cases} 1,& \text{if } a\equiv 0\pmod{q},\\ 0,& \text{if } a\not\equiv 0\pmod{q}.\\ \end{cases}$$ Using the Estermann bound (see~\cite{Estermann1961}) \begin{equation*} \label{Estermannn} |K_q(m,n)|\le\sigma_0(q)\cdot(m,n,q)^{1/2}\cdot q^{1/2} \end{equation*} it is easy to prove the following statement (see~\cite{Ustinov2008a} for details). \begin{Le} \label{LeKloo} Let $q\ge1$ be an integer, $Q_1$, $Q_2$, $P_1$, $P_2$ be real numbers and $0\le P_1,P_2\le q$. Then the sum $$\Phi_q(Q_1,Q_2;P_1,P_2)=\sum\limits_{ Q_1<u\le Q_1+P_1\atop Q_2<v\le Q_2+P_2 }\delta_q(uv-1)$$ satisfies the asymptotic formula $$\Phi_q(Q_1,Q_2;P_1,P_2)=\dfrac{\varphi(q)}{q^2}\cdot P_1P_2+O\left(\psi(q)\right),$$ where \begin{equation*} \label{Art_24.psi} \psi(q)=\sigma_0 (q)\log^2 (q+1)q^{1/2}. \end{equation*} \end{Le} It implies the following general result (see~\cite{Ustinov2005a}). \begin{Le} \label{Le3.2} Let $q\ge1$ be an integer and let $a(u,v)$ be a function defined on the set of integral points $(u,v)$ such that $1\le u,v\le q$. Assume that this function satisfies the inequalities \begin{equation} \label{3.1}a(u,v)\ge0,\quad \Delta_{1,0}a(u,v)\le0,\quad \Delta_{0,1}a(u,v)\le0,\quad \Delta_{1,1}a(u,v)\ge0 \end{equation} at all points at which these conditions have the well-defined meaning. Then the sum $$W=\sum\limits_{u,v=1}^{q}\delta_q(uv-1)a(u,v)$$ satisfies the asymptotics $$W=\dfrac{\varphi(q)}{q^2}\sum\limits_{u,v=1}^{q}a(u,v)+O\left(A\psi(q)\sqrt{q}\right),$$ where $\psi(q)$ is the function from lemma~\ref{LeKloo} and $A=a(1,1)$ is the maximum of the function $a(u,v)$. 
\end{Le} Let \begin{align*} N_z(R)=&N_{z,x_1,x_2,y_1,y_2}(R)=\int_0^z N_{x_1,x_2,y_1,y_2}(\alpha,R)\,d\alpha,\\ L_z(R)=&L_{z,x_1,x_2,y_1,y_2}(R)=\sum\limits_{b\le R^2}\sum\limits_{{a\le zb\atop(a,b)=1}}N_{x_1,x_2,y_1,y_2}\left(\frac ab,R\right). \end{align*} The next statement implies Theorem~\ref{Th_4}. \begin{Prop} \label{Le031.3} For $R\ge2$ \begin{align*} N_z(R)=&z\cdot G(x_1,x_2,y_1,y_2)+O\left(\dfrac{x_1\log^3R}{R^{1/2}}\right),\\ \frac{2\zeta(2)}{R^4}L_z(R)=&z\cdot G(x_1,x_2,y_1,y_2)+O\left(\dfrac{x_1\log^3R}{R^{1/2}}\right). \end{align*}\end{Prop} \begin{proof} Let $$\mathcal M_z=\left\{\begin{pmatrix} P & P'\\ Q & Q' \end{pmatrix}\in\mathcal M:\dfrac{P'}{Q'}\le z\right\}.$$ For a given $z$ there is at most one matrix $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$ such that $Q\le R<Q'$ and $z\in I_{x_1}(S)$. Hence \begin{align*} N_z(R)=&\sum\limits_{\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M_z}[Q\le x_2Q',Q\le y_1R,R\le y_2Q'] \frac{x_1}{Q'(Q'+x_1Q)}+O\left(\dfrac{x_1}{R^2}\right). \end{align*} If $Q'$ is fixed then $P'$ and $Q$ satisfy the congruence $P'Q\equiv \pm1\pmod{Q'}$. Therefore \begin{align*} N_z(R)=&\sum\limits_{Q'\ge R/y_2}\sum\limits_{P',Q=1}^{Q'}\delta_{Q'}(P'Q\pm1)[Q\le \min\{x_2Q',y_1R\},P'\le zQ'] \frac{x_1}{Q'(Q'+x_1Q)}+O\left(\dfrac{x_1}{R^2}\right). \end{align*} Using Lemma~\ref{Le3.2} we obtain \begin{align*} N_z(R)=&\sum\limits_{Q'\ge R/y_2}\dfrac{\varphi(Q')}{(Q')^2}\sum\limits_{P',Q=1}^{Q'}[Q\le \min\{x_2Q',y_1R\},P'\le zQ'] \frac{x_1}{Q'(Q'+x_1Q)}+O\left(\dfrac{x_1\log^3R}{R^{1/2}}\right)=\\=& z\sum\limits_{Q'\ge R/y_2}\dfrac{\varphi(Q')}{Q'}\sum\limits_{Q=1}^{Q'}[Q\le \min\{x_2Q',y_1R\}] \frac{x_1}{Q'(Q'+x_1Q)}+O\left(\dfrac{x_1\log^3R}{R^{1/2}}\right). \end{align*} Applying the formula \begin{equation} \label{Art_31.Euler} \dfrac{\varphi(Q')}{Q'}=\sum\limits_{d\mid Q'}\dfrac{\mu(d)}{d} \end{equation} we get the same sum as in the proof of Proposition~\ref{Le031.1}.
As in Proposition~\ref{Le031.2}, the sum $L_z(R)$ is equal to the number of solutions of the system \begin{gather*} mQ+nQ'\le R^2,\quad mP+nP'\le z(mQ+nQ'),\\ 0<m/n\le x_1,\quad 0<Q/Q'\le x_2,\quad Q/y_1\le R<y_2Q', \end{gather*} where $\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$, $0\le m\le n$, $(m,n)=1$. Again, there is at most one matrix $S=\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M$ such that $Q\le R<Q'$ and $z\in I_{x_1}(S)$. Also for $Q'\ge R$ $$\sum\limits_{n\ge 1}\sum\limits_{m\le x_1n}[mQ+nQ'\le R^2]\ll x_1R^2.$$ This estimate implies that \begin{align*}L_z(R)=& \frac{R^4}{\zeta(2)}\sum\limits_{\bigl(\begin{smallmatrix} P & P'\\ Q & Q' \end{smallmatrix}\bigr)\in\mathcal M_z}[R/y_2\le Q'\le R^2,Q\le\min\{y_1R,x_2Q'\}] \frac{x_1}{Q'(Q'+x_1Q)}+ O(x_1R^{3}\log^2R)=\\=& \frac{R^4}{\zeta(2)}\sum\limits_{R/y_2\le Q'\le R^2}\sum\limits_{P',Q=1}^{Q'}[Q\le\min\{y_1R,x_2Q'\},P'\le zQ']\frac{x_1\delta_{Q'}(P'Q\pm1)}{Q'(Q'+x_1Q)}+ O(x_1R^{3}\log^2R). \end{align*} Using Lemma~\ref{Le3.2} one more time we obtain \begin{align*} L_z(R) = & \frac{R^4}{\zeta(2)}\sum\limits_{Q'\ge R/y_2}\dfrac{\varphi(Q')}{(Q')^2}\sum\limits_{P',Q=1}^{Q'}[Q\le \min\{x_2Q',y_1R\},P'\le zQ'] \frac{x_1}{Q'(Q'+x_1Q)} + O \left(x_1R^{7/2}\log^3R\right) =\\ =& \dfrac{zR^4}{\zeta(2)}\sum\limits_{Q'\ge R/y_2}\dfrac{\varphi(Q')}{Q'}\sum\limits_{Q=1}^{Q'}[Q\le \min\{x_2Q',y_1R\}] \dfrac{x_1}{Q'(Q'+x_1Q)} + O\left(x_1R^{7/2}\log^3R\right). \end{align*} \noindent Applying formula~\eqref{Art_31.Euler} we get the same sum as in the proof of Proposition~\ref{Le031.1}. \end{proof} \begin{Zam} In the simplest case $x_2=y_1=y_2=1$ we have the cumulative distribution function $$F(x)=F(x,1,1,1)=-\frac{2}{\zeta(2)}{\rm Li}_2(-x),$$ which is not equal to the Gaussian function $\log_2(1+x)$. As $x\to 0$ the function $F(x)$ (with error terms in Propositions~\ref{Le031.1} and~\ref{Le031.2}) decreases as a linear function $F(x)\sim 2x/\zeta(2)$.
This fact shows that the expectation of the partial quotient $a_s$ (defined by the inequalities $q_{s-1}\le R< q_s$) is equal to infinity.\end{Zam} \section{Concluding remarks} The calculations done by one of the authors (A.~Ustinov) show that the density of the limiting distribution of $\frac{F(a_1,a_2,a_3)}{\sqrt{a_1a_2a_3}}$ has the following simple form: \[ p ( t ) = \left\{ \begin{array}{ll} 0 , & \mbox{if} \ t \in [ 0,\sqrt{3}]; \\ \frac{12}{\pi} \, \left( \frac{t}{\sqrt{3}} -\sqrt{4-t^2} \right) , & \mbox{if} \ t \in [\sqrt{3} , 2]; \\ \frac{12}{\pi^2} \left( t\sqrt{3}\arccos \frac{t + 3\sqrt{t^2 - 4}}{4\sqrt{t^2 - 3}} + \frac{3}{2}\sqrt{t^2 - 4} \log \frac{t^2-4}{t^2 - 3} \right) , & \mbox{if} \ t \in [ 2 , + \infty ) . \end{array} \right. \] This result will be published elsewhere. \end{document}
\begin{document} \leftline{ \scriptsize \it } \title[Complex New Operators] {Approximation by Complex Baskakov-Sz\'asz-Durrmeyer Operators in Compact Disks} \maketitle \begin{center} {\bf Sorin G. Gal}\\ Department of Mathematics and Computer Science, University of Oradea\\ Str. Universitatii No. 1, 410087 Oradea, Romania\\ e-mail address : [email protected] \\ and {\bf Vijay Gupta}\\ Department of Mathematics, Netaji Subhas Institute of Technology\\ Sector 3 Dwarka, New Delhi-110078, India\\ e-mail address : [email protected] \\ \end{center} \begin{abstract} In the present paper, we deal with the complex Baskakov-Sz\'asz-Durrmeyer mixed operators and study Voronovskaja type results with quantitative estimates for these operators attached to analytic functions of exponential growth in $\mathbb{D}_{R}=\{z\in \mathbb{C} ; |z|<R\}$. Also, the exact order of approximation is found. The method used allows to construct complex Sz\'asz-type and Baskakov-type approximation operators without to involve the values on $[0, \infty)$. \end{abstract} {\bf Subject Classification.} \,30E10,\, 41A25,\, 41A28.\\ {\bf Keywords.} Complex Baskakov-Sz\'asz-Durrmeyer operators, Voronovskaja type result, exact order of approximation in compact disks, simultaneous approximation. \section{Introduction} The study of approximation properties for the Sz\'asz type operators on $[0, +\infty)$ was well established in \cite{szasz} and then generalized in various ways, see e.g. \cite{AG1}. Also, very recently, approximation properties for several real operators including the Sz\'asz-Durrmeyer operators are presented in the book \cite{vn}. In order to approximate integrable functions on the positive real axis, in \cite{vgggs} it was proposed the modifications of the Baskakov operators with the weights of Sz\'{a}sz basis functions under the integral sign. These operators reproduce only constant functions. 
Ten years later in \cite{PNA_AJM} it was proposed yet another sequence of the Baskakov-Sz\'{a}sz-Durrmeyer operators which preserve constant as well as linear functions. Also, generalizations of the Durrmeyer polynomials were studied in e.g. \cite{AG2}. In the complex domain, the overconvergence phenomenon holds, that is the extension of approximation properties from real domain to complex domain. In this context, the first qualitative kind results were obtained in the papers \cite{DGP1}, \cite{WR}, \cite{Wood1}. Then, in the books \cite{Gal}, \cite{og} quantitative approximation results are presented for several type of approximation operators. For Sz\'{a}sz-Mirakjan operator and its Stancu variant in complex domain, we refer the readers to \cite{Aydin}, \cite{Ce_Is}, \cite{Ispir1}, \cite{G1}, \cite{Mah1}, \cite{Mah2}, \cite{Su_Ib} and \cite{VGDKV}. Also for complex Bernstein-Durrmeyer operators, several papers are available in the literature (see e.g. \cite{G2}, \cite{G3}, \cite{GaGu1}, \cite{GaGu2}, \cite{G6}, \cite{Mah3}, \cite{Os}, \cite{Ren1}, \cite{Ren2}), for complex Sz\'{a}sz-Durrmeyer operators see \cite{CS}, while for complex $q$-Bal\'asz-Szabados operators see \cite{Ispir2}. In the present paper, we study the rate of approximation of analytic functions in a disk $\mathbb{D}_{R}=\{z\in \mathbb{C} ; |z|<R\}$, i.e. 
$f(z)=\sum_{k=0}^{\infty}c_{k}z^{k}$, of exponential growth, and the Vo\-ro\-nov\-ska\-ja type result, for a natural derivation from the complex operator $L_{n}(f)(z)$ introduced in the case of a real variable in \cite{PNA_AJM}, and formally defined as an operator of a complex variable by \begin{equation}\label{e:PNA_AJM1} L_n(f)(z):=n\sum_{v=1}^\infty b_{n,v}(z)\int_0^\infty s_{n,v-1}(t)f(t)dt+(1+z)^{-n}f(0), z\in \mathbb{C}, \end{equation} where $$b_{n, k}(z)=\left( \begin{array}{c} n+k-1 \\ k \end{array} \right)\frac{z^k}{(1+z)^{n+k}}, s_{n,k}(t)=e^{-n t}\frac{(nt)^k}{k!}.$$ An important relationship used for the quantitative results in approximation of an analytic function $f$ by the complex operator $L_{n}(f)$ would be $L_{n}(f)(z)=\sum_{k=0}^{\infty}c_{k}L_{n}(e_{k})(z)$, which, however, requires some additional hypothesis on $f$ (because the definition of $L_{n}(f)(z)$ involves the values of $f$ on $[0, +\infty)$ too) and implies restrictions on the domain of convergence. This situation can naturally be avoided by defining directly the approximation complex operator $$L_{n}^{*}(f)(z)=\sum_{k=0}^{\infty}c_{k}\cdot L_{n}(e_{k})(z),$$ whose definition evidently omits the values of $f$ outside of its disk of analyticity. In this paper we deal with the approximation properties of the complex operator $L_{n}^{*}(f)(z)$. It is worth noting here that if instead of the above defined $L_{n}(f)(z)$ we consider any other Sz\'asz-type or Baskakov-type complex operator, then for $L_{n}^{*}(f)(z)$ defined as above, all the quantitative estimates in e.g. \cite{Aydin}, \cite{Ce_Is}, \cite{G1}, \cite{Gal4}, \cite{CS}, \cite{Gal5}, \cite{VGDKV}, \cite{Ispir1}, \cite{Mah1} hold true identically, without needing the additional hypothesis on the values of $f$ on $[0, \infty)$ imposed there. Everywhere in the paper we denote $\|f\|_{r}=\max\{|f(z)| ; |z|\le r\}$.
\section{Auxiliary Result} In the sequel, we need the following lemma : \begin{lemma} \label{l:2} Denoting $e_{k}(z)=z^{k}$ and $T_{n, k}(z)=L_{n}(e_{k})(z)$, we have the recurrence formula $$T_{n, k+1}(z)=\frac{z(1+z)}{n}T^{\prime}_{n, k}(z)+\frac{nz+k }{n}T_{n, k}(z).$$ Also, $T_{n, k}(z)$ is a polynomial of degree $k$. \end{lemma} \begin{proof} Using $z(1+z)b_{n,\nu}^\prime(z)=(\nu-nz)b_{n,\nu}(z)$, we have \begin{eqnarray*}z(1+z)T_{n, k}^\prime(z)&=&n\sum_{\nu=1}^{\infty}z(1+z)b_{n, \nu}^\prime(z)\int_{0}^{\infty}s_{n,\nu-1}(t)t^{k}d t \\ &=&n\sum_{\nu=1}^{\infty}(\nu-n z)b_{n, \nu}(z)\int_{0}^{\infty}s_{n,\nu-1}(t)t^{k}d t\\ &=&n\sum_{\nu=1}^{\infty}b_{n, \nu}(z)\int_{0}^{\infty}[(\nu-1-nt)+(1+nt-n z)]s_{n,\nu-1}(t)t^{k}d t\\ &=&n\sum_{\nu=1}^{\infty}b_{n, \nu}(z)\int_{0}^{\infty}s_{n,\nu-1}^\prime(t)t^{k+1}d t\\ &&+(1-n z)T_{n,k}(z)+nT_{n,k+1}(z) \end{eqnarray*} Thus integrating by parts the last integral, we get. \begin{eqnarray*}z(1+z)T_{n, k}^\prime(z)&=&-(k+1)T_{n,k}(z)+(1-n z)T_{n,k}(z)+nT_{n,k+1}(z) \end{eqnarray*} which completes the proof of the recurrence relation. Taking above step by step $k=1, 2, ..., $, by mathematical induction we easily get that $T_{n, k}(z)$ is a polynomial of degree $k$. \end{proof} \section{Main Results} Our first main result is the following theorem for upper bound. \begin{theorem} \label{t:1} For $f:\mathbb{D}_{R}\to \mathbb{C}$, $1<R<+\infty$, analytic on $\mathbb{D}_{R}$, i.e. $f(z)=\sum_{k=0}^{\infty}c_{k}z^{k}$, for all $z\in \mathbb{D}_{R}$, suppose that there exist $M>0$ and $A\in(\frac{1}{R},1)$, with the property that $|c_k|\le M\frac{A^k}{k!},$ for all $k=0,1,...,$ (which implies $|f(z)|\leq Me^{A|z|}$ for all $z\in \mathbb{D}_{R}$). 
(i) If $1\le r <\frac{1}{A}$, then for all $|z|\le r$ and $n\in \mathbb{N}$ with $n > r+2$, $L_{n}^{*}(f)(z)$ is well-defined and we have \begin{eqnarray*} |L_{n}^{*}(f)(z)-f(z)|\le \frac{C_{r, A, M}}{n}, \end{eqnarray*} where $C_{r,A, M}=\frac{M(r+2)}{r}\cdot \sum_{k=2}^\infty (k+1)\cdot (r A)^k <\infty ;$ (ii) If $1\le r < r_{1} < \frac{1}{A}$, then for all $|z|\le r$ and $n, p\in \mathbb{N}$ with $n>r+2$, we have $$|[L_{n}^{*}(f)]^{(p)}(z)-f^{(p)}(z)|\le \frac{p!r_{1}C_{r_{1}, A, M}}{n (r_{1}-r)^{p+1}},$$ where $C_{r_1, A, M}$ is given as at the above point (i). \end{theorem} \begin{proof} (i) By using the recurrence relation of Lemma \ref{l:2}, we have $$T_{n, k+1}(z)=\frac{z(1+z)}{n}T^{\prime}_{n, k}(z)+\frac{nz+k}{n}T_{n, k}(z),$$ for all $z\in\mathbb{C},k\in\{0,1,2,....\},n\in N$. From this we immediately get the recurrence formula \begin{eqnarray*} T_{n, k}(z)-z^k&=&\frac{z(1+z)}{n}[T_{n, k-1}(z)-z^{k-1}]^\prime+\frac{nz+k-1}{n}[T_{n, k-1}(z)-z^{k-1}]\\ &&+\frac{(k-1)}{n}z^{k-1}(2+z), \end{eqnarray*} for all $z\in\mathbb{C},k,n\in N$. Now for $1\leq r<R$, if we denote the norm-$||\cdot||_r$ in $C(\overline{\mathbb{D}}_r)$, where $\overline{\mathbb{D}}_r=\{z\in \mathbb{C}:|z|\leq r\}$, then by a linear transformation, the Bernstein's inequality in the closed unit disk becomes $|P_k^\prime (z)|\leq \frac{k}{r}||P_k||_r$, for all $|z|\leq r$, where $P_k(z)$ is a polynomial of degree $\leq k$. 
Thus from the above recurrence relation, we get \begin{eqnarray*} ||T_{n, k}-e_k||_r&\le&\frac{r(1+r)}{n}\cdot ||T_{n, k-1}-e_{k-1}||_r\frac{k-1}{r}+\frac{nr+k-1}{n}||T_{n, k-1}-e_{k-1}||_r\\ &&+\frac{(k-1)}{n}(2+r)r^{k-1}, \end{eqnarray*} which, by using the notation $\rho=r+2$, implies $$||T_{n, k}-e_k||_r \le \left(r+\frac{(2+r)(k-1)}{n}\right)\cdot ||T_{n, k-1}-e_{k-1}||_r+\frac{(k-1)}{n}(2+r)r^{k-1}$$ $$=\left(r+\frac{\rho(k-1)}{n}\right)\cdot ||T_{n, k-1}-e_{k-1}||_r+\frac{(k-1)}{n}\rho\cdot r^{k-1}.$$ In what follows we prove by mathematical induction with respect to $k$ that for $n\ge \rho$, this recurrence implies \begin{eqnarray*} ||T_{n, k}-e_k||_r&\le&\frac{\rho \cdot (k+1)!}{n}\cdot r^{k-1} \ \ \mbox{ for all} \ \ k\ge 1. \end{eqnarray*} Indeed for $k=1$ it is trivial, as the left-hand side is zero. Suppose that it is valid for $k$, the above recurrence relation implies that \begin{eqnarray*} ||T_{n, k+1}-e_{k+1}||_r&\le&\left(r+\frac{\rho\cdot k}{n}\right)\cdot \frac{\rho\cdot (k+1)!}{n}r^{k-1}+\frac{\rho \cdot k}{n}r^{k}. \end{eqnarray*} It remains to prove that \begin{eqnarray*} \left(r+\frac{\rho\cdot k}{n}\right)\cdot \frac{\rho \cdot (k+1)!}{n}r^{k-1}+\frac{\rho\cdot k}{n}r^{k}\le \frac{\rho \cdot (k+2)!}{n}r^k, \end{eqnarray*} or after simplifications, equivalently to \begin{eqnarray*} \left(r+\frac{\rho \cdot k}{n}\right)\cdot (k+1)!+r k \le (k+2)! \cdot r, \end{eqnarray*} for all $k\in \mathbb{N}$ and $r\ge 1$. Since by $n\ge \rho$, we get $$\left(r+\frac{\rho\cdot k}{n}\right)\cdot (k+1)!+r k \le \left(r+k\right)\cdot (k+1)!+r k,$$ it is good enough if we prove that $$\left(r+k\right)\cdot (k+1)!+r k \le (k+2)! \cdot r.$$ But this last inequality is obviously valid for all $k\ge 1$ (and fixed $r\ge 1$). In conclusion, the required estimate holds. Now, let us prove that $L_{n}^{*}(f)(z)$ is well-defined for $|z|\le r$ and $n > r+2$. 
Indeed, we have $$|L_{n}^{*}(f)(z)|\le \sum_{k=0}^\infty |c_k|\cdot | L_n(e_k)(z)|=\sum_{k=0}^\infty |c_k|\cdot |T_{n,k}(z)|$$ $$\le M\cdot \sum_{k=0}^{\infty} \frac{A^{k}}{k !}(\|T_{n,k}-e_{k}\|_{r}+r^{k})\le Me^{A r}+ M\sum_{k=0}^\infty \frac{A^{k}}{k !}\cdot \frac{(r+2)(k+1)!}{n}\cdot r^{k}$$ $$\le Me^{A r}+ M(r+2)\sum_{k=0}^\infty (A r)^{k}(k+1)<+\infty.$$ In conclusion, we get \begin{eqnarray*} |L_n^{*}(f)(z)-f(z)|&\le& \sum_{k=2}^\infty |c_k|\cdot |T_{n,k}(z)-e_k(z)|\le \sum_{k=2}^\infty M\frac{A^k}{k!}\frac {(r+2)\cdot (k+1)!}{n}r^{k-1}\\ &=& \frac{M(r+2)}{r n}\sum_{k=2}^\infty (k+1)\cdot (r A)^k=\frac{C_{r, A, M}}{n}, \end{eqnarray*} where $C_{r,A, M}=\frac{M(r+2)}{r}\cdot \sum_{k=2}^\infty (k+1)\cdot (r A)^k <\infty$ for all $1\le r <\frac{1}{A},$ taking into account that the series $\sum_{k=1}^\infty (k+1)u^k$ is uniformly convergent in any compact disk included in the open unit disk. (ii) Denote by $\gamma$ the circle of radius $r_{1}>r$ and center $0$. For any $|z|\le r$ and $v\in \gamma$, we have $|v-z|\ge r_{1}-r$ and by Cauchy's formula, for all $|z|\le r$ and $n > r+2$ it follows \begin{eqnarray} |[L_{n}^{*}(f)]^{(p)}(z)-f^{(p)}(z)|&=& \frac{p!}{2\pi}\left |\int_{\gamma}\frac{L_{n}^{*}(f)(v)-f(v)}{(v-z)^{p+1}}dv\right | \le \frac{C_{r_{1}, A, M}}{n}\frac{p!}{2\pi}\frac{2\pi r_{1}}{(r_{1}-r)^{p+1}} \nonumber \\ &=& \frac{C_{r_{1}, A, M}}{n}\frac{p! r_{1}}{(r_{1}-r)^{p+1}}, \nonumber \end{eqnarray} which proves (ii) and the theorem. \end{proof} The following Voronovskaja type result holds. \begin{theorem}\label{t:2} For $f:\mathbb{D}_{R}\to \mathbb{C}$, $2<R<+\infty$, analytic on $\mathbb{D}_{R}$, i.e. $f(z)=\sum_{k=0}^{\infty}c_{k}z^{k}$, for all $z\in \mathbb{D}_{R}$, suppose that there exist $M>0$ and $A\in(\frac{1}{R},1)$, with the property that $|c_k|\le M\frac{A^k}{k!},$ for all $k=0,1,\dots$.
If $1\le r< r+1<\frac{1}{A}$ then for all $|z|\le r$ and $n\in \mathbb{N}$ with $n>r+2$, we have $$\left|L_{n}^{*}(f)(z)-f(z)-\frac{z(z+2)}{2n}f^{\prime \prime}(z)\right|\leq \frac{C_{r, A, M}(f)}{n^2},$$ where $C_{r, A, M}(f)=M\sum_{k=2}^\infty \frac{k-1}{k !}[A(r+1)]^{k} B_{k,r}+\frac{4 M(r+2)}{r}\cdot \frac{1}{ln^{2}(1/\rho)}\cdot \left (\frac{1}{(1-A r)^{2}}+\frac{4}{1-A r}\right )<\infty$ and $$B_{k,r}=(k-1)^2(k-2)r^2+2(k-1)(k-2)(2k-3)(r+1)+(r+1)(r+2)\cdot (k+1)!.$$ \end{theorem} \begin{proof} By Theorem \ref{t:1}, (i), $L_{n}^{*}(f)(z)$ is well-defined, for all $|z|\le r$, $n > r+2$. We can write $$\frac{z(z+2)f^{\prime \prime}(z)}{2n}=\frac{z(z+2)}{2n}\sum_{k=2}^\infty c_kk(k-1)z^{k-2} =\frac{1}{2n}\sum_{k=1}^\infty c_kk(k-1)(z+2)z^{k-1}.$$ Thus $$\left|L_n^{*}(f)(z)-f(z)-\frac{z(z+2)}{2n}f^{\prime \prime}(z)\right| \leq \sum_{k=1}^\infty |c_k|\left|T_{n,k}(z)-e_k(z)-\frac{k(k-1)(z+2)z^{k-1}}{2n}\right|.$$ By Lemma \ref{l:2}, for all $n\in \mathbb{N}, z\in \mathbb{C}$ and $k=0,1,2,...$, we have $$T_{n, k+1}(z)=\frac{z(1+z)}{n}T^{\prime}_{n, k}(z)+\frac{nz+k }{n}T_{n, k}(z).$$ If we denote $$E_{k,n}(z)=T_{n,k}(z)-e_k(z)-\frac{k(k-1)(z+2)z^{k-1}}{2n},$$ then it is obvious that $E_{k,n}(z)$ is a polynomial of degree less than or equal to $k$ and by simple computation and the use of above recurrence relation, we are led to $$E_{k,n}(z)=\frac{z(1+z)}{n}E_{k-1,n}^{\prime}(z)+\frac{nz+k-1}{n}E_{k-1,n}(z)+X_{k,n}(z),$$ where $$X_{k,n}(z)=\frac{z^{k-2}}{2n^2}\left[(k-1)^2(k-2)z^2+2(k-1)(k-2)(2k-3)z+2(k-1)(k-2)(2k-3)\right]$$ for all $k\geq 2,n\in \mathbb{N}$ and $|z|\le r$. Using the estimate in the proof of Theorem \ref{t:1}, we have $$|T_{n,k}(z)-e_k(z)|\leq \frac{(k+1)! (r+2) r^{k-1}}{n},$$ for all $k\ge 1, n\ge r+2, |z|\leq r$, with $1\leq r$. It follows $$|E_{k,n}(z)|\leq \frac{r(r+1)}{n}|E_{k-1,n}^{\prime}(z)|+\left (r+\frac{k-1}{n}\right )|E_{k-1,n}(z)|+|X_{k,n}(z)|.$$ Now we shall find the estimation of $|E_{k-1,n}^{\prime}(z)|$ for $k\geq 2$. 
Taking into account the fact that $E_{k-1,n}(z)$ is a polynomial of degree $\leq k-1$, we have $$|E_{k-1,n}^{\prime}(z)|\leq \frac{k-1}{r}||E_{k-1,n}||_r$$ $$\leq \frac{k-1}{r}\left[||T_{n,k-1}(z)-e_{k-1}(z)||_r+\left|\left|\frac{(k-1)(k-2)[e_1+2]e_{k-2}}{2 n}\right|\right|_r\right]$$ $$\leq \frac{k-1}{r}\left[\frac{k! (r+2) r^{k-2}}{n}+\frac{r^{k-2}(k-1)(k-2)(r+2)}{n}\right]$$ $$ \leq \frac{(r+2) r^{k-2} (k-1)(k! + (k-1)(k-2))}{nr}.$$ Thus, by the obvious inequality $(k-1)(k! + (k-1)(k-2))\le (k+1)!$, we get $$\frac{r(1+r)}{n}|E_{k-1,n}^{\prime}(z)|\leq \frac{(r+1)(r+2)\cdot (k+1)! r^{k-2}}{n^2}$$ and $$|E_{k,n}(z)|\leq \frac{(r+1)(r+2)\cdot (k+1)! r^{k-2}}{n^2}+\left (r+\frac{k-1}{n}\right )|E_{k-1,n}(z)|+|X_{k,n}(z)|,$$ for all $|z|\leq r, k\geq 2$ and $n > r+2$. For $ k-1\le n$ (i.e. $k\le n+1$) and $|z|\le r$, taking into account that $r+(k-1)/n\le r+1$, we get $$|E_{k,n}(z)|\leq \frac{(r+1)(r+2)(k+1)!\cdot r^{k-2}}{n^2}+(r+1)|E_{k-1,n}(z)|+|X_{k,n}(z)|,$$ where $$|X_{k,n}(z)|\le \frac{r^{k-2}}{n^2}\left[(k-1)^2(k-2)r^2+2(k-1)(k-2)(2k-3)r+2(k-1)(k-2)(2k-3)\right]$$ $$\le \frac{r^{k-2}}{n^2} A_{k,r},$$ for all $|z|\le r, k\ge 1, n>r+2,$ where $$A_{k,r}=(k-1)^2(k-2)r^2+2(k-1)(k-2)(2k-3)(r+1).$$ Thus for all $|z|\le r, n > r+2$ and $k\le n+1$, $$|E_{k,n}(z)|\le (r+1)|E_{k-1,n}(z)|+\frac{r^{k-2}}{n^2}B_{k,r},$$ where $$B_{k,r}=A_{k,r}+(r+1)(r+2)\cdot (k+1)!.$$ But $E_{0,n}(z)=E_{1,n}(z)=0$, for any $z\in \mathbb{C}$ and therefore by writing last inequality for $2\le k\le n+1$, we easily obtain step by step the following $$|E_{k,n}(z)|\leq \frac{(r+1)^{k}}{n^2}\sum_{j=2}^{k} B_{j,r}\le \frac{(k-1)(r+1)^k}{n^2}B_{k,r}.$$ It follows that $$\left|L_n^{*}(f)(z)-f(z)-\frac{z(z+2)}{2n}f^{\prime \prime}(z)\right|\leq \sum_{k=2}^{n+1} |c_k|\cdot |E_{k,n}(z)|+\sum_{k=n+2}^{\infty} |c_k|\cdot |E_{k,n}(z)|$$ $$\le \frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^kB_{k,r} +\sum_{k=n+2}^{\infty} |c_k|\cdot \left[|T_{n,k}(z)-e_k(z)|+\frac{k(k-1)(r+2)r^{k-1}}{2n}\right]$$ $$\le 
\frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^k B_{k,r} +\sum_{k=n+2}^{\infty} |c_k|\cdot \left[\frac{(r+2)\cdot (k+1)!}{n}\cdot r^{k-1}+\frac{k(k-1)(r+2)r^{k-1}}{2n}\right]$$ $$\le \frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^k B_{k,r} +2\sum_{k=n+2}^{\infty} |c_k|\cdot \frac{(r+2)\cdot (k+1)!}{n}\cdot r^{k-1}$$ $$\le \frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^k B_{k,r} +\frac{2 M(r+2)}{n r}\sum_{k=n+2}^{\infty} (k+1)(A r)^k$$ $$=\frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^k B_{k,r} +\frac{2 M(r+2)}{n r}\cdot (A r)^{n+2}\sum_{k=n+2}^{\infty} (k+1)(A r)^{k-n-2}.$$ But, denoting for simplicity $\rho=A r <1$, we easily obtain $$(A r)^{n+2}\cdot \sum_{k=n+2}^{\infty} (k+1)(A r)^{k-n-2}=\rho^{n+2}\cdot\left [\sum_{j=0}^{\infty}j \rho^{j}+(n+3)\sum_{j=0}^{\infty}\rho^{j}\right ]$$ $$=\rho^{n+2}\left [\rho \cdot \left (\sum_{j=0}^{\infty}\rho^{j}\right )^{\prime}+(n+3)\cdot \frac{1}{1-\rho}\right ] =\rho^{n+3}\cdot \frac{1}{(1-\rho)^{2}}+\rho^{n+2}\cdot (n+3)\cdot \frac{1}{1-\rho},$$ which leads to the estimate $$\left|L_n^{*}(f)(z)-f(z)-\frac{z(z+2)}{2n}f^{\prime \prime}(z)\right|$$ $$\leq \frac{1}{n^2}\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^k B_{k,r}+\frac{2M(r+2)}{n r}\cdot \left [\frac{(A r)^{n+3}}{(1-A r)^{2}}+\frac{(A r)^{n+2} (n+3)}{1-A r}\right ]$$ $$\le M \frac{1}{n^2}\sum_{k=2}^{\infty}\frac{k-1}{k!}[A(r+1)]^k \cdot B_{k,r} +\frac{4 M (r+2)}{r n^{2}}\cdot \frac{1}{ln^{2}(1/(Ar))}\cdot \left [\frac{1}{(1-A r)^{2}}+\frac{4}{1-A r}\right ],$$ where we used the inequality $$\rho^{n+3}\le \rho^{n+2}\le \rho^{n}\le \frac{2}{ln^{2}(1/(A r))}\cdot \frac{1}{n^{2}}, \mbox{ for all } n\in \mathbb{N},$$ applied for $\rho=A r<1$ and where for $(r+1)A<1$, we obviously have the series $\sum_{k=2}^{\infty}|c_k|(k-1)(r+1)^kB_{k,r}$ convergent. Indeed, by $e^{x}=1+x+\frac{x^{2}}{2}+\frac{x^{3}}{6}+ ... $, we get $e^{x}\ge \frac{x^{2}}{2}$, for all $x\ge 0$. 
Then, by $(1/\rho)^{n}=e^{n\ln(1/\rho)}$, it follows $\frac{1}{\rho^{n}}\ge \frac{n^{2}\ln^{2}(1/\rho)}{2}$, for all $n\in \mathbb{N}$, which immediately implies the above claimed inequality. \end{proof} The following exact order of approximation can be obtained. \begin{theorem}\label{t:3} Suppose that the hypotheses of Theorem \ref{t:2} hold. (i) If $f$ is not a polynomial of degree $\le 1$ then for all $n > r+2$ we have $$\|L_{n}^{*}(f)-f\|_{r}\sim \frac{1}{n},$$ where the constants in the equivalence depend only on $f$, $R$, $A$ and $r$. (ii) If $1\le r < r_{1} < r_{1}+ 1 < 1/A$ and $f$ is not a polynomial of degree $\le p-1$ ($p\ge 1$) then $$\|[L_{n}^{*}(f)]^{(p)}-f^{(p)}\|_{r}\sim \frac{1}{n}, \mbox{ for all } n > r+2,$$ where the constants in the equivalence depend only on $f$, $R$, $A$, $r$, $r_{1}$ and $p$. \end{theorem} \begin{proof} (i) For all $|z|\le r$ and $n\in\mathbb{N}$ with $n > r+2$, we can write \begin{eqnarray*} L_n^{*}(f)(z)-f(z)&=&\frac{1}{n}\bigg[\frac{z(z+2)}{2}f^{\prime \prime}(z) \\ &&+\frac{1}{n}\cdot n^{2}\left(L_n^{*}(f)(z)-f(z)-\frac{z(z+2)}{2n}f^{\prime \prime}(z)\right)\bigg] \end{eqnarray*} Applying the inequality \begin{eqnarray*} \|F+G\|_{r}\ge|\,\|F\|_{r}-\|G\|_{r}\,|\ge\|F\|_{r}-\|G\|_{r}, \end{eqnarray*} we obtain $$\|L_n^{*}(f)-f\|_{r}\ge\frac{1}{n}\left [\left\|\frac{e_1(e_1+2)}{2}f^{\prime \prime}\right\|_{r}\right . \left .- \frac{1}{n}\cdot n^{2}\left\|L_n^{*}(f)-f-\frac{e_1(e_1+2)f^{\prime \prime}}{2n}\right\|_{r}\right ].$$ Since $f$ is not a polynomial of degree $\le 1$, we get $\left\|\frac{e_1(e_1+2)}{2}f^{\prime \prime}\right\|_{r}>0$. Indeed, supposing the contrary, it follows that $$\frac{z(z+2)}{2}f^{\prime \prime}(z)=0,\, \mbox{ for all } |z|\le r.$$ The last equality is equivalent to $f(z)=C_1z+C_2$, where $C_1, C_2$ are constants, a contradiction with the hypothesis.
Now by Theorem \ref{t:2}, we have $$n^{2}\left\|L_n^{*}(f)-f-\frac{e_1(e_1+2)f^{\prime \prime}}{2n}\right\|_{r}\le C_{r, A, M}(f),$$ for all $n > r+2$. Thus, there exists $n_{0} > r+2$ (depending on $f$ and $r$ only) such that for all $n\ge n_0,$ we have $$\left\|\frac{e_1(e_1+2)}{2}f^{\prime \prime}\right\|_{r} - \frac{1}{n}\cdot n^{2}\left\|L_n^{*}(f)-f-\frac{e_1(e_1+2)f^{\prime \prime}}{2n}\right\|_{r} \ge \frac{1}{4}\left\|e_1(e_1+2)f^{\prime \prime}\right\|_{r},$$ which implies that \begin{eqnarray*} \|L_n^{*}(f)-f\|_{r}\ge \frac{1}{4 n}\left\|e_1(e_1+2)f^{\prime \prime}\right\|_{r}, \end{eqnarray*} for all $n\ge n_0$. For $r+2 < n \le n_{0}-1$, we get $\|L_n^{*}(f)-f\|_{r}\ge\frac{M_{r,n}(f)}{n}$ with $M_{r,n}(f)=n\cdot \|L_n^{*}(f)-f\|_{r}>0$ (since $\|L_n^{*}(f)-f\|_{r}=0$ for a certain $n$ is valid only for $f$ a polynomial of degree $\le 1$, contradicting the hypothesis on $f$). Therefore, finally we have $$||L_n^{*}(f)-f||_{r}\ge \frac{C_r(f)}{n}$$ for all $n>r+2$, where $$C_r(f)=\min_{n_{0}-1\ge n > r+2}\left\{M_{r,n}(f),\dots,M_{r,n_{0}-1}(f),\frac{1}{4}\left\|e_1(e_1+2)f^{\prime \prime}\right\|_{r}\right\},$$ which combined with Theorem \ref{t:1}, (i), proves the desired conclusion. (ii) The upper estimate is exactly Theorem \ref{t:1}, (ii), therefore it remains to prove the lower estimate. Denote by $\gamma$ the circle of radius $r_{1}>r$ and center $0$. For any $|z|\le r$ and $v\in \gamma$, we have $|v-z|\ge r_{1}-r$ and by Cauchy's formula, for all $|z|\le r$ and $n > r+2$ it follows $$[L_{n}^{*}(f)]^{(p)}(z) - f^{(p)}(z)=\frac{p!}{2\pi i}\int_{\gamma}\frac{L_{n}^{*}(f)(v)-f(v)}{(v-z)^{p+1}}dv,$$ where $|v-z|\ge r_{1}-r$, for all $v\in \gamma$.
For $v\in \gamma$ we have $$L_{n}^{*}(f)(v)-f(v)$$ $$=\frac{1}{n}\left \{\frac{v(v+2)}{2}f^{\prime \prime}(v)+ \frac{1}{n}\left [n^{2}\left (L_{n}^{*}(f)(v)-f(v)-\frac{v(v+2)f^{\prime \prime}(v)}{2n}\right )\right ]\right \},$$ which, replaced in Cauchy's formula, implies $$[L_{n}^{*}(f)]^{(p)}(z) - f^{(p)}(z)=\frac{1}{n}\left \{\frac{p!}{2\pi i}\int_{\gamma}\frac{v(v+2)f^{\prime \prime}(v)}{2(v-z)^{p+1}}dv\right .$$ $$+\left .\frac{1}{n}\cdot \frac{p!}{2\pi i}\int_{\gamma}\frac{n^{2}\left (L_{n}^{*}(f)(v)-f(v)-\frac{v(v+2)f^{\prime \prime}(v)}{2n}\right )}{(v-z)^{p+1}}dv\right \}$$ $$=\frac{1}{n}\left \{\frac{1}{2}\cdot \left [z(z+2) f^{\prime \prime}(z)\right ]^{(p)}+\frac{1}{n}\cdot \frac{p!}{2\pi i}\int_{\gamma}\frac{n^{2}\left (L_{n}^{*}(f)(v)-f(v)-\frac{v(v+2) f^{\prime \prime}(v)}{2n}\right )}{(v-z)^{p+1}}dv\right \}.$$ Passing to the norm $\|\cdot \|_{r}$, we obtain $$\|[L_{n}^{*}(f)]^{(p)} - f^{(p)}\|_{r}$$ $$\ge \frac{1}{n}\left \{\frac{1}{2}\left \|\left [e_{1}(e_{1}+2)f^{\prime \prime}\right ]^{(p)}\right \|_{r} -\frac{1}{n}\left \|\frac{p!}{2\pi}\int_{\gamma}\frac{n^{2}\left (L_{n}^{*}(f)(v)-f(v)-\frac{v(v+2) f^{\prime \prime}(v)}{2n}\right )}{(v-z)^{p+1}}dv\right \|_{r}\right \},$$ where by Theorem \ref{t:2}, for all $n > r+2$ it follows \begin{eqnarray} &&\left \|\frac{p!}{2\pi}\int_{\gamma}\frac{n^{2}\left (L_{n}^{*}(f)(v)-f(v)-\frac{v(v+2)f^{\prime \prime}(v)}{2n}\right )}{(v-z)^{p+1}}dv\right \|_{r} \nonumber \\ &\le & \frac{p!}{2\pi}\cdot \frac{2\pi r_{1}n^{2}}{(r_{1}-r)^{p+1}}\left \|L_{n}^{*}(f)-f-\frac{e_{1}(e_1+2)f^{\prime \prime}}{2n}\right \|_{r_{1}} \nonumber \\ &\le & C_{r, A, M}(f)\cdot \frac{p! r_{1}}{(r_{1}-r)^{p+1}}. \nonumber \end{eqnarray} Now, by the hypothesis on $f$ we have $\left \|\left [e_{1}(e_{1}+2)f^{\prime \prime}\right ]^{(p)}\right \|_{r}>0$.
Indeed, supposing the contrary it follows that $z(z+2)f^{\prime \prime}(z)$ is a polynomial of degree $\le p-1$, which by the analyticity of $f$ obviously implies that $f$ is a polynomial of degree $\le p-1$, a contradiction with the hypothesis. For the rest of the proof, reasoning exactly as in the proof of the above point (i), we immediately get the required conclusion. \end{proof} \end{document}
\begin{document} \title[Semilinear heat equation with memory boundary condition] {Global existence of solutions of semilinear heat equation with nonlinear memory condition} \author[A. Gladkov]{Alexander Gladkov} \address{Alexander Gladkov \\ Department of Mechanics and Mathematics \\ Belarusian State University \\ 4 Nezavisimosti Avenue \\ 220030 Minsk, Belarus and Peoples' Friendship University of Russia (RUDN University) \\ 6 Miklukho-Maklaya street \\ 117198 Moscow, Russian Federation} \email{[email protected] } \author[M. Guedda]{Mohammed Guedda} \address{Mohammed Guedda \\ Universit\'{e} de Picardie, LAMFA, CNRS, UMR 6140, 33 rue Saint-Leu, F-80039, Amiens, France} \email{[email protected]} \subjclass{35K20, 35K58, 35K61} \keywords{Semilinear parabolic equation, memory boundary condition, finite time blow-up} \begin{abstract} We consider a semilinear parabolic equation with flux at the boundary governed by a nonlinear memory. We give some conditions for this problem which guarantee global existence of solutions as well as blow up in finite time of all nontrivial solutions. 
The results depend on the behavior of variable coefficients as $t \to \infty.$ \end{abstract} \maketitle \section{Introduction} We investigate the global solvability and blow-up in finite time for a semilinear heat equation with a nonlinear memory boundary condition: \begin{equation}\label{1e} u_{t} = \Delta u + c(t) u^p \,\,\, \textrm{for} \,\,\, x \in \Omega, \,\,\, t>0, \end{equation} \begin{equation}\label{1b} \frac{\partial u(x,t)}{\partial\nu} = k(t) \int_0^t u^q (x,\tau) \,d\tau \,\,\, \textrm{for} \,\,\, x \in\partial\Omega, \,\,\, t > 0, \end{equation} \begin{equation}\label{1i} u(x,0)= u_0(x) \,\,\, \textrm{for} \,\,\, x \in \Omega, \end{equation} where $\Omega$ is a bounded domain in $\mathbb{R}^n$ for $n \geq 1$ with smooth boundary $\partial \Omega,$ $\nu$ is unit outward normal on $\partial\Omega,$ $p >0$ and $q>0.$ Here $c(t)$ and $k(t)$ are nonnegative continuous functions for $ t \geq 0.$ The initial datum $ u_0(x)$ is a nonnegative $C^1(\overline\Omega)$ function which satisfies the boundary condition at $t = 0.$ In the literature for parabolic equations, memory terms in the boundary flux appear in many references. For example, in \cite{C} a memory term (\ref{1b}) with $k(t) \equiv 1, \, q=1$ is introduced for the study of Newtonian radiation and calorimetry. A linear memory boundary condition takes into account the hereditary effects on the boundary as those studied in \cite{FM1}, \cite{GP}. In the paper \cite{FM2} similar hereditary boundary conditions have been employed in models of time-dependent electromagnetic fields at dissipative boundaries. A nonlinear memory boundary condition arises in a model of capillary growth in solid tumors as initiated by angiogenic growth factors, for example (see \cite{LPSN-H}). 
Global existence and blow-up in finite time of solutions for a variety of parabolic problems with memory boundary conditions have been studied in many papers (see, for example, \cite{A,AD,ADD,ADW,DD,DW1,DW2,LQF,WCH} and the references therein). Let $Q_T=\Omega\times(0,T),\;S_T =\partial\Omega\times(0,T),$ $\Gamma_T=S_T\cup\overline\Omega\times\{0\}$, $T>0.$ \begin{definition}\label{Def1} We say that a nonnegative function $u \in C^{2,1}(Q_T ) \cap C^{1,0}({Q}_T\cup \Gamma_T)$ is a subsolution of problem (\ref{1e})--(\ref{1i}) in $Q_T$ if \begin{eqnarray}\label{E:2.0} \left\{ \begin{array}{ll} u_{t} \leq \Delta u + c(t) u^p \,\,\, \textrm{for} \,\,\, (x,t) \in Q_T, \\ \frac{\partial u(x,t)}{\partial\nu} \leq k(t) \int_0^t u^q (x,\tau) \,d\tau \,\,\, \textrm{for} \,\,\, (x,t) \in S_T, \\ u(x,0)\leq u_0(x) \,\,\, \textrm{for} \,\,\, x \in \Omega, \end{array} \right. \end{eqnarray} and $u \in C^{2,1}(Q_T ) \cap C^{1,0}({Q}_T\cup \Gamma_T)$ is a supersolution if $u \geq 0$ and it satisfies (\ref{E:2.0}) in the reverse order. We say that $u$ is a solution of problem (\ref{1e})--(\ref{1i}) in $Q_T$ if it is both a subsolution and a supersolution of (\ref{1e})--(\ref{1i}) in $Q_T.$ \end{definition} Local existence of solutions and a comparison principle for (\ref{1e})--(\ref{1i}) may be established using the same techniques as in \cite{ADD}, \cite{GladkovKavitova1}. We formulate a comparison principle which will be used below. \begin{theorem}\label{p:theorem:comp-prins} Let $u(x,t)$ and $v(x,t)$ be a supersolution and a subsolution of problem~(\ref{1e})--(\ref{1i}) in $Q_{T},$ respectively. Suppose that $u(x,t)>0$ or $v(x,t)>0$ in $Q_T\cup\Gamma_T$ if $\min(p,q)<1$. Then $u(x,t)\geq v(x,t)$ in $Q_T\cup\Gamma_T.$ \end{theorem} In this paper we analyze the influence of variable coefficients on global existence and blow-up in finite time of classical solutions of problem~(\ref{1e})--(\ref{1i}).
Our global existence and blow-up results depend on the behavior of the functions $c (t)$ and $k (t)$ as $t \to \infty.$ This paper is organized as follows. In the next section we show that all nonnegative solutions are global for $\max(p,q) \leq 1$ and present finite time blow-up of all nontrivial solutions for $\max(p,q) > 1$ as well as the existence of bounded global solutions for small initial data for $\min(p,q) > 1.$ In section~3 we investigate the case $ p=1, q > 1.$ \section{Finite time blow-up and global existence }\label{FT} We begin with the global existence of solutions of (\ref{1e})--(\ref{1i}). The proof relies on the continuation principle and the construction of a supersolution. \begin{theorem}\label{Th00} If $\max (p,q) \leq 1,$ then every solution of (\ref{1e})--(\ref{1i}) is global. \end{theorem} \begin{proof} We seek a positive supersolution $\overline u$ of (\ref{1e})--(\ref{1i}) in $Q_T$ for any positive $T.$ Since $c(t)$ and $k(t)$ are continuous functions there exists a constant $M>0$ such that $\max ( c(t), k(,t) )\leq M$ for $t \in [0,T].$ Let $\lambda_1$ be the first eigenvalue of the following problem \begin{equation*} \begin{cases} \Delta\varphi+\lambda\varphi=0,\;x\in\Omega,\\ \varphi(x)=0,\;x\in\partial\Omega \end{cases} \end{equation*} and $\varphi(x)$ be the corresponding eigenfunction with $\sup\limits_{\Omega}\varphi(x)=1$. It is well known $\varphi(x)>0$ in $\Omega$ and $\max\limits_{\partial\Omega} \partial\varphi(x)/\partial\nu < 0.$ We define \begin{equation*} \overline u = d \exp (bt) [2 - \varphi(x)], \end{equation*} where \begin{equation*} b\geq \max \left( \lambda_1 + 2M , 2M \max\limits_{\partial\Omega}\left(-q\frac{\partial\varphi}{\partial\nu}\right)^{-1}\right), \;d\geq \left\{ \sup\limits_\Omega u_0(x), 1 \right\}. 
\end{equation*} Then $\overline u$ satisfies \begin{eqnarray*}\label{} \begin{array}{ll} \overline u_{t} \geq \Delta \overline u + c(t) \overline u^p \,\,\, & \textrm{for} \,\,\, (x,t) \in Q_T, \\ \frac{\partial \overline u (x,t)}{\partial\nu} \geq k(t) \int_0^t \overline u^q (x,\tau) \,d\tau \,\,\, & \textrm{for} \,\,\, (x,t) \in S_T, \\ \overline u(x,0) \geq u_0(x) \,\,\, & \textrm{for} \,\,\, x \in \Omega. \end{array} \end{eqnarray*} Hence, $\overline u$ is the desired supersolution and by Theorem~\ref{p:theorem:comp-prins} problem (\ref{1e})--(\ref{1i}) has a global solution for any initial datum. \end{proof} We need the following assertion which was proved in \cite{GS} for a more general case. \begin{theorem}\label{Th0} Let $y (a) \geq 0,\,$ $y' (a) \geq 0,\,$ $y (a) + y' (a) > 0,\,$ $q> 1,\,$ $b (r)$ be a nonnegative continuous function for $r \geq a.$ Then for $r>a$ the inequality \begin{equation*}\label{} y''(r) \geq b(r) y^q (r) \end{equation*} has no global solutions if \begin{equation*}\label{} \int_a^\infty r^q b(r) \,dr = \infty \end{equation*} and at least one of the following conditions is fulfilled \begin{equation*}\label{} b(r) \leq \frac{B}{r^{q+1}} \,\,\, \textrm{for large values of} \,\,\, r, \, B > 0, \end{equation*} or \begin{equation*}\label{} b(r) \,\,\, \textrm{is nonincreasing for large values of} \,\,\, r. 
\end{equation*} \end{theorem} Now we prove blow-up result for $\max (p,q) > 1.$ \begin{theorem}\label{Th1} There are not nontrivial global solutions of (\ref{1e})--(\ref{1i}) if \begin{equation}\label{1.2} p >1 \,\,\, \textrm{and} \,\,\,\int_0^\infty c(t) \,dt = \infty \end{equation} or \begin{equation}\label{1.3} q >1, \,\,\, k(t) \geq \underline k(t) \geq 0 \,\,\, \textrm{and} \,\,\, \int_0^\infty t \underline k(t) \,dt = \infty \end{equation} and at least one of the following conditions is fulfilled \begin{equation}\label{1.4} \underline k(t) \leq \frac{c}{t^2} \,\,\, \textrm{for large values of} \,\,\, t, \, c > 0, \end{equation} or \begin{equation}\label{1.5} t^{1-q}\underline k(t) \,\,\, \textrm{is nonincreasing for large values of} \,\,\, t. \end{equation} \end{theorem} \begin{proof} Without loss of generality we can suppose that $u_0 (x) \not \equiv 0$ in $\Omega.$ Then from strong maximum principle and (\ref{1b}) we conclude that $u (x,t) > 0$ for $x \in \overline\Omega, \, t > 0$ and, moreover, by Theorem~\ref{p:theorem:comp-prins} we have $u (x,t) \geq \min_{\overline\Omega} u (x,t_0) >0$ for $x \in \overline\Omega, \, t \geq t_0$ and any $t_0 >0.$ Suppose at first that (\ref{1.2}) holds. Let us introduce an auxiliary function \begin{equation*}\label{1.6} w(t) = \int_\Omega u(x,t) \, dx. \end{equation*} Integrating (\ref{1e}) over $\Omega$ and using Green's identity, Jensen's inequality and boundary condition (\ref{1b}), we have \begin{eqnarray}\label{1.7} w'(t) &=& \int_\Omega \left( \Delta u(x,t) + c(t) u^p (x,t) \right) \, dx = k(t) \int_{\partial \Omega} \int_0^t u^q (x,\tau) d\tau \, dS \nonumber \\ &+& c(t) \int_\Omega u^p (x,t) \, dx \geq |\Omega|^{1-p} c(t) w^p (t). \end{eqnarray} From (\ref{1.2}) and (\ref{1.7}) we obtain blow-up of all nontrivial solutions. Suppose now that either (\ref{1.3}), (\ref{1.4}) or (\ref{1.3}), (\ref{1.5}) hold. 
Let $G (x,y;t-\tau)$ be the Green function of the heat equation with homogeneous Neumann boundary condition. We note that $G (x,y;t-\tau)$ has the following properties (see, for example, \cite{Hu_Yin}): \begin{equation}\label{1.81} G (x,y;t-\tau) \geq 0, \; x,y \in\Omega, \; 0 \leq \tau <t, \end{equation} \begin{equation}\label{1.82} \int_{\partial\Omega}{G (x,y;t-\tau)}\,dS_x \geq c_1,\; y \in\partial\Omega, \; 0 \leq \tau < t. \end{equation} Here and subsequently by $c_i\,(i\in \mathbb N)$ we denote positive constants. It is well known that problem~(\ref{1e})--(\ref{1i}) is equivalent to the equation \begin{eqnarray}\label{1.9} u(x,t) & = & \int_\Omega G (x,y;t) u_0(y) \,dy + \int_0^t \int_\Omega G (x,y;t-\tau) c(\tau) u^p(y,\tau) \,dy \, d\tau \nonumber \\ &+& \int_0^t \int_{\partial\Omega} G (x,y;t-\tau) k(\tau) \int_0^\tau u^q(y,\sigma) \, d\sigma \,dS_y \,d\tau. \end{eqnarray} Integrating (\ref{1.9}) over $\partial\Omega$ and applying (\ref{1.81}), (\ref{1.82}) and Jensen's inequality, we obtain \begin{eqnarray}\label{1.10} \int_{\partial\Omega} u(x,t) \,dS_x & \geq & c_1 \int_0^t k(\tau) \int_0^\tau \int_{\partial\Omega} u^q(y,\sigma) \, dS_y \, d\sigma \,d\tau \nonumber \\ &\geq& c_1 |\partial\Omega|^{1-q} \int_0^t k(\tau) \tau^{1-q} \left( \int_0^\tau \int_{\partial\Omega} u (y,\sigma) dS_y d\sigma \right)^q d\tau. \nonumber \\ \end{eqnarray} Let us define \begin{equation*}\label{1.10a} f(t) = \int_0^t \int_{\partial\Omega} u(x,\sigma) \,dS_x d\sigma. \end{equation*} Then from (\ref{1.10}) we have \begin{equation}\label{1.11} f'(t) \geq c_2 \int_0^t \tau^{1-q} k(\tau) f^q (\tau) d\tau . \end{equation} After integration of (\ref{1.11}) over $[0,t]$ we obtain \begin{equation*}\label{} f (t) \geq c_2 \int_0^t (t - \tau) \tau^{1-q} k(\tau) f^q (\tau) d\tau . \end{equation*} Now we denote \begin{equation*}\label{} g(t) = c_2 \int_0^t (t - \tau) \tau^{1-q} k(\tau) f^q (\tau) d\tau . 
\end{equation*} Then \begin{equation}\label{1.12} g''(t) = c_2 t^{1-q} k(t) f^q (t) \geq c_2 t^{1-q} \underline k(t) g^q (t) . \end{equation} Applying Theorem~\ref{Th0} to (\ref{1.12}), we complete the proof. \end{proof} To formulate global existence result for problem (\ref{1e})--(\ref{1i}) we suppose that \begin{equation}\label{1.13} \int_0^\infty \left( c(t) + t k(t) \right) \,dt < \infty \end{equation} and there exist positive constants $\alpha,\;t_0$ and $K$ such that $\alpha>t_0$ and \begin{equation}\label{1.14} \int_{t-t_0}^t {\frac{\tau k(\tau)}{\sqrt{t-\tau}}} \, d\tau \leq K \, \textrm{ for } \, t \geq \alpha. \end{equation} \begin{theorem}\label{Th2} Let $\min (p,q) >1$ and (\ref{1.13}), (\ref{1.14}) hold. Then problem (\ref{1e})--(\ref{1i}) has bounded global solutions for small initial data. \end{theorem} \begin{proof} Let $y(x,t)$ be a solution of the following problem \begin{equation*}\label{vsp} \left\{ \begin{array}{ll} y_t = \Delta y, \; x\in\Omega, \; t>0 \\ \frac{\partial y(x,t)}{\partial \nu} = t k(t), \; x \in\partial\Omega, \; t>0, \\ y(x,0)= 1,\; x\in\Omega. \end{array} \right. \end{equation*} According to Lemma 3.3 of \cite{GladkovKavitova2} there exists a positive constant $Y$ such that \begin{equation*}\label{} 1 \leq y(x,t) \leq Y, \, x\in\Omega, \; t>0. \end{equation*} Next, for any $T >0$ we construct a positive supersolution of (\ref{1e})--(\ref{1i}) in $Q_T$ in such a form that \begin{equation*}\label{} \overline u (x,t) = \alpha z(t) y(x,t), \end{equation*} where $ \alpha >0$ and \begin{equation*}\label{} z (t) = \left( 1 + (p-1) (\alpha Y )^{p-1} \int_t^\infty c(\tau) \, d\tau \right)^{-\frac{1}{p-1}}. 
\end{equation*} It is easy to check that $z(t)$ is the solution of the equation \begin{equation*}\label{} z' (t) - (\alpha Y )^{p-1} c(t) z^p (t) = 0 \end{equation*} and satisfies the inequality $ z (t) \leq 1.$ After simple computations it follows that \begin{eqnarray*} \overline u_t - \Delta \overline u - c(t) \overline u^p & = & \alpha z' y + \alpha z y_t - \alpha z \Delta y - \alpha^p c(t) z^p y^p\\ &\geq& \alpha y (z' - \alpha^{p-1} Y^{p-1} c (t) z^p) = 0, \;x\in\Omega, \; t>0, \end{eqnarray*} and \begin{equation*} \frac{\partial\overline u}{\partial\nu} - k(t) \int_0^t \overline u^q (x,\tau) \, d\tau \geq \alpha t k(t) z(t) (1 - \alpha^{q-1} Y^q) \geq 0, \; x\in\partial\Omega, \;t>0, \end{equation*} if $\alpha \leq Y^{q/(q-1)}.$ Thus, by Theorem~\ref{p:theorem:comp-prins} there exist bounded global solutions of (\ref{1e})--(\ref{1i}) for any initial data satisfying the inequality \begin{equation*}\label{} u_0 (x) \leq \alpha \left( 1 + (p-1) (\alpha Y )^{p-1} \int_0^\infty c(\tau) \, d\tau \right)^{-\frac{1}{p-1}}. \end{equation*} \end{proof} Let us introduce the following notations: \begin{equation}\label{1.17} \ln_1 t= \ln t, \, \ln_{j+1} t=\ln(\ln_j t), \, l_j (t)= \prod_{i=1}^{j} \ln_i t, \, l_{j,\gamma} (t) = l_j (t) \ln_j^{\gamma} t, \, j \in \mathbb{N}, \, \gamma>0. \end{equation} \begin{remark}\label{Rem1} Arguing in the same way as in \cite{GladkovKavitova2} it is easy to show that~(\ref{1.14}) is a necessary condition for the boundedness of global solutions for~(\ref{1e})--(\ref{1i}). It follows from Theorem~\ref{Th1} and Theorem~\ref{Th2} that the condition (\ref{1.2}) is optimal for blow-up in finite time of all nontrivial solutions of (\ref{1e})--(\ref{1i}). 
Furthermore, from Theorem~\ref{p:theorem:comp-prins} and Theorem~\ref{Th1} we conclude that problem (\ref{1e})--(\ref{1i}) has no nontrivial global solutions if $q > 1$ and \begin{equation*}\label{} k (t) \geq \frac{c_3 }{t^2 l_j (t)} \,\,\, \textrm{for} \,\,\, j \in \mathbb{N} \,\,\, \textrm{and large values of} \,\,\, t. \end{equation*} On the other hand, from Theorem~\ref{Th2} we obtain the existence of nontrivial bounded global solutions of (\ref{1e})--(\ref{1i}) if $\min (p,q) > 1,$ \begin{equation*}\label{} \int_0^\infty c(t) \,dt < \infty \,\,\, \textrm{and} \,\,\, k (t) \leq \frac{c_4 }{t^2 l_{j,\gamma} (t) } \,\,\, \textrm{for} \,\,\, j \in \mathbb{N}, \gamma>0 \,\,\, \textrm{and large values of} \,\,\, t. \end{equation*} \end{remark} \section{Global existence and blow-up for $p=1, \, q > 1$ }\label{p=1, q>1} In this section we obtain sufficient conditions for the existence and nonexistence of global solutions of problem (\ref{1e})--(\ref{1i}) for $p=1, q>1.$ \begin{theorem}\label{Th3} Let $p=1, q>1.$Then there are not nontrivial global solutions of (\ref{1e})--(\ref{1i}) if $k(t) \geq \underline k(t) \geq 0,$ \begin{equation}\label{2.2} \int_0^\infty t^{1-q} \exp \left( -\int_0^t c(s) \, ds \right) \left( \int_0^t \exp \left( \int_0^\tau c(s) \, ds \right) \, d\tau \right)^q \underline k(t) \,dt = \infty \end{equation} and at least one of the following conditions is fulfilled \begin{equation}\label{2.3} t^{1-q} \exp \left( - 2 \int_0^t c(s) ds \right) \left( \int_0^t \exp \left( \int_0^\tau c(s) \, ds \right) \, d\tau \right)^{q+1} \underline k(t) \leq C, \, C > 0, \end{equation} for large values of $t,$ or \begin{equation}\label{2.4} t^{1-q}\exp \left( - 2\int_0^t c(s) \, ds \right) \underline k(t) \, \textrm{is nonincreasing for large values of} \,\, t. 
\end{equation} \end{theorem} \begin{proof} We can suppose that $u_0 (x) \not \equiv 0,$ since otherwise $u (x,t) \equiv 0.$ Let us change unknown function in the following way \begin{equation}\label{2.0} u (x,t) = v (x,t) \exp \int_0^t c(\tau) \, d\tau. \end{equation} Then $ v (x,t) $ is a solution to the problem \begin{eqnarray}\label{2.1} \left\{ \begin{array}{ll} v_{t} = \Delta v, \, & x \in \Omega, \, t>0, \\ \frac{\partial v(x,t)}{\partial\nu} = k(t) \int_0^t \exp \left( q \int_0^\tau c(s) \, ds - \int_0^t c(s) \, ds \right) v^q (x,\tau) \,d\tau, \, & x \in\partial\Omega, \, t > 0, \\ v(x,0)= u_0(x), \, & \, x \in \Omega. \end{array} \right. \end{eqnarray} It is well known that problem~(\ref{2.1}) is equivalent to the equation \begin{eqnarray}\label{2.5} && v (x,t) = \int_\Omega G (x,y;t) u_0 (y) \,dy \nonumber \\ &+& \int_0^t \int_{\partial\Omega} G (x,y;t-\tau) k(\tau) \int_0^\tau \exp \left( q\int_0^\sigma c(s) ds - \int_0^\tau c(s) ds \right) v^q(y,\sigma) d\sigma dS_y d\tau. \nonumber \\ \end{eqnarray} Integrating (\ref{2.5}) over $\partial\Omega$ and applying (\ref{1.81}), (\ref{1.82}) and Jensen's inequality, we obtain \begin{eqnarray}\label{2.6} &&\int_{\partial\Omega} v(x,t) \,dS_x \nonumber \\ &\geq & c_1 |\partial\Omega|^{1-q} \int_0^t \tau^{1-q} k(\tau) \exp \left( -\int_0^\tau c(s) ds \right) \left( \int_0^\tau \int_{\partial\Omega} \exp \left( \int_0^\sigma c(s) ds \right) v(y,\sigma) dS_y d\sigma \right)^q d\tau. \nonumber \\ \end{eqnarray} We set \begin{equation}\label{2.7} f(t) = \int_0^t \int_{\partial\Omega} \exp \left( \int_0^\sigma c(s) ds \right) v(y,\sigma) \,dS_y d\sigma. \end{equation} Then from (\ref{2.6}) and (\ref{2.7}) we deduce that $f'(t) >0$ for $t >0$ and \begin{equation}\label{2.8} f'(t) \geq c_5 \exp \left( \int_0^t c(s) ds \right) \int_0^t \tau^{1-q} \exp \left( -\int_0^\tau c(s) ds \right) k(\tau) f^q (\tau) d\tau . 
\end{equation} After integration of (\ref{2.8}) over $[0,t]$ we obtain \begin{equation*}\label{} f (t) \geq c_5 \int_0^t \exp \left( \int_0^\sigma c(s) ds \right) \int_0^\sigma \tau^{1-q} \exp \left( -\int_0^\tau c(s) ds \right) k(\tau) f^q (\tau) d\tau d\sigma. \end{equation*} Defining \begin{equation*}\label{} g(t) = c_5 \int_0^t \exp \left( \int_0^\sigma c(s) ds \right) \int_0^\sigma \tau^{1-q} \exp \left( -\int_0^\tau c(s) ds \right) k(\tau) f^q (\tau) d\tau d\sigma \end{equation*} we have $f(t) \geq g(t).$ Moreover, \begin{equation}\label{2.9} g''(t) \geq c(t) g' (t) + c_5 t^{1-q} \underline k(t) g^q (t). \end{equation} Multiplying (\ref{2.9}) by $ \exp \left( - \int_0^t c(s) ds \right),$ we obtain \begin{equation}\label{2.10} \left( \exp \left( - \int_0^t c(s) ds \right) g'(t) \right)' \geq c_5 t^{1-q} \exp \left( - \int_0^t c(s) ds \right) \underline k(t) g^q (t). \end{equation} Let us change variable and unknown function in the following way \begin{equation*}\label{} s = \int_0^t \exp \left( \int_0^\tau c(\sigma) d\sigma \right) \, d\tau, \,\,\, \phi (s) = g (t) \end{equation*} and rewrite (\ref{2.10}) as \begin{equation}\label{2.11} \phi'' (s) \geq c_5 t^{1-q} \underline k(t) \exp \left( -2 \int_0^t c(\sigma) d\sigma \right) \phi^q (s). \end{equation} Applying Theorem~\ref{Th0} to (\ref{2.11}), we complete the proof. \end{proof} \begin{theorem}\label{Th4} Let $p =1,\, q >1,$ \begin{equation}\label{2.12} \int_0^\infty k(t) \exp \left( -\int_0^t c(s) \, ds \right) \int_0^t \exp \left( q \int_0^\tau c(s) \, ds \right) \, d\tau \,dt < \infty \end{equation} and there exist positive constants $\alpha,\;t_0$ and $K$ such that $\alpha > t_0$ and \begin{equation}\label{2.13} \int_{t-t_0}^t {\frac{k(\tau) \exp \left( -\int_0^\tau c(s) \, ds \right) \int_0^\tau \exp \left( q \int_0^\sigma c(s) \, ds \right) \, d\sigma \,d\tau }{\sqrt{t-\tau}}} \leq K \, \textrm{ for } \, t \geq \alpha. 
\end{equation} Then problem (\ref{1e})--(\ref{1i}) has global solutions for small initial data. If, in addition, \begin{equation}\label{2.14} \int_0^\infty c(t) \, dt < \infty \end{equation} then problem (\ref{1e})--(\ref{1i}) has bounded global solutions for small initial data. \end{theorem} \begin{proof} To prove the theorem we construct a positive supersolution of (\ref{1e})--(\ref{1i}) in such a form that \begin{equation*}\label{} \overline u (x,t) = \alpha \exp \left( \int_0^t c(s) \, ds \right) h(x,t), \end{equation*} where $h(x,t)$ is a solution to the following problem \begin{equation}\label{vsp2} \left\{ \begin{array}{ll} h_t = \Delta h, \; x\in\Omega, \; t>0, \\ \frac{\partial h(x,t)}{\partial \nu} = k(t) \exp \left( -\int_0^t c(s) \, ds \right) \int_0^t \exp \left( q \int_0^\tau c(s) \, ds \right) \, d\tau , \; x \in\partial\Omega, \; t>0, \\ h(x,0)= 1,\; x\in\Omega. \end{array} \right. \end{equation} As it is proved in \cite{GladkovKavitova2} the solution of (\ref{vsp2}) satisfies the inequalities \begin{equation*}\label{} 1 \leq h(x,t) \leq H, \, x\in\Omega, \; t>0 \end{equation*} for some $H>0.$ It is easy to check that $\overline u (x,t)$ is the supersolution of (\ref{1e})--(\ref{1i}) if $\alpha \leq H^{-q/(q-1)}$ and $u_0 (x) \leq \alpha.$ Moreover, $\overline u (x,t)$ is bounded function under the condition (\ref{2.14}). \end{proof} \begin{remark}\label{Rem2} Let $p=1, q>1.$ Arguing in the same way as in \cite{GladkovKavitova2} it is easy to prove from (\ref{2.0}) and (\ref{2.1}) that both conditions (\ref{2.13}) and (\ref{2.14}) are necessary for the boundedness of global solutions of~(\ref{1e})--(\ref{1i}). 
Furthermore, we conclude from Theorem~\ref{p:theorem:comp-prins}, Theorem~\ref{Th3} and Theorem~\ref{Th4} that problem (\ref{1e})--(\ref{1i}) has no nontrivial global solutions if \begin{equation*}\label{} c (t) \geq \frac{\beta }{t} \,\,\, \textrm{for large values of} \,\,\, t \,\,\, \textrm{and some} \,\,\, \beta > 0 \end{equation*} and \begin{equation*}\label{} k (t) \geq \frac{c_6 }{t^{\beta (q-1) + 2} l_j (t)} \,\,\, \textrm{for} \,\,\, j \in \mathbb{N} \,\,\, \textrm{and large values of} \,\,\, t \end{equation*} and problem (\ref{1e})--(\ref{1i}) has nontrivial bounded global solutions if \begin{equation*}\label{} c (t) \leq \frac{\omega }{t} \,\,\, \textrm{for large values of} \,\,\, t \,\,\, \textrm{and some} \,\,\, \omega > 0 \end{equation*} and \begin{equation*}\label{} k (t) \leq \frac{c_7}{t^{\omega (q-1) + 2} l_{j,\gamma} (t)} \,\,\, \textrm{for} \,\,\, j \in \mathbb{N}, \gamma >0 \,\,\, \textrm{and large values of} \,\,\, t, \end{equation*} where $l_j (t)$ and $l_{j,\gamma} (t)$ were introduced in (\ref{1.17}). \end{remark} \section*{Funding} The first author was supported by the "RUDN University Program 5-100" and the state program of fundamental research of Belarus (grant 1.2.03.1). The second author was supported by DAI-UPJV F-Amiens. \end{document}
\begin{document} \title{Fast Single Photon Detectors and real-time Key Distillation: Enabling High Secret Key Rate QKD Systems} \author{Fadri Gr\"unenfelder}\email{[email protected]} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Alberto Boaron} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Matthieu Perrenoud} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Giovanni V. Resta} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Davide Rusca} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Claudio Barreiro} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Rapha\"el Houlmann} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Rebecka Sax} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \author{Lorenzo Stasi} \affiliation{Group of Applied Physics, Rue de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \affiliation{ID Quantique SA, Rue Eugène-Marziano 25, CH-1227 Acacias - Genève Switzerland{}} \author{Sylvain El-Khoury} \affiliation{ID Quantique SA, Rue Eugène-Marziano 25, CH-1227 Acacias - Genève Switzerland{}} \author{Esther Hänggi} \affiliation{Lucerne School of Computer Science and Information Technology, Suurstoffi 1, CH-6343 Rotkreuz, Switzerland{}} \author{Nico Bosshard} \affiliation{Lucerne School of Computer Science and Information Technology, Suurstoffi 1, CH-6343 Rotkreuz, Switzerland{}} \author{Félix Bussières} \affiliation{ID Quantique SA, Rue Eugène-Marziano 25, CH-1227 Acacias - Genève Switzerland{}} \author{Hugo Zbinden} \affiliation{Group of Applied Physics, Rue 
de l'Ecole-de-Médecine 20, CH-1211 Genève 4, Switzerland{}} \begin{abstract} Quantum Key Distribution has made continuous progress over the last 20 years and is now commercially available. However, the secret key rates (SKR) are still limited to a few \si{\mega \bps}. Here, we present a custom multipixel superconducting nanowire single-photon detectors and fast acquisition and real-time key distillation electronics, removing two roadblocks and allowing an increase of the SKR of more than an order of magnitude. In combination with a simple 2.5 GHz clocked time-bin quantum key distribution system, we can generate secret keys at a rate of \SI{64}{\mega\bps} over a distance of \SI{10.0}{\kilo\meter} and at a rate of \SI{3.0}{\mega\bps} over a distance of \SI{102.4}{\kilo\meter} with real-time key distillation. \end{abstract} \maketitle Quantum key distribution (QKD) allows the exchange of cryptographic keys at a distance without assumptions on the technological limits of a possible eavesdropper, in particular their computational power \cite{Bennett1984,Ekert1991}. In contrast, currently used public key systems rely on computationally demanding tasks \cite{Rivest1978,Koblitz1987}. While nowadays an eavesdropper is bound to use classical computers, this could change in the near future with the advent of large scale quantum computers. This renders an eavesdropper able to use powerful attacks which today's public key systems cannot withstand \cite{Shor1995}. The security of QKD, however, is solely based on the laws of quantum mechanics. Therefore, together with the One-Time-Pad \cite{Shannon1949}, private communication can be ensured even in a future where quantum computers are widely available. Since the advent of the QKD era with the BB84 protocol \cite{Bennett1984}, a variety of other protocols have been developed \cite{Ekert1991,Lo2012,Lucamarini2018,Ralph1999}. 
Although the complexity and level of device-independence differ between protocols, the main goals remain the same, namely to increase the distance over which a secret key can be generated or, conversely, to maximize the secret key rate (SKR) over a certain distance. To give some context, we consider the use-case of encrypted video conferencing. The United States Federal Communications Commission recommends a download rate of \SI{6}{\mega\bps} for this application, so with One-Time-Pad encryption one needs a SKR equal to this rate per user. For more demanding applications, such as data centers, much higher SKRs are required. Recently, it was demonstrated that a single QKD link can achieve a SKR up to \SI{13.72}{\mega\bps} over a channel equivalent to \SI{10}{\kilo\meter} of single-mode fiber \cite{Yuan2018}. A proof-of-principle experiment using space division multiplexing showed that it would be possible to achieve a SKR of \SI{105.7}{\mega\bps} over a distance of \SI{7.9}{\km} by using 37 QKD transmitters and receivers with a multi-core fiber as quantum channel \cite{Bacco2019}. In order to go to even higher secret key rates without multiplexing, a QKD system needs to fulfill a couple of requirements: Obviously, the transmitter must emit qubits at a high repetition rate. However, a high repetition is only useful, in particular at shorter distances, if i) the single photon detectors are able to count at high rates (with high efficiency and low timing jitter), ii) the readout and sifting electronics are able to process these rates and iii) the post-processing unit is capable of correcting the key (with low leakage) and performing privacy amplification in \textbf{real-time}. In this paper, we report on our efforts of improving points i) to iii). In particular, we present a custom Superconducting Nanowire Single Photon Detector (SNSPD) featuring high count rates and high efficiency. 
We discuss how to optimize the parameters of a QKD system for high secret key rates and demonstrate an implementation generating a SKR of more than \SI{60}{\mega \bps} over \SI{10}{\km} and \SI{2}{\mega\bps} over 100 km. We use a simplified BB84 with time-bin encoding and one decoy state clocked at 2.5 GHz (time bins of 100 ps seperated by 100 ps) \cite{Rusca2018, Rusca2018b, Boaron2018b}, but the presented principles are valid also for polarisation based schemes (see \cite{Li2022}). \begin{figure} \caption{\label{fig:SNSPD} \label{fig:SNSPD} \end{figure} Let's start with a description and characterization of the SNSPD developed in-house. We design the detector such that high efficiency, low jitter and a high maximum count rate can be achieved \textbf{simultaneously}. This is not an easy task, as the jitter of an SNSPDs tends to increase at high count rates. As the superconducting material for our nanowires, we use niobium-titanium nitride (NbTiN) that has been sputtered from a NbTi target in a nitrogen-rich atmosphere. The superconducting film has a thickness of around \SI{9}{\nm} and exhibits a critical temperature ($T_\text{c}$) of \SI{8.5}{\kelvin}. The detector is composed of 14 independent pixels arranged in an interleaved geometry (see \autoref{fig:SNSPD}). The number of pixels is chosen to comply with the requirements of Bob, and the generated signal is amplified at \SI{40}{\K} with a custom-made amplifier board. Thanks to the large number of pixels and the interleaved design, which guarantees an uniform illumination of the pixels, the probability that two detections occur during the recovery time on the same pixel is minimized. The detector is integrated in an optical cavity, designed to maximize photon absorption at \SI{1550}{\nm}, and exhibits a maximum system detection efficiency of 82\%, see \autoref{fig:eff_vs_counts_z}. 
The detector covers the same area as a conventional single-pixel SNSPD (around \SI{200}{\um^2}), thus the length of each nanowire is greatly reduced, allowing for much faster recovery time (on average, $< \SI{10}{\ns}$ to be back at full efficiency) compared to a single-meander SNSPD. The fast recovery time of each pixel directly translates into the capability to reach \si{\GHz} detection rates when reading all the 14 pixels simultaneously. However, Bob must measure the arrival time of the pulses and be sure to assign them to the correct time bin, that has a \SI{100}{\ps} duration, thus posing a stringent requirement on the jitter of the detector and of the whole read-out system. \begin{figure} \caption{\label{fig:eff_vs_counts_z} \label{fig:eff_vs_counts_z} \end{figure} We characterize the jitter of the 14-pixel detector at a count rate of \SI{1}{\mega\cps}. There, the average jitter for the 14 pixels is \SI{22}{\ps}, which represents a good starting point for when the count rate will be pushed to the limit. In fact, at the high rate of the QKD system, more and more photon detections occur when the bias current in the SNSPD has not yet reached its maximum value, i.e. before the current and efficiency have fully recovered, thus causing an increase of the jitter. One contribution to the jitter at high detection rates is the variation in the amplitude of the detection signal. This contribution can be minimized by using constant fraction discriminators (CFDs) instead of threshold discriminators. We designed and built CFDs which are optimized for the use with our multipixel detector. With this readout electronics we obtained a jitter below \SI{47}{\ps} and an efficiency of 64\% at a count rate of 320 Mcps. For error correction, we use a quasi-cyclic low-density parity check code (LDPC) with a syndrome size of $1/6$ which is implemented in the FPGAs. Bob calculates the syndrome and sends it to Alice. 
The resource-intensive error correction core is running on the FPGA II (Xilinx Virtex-6 LXT, see \autoref{fig:setup}) on Alice's side. One core can correct up to \SI{110}{\mega\cps}. By simply running two cores in parallel on the same FPGA, we achieve a throughput of \SI{220}{\mega\cps}, which is high enough for our experiment. The privacy amplification is implemented on a consumer-type computer. It receives the sifted key via a Generation 2 PCIe x4 connector (maximum throughput of \SI{4}{\giga\byte}) from the FPGA. It runs on a consumer-type graphics processing unit (RTX 2070 Super Ventus OC) and has a maximum throughput of \SI{3.4}{\giga\bps}. The block size of the algorithm is $2^{27}~\text{bit} \approx 134~\text{Mbit}$ and the secrecy parameter we used is $10^{-15}$ (for more details on the extraction see \cite{Bosshard2021}). The ultra-fast SNSPD and the post-processing error correction are implemented in our QKD system, as shown in \autoref{fig:setup}. We use the simplified BB84 with time-bin encoding and one decoy state \cite{Rusca2018, Rusca2018b, Boaron2018b}. Alice prepares the states shown in \autoref{fig:states}. The two states $\ket{0}$ and $\ket{1}$ form the Z basis and $\ket{+}$ the X basis. She chooses the basis at random with probabilities $p_\text{Z,A}$ for the Z basis and $1 - p_\text{Z,A}$ for the X basis. In case she chooses the Z basis, she picks either $\ket{0}$ or $\ket{1}$ with equal probability. Additionally, she chooses at random between two mean photon numbers $\mu_0$ and $\mu_1$ with probabilities $p_{\mu_0}$ and $p_{\mu_1}$. Bob picks a measurement basis at random with probabilities $p_\text{Z,B}$ for the Z basis and $1 - p_\text{Z,B}$ for the X basis. The secret key rate is generated from the correlations in the Z basis and they use a privacy amplification block size of $2^{27}~\text{bit} \approx 134~\text{Mbit}$. The X basis is used to find an upper bound on the phase error rate via the decoy method \cite{Rusca2018b}. 
A distributed feedback InGaAsP/InP multi-quantum well laser diode is used to create a train of phase-randomized pulses with a full width at half maximum (FWHM) of \laserFWHM{} and at a rate of \qubitRepRate{}. Then the pulses pass an imbalanced Michelson interferometer with a time difference of \SI{200}{\pico\second} between the two arms. The states are encoded using an intensity modulator (IM). The three states with the two mean photon levels required for this protocol are shown in \autoref{fig:states}. In the Z basis, we have a pulse either in the early or in the late time bin. The state in the X basis carries pulses in both time bins, but with half the intensity than the pulses in the Z basis. Alice chooses the Z basis with a probability $p_\text{Z,A}$ depending on the distance. In any case, the value of $p_\text{Z,A}$ is well above 0.5. These two pulses have a fixed phase relation, while in between the states, the phase is randomized due to the gain-switching of the laser. As a quantum channel serves a ULL single mode fiber. Its dispersion is pre-compensated by dispersion-compensating fiber. At the other end of the channel, at Bob's, the basis is selected passively with the help of a fibre coupler. The optimal probability $p_\text{Z,B}$ is close to unity. This means that the sifting efficiency is significantly higher than in the standard BB84. In the Z basis, Bob measures the time of arrival of the signal with the multipixel detector described above. In the X basis, the pulses pass through a Michelson interferometer with the same delay as the one of Alice. Here, the requirement on the detector is less stringent due to the high bias of the basis choice towards the Z basis. We choose a MoSi SNSPD with a parallel design which exhibits a timing jitter below \SI{55}{\pico\second} and an efficiency of 0.85{} at a count rate of \SI{2}{\mega\cps}. 
The states prepared by Alice and the events measured by Bob are registered by FPGA I and III (Xilinx Kintex-7 FPGA KC705 Evaluation Kit). The outputs of the detectors are interfaced to the FPGA with an in-house made card. This card can delay the 14 channels of the multipixel detector and the channel of the X basis detector individually, allowing us to synchronize them. Further, the card combines the 14 channels of the multipixel detectors into 7 channels with OR-gates. At very high count rates, the combining of channels will mask some detections. By comparing the count rate of the QKD system with the count rate measured with time-to-digital converters, we found that due to the OR-gate masking, we lose 2.8\% of the counts at \SI{320}{\mega\cps}. The two FPGAs communicate directly via a service channel to perform sifting and error correction in real-time. \begin{figure} \caption{\label{fig:states}} \end{figure} \begin{figure} \caption{\label{fig:jitter}} \end{figure} \begin{figure*} \caption{\label{fig:setup}} \end{figure*} We performed secret key exchanges through optical fibers with lengths of \SI{10.0}{\kilo\meter} and \SI{102.4}{\kilo\meter} for typically half an hour. The results over a privacy amplification block (for the two distances) are shown in \autoref{tab:results_skr}, together with the relevant parameters. The mean photon number of the signal and decoy states and the probabilities to choose the Z basis at Alice and Bob were obtained by numerical optimization for each distance. We manage to exchange secret keys at a rate of \SI{64}{\mega\bps} over a distance of \SI{10.0}{\kilo\meter} and at a rate of \SI{3.0}{\mega\bps} over a distance of \SI{102.4}{\kilo\meter}. \begin{table*}[ht] \centering \begin{tabular}{ c c c c c c c c c c c}\toprule fiber length & att. 
& $\mu_0$ & $\mu_1$ & $p_{\mu_0}$ & $p_\text{Z,A}$ & $p_\text{Z,B}$ & $R_\text{sift}$ & $\phi_\text{Z}$ & $Q_\text{Z}$ & SKR \\ (km) & (dB) & & & & & & (Mbps) & (\%) & (\%) & (Mbps) \\ \midrule 10.0{} & 1.58{} & 0.49{} & 0.22{} & 0.74{} & \pZAShort{} & \pZBShort{} & 159.4{} & 0.8{} & 0.4{} & 64{} \\ 102.4{} & 16.34{} & 0.46{} & 0.20{} & 0.79{} & \pZALong{} & \pZBLong{} & 7.8{} & 1.0{} & 0.3{} & 3.0{} \\ \bottomrule \end{tabular} \caption{\label{tab:results_skr} Measured secret key rate (SKR) and corresponding experimental parameters. The variables $\mu_0$ and $\mu_1$ stand for the mean photon number of the signal and decoy states, $p_{\mu_0}$ and $p_{\mu_1}$ are the corresponding probabilities to choose these values, $p_\text{Z,A}$ and $p_\text{Z,B}$ are the probabilities of Alice and Bob to choose the Z basis, $R_\text{sift}$ is the sifted key rate, $\phi_\text{Z}$ is the phase error rate and $Q_\text{Z}$ is the QBER Z.} \end{table*} While these are best-in-class results, there is still some room for improvements. Indeed, our QKD scheme was designed to be simple and suitable for a commercial device. To push the system to its limits in terms of maximum secret key rate, some adaptations could be made. Due to our high repetition rate and consequently small time bins we lose the detections that fall outside the time bins (see \autoref{fig:jitter}). Whereas further reducing the timing jitter is not simple, we could just double the multi-pixel detectors at Bob. The advantage would be twofold: the detection rate would be halved on each detector, leading to an almost 10\% increase in detection efficiency, see \autoref{fig:eff_vs_counts_z}, and a decrease in jitter. Our protocol allows for only one detector in the monitoring basis (projection in only one eigenstate of the X basis). In order to guarantee the security in this configuration we also need to monitor events where Alice uses the Z basis and Bob measures in the X basis and vice versa. 
Moreover, we also record events depending on which state was sent previously (some detections depend on two subsequent pulses, see \cite{Rusca2018b} for details). This forces us to choose, in the finite key scenario, a lower $p_\text{Z,A}$ which lowers the possible achievable sifted key rate. Finally, the error correction is still not optimal. The LDPC implementation used has a leakage of 17\% of the sifted key rate at a QBER of 0.5\%. This is much higher than the Shannon limit of 5\% of leakage. Rate-adaptive LDPC codes could help minimize the leakage~\cite{Elkouss2010,Kiktenko2017}, but the corresponding studies do not give information about the leakage at very low QBER. Another solution would be to implement cascade (see \cite{Mao2022}), which would allow us to approach the Shannon limit; in fact, the state of the art allows for an efficiency of 1.038 and more than 500 MHz of throughput. Implementing these improvements would allow us to achieve about \SI{140}{\mega\bps} at 10 km (under the condition that the other parameters stay the same). In conclusion, we demonstrated secret key rates up to \SI{64}{\mega\bps} over a distance of \SI{10.0}{\kilo\meter}. This achievement was possible thanks to a QKD system working at a high repetition rate of \qubitRepRate, coupled with our custom SNSPDs and readout electronics which allow us to detect with low jitter and high efficiency at a high count rate. This result paves the way for secret-key-demanding applications like real-time one-time-pad secured video encryption in a metropolitan area. \section*{Data Availability} The data that support the findings of this study are available from the corresponding author upon reasonable request. \section*{Code Availability} The computer code that supports the findings of this study is available from the corresponding author upon reasonable request. \end{document}
\begin{document} \title{The Prasad conjectures for $\mathrm{GSp}_4$ and $\mathrm{PGSp}_4$} \author{Hengfei Lu} \date{} \address{Department of Mathematics, Weizmann Institute of Science, 234 Herzl St. P.O.B. 26, Rehovot 7610001, Israel} \email{[email protected]} \subjclass[2010]{Primary 22E50; Secondary 11F27} \keywords{Theta lift, Langlands correspondence, see-saw diagrams, quaternionic Hermitian groups, the Prasad conjecture } \begin{abstract} In this paper, we use the theta correspondence between $\mathrm{GSp}_4(E)$ and $\mathrm{GO} (V)$ to study the $\mathrm{GSp}_4$-distinction problems over a quadratic extension $E/F$ of nonarchimedean local fields of characteristic $0$. With a similar strategy, we investigate the distinction problem for the pair $(\mathrm{GSp}_4(E),\mathrm{GSp}_{1,1}(F) )$, where $\mathrm{GSp}_{1,1}$ is the unique inner form of $\mathrm{GSp}_4$ defined over $F$. Then we verify the Prasad conjecture for a discrete series representation $\tau$ of $\mathrm{PGSp}_4(E)$. \end{abstract} \maketitle \tableofcontents \section{Introduction} Let $F$ be a finite field extension over $\mathbb{Q}_p$ and $E$ be a quadratic extension over $F$ with associated Galois group $\mathrm{Gal}(E/F)=\{1,\sigma\}$ and associated quadratic character $\omega_{E/F}$ of $F^\times$. Let $W_F$ be the Weil group of $F$ and $WD_F$ be the Weil-Deligne group. Then $\omega_{E/F}$ is a quadratic character of $W_F$ with kernel $W_E.$ Let $\mathbf{G}$ be a connected reductive group defined over $F$ and $\mathbf{G}(F)$ (resp. $\mathbf{G}(E)$) be the $F$-rational (resp. $E$-rational) points. Given a smooth representation $\tau$ of $\mathbf{G}(E)$ and a character $\chi$ of $\mathbf{G}(F)$, we say that $\tau$ is $(\mathbf{G}(F),\chi)$-distinguished or has a nonzero $(\mathbf{G}(F),\chi)$-period if \[\mathrm{Hom}_{\mathbf{G}(F)}(\tau,\chi)\neq0. \] If $\chi$ is the trivial character, then $\tau$ is called $\mathbf{G}(F)$-distinguished. 
There exists a rich literature, such as \cite{beuzart2017distinguished,flicker1991ondist,gan22arithmeticity,hengfei2016new,matringe2009distinction,prasad2015arelative}, trying to classify all $\mathbf{G}(F)$-distinguished representations of $\mathbf{G}(E)$. The method often used to study the distinction problems is the relative trace formula, such as \cite{beuzart2017distinguished,flicker1994quaternionic}, which is powerful especially for the global period problems. This paper focuses on the local period problems for $\mathbf{G}=\mathrm{GSp}_4,\mathrm{PGSp}_4$ and their inner forms. The main tool in this paper is the local theta correspondence appearing in \cite{gan2011theta,Kudla1992,yamana2011deg}. \par Let $V$ be the unique non-split quaternion algebra $D_E$ with quadratic form $N_{D_E}$ over $E,$ or the split $6$-dimensional quadratic space $\mathbb{H}_E^3$ over $E.$ Then \[\mathrm{GSO}(V)\cong \begin{cases}\mathrm{GSO}_{4,0}(E)=D_E^\times(E)\times D_E^\times(E)/\{(t,t^{-1})\},&\mbox{ if }V=D_E,\\ \mathrm{GSO}_{3,3}(E)=\mathrm{GL}_4(E)\times E^\times/\{(t^{-1},t^2) \},&\mbox{ if }V=\mathbb{H}_E^3, \end{cases} \] and any irreducible representation of $\mathrm{GSO}(V)$ must be of the form \begin{itemize} \item $\pi_1\boxtimes\pi_2$ with $\omega_{\pi_1}=\omega_{\pi_2}$ if $V=D_E;$ \item $\Pi\boxtimes\mu$ with $\omega_\Pi=\mu^2$ if $V=\mathbb{H}_E^3.$ \end{itemize} Here for each $i$, $\pi_i$ is an irreducible representation of $D_E^\times(E)$. 
In \cite{gan2011theta}, Gan-Takeda have proved that any irreducible representation $\tau$ of $\mathrm{GSp}_4(E)$ falls into one of the following two disjoint families of representations: \begin{itemize} \item $\tau=\theta(\pi_1\boxtimes\pi_2)$ with $\omega_{\pi_1}=\omega_{\pi_2};$ \item $\tau=\theta(\Pi\boxtimes\mu)$ with $\mu=\omega_\tau$ and $\omega_\Pi=\mu^2.$ \end{itemize} In order to use the see-saw identities, we need to study the big theta lift to $\mathrm{GO} (V)$ of a generic representation $\tau$ of $\mathrm{GSp}_4(E)$. In fact, we have studied the general (almost equal rank) case for the irreducibility of big theta lifts to $\mathrm{GO} _{n+1,n+1}(F)$ of a generic representation of $\mathrm{GSp}_{2n}(F)$ in \S\ref{subsect:big}. After computing the big theta lifts following \cite{gan2014formal,gan2011theta}, we use the local theta correspondences between $\mathrm{GSp}_4(E)$ and $\mathrm{GSO}(V)$ and the see-saw identities to discuss $\mathrm{GSp}_4$-period problems, by transferring the period problem for $\mathrm{GSp}_4$ to various analogous period problems for $\mathrm{GL}_2, \mathrm{GL}_4$ and their various forms (not necessarily inner). Then we obtain the following results: \begin{theorem}[Theorem \ref{localgspperiod}]\label{maingsp(4)theorem} Suppose that $\tau$ is an irreducible representation of $\mathrm{GSp}_4(E)$ with a central character $\omega_\tau$ and $\omega_\tau|_{F^\times}=\mathbf{1}$. \begin{enumerate}[(i)] \item If $\tau=\theta(\Sigma)$ is an irreducible representation of $\mathrm{GSp}_4(E),$ where $\Sigma$ is an irreducible representation of $\mathrm{GO} _{4,0}(E),$ then the representation $\tau$ is not $\mathrm{GSp}_4(F)$-distinguished. 
\item If $\tau=\theta(\pi_1\boxtimes\pi_2),$ where $\pi_1\boxtimes\pi_2$ is a generic representation of $\mathrm{GSO}_{2,2}(E),$ then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\begin{cases} 2,&\mbox{ if }\pi_i\ncong\pi_0 \mbox{ are both }\mathrm{GL}_2(F)\mbox{-distinguished};\\ 1,&\mbox{ if }\pi_1\ncong\pi_2\mbox{ but }\pi_1^\sigma\cong\pi_2^\vee;\\ 1,&\mbox{ if }\pi_1\cong\pi_2 \mbox{ is }\mathrm{GL}_2(F)\mbox{-distinguished but not }(\mathrm{GL}_2(F),\omega_{E/F})\mbox{-distinguished};\\ 1,&\mbox{ if }\pi_2 \mbox{ is }\mathrm{GL}_2(F)\mbox{-distinguished and }\pi_1\cong\pi_0; \\ 0,&\mbox{ the other cases.} \end{cases}\] Here $\pi_0=\pi(\chi_1,\chi_2)$ with $\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}$ is a principal series representation of $\mathrm{GL}_2(F)$.\\ Note that these conditions are mutually exclusive. \item Assume that $\tau$ is not in case (i) or (ii) and that $\tau=\theta(\Pi\boxtimes\chi)$ is generic, where $\Pi\boxtimes\chi$ is a representation of $\mathrm{GSO}_{3,3}(E)$. Then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F) }(\tau,\mathbb{C})=\begin{cases} 1,&\mbox{ if }\Pi\mbox{ is }\mathrm{GL}_4(F)\mbox{-distinguished;}\\ 0,&\mbox{ otherwise.} \end{cases} \] \end{enumerate} \end{theorem} Then we can verify the Prasad conjecture for $\mathrm{GSp}_4$ in \S\ref{subsect:GSp(4)conj}. More precisely, let $G_0$ be a quasi-split group defined over $F$ such that \[{}^LG_0=\mathrm{GSp}_4(\mathbb{C})\rtimes\mathrm{Gal}(E/F), \] where the nontrivial element $\sigma\in\mathrm{Gal}(E/F)$ acts on $\mathrm{GSp}_4(\mathbb{C})$ by \[\sigma(g)=\text{sim}(g)^{-1}\cdot g. \] Here $\text{sim}:\mathrm{GSp}_4(\mathbb{C})\longrightarrow\mathbb{C}^\times$ is the similitude character. \begin{theorem}[The Prasad conjecture for $\mathrm{GSp}_4$]\label{thm1.2} Let $\tau$ be an irreducible smooth representation of $\mathrm{GSp}_4(E)$ with enhanced Langlands parameter $(\phi_{\tau},\lambda_\tau)$. 
Assume that the $L$-packet $\Pi_{\phi_{\tau}}$ is generic. Then \[\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F})=\begin{cases} |F(\phi_{\tau})|,&\mbox{ if }\tau\mbox{ is generic},\\ 0,&\mbox{ otherwise}, \end{cases} \] where the finite set $F(\phi_\tau )$ is given by $$F(\phi_{\tau})=\{\tilde{\phi}:WD_F\longrightarrow {}^LG_0|~\tilde{\phi}|_{WD_E}=\phi_{\tau} \}$$ and $|F(\phi_{\tau})|$ denotes the cardinality of the set $F(\phi_{\tau})$. \end{theorem} We have proved analogous results for the inner form. Let $D$ be the $4$-dimensional quaternion division algebra over $F$. In a similar way, we study the period problem for the inner form $\mathrm{GU}_2(D)=\mathrm{GSp}_{1,1}$, i.e. try to figure out the multiplicity \[\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C}). \] We will not state the results of the inner form case in the introduction; the precise results can be found in Theorem \ref{innerformperiod}. \par Combining Theorem \ref{maingsp(4)theorem} and its analog for inner forms, we can verify the conjecture of Dipendra Prasad \cite[Conjecture 2]{prasad2015arelative} for $\mathrm{PGSp}_4$. Given a quasi-split reductive group $\mathbf{G}$ defined over $F$ and a quadratic extension $E/F,$ assuming the Langlands-Vogan conjectures for $\mathbf{G}$, Dipendra Prasad \cite{prasad2015arelative} uses the information from the Galois side to give a formula for the individual multiplicity \[\dim\mathrm{Hom}_{G_\alpha(F)}(\tau,\chi_\mathbf{G} ), \] where \begin{itemize} \item $\tau$ is an irreducible discrete series representation of $\mathbf{G}(E);$ \item $\chi_\mathbf{G}$ is a quadratic character of $\mathbf{G}(F)$ depending on $\mathbf{G}$ and $E;$ \item $G_\alpha$ is any pure inner form of $\mathbf{G}$ defined over $F$ satisfying $G_\alpha(E)=\mathbf{G}(E).$ \end{itemize} \par In \S \ref{secpgsp}, we will focus on the case $\mathbf{G}=\mathrm{PGSp}_4$. 
Then $\chi_\mathbf{G}=\omega_{E/F}$ and $H^1(F,\mathbf{G})=\{\mathrm{PGSp}_4,\mathrm{PGU}_2(D) \}.$ More precisely, given a discrete series representation $\tau$ of $\mathrm{PGSp}_4(E)$ with the enhanced $L$-parameter $(\phi_\tau,\lambda)$ (called the Langlands-Vogan parameter), where $\lambda$ is a character of the component group $\pi_0(Z(\phi_\tau)),$ set $$F(\phi_{\tau})=\{\tilde{\phi}:WD_F\longrightarrow \mathrm{Sp}_4(\mathbb{C})|~\tilde{\phi}|_{WD_E}=\phi_\tau \} .$$ Up to the twisting by the quadratic character $\omega_{E/F},$ there are several orbits in $F(\phi_\tau),$ denoted by $\sqcup_{i=1}^r\mathcal{O}(\tilde{\phi}_i).$ Each orbit $\mathcal{O}(\tilde{\phi}_i)$ corresponds to a unique subset $\mathcal{C}_i$ of $H^1(W_F,\mathbf{G})$. (See \S \ref{subsect:prasad} for more details.) \begin{theorem}\label{prasadfordisc} Let the notation be as above. Given a discrete series representation $\tau$ of $\mathrm{PGSp}_4(E),$ we have \begin{equation}\label{equfordist} \dim\mathrm{Hom}_{G_\alpha(F)}(\tau,\omega_{E/F})=\sum_{i=1}^rm(\lambda,\tilde{\phi}_i)\mathbf{1}_{\mathcal{C}_i}(G_\alpha)/d_0(\tilde{\phi}_i), \end{equation} where \begin{itemize} \item $\mathbf{1}_{\mathcal{C}_i}$ is the characteristic function of the set $\mathcal{C}_i;$ \item $m(\lambda,\tilde{\phi})$ is the multiplicity for the trivial representation contained in the restricted representation $\lambda|_{\pi_0(Z(\tilde{\phi}))}$; \item $d_0(\tilde{\phi})=|\mathrm{Coker}\{\pi_0(Z(\tilde{\phi}))\rightarrow\pi_0(Z(\phi_\tau))^{\mathrm{Gal}(E/F)} \}|$, where $|-|$ denotes its cardinality. 
\end{itemize} \end{theorem} \begin{remark} We would like to highlight the fact that Theorem \ref{prasadfordisc} provides the first example in the Prasad conjecture that the square-integrable representation $\tau$ may be nongeneric and so $\tau$ is not $\mathrm{PGSp}_4(F)$-distinguished (see Theorem \ref{innerformperiod}) but $\tau$ contains a nonzero period for the pure inner form $\mathrm{PGSp}_{1,1}(F)$. It is different from the case $\mathbf{G}=\mathrm{PGL}_2$ that if a representation $\pi$ of $\mathrm{PGL}_2(E)$ is $\mathrm{PD}^\times(F)$-distinguished, then $\pi$ must be $\mathrm{PGL}_2(F)$-distinguished. (See Lemma \ref{GL:period}.) \end{remark} \par In fact, we have shown that the equality \eqref{equfordist} holds for almost all generic representations in \S\ref{secpgsp}, except when the Langlands parameter $\phi_\tau=2\chi_F|_{W_E}\oplus\phi_2$ with $\phi_2$ conjugate-symplectic and $\chi_F^2=\omega_{E/F}.$ However, there is a weaker version of the Prasad conjecture which determines the sum of $\dim\mathrm{Hom}_{G_\alpha(F)}(\tau,\chi_\mathbf{G})$ as $G_\alpha$ runs over all pure inner forms of $\mathbf{G}$ satisfying $G_\alpha(E)=\mathbf{G}(E)$. It involves the degree of the base change map $$\Phi:\mathrm{Hom}(WD_F,\mathrm{Sp}_4(\mathbb{C}))\longrightarrow\mathrm{Hom}(WD_E,\mathrm{Sp}_4(\mathbb{C}))$$ for the exceptional case, i.e., the following identity \begin{equation}\label{equaforsum} \dim\mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})+\dim\mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F) }(\tau,\omega_{E/F})=\sum_{\tilde{\phi}\in F(\phi_\tau) }m(\lambda,\tilde{\phi})\frac{\deg\Phi(\tilde{\phi})}{d_0(\tilde{\phi})}\end{equation} when the $L$-packet $\Pi_{\phi_{\tau}}$ is generic, which is the original identity formulated by Dipendra Prasad in \cite{prasad2015arelative}. \par We now give a brief introduction to the proof of Theorem \ref{prasadfordisc}. 
After introducing the local theta correspondence between quaternionic unitary groups following \cite{yamana2011deg}, we use the Morita equivalence $\mathrm{GU}_2(R)=\mathrm{GSp}_{1,1}(E)\cong\mathrm{GSp}_4(E),$ where $R\cong \mathrm{Mat}_{2,2}(E)$ is the split quaternion algebra over $E,$ to embed the group $\mathrm{GSp}_{1,1}(F)$ into $\mathrm{GSp}_4(E).$ Then one can use the see-saw identity to transfer the inner form $\mathrm{GSp}_{1,1}$-period problem to the $\mathrm{GO} ^\ast_{3,0}$ or $\mathrm{GO} _{1,1}^\ast$ side, which are closely related to $\mathrm{GL}_n$-period problems. But we need to be very careful when we use the see-saw identity for a pair of quaternionic unitary groups. (See Remark \ref{counterforseesaw}.) Once the see-saw identity for the quaternionic unitary groups has been set up, the rest of the proof for the inner form case is similar to the case for $\mathrm{GSp}_4$-period. Then we obtain the results for the distinction problems for the automorphic side. For the Galois side, i.e., the right-hand side of \eqref{equaforsum}, it will be checked case by case in \S\ref{secpgsp}. \begin{remark} Rapha\"{e}l Beuzart-Plessis \cite[Theorem 1]{beuzart2017distinguished} used the local trace formula to deal with the distinction problems for the Galois pair $(G'(E),G'(F))$ for the stable square-integrable representations, where $G'$ is an inner form of $\mathbf{G}$ defined over $F$, which generalizes \cite[Theorem C]{prasad1992gl(2)}. \end{remark} The paper is organized as follows. In \S $2$, we set up the notation about the local theta correspondence. In \S\ref{sect:generic}, we will study the irreducibility for the big theta lift of a generic representation in the almost equal rank case, which generalizes the results of Gan-Ichino \cite[Proposition C.4]{gan2014formal} for the tempered representations. 
In \S\ref{sect:GSp(4)}, we will study the distinction problems for $\mathrm{GSp}_4$ over a quadratic extension $E/F.$ The proof of Theorem \ref{maingsp(4)theorem} will be given in \S\ref{subsect:proofof1.1}. The analogous results for the inner form $\mathrm{GSp}_{1,1}$ will be given in \S\ref{sect:GSp(1,1)}. In \S\ref{subsect:prasad}, we will introduce the Prasad conjecture for a reductive quasi-split group $\mathbf{G}$ defined over $F$. Then we will verify the Prasad conjecture for $\mathrm{GSp}_4$ in \S\ref{subsect:GSp(4)conj}. Finally, the proof of Theorem \ref{prasadfordisc} will be given in \S\ref{secpgsp}. \subsection*{Acknowledgments} This paper contains part of the author's Ph.D. thesis \cite{hengfei2017}. He is grateful to Wee Teck Gan for his guidance and numerous discussions when he was studying in Singapore. He would like to thank Dipendra Prasad for proposing this conjecture and fruitful discussions. He wants to thank Dmitry Gourevitch and Lei Zhang for useful comments as well. \section{The Local Theta Correspondences for Similitudes} In this section, we will briefly recall some results about the local theta correspondence, following \cite{gan2011theta,kudla1996notes,roberts2001global}. \par Let $F$ be a local field of characteristic zero. Consider the dual pair $\mathrm{O}(V)\times \mathrm{Sp}(W).$ For simplicity, we may assume that $\dim V$ is even. Fix a nontrivial additive character $\psi$ of $F.$ Let $\omega_\psi$ be the Weil representation for $\mathrm{O}(V)\times \mathrm{Sp}(W).$ If $\pi$ is an irreducible smooth representation of $\mathrm{O}(V)$ (resp.\ $\mathrm{Sp}(W)$), the maximal $\pi$-isotypic quotient of $\omega_\psi$ has the form \[\pi\boxtimes\Theta_\psi(\pi) \] for some smooth representation $\Theta_\psi(\pi)$ of $\mathrm{Sp}(W)$ (resp. some smooth representation $\Theta_\psi(\pi)$ of $\mathrm{O}(V)$). 
We call $\Theta_\psi(\pi )$ or $\Theta_{V,W,\psi}(\pi)$ the big theta lift of $\pi.$ It is known that $\Theta_\psi(\pi)$ is of finite length and hence is admissible. Let $\theta_\psi(\pi)$ or $\theta_{V,W,\psi}(\pi)$ be the maximal semisimple quotient of $\Theta_\psi(\pi),$ which is called the small theta lift of $\pi.$ \begin{theorem}[Howe duality conjecture]\cite{gan2014howe,gan2014proof}\label{localhowe} One has \begin{itemize} \item $\theta_\psi(\pi)$ is irreducible whenever $\Theta_\psi(\pi)$ is non-zero. \item the map $\pi\mapsto \theta_\psi(\pi)$ is injective on its domain. \end{itemize} \end{theorem} It has been proved by Waldspurger \cite{waldspurger1990demonstration} when $p\neq2$. \par Then we extend the Weil representation to the case of similitude groups. Let $\lambda_V$ and $\lambda_W$ be the similitude factors of $\mathrm{GO} (V)$ and $\mathrm{GSp}(W)$ respectively. We shall consider the group \[R=\mathrm{GO} (V)\times \mathrm{GSp}^+(W) \] where $\mathrm{GSp}^+(W)$ is the subgroup of $\mathrm{GSp}(W)$ consisting of elements $g$ such that $\lambda_W(g)$ lies in the image of $\lambda_V.$ Define \[R_0=\{(h,g)\in R|~\lambda_V(h)\lambda_W(g) =1\} \] to be the subgroup of $R.$ The Weil representation $\omega_\psi$ extends naturally to the group $R_0$ via \[\omega_\psi(g,h)\phi=|\lambda_V(h)|_F^{-\frac{1}{8}\dim V\cdot\dim W } \omega(g_1,1)(\phi\circ h^{-1}) \] where $|-|_F$ is the absolute value on $F$ and \[g_1=g\begin{pmatrix} \lambda_W(g)^{-1}&0\\0&1 \end{pmatrix}\in \mathrm{Sp}(W). \] Here the central elements $(t,t^{-1})\in R_0$ act by the quadratic character $\chi_V(t)^{\frac{\dim W}{2}},$ which is slightly different from the normalization used in \cite{roberts2001global}. \par Now we consider the compactly induced representation \[\Omega=\mathrm{ind}_{R_0}^R\omega_\psi. 
\] As a representation of $R,$ $\Omega$ depends only on the orbit of $\psi$ under the evident action of $\mathrm{Im}\,\lambda_V\subset F^\times.$ For example, if $\lambda_V$ is surjective, then $\Omega$ is independent of $\psi.$ For any irreducible representation $\pi$ of $\mathrm{GO} (V)$ (resp.\ $\mathrm{GSp}^+(W)$), the maximal $\pi$-isotypic quotient of $\Omega$ has the form \[\pi\otimes\Theta_\psi(\pi), \] where $\Theta_\psi(\pi)$ is some smooth representation of $\mathrm{GSp}^+(W)$ (resp. $\mathrm{GO} (V)$). Similarly, we let $\theta_\psi(\pi)$ be the maximal semisimple quotient of $\Theta_\psi(\pi).$ Note that though $\Theta_\psi(\pi)$ may be reducible, it has a central character $\omega_{\Theta_\psi(\pi)}$ given by \[\omega_{\Theta_\psi(\pi) }=\chi_V^{\frac{\dim W}{2}}\omega_\pi . \] There is an extended Howe conjecture for similitude groups, which says that $\theta_\psi(\pi)$ is irreducible whenever $\Theta_\psi(\pi)$ is non-zero and the map $\pi\mapsto\theta_\psi(\pi)$ is injective on its domain. It was shown by Roberts \cite{roberts1996theta} that this follows from Theorem \ref{localhowe}. \par Since $\lambda_V$ is surjective in this paper, we have $\mathrm{GSp}^+(W)=\mathrm{GSp}(W).$ \begin{proposition}\cite[Proposition 2.3]{gan2011locallanglands} Suppose that $\pi$ is a supercuspidal representation of $\mathrm{GO} (V)$ (resp. $\mathrm{GSp}(W)$). Then $\Theta_\psi(\pi)$ is either zero or is an irreducible representation of $\mathrm{GSp}(W)$ (resp.\ $\mathrm{GO} (V)$). \end{proposition} \subsection{First Occurrence Indices for pairs of orthogonal Witt Towers} Let $W_n~ (n\geq1)$ be the $2n$-dimensional symplectic vector space with associated symplectic group $\mathrm{Sp}(W_n)$ and consider the two towers of orthogonal groups attached to the quadratic spaces with trivial discriminant. More precisely, let $\mathbb{H}$ be the split $2$-dimensional quadratic space over $F$ and $D$ be the quaternion division algebra over $F$. 
Let \[V_{2r}^+= \mathbb{H}^{r}\quad\mbox{and}\quad V_{2r}^-= D(F)\oplus\mathbb{H}^{r} \] and denote the orthogonal groups by $\mathrm{O}(V_{2r}^+)=\mathrm{O}_{r,r}$ and $\mathrm{O}(V_{2r}^-)=\mathrm{O}_{r+4,r}$ respectively. For an irreducible representation $\pi$ of $\mathrm{Sp}(W_n),$ one may consider the theta lifts $\theta_{2r}^+(\pi)$ and $\theta_{2r}^-(\pi)$ to $\mathrm{O}(V^+_{2r})$ and $\mathrm{O}(V_{2r}^-)$ respectively, with respect to a fixed non-trivial additive character $\psi.$ Set \[\begin{cases} r^+(\pi)=\inf\{r:\theta_{2r}^+(\pi)\neq0 \};\\ r^-(\pi)=\inf\{r:\theta_{2r}^-(\pi)\neq0 \}. \end{cases} \] Then Kudla-Rallis \cite{kudla2005first} and Sun-Zhu \cite{sun2012conservation} showed: \begin{theorem} [Conservation Relation] For any irreducible representation $\pi$ of $\mathrm{Sp}(W_n),$ we have \[r^+(\pi)+r^-(\pi)=2n=\dim W_n. \] \end{theorem} On the other hand, one may consider the mirror situation, where one fixes an irreducible representation $\pi$ of $\mathrm{O}(V_{2r})$ and considers its theta lift $\theta_n(\pi)$ to the tower of symplectic groups $\mathrm{Sp}(W_n).$ Then, with $n(\pi)$ defined in the analogous fashion \[n(\pi)=\inf\{n:\theta_n(\pi)\neq0 \}, \] one has \[n(\pi)+n(\pi\otimes\det )=2r=\dim V_{2r}. \] For similitude groups, this implies that \[n(\pi)+n(\pi\otimes\nu)=2r, \] where $\nu$ is the nontrivial character of $\mathrm{GO} (V_{2r})/\mathrm{GSO}(V_{2r}).$ \section{The irreducibility of the big theta lift}\label{sect:generic} Let $\tau$ be an irreducible representation of $\mathrm{Sp}_{2n}(F)$. In \cite[Proposition C.4]{gan2014formal}, Gan-Ichino show that the big theta lift $\Theta_{2n+2}^+(\tau)$ to $\mathrm{O}_{n+1,n+1}(F)$ (called the almost equal rank case) is irreducible if $\tau$ is tempered. We will use the generalized standard module \cite[Theorem 3.2]{heiermann2016vogan} to study the case when $\Pi_{\phi_{\tau}}$ is generic. (See Theorem \ref{genericthetalift}.) 
In \S\ref{subsect:big}, we mainly study the big theta lift to the split group $\mathrm{O}_{n+1,n+1}(F)$ from a representation $\tau$ of $\mathrm{Sp}_{2n}(F)$ when the associated $L$-packet $\Pi_{\phi_\tau}$ is generic. Then we will focus on the computation for $n=2$. \subsection{Notation} Let us introduce the notation used in this section. \begin{itemize} \item $|-|_F$ (resp. $|-|_E$) is an absolute value defined on $F$ (resp. $E$). \item $P_{\vv{n}}$ (resp. $Q_{\vv{n}}$) is a parabolic subgroup of $\mathrm{Sp}_{2n}$ (resp. $\mathrm{O}_{n+1,n+1}$) defined over $F$. \item $\phi_{\tau}$ is the Langlands parameter or $L$-parameter of $\tau$ and $\phi^\vee_\tau$ is the dual parameter of $\phi_\tau$. \item $\tau^\vee$ is the contragredient representation of $\tau$. \item $\Pi_{\phi_{\tau}}$ is the $L$-packet containing $\tau$. \item $\mathcal{W}_r$ is the symplectic vector space over $E$ of dimension $2r$. \item $Z$ is a line in $\mathcal{W}_2$ and $Y$ is a maximal isotropic subspace in $\mathcal{W}_2$. \item $Q(Z)$ (resp. $P(Y)$) is the Klingen (resp. Siegel) parabolic subgroup of $\mathrm{GSp}_4(E)=\mathrm{GSp}(\mathcal{W}_2)$. \item $B$ (resp. $B_0$) is the Borel subgroup of $\mathrm{GSp}_4(E)$ (resp. $\mathrm{GL}_4(E)$). \item $P$ is the parabolic subgroup of $\mathrm{GL}_4(E)$ with Levi component $\mathrm{GL}_2(E)\times\mathrm{GL}_2(E)$. \item $\Theta^+_{2r}(\tau)$ (resp. $\Theta_{6}(\tau)$) is the big theta lift to $\mathrm{GO} _{r,r}(E)$ (resp. $\mathrm{GSO}_{3,3}(E)$) of $\tau$ of $\mathrm{GSp}_4(E)$. \item $\theta_6^+(\tau)$ (resp. $\theta_6(\tau)$) is the small theta lift to $\mathrm{GO} _{3,3}(E)$ (resp. $\mathrm{GSO}_{3,3}(E)$) of $\tau$ of $\mathrm{GSp}_4(E)$. \end{itemize} \subsection{The standard module conjecture} Let $\mathbf{G}$ be a quasi-split reductive group defined over $F$. Fix a Borel subgroup $\mathbf{B}=\mathbf{T}\mathbf{U}$ of $\mathbf{G}$. Let $\pi$ be an irreducible smooth representation of $\mathbf{G}(F)$. 
If there exists a nondegenerate character $\psi_U$ of $\mathbf{U}(F)$ such that $\mathrm{Hom}_{\mathbf{U}(F)}(\pi,\psi_U)\neq0$, then we say $\pi$ is $\psi_U$-generic or generic. If the $L$-packet $\Pi_{\phi_{\pi}}$ contains a generic representation, then we call $\Pi_{\phi_{\pi}}$ a generic $L$-packet. Let $\mathbf{P}=\mathbf{M}\mathbf{N}$ be a standard parabolic subgroup of $\mathbf{G}$. Suppose that there exists a generic tempered representation $\rho$ of $\mathbf{M}(F)$ such that $\pi$ is isomorphic to the Langlands quotient $J(\rho,\chi)$, where $\chi$ is a character of $\mathbf{M}(F)$ and lies in the positive Weyl chamber with respect to $\mathbf{P}(F)$. (See \cite[Page 777]{Heiermann2013standard} for more details.) \begin{theorem}[the standard module conjecture]\label{stand} If $\pi=J(\rho,\chi)$ is a generic representation of $\mathbf{G}(F)$, then $\mathrm{Ind}_{\mathbf{P}(F)}^{\mathbf{G}(F)}(\rho\otimes\chi )$ (normalized induction) is irreducible. Moreover, for any irreducible representation $\rho'$ of $\mathbf{M}(F)$ lying inside the $L$-packet $\Pi_{\phi_\rho}$, $\mathrm{Ind}_{\mathbf{P}(F)}^{\mathbf{G}(F)}(\rho'\otimes\chi )$ is irreducible. \end{theorem} Heiermann-Mui\'{c} \cite{Heiermann2013standard} proved the standard module conjecture. Later Volker Heiermann proved its generalized version in \cite[Theorem 3.2]{heiermann2016vogan}, i.e., the ``moreover'' part of Theorem \ref{stand}. The following subsection will focus on the cases $\mathbf{G}=\mathrm{Sp}_{2n}$ and $\mathbf{G}=\mathrm{O}_{n+1,n+1}$. \subsection{The theta lift from $\mathrm{Sp}_{2n}(F)$ to $\mathrm{O}_{n+1,n+1}(F)$}\label{subsect:big} Suppose that $\tau$ is a generic irreducible admissible representation of $\mathrm{Sp}_{2n}(F)$. 
Assume that there exists a parabolic subgroup $P_{\vv{n}}=M_{\vv{n}}N_{\vv{n}}$ of $\mathrm{Sp}_{2n}$ and an irreducible representation $\pi_1\otimes\pi_2\otimes\cdots\otimes\pi_r\otimes\tau_0$ of $M_{\vv{n}}(F)\cong \mathrm{GL}_{n_1}(F)\times\cdots\times\mathrm{GL}_{n_r}(F)\times\mathrm{Sp}_{2n_0}(F) $ $(n_1+n_2+\cdots+n_r+n_0=n)$ such that $\tau$ is the unique irreducible quotient of the standard module \begin{equation}\label{standard} \mathrm{Ind}_{P_{\vv{n}}(F)}^{\mathrm{Sp}_{2n}(F)}(\pi_1|-|_F^{s_1}\otimes\cdots\otimes \pi_r|-|_F^{s_r}\otimes\tau_0)(\mbox{ normalized induction}), \end{equation} where $s_1> s_2> \cdots > s_r>0$, $n\geq n_0$, each $\pi_i$ is a tempered representation of $\mathrm{GL}_{n_i}(F)$ and $\tau_0$ is a tempered representation of $\mathrm{Sp}_{2n_0}(F)$. Moreover, the Langlands parameter $\phi_{\tau}:WD_F\longrightarrow\mathrm{SO}_{2n+1}(\mathbb{C})$ is given by \[\phi_{\tau}=\phi_{\pi_1}|-|_F^{s_1}\oplus\cdots\oplus\phi_{\pi_r}|-|_F^{s_r}\oplus\phi_{\tau_0}\oplus\phi_{\pi_r}^\vee|-|_F^{-s_r}\oplus\cdots\oplus \phi_{\pi_1}^\vee|-|_F^{-s_1} \] where each $\phi_{\pi_i}$ is the Langlands parameter of $\pi_i$ and $\phi_{\tau_0}$ is the Langlands parameter of $\tau_0$. Here we identify the characters of $F^\times$ and the characters of the Weil group $W_F$ by the local class field theory. Due to Theorem \ref{stand}, the generic representation $\tau$ is isomorphic to the standard module, i.e. the standard module is irreducible. 
Thanks to \cite[Proposition C.4]{gan2014formal}, the small theta lift $\theta_{2n+2}^+(\tau)$ is the unique irreducible quotient of the standard module \begin{equation}\label{st:O} \mathrm{Ind}_{Q_{\vv{n}}(F)}^{\mathrm{O}_{n+1,n+1}(F)}(\pi_1|-|_F^{s_1}\otimes\cdots\otimes\pi_r|-|_F^{s_r}\otimes\Theta^+_{2n_0+2}(\tau_0) ), \end{equation} where $Q_{\vv{n}}(F)$ is the parabolic subgroup of $\mathrm{O}_{n+1,n+1}(F)$ with Levi component $\mathrm{GL}_{n_1}(F)\times\cdots\times\mathrm{GL}_{n_r}(F)\times\mathrm{O}_{n_0+1,n_0+1}(F)$. We will show that \eqref{st:O} is equal to $\theta_{2n+2}^+(\tau)$ under certain conditions. \begin{theorem} Let $P_{\vv{n}}$ (resp.\ $Q_{\vv{n}}$) be a parabolic subgroup of $\mathrm{Sp}_{2n}$ (resp.\ $\mathrm{O}_{n+1,n+1}$) defined as above. If $\tau$ is generic, so that $\tau$ is isomorphic to the standard module \eqref{standard}, and the standard $L$-function of $\tau$ is regular at $s=1$, then $\Theta_{2n+2}^+(\tau)$ is irreducible.\label{genericthetalift} \end{theorem} There is another key input in the proof of Theorem~\ref{genericthetalift}. \begin{theorem} Let $\mathbf{G}$ be $\mathrm{Sp}_{2n}$ or $\mathrm{SO}_{n+1,n+1}$. Let $\pi$ be an irreducible representation of $\mathbf{G}(F)$. The $L$-packet $\Pi_{\phi_{\pi}}$ is generic if and only if the adjoint $L$-function $L(s,\phi_{\pi},Ad)$ is regular at $s=1$. \end{theorem} \begin{proof} See \cite[Theorem 1.2]{liu2011sp} and \cite[Theorem 1.5]{liu2014SO(2n)}. \end{proof} \begin{proof}[Proof of Theorem~\ref{genericthetalift}] We will show that $\Theta_{2n+2}^+(\tau)|_{\mathrm{SO}_{n+1,n+1}(F)}$ is irreducible. If $n=n_0$, then it follows from \cite[Proposition C.4]{gan2014formal}. Assume that $s_1>0$. Then there exists a surjection \[\xymatrix{\mathrm{Ind}_{Q_{\vv{n}} (F)} ^{\mathrm{O}_{n+1,n+1}(F)}(\pi_1|-|_F^{s_1}\otimes\cdots\otimes\pi_r|-|_F^{s_r}\otimes\Theta_{2n_0+2}^+(\tau_0) )\ar@{->>}[r]&\Theta^+_{2n+2}(\tau).
}\] Due to \cite[Proposition C.4]{gan2014formal}, if $\tau_0$ is tempered, then $\Theta^+_{2n_0+2}(\tau_0)$ is irreducible and generic. Moreover, if $$\phi_{\tau_0}:WD_F\longrightarrow \mathrm{SO}_{2n_0+1}(\mathbb{C}) $$ is the Langlands parameter of $\tau_0$, then $\phi_{\theta_{2n_0+2}^+(\tau_0)}=\phi_{\tau_0}\oplus\mathbb{C}$. Assume that $\phi_{\tau}=\phi_0\oplus \phi_{\tau_0}\oplus\phi_0^\vee$ with $\phi_{\tau_0}$ tempered and $\phi_0=\bigoplus_i \phi_{\pi_i}|-|_F^{s_i}$. Then $\phi_{\theta_{2n+2}^+(\tau)}=\phi_0\oplus (\phi_{\tau_0}\oplus\mathbb{C})\oplus \phi_0^\vee$ due to \cite[Proposition C.4]{gan2014formal}. Observe that \[L(s,Ad_{\mathrm{SO}_{2n+2}}\circ \phi_{\theta_{2n+2}^+(\tau)})= L(s,Ad_{\mathrm{SO}_{2n+1}}\circ\phi_{\tau})\cdot L(s,\phi_{\tau},Std) \] where $L(s,\phi_{\tau},Std)$ is the standard $L$-function of $\tau$. By \cite[Theorem 1.2]{liu2011sp} and the assumption that $\tau$ is generic, we obtain that $L(s,Ad_{\mathrm{SO}_{2n+1}}\circ\phi_{\tau})$ is regular at $s=1$. Together with the assumption that $L(s,\phi_{\tau},Std)$ is regular at $s=1$, it follows that $L(s, Ad_{\mathrm{SO}_{2n+2}}\circ \phi_{\theta_{2n+2}^+(\tau)} )$ is regular at $s=1$. Thanks to \cite[Theorem 1.5]{liu2014SO(2n)}, the $L$-packet $\Pi_{\phi_{ \theta_{2n+2}^+(\tau)}}$ is generic. By the generalization of the standard module conjecture \cite[Theorem 3.2]{heiermann2016vogan}, which asserts that a standard module with a generic quotient is irreducible, $$\theta_{2n+2}^+(\tau)=\Theta_{2n+2}^+(\tau)=\mathrm{Ind}_{Q_{\vv{n}}(F)}^{\mathrm{O}_{n+1,n+1}(F)}(\pi_1|-|_F^{s_1}\otimes\cdots\otimes\pi_r|-|_F^{s_r}\otimes\Theta_{2n_0+2}^+(\tau_0) )$$ i.e., $\Theta_{2n+2}^+(\tau)$ is irreducible. \end{proof} \begin{remark} Similarly, if $\Sigma$ is a generic representation of $\mathrm{O}_{n,n}(F)$ and $L(s,\Sigma,Std)$ is regular at $s=1$, then the big theta lift $\Theta_{n}(\Sigma)$ to $\mathrm{Sp}_{2n}(F)$ is irreducible.
However, if $\tau$ is a generic representation of $\mathrm{Sp}_{2n}(F)$ and $L(s,\tau,Std)$ is regular at $s=1$, the big theta lift to the nonsplit group $\mathrm{O}(V_F)$ may be reducible when $V_F$ is a $(2n+2)$-dimensional quadratic space over $F$ with nontrivial discriminant. (See \cite[Proposition 3.8(iii)]{hengfei2016new}.) \end{remark} \begin{remark} In fact, there exists an isomorphism between the characters $\lambda_{\theta_{2n+2}^+(\tau)}\cong\lambda_{\theta_{2n_0+2}^+(\tau_0)} $, where the latter one is given by Atobe-Gan \cite[Theorem 4.3]{atobe2016local} in terms of the character $\lambda_{\tau_0}$, conjectured by Prasad in \cite{prasad1993local}. \end{remark} \begin{corollary} Let $\Pi_{\phi_{\tau}}$ be the $L$-packet of $\mathrm{Sp}_{2n}(F)$ containing $\tau$. Suppose that $\Pi_{\phi_{\tau}}$ is generic. If the standard $L$-function $L(s,\phi_\tau,Std)$ is a factor of the adjoint $L$-function $L(s,Ad\circ\phi_{\tau} )$, then the big theta lift $\Theta_{2n+2}^+(\tau)$ to $\mathrm{O}_{n+1,n+1}(F)$ is irreducible for any $\tau\in\Pi_{\phi_{\tau}}$. \end{corollary} For the rest of this section, we will compute the big theta lifts between $\mathrm{GSp}_4(E)$ and $\mathrm{GO} (V)$ explicitly when $\dim_E V=4$ or $6$. \subsection{Representations of $\mathrm{GO} (V)$} Let $\pi_i$ be an irreducible representation of $\mathrm{GL}_2(E)$ with central character $\omega_{\pi_i}$ and $\omega_{\pi_1}=\omega_{\pi_2}$.
Then $\pi_1\boxtimes\pi_2$ is an irreducible representation of the similitude group $$\mathrm{GSO}_{2,2}(E)\cong\frac{ \mathrm{GL}_2(E)\times \mathrm{GL}_2(E)}{\{(t,t^{-1}):t\in E^\times \}}.$$ If $\pi_1\neq\pi_2,$ then $\Sigma=\mathrm{Ind}_{\mathrm{GSO}_{2,2}(E)}^{\mathrm{GO} _{2,2}(E)}(\pi_1\boxtimes\pi_2)$ is an irreducible smooth representation of $\mathrm{GO} _{2,2}(E)$ and $\Sigma\cong\Sigma\otimes\nu,$ where $\nu|_{\mathrm{O}_{2,2}(E)}=\det.$ If $\pi_1=\pi_2,$ then there are two extensions $(\pi_1\boxtimes\pi_1)^\pm$ and only one of them participates in the theta lift between $\mathrm{GSp}_4(E)$ and $\mathrm{GO} _{2,2}(E),$ denoted by $(\pi_1\boxtimes\pi_1)^+=\Sigma^+.$ Moreover, we have $(\pi_1\boxtimes\pi_1)^+\otimes\nu\cong (\pi_1\boxtimes\pi_1)^-.$ (See \cite[\S6]{gan2011theta}.) Any irreducible representation of $$\mathrm{GSO}_{3,3}(E)=\frac{\mathrm{GL}_4(E)\times\mathrm{GL}_1(E)}{\{(t,t^{-2}):t\in E^\times \} }$$ is of the form \[\Pi\boxtimes \chi \] where $\Pi$ is a representation of $\mathrm{GL}_4(E)$ with central character $\omega_\Pi$, $\chi$ is a character of $E^\times$ and $\chi^2=\omega_\Pi$. \subsection{Representations of $\mathrm{GSp}_4(E)$} Assume that ${\tau}=\theta({\Sigma})$ is a representation of $\mathrm{GSp}_4(E)$ and ${\Sigma}$ is a representation of $\mathrm{GSO}_{2,2}(E)$. Then ${\tau}$ is generic if and only if ${\Sigma}$ is generic. We follow the notation in \cite{gan2011theta} to describe the non-discrete series representations of $\mathrm{GSp}_4(E)$.
Thanks to \cite[Proposition 5.3]{gan2011theta}, the non-discrete series representations of $\mathrm{GSp}_4(E)$ fall into the following three families: \begin{itemize} \item ${\tau}\hookrightarrow I_{Q(Z)}(\chi|-|_E^{-s},\pi)$ with $\chi$ a unitary character, $s\geq0$ and $\pi$ a discrete series representation of $\mathrm{GL}_2(E)$ up to twist; \item ${\tau}\hookrightarrow I_{P(Y)}(\pi|-|_E^{-s},\chi)$ with $\chi$ an arbitrary character, $s\geq0$ and $\pi$ a unitary discrete series representation of $\mathrm{GL}(Y)$; \item ${\tau}\hookrightarrow I_{B}(\chi_1|-|_E^{-s_1},\chi_2|-|_E^{-s_2};\chi)$ where $\chi_1,\chi_2$ are unitary and $s_1\geq s_2\geq0$. \end{itemize} Note that if ${\tau}$ itself is generic and non-tempered, then those embeddings are in fact isomorphisms due to the standard module conjecture for $\mathrm{GSp}_4$, except \[\tau\hookrightarrow I_{Q(Z)}(\mathbf{1},\pi). \] For instance, $\tau=J_{P(Y)}(\pi|-|_E^s,\chi)$ with $s\geq0$. If $\tau$ is generic, then $I_{P(Y)}(\pi|-|_E^{s},\chi)$ is irreducible and so $$\tau=I_{P(Y)}(\pi|-|_E^{s},\chi)\cong I_{P(Y)}(\pi^\vee|-|_E^{-s},\chi\omega_{\pi}|-|_E^{2s}) $$ with $s\geq0$. (See \cite[Lemma 5.2]{gan2011theta}.) If the big theta lift $\Theta_6^+({\tau})$ to $\mathrm{GO} _{3,3}(E)$ of $\tau$ is irreducible, then the restricted representation $\Theta_6^+({\tau})|_{\mathrm{GSO}_{3,3}(E)}$ is irreducible due to \cite[\S5, Page 282]{prasad1993local}. We will use $\Theta_6({\tau})$ to denote the big theta lift to $\mathrm{GSO}_{3,3}(E)$ of ${\tau}$. \begin{proposition}\label{big:GSp4GO6} Let ${\tau}$ be a generic representation of $\mathrm{GSp}_4(E)$. Then the big theta lift $\Theta_6({\tau})$ to $\mathrm{GSO}_{3,3}(E)$ of $\tau$ is an irreducible representation unless ${\tau}=I_{Q(Z)}(|-|_E,\pi)$ with $\pi$ essentially square-integrable. If $\tau=I_{Q(Z)}(|-|_E,\pi)$, then $\Theta_6(\tau)=I_P(\pi|-|_E,\pi)\boxtimes\omega_{\pi}|-|_E$ is reducible. 
\label{bigtheta} \end{proposition} \begin{proof} If ${\tau}$ is a tempered representation, then $\Theta_6^+({\tau})$ is irreducible due to \cite[Proposition C.4]{gan2014formal}(which holds even for $p=2$ since the Howe duality conjecture holds) and so $\Theta_6({\tau})$ is irreducible. Assume that the generic representation ${\tau}$ is not essentially tempered. There are $4$ cases: \begin{itemize} \item If $\tau=I_{B}(\chi_1,\chi_2;\chi)$ is irreducible, then none of the characters $\chi_1,\chi_2,\chi_1/\chi_2,\chi_1\chi_2$ is $|-|_E^{\pm1}$ and so $I_{B_0}(\mathbf{1},\chi_2,\chi_1,\chi_1\chi_2)$ has a generic quotient where $B_0$ is a Borel subgroup of $\mathrm{GL}_{4}(E)$. Thus $\Theta_6(\tau)=I_{B_0}(\mathbf{1},\chi_2,\chi_1,\chi_1\chi_2)\cdot\chi\boxtimes\chi^2\chi_1\chi_2$ is irreducible due to the standard module conjecture for $\mathrm{GL}_4$. \item If $\tau=I_{P(Y)}(\pi,\chi)$, then $\Theta_6(\tau)$ is a quotient of \[I_{Q}(\mathbf{1},\pi,\omega_\pi )\cdot\chi\boxtimes \chi^2\omega_\pi \] where $Q$ is a parabolic subgroup of $\mathrm{GL}_4(E)$ with Levi subgroup $\mathrm{GL}_1(E)\times\mathrm{GL}_2(E)\times\mathrm{GL}_1(E)$. Since the standard $L$-function $L(s,\tau,Std)$ is a factor of $L(s,Ad\circ \phi_{\tau} )$, $L(s,\tau,Std)$ is regular at $s=1$. Then $I_Q(\mathbf{1},\pi,\omega_\pi)$ is irreducible and so $\Theta_6(\tau)=I_{Q}(\mathbf{1},\pi,\omega_\pi )\cdot\chi\boxtimes \chi^2\omega_\pi $ is irreducible. \item If $\tau=I_{Q(Z)}(\chi,\pi)$ with $\chi\neq\mathbf{1}$, then there is an epimorphism \[\xymatrix{ I_{P}(\pi\cdot\chi,\pi)\boxtimes\omega_\pi\chi \ar@{->>}[r]& \Theta_6(\tau)} \] of $\mathrm{GSO}_{3,3}(E)$-representations, where $P$ is a parabolic subgroup of $\mathrm{GL}_4(E)$ with Levi subgroup $\mathrm{GL}_2(E)\times\mathrm{GL}_2(E)$. Gan-Takeda \cite[Proposition 13.2]{gan2011theta} have proved that $I_P(\pi\cdot\chi,\pi)$ is irreducible if $I_{Q(Z)}(\chi,\pi)$ is irreducible and $\chi\neq|-|_E$. 
If $\chi=|-|_E$ and $\pi$ is essentially square-integrable, then $\Theta_6(\tau)=I_P(\pi\cdot\chi,\pi)\boxtimes\omega_{\pi}\chi$ and $\theta_6(\tau)=J_P(\pi\cdot\chi,\pi)\boxtimes\omega_{\pi}\chi$ is the Langlands quotient. \item If $\tau\hookrightarrow I_{Q(Z)}(\mathbf{1},\pi)$, then $\Theta_6(\tau)$ is either zero or $I_P(\pi,\pi)\boxtimes\omega_{\pi}$, where $P$ is a parabolic subgroup of $\mathrm{GL}_4(E)$ with Levi subgroup $\mathrm{GL}_2(E)\times\mathrm{GL}_2(E)$. In fact, $\Theta_6(\tau)=0$ only when $\tau$ is a nongeneric constituent representation of $I_{Q(Z)}(\mathbf{1},\pi)$. \end{itemize} This finishes the proof of Proposition~\ref{bigtheta}. \end{proof} \begin{remark} Similarly one can prove that if $\Sigma$ is a generic representation of $\mathrm{GSO}_{2,2}(E)$ and $L(s,\Sigma,Std)$ is regular at $s=1$, then the big theta lift $\Theta_2(\Sigma)$ to $\mathrm{GSp}_4(E)$ is an irreducible representation. \end{remark} Let us turn the tables around. The rest of this subsection focuses on the computation of local theta lifts to $\mathrm{GO} _{2,2}(E)$ from $\mathrm{GSp}_4(E)$. \begin{proposition} Let $\tau$ be a generic representation of $\mathrm{GSp}_4(E)$. Assume that $\theta_4^+(\tau)\neq0$. \begin{enumerate}[(i)] \item If $\tau=I_{Q(Z)}(\mathbf{1},\pi(\mu_1,\mu_2))$, then the big theta lift $\Theta_4^+(\tau)$ to $\mathrm{GO} _{2,2}(E)$ of $\tau$ is $\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^-)$, where $\Sigma^\pm$ are two distinct extensions of $\pi(\mu_1,\mu_2)\boxtimes\pi(\mu_1,\mu_2)$ from $\mathrm{GSO}_{2,2}(E)$ to $\mathrm{GO} _{2,2}(E)$; \item If $\tau\neq I_{Q(Z)}(\mathbf{1},\pi(\mu_1,\mu_2))$, then $\Theta_4^+(\tau)$ is an irreducible representation of $\mathrm{GO} _{2,2}(E)$.
\end{enumerate}\label{GSp:GO(4)} \end{proposition} \begin{proof} \begin{enumerate}[(i)] \item If $\tau=I_{Q(Z)}(\mathbf{1},\pi(\mu_1,\mu_2))$, then the small theta lift $\theta_4^{+}(\tau)=\Sigma^+$ by the Howe duality, where $\Sigma^+$ is the extension to $\mathrm{GO} _{2,2}(E)$ of $\pi(\mu_1,\mu_2)\boxtimes\pi(\mu_1,\mu_2)$. Let $\psi_U$ be a non-degenerate character of the standard unipotent subgroup $U$ of $\mathrm{GO} _{2,2}(E)$. Then \begin{equation}\label{whittakerdimension} \dim\mathrm{Hom}_U(\Theta_4^+(\tau),\psi_U)=\dim\mathrm{Hom}_{H(\mathcal{W}_1)\rtimes \mathrm{Sp}(\mathcal{W}_1)}(\tau,\omega_\psi)=2 \end{equation} where $\mathcal{W}_2=Z\oplus \mathcal{W}_1\oplus Z^\ast$, $H(\mathcal{W}_1)$ is the Heisenberg group of $\mathcal{W}_1$ equipped with the Weil representation $\omega_\psi$ and $\tau$ is the representation of $\mathrm{GSp}(\mathcal{W}_2)$. Thus the big theta lift $\Theta_4^+(\tau)$ to $\mathrm{GO} _{2,2}(E)$ is reducible. There is a short exact sequence of $\mathrm{GO} _{2,2}(E)$-representations \begin{equation}\label{bigthetaexact} \xymatrix{\Sigma^-\oplus\Sigma^+\ar[r]&\Theta_4^{+}(\tau)\ar[r]&\Sigma^+\ar[r]&0.} \end{equation} However, we can not determine $\Theta_4^+(\tau)$ at this moment. Note that \[\dim\mathrm{Ext}^1_{\mathrm{GSO}_{2,2}(E)}(\pi(\mu_1,\mu_2)\boxtimes\pi(\mu_1,\mu_2),\pi(\mu_1,\mu_2)\boxtimes\pi(\mu_1,\mu_2))=1\] due to \cite[Theorem 1]{dipendra2012extensions}. Here $\mathrm{Ext}^1$ is the extension functor defined on the category of all smooth representations with a fixed central character. Then $\dim\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^-\oplus\Sigma^+)=1 $ by Frobenius Reciprocity, which implies that either $\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^-)$ or $\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^+)$ is zero. Assume that $B$ is the Borel subgroup of $\mathrm{GSO}_{2,2}(E)$. 
Set $\tilde{B}=B\rtimes\mu_2$ to be a subgroup of $\mathrm{GO} _{2,2}(E)$ and $\tilde{B}\cap\mathrm{GSO}_{2,2}(E)=B$. Since $$\pi(\mu_1,\mu_2)\boxtimes\pi(\mu_1,\mu_2)=\mathrm{Ind}_B^{\mathrm{GSO}_{2,2}(E)}\chi~~(\mbox{normalized induction}), $$ there are two extensions $\chi^\pm$ to $\tilde{B}$ of the character $\chi$ of $B$. We may assume without loss of generality that $\Sigma^+=\mathrm{Ind}_{\tilde{B}}^{\mathrm{GO} _{2,2}(E)}\chi^+$ and $\Sigma^-=\mathrm{Ind}_{\tilde{B}}^{\mathrm{GO} _{2,2}(E)}\chi^-$. Note that $\mathrm{Ext}^1_{\tilde{B}}(\chi^+,\chi^-)\neq0$. Then there is a short exact sequence of $\mathrm{GO} _{2,2}(E)$-representations \[\xymatrix{0\ar[r]&\Sigma^-\ar[r]&\mathrm{Ind}_{\tilde{B}}^{\mathrm{GO} _{2,2}(E)}(\mathrm{Ext}^1_{\tilde{B}}(\chi^+,\chi^-))\ar[r]&\Sigma^+\ar[r]&0, } \] which is not split. Hence $\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^-)\neq0$. Together with \eqref{whittakerdimension} and \eqref{bigthetaexact}, one can obtain the desired equality $\Theta_4^+(\tau)=\mathrm{Ext}^1_{\mathrm{GO} _{2,2}(E)}(\Sigma^+,\Sigma^-)$. \item If $\tau$ is an (essentially) discrete series representation, then it follows from \cite[Proposition 5.4]{atobe2016local}. \begin{itemize} \item If $\tau=I_{Q(Z)}(\mu_0,\pi(\mu_1,\mu_2))$ with $\mu_0\neq\mathbf{1}$, then there is only one orbit in the double coset $Q(Z)\backslash \mathrm{GSp}_4(E)/ H(\mathcal{W}_1)\rtimes\mathrm{Sp}(\mathcal{W}_1)$ that contributes to the multiplicity \[\dim\mathrm{Hom}_{H(\mathcal{W}_1)\rtimes\mathrm{Sp}(\mathcal{W}_1)}(\tau,\omega_\psi) \] and so $\Theta_4^+(\tau)$ is irreducible. \item If $\tau\subset I_{Q(Z)}(\mathbf{1},\pi)$ with $\pi$ square-integrable, then $\tau$ is tempered. Due to \cite[Proposition 5.5]{atobe2016local}, $\Theta_4^+(\tau)$ is tempered. Note that $\theta_4^+(\tau)$ is a discrete series representation which is projective in the category of the tempered representations. Thus $\Theta_4^+(\tau)=\theta_4^+(\tau)$ is irreducible.
Otherwise, it will contradict the Howe duality conjecture (see Theorem 2.1). \item If $\tau=I_{P(Y)}(\pi,\chi)$, then $\dim\mathrm{Hom}_U(\Theta_4^+(\tau),\psi_U) =1$ and so $\Theta^+_4(\tau)$ is irreducible. \end{itemize} \end{enumerate} This finishes the proof of Proposition~\ref{GSp:GO(4)}. \end{proof} \section{The $\mathrm{GSp}_4(F)$-distinguished representations}\label{sect:GSp(4)} \subsection{Notation} \begin{itemize} \item $\mathbb{C}$ or $\mathbf{1}$ is the trivial representation. \item $\mathbb{H}$ (resp. $\mathbb{H}_E$) is the split $2$-dimensional quadratic space over $F$ (resp. $E$). \item $(-,-)_E$ is the Hilbert symbol on $E^\times\times E^\times$. \item $\mathrm{Res}_{E/F}V$ is a quadratic space over $F$ while $V$ is a quadratic space over $E$. \item $\mathrm{GSp}(W_n)=\mathrm{GSp}_{2n}(F)$ is the symplectic similitude group. \item $\mathrm{GU}_2(D)=\mathrm{GSp}_{1,1}$ is the unique inner form of $\mathrm{GSp}_4$. \item $\lambda_W$ (resp. $\lambda_V$) is the similitude character of $\mathrm{GSp}_{4}(E)$ (resp. $\mathrm{GO} (V)$). \item $\mathrm{GSp}_4(E)^\natural=\{g\in\mathrm{GSp}_4(E)|\lambda_W(g)\in F^\times \}$ is the subgroup of $\mathrm{GSp}_4(E)$ and similarly for $\mathrm{GO} _{2,2}(E)^\natural$. \item $P'$ (resp. $P^\natural$) is a parabolic (resp. Siegel parabolic) subgroup of $\mathrm{GSp}_4(E)^\natural$ and $Q^\natural$ is the Siegel parabolic subgroup of $\mathrm{GO} _{2,2}(E)^\natural$. And $R_{\bar{P'}}$ (resp. $R_{\bar{P}^\natural}$) is the Jacquet functor with respect to the parabolic subgroup opposite to $P'$ (resp. $P^\natural$). \item $R_r(\mathbf{1})$ is the big theta lift to $\mathrm{GO} _{4,4}(F)$ of the trivial representation of $\mathrm{GSp}(W_r)$. \item $R^{m,n}(\mathbf{1})$ is the big theta lift to $\mathrm{GSp}_{8}(F)$ of the trivial representation of $\mathrm{GO} _{m,n}(F)$. \item $\Sigma$ is a generic representation of $\mathrm{GO} (V)$.
\item $Q_r$ is the Siegel parabolic subgroup of $H_r=\mathrm{GO} _{r,r}(F)$. \item $I_{Q_r}^{H_r}(s)$ is the degenerate Siegel principal series of $H_r$. \item $X_4=Q_4\backslash H_4$ is the projective variety. \item $\mathcal{I}(s)$ is the degenerate Siegel principal series of $\mathrm{GSp}_{8}(F)$. \item $Mat_{m,n}(F)$ is the matrix space over $F$ consisting of all $m\times n$ matrices. \end{itemize} \subsection{See-saw identity for orthogonal-symplectic dual pairs} Following the notation in \cite{prasad1996some}, for a quadratic space $(V,q)$ of even dimension over $E,$ let $\mathrm{Res}_{E/F}V$ be the same space $V$ but now thought of as a vector space over $F$ with a quadratic form $$q_F(v)=\frac{1}{2}tr_{E/F}q(v).$$ If $W_0$ is a symplectic vector space over $F,$ then $W_0\otimes_F E$ is a symplectic vector space over $E.$ Then we have the following isomorphism of symplectic spaces over $F$ \[\mathrm{Res}_{E/F}[(W_0\otimes_FE)\otimes_EV]\cong W_0\otimes_F \mathrm{Res}_{E/F}V=:\mathbf{W}. \] There is a pair \[\Big(\mathrm{GSp}(W_0),\mathrm{GO} (\mathrm{Res}_{E/F}V)\Big)\mbox{ and }\Big(\mathrm{GSp}(W_0\otimes_F E),\mathrm{GO} (V)\Big) \] of similitude dual reductive pairs in the symplectic similitude group $\mathrm{GSp}(\mathbf{W}).$ A pair $(G,H)$ and $(G',H')$ of dual reductive pairs in a symplectic similitude group is called a see-saw pair if $H\subset G'$ and $H'\subset G.$ The following lemma is quite useful in this section. See \cite[Lemma p. 6]{prasad1996some}. \begin{lem}\label{localseesaw} For a see-saw pair of dual reductive pairs $(G,H)$ and $(G', H')$, let $\pi$ be an irreducible representation of $H$ and $\pi'$ of $H'$. Then we have the following isomorphism: \[\mathrm{Hom}_H(\Theta_\psi(\pi'),\pi)\cong \mathrm{Hom}_{H'}(\Theta_\psi(\pi),\pi'). \] \end{lem} Let $\mathrm{GSp}(W_0\otimes_F E)^\natural$ be the subgroup of $\mathrm{GSp}(W_0\otimes_F E)$ where the similitude factor takes values in $F^\times$.
Similarly we define $$\mathrm{GO} (V)^\natural=\{h\in \mathrm{GO} (V)|\lambda_V(h)\in F^\times \} .$$ Then we have a see-saw diagram \[\xymatrix{\mathrm{GSp}(W_0\otimes_F E)^\natural\ar@{-}[rrd]&& \mathrm{GO} (\mathrm{Res}_{E/F}V)\ar@{-}[lld]\\ \mathrm{GSp}(W_0)\ar@{-}[u]&& \mathrm{GO} (V)^\natural.\ar@{-}[u] } \] Replace $W_0$ by a $4$-dimensional symplectic space $W_2$ over $F$ with a symplectic similitude group $\mathrm{GSp}_4(F)$. Then there is a see-saw pair \[\Big(\mathrm{GSp}_4( E)^\natural,\mathrm{GO} (V)^\natural \Big)\mbox{ and }\Big(\mathrm{GSp}_4(F),\mathrm{GO} (\mathrm{Res}_{E/F}V) \Big) \] in the similitude symplectic group $\mathrm{GSp}(\mathbf{W})$ where $\mathbf{W}=\mathrm{Res}_{E/F}((W_2\otimes_F E)\otimes_E V)$ and \[\mathrm{GSp}_4(E)^\natural=\{g\in\mathrm{GSp}_4(E)|\lambda_W(g)\in F^\times \}. \] In order to use Lemma~\ref{localseesaw}, we need to figure out the discriminant and Hasse invariant of the quadratic space $\mathrm{Res}_{E/F}V$ over $F.$ Assume that $E=F(\sqrt{d})$ is a quadratic field extension of $F$, where $d\in F^\times\setminus {F^\times}^2.$ Let $D_E=(\frac{a,b}{E})$ be the nonsplit quaternion algebra with involution $\ast$ defined over $E$ with a norm map $N_{D_E},$ which is a $4$-dimensional quadratic space $V$ over $E$. Then there is an isomorphism for the vector space $\mathrm{Res}_{E/F}V,$ $$\mathrm{Res}_{E/F}D_E\cong\mbox{Span}_F\{1,\sqrt{d},i,\sqrt{d}i,j,\sqrt{d}j,ij,\sqrt{d}ij \}$$ as $F$-vector spaces, where $i^2=a,j^2=b,ij=-ji.$ Given a vector $v\in V,$ set $$q_F(v)=\frac{1}{2}tr_{E/F}\circ N_{D_E}(v)\mbox{ and } (v_i,v_j)=q(v_i+v_j)-q(v_i)-q(v_j).$$ \begin{lem}\label{quadraticrestriction} The quadratic space $\mathrm{Res}_{E/F}D_E$ with quadratic form $\frac{1}{2}tr_{E/F}\circ N_{D_E}$ over $F$ has dimension $8,$ discriminant $1$ and Hasse-invariant $-1.$ \end{lem} \begin{proof} The nonsplit quaternion algebra over a nonarchimedean local field is unique.
We may assume that $$i^2=a\in F^\times$$ and $j^2=b=b_1+b_2\sqrt{d},N_{E/F}(b)=b_1^2-b_2^2d,~b_i\in F.$ For an element $v=x_1+x_2i+x_3j+x_4ij$ in $D_E$ with $x_i\in E$, we have $$\frac{1}{2}(v,v)=N_{D_E}(v)=vv^\ast=x_1^2-ax_2^2-bx_3^2+abx_4^2$$ and the corresponding matrix for the quadratic space $(\mathrm{Res}_{E/F}D_E,q_F)$ is \[\begin{pmatrix} {2}&0&0&0&0&0&0&0\\ 0&{2d}&0&0&0&0&0&0\\ 0&0&-{2a}&0&0&0&0&0\\ 0&0&0&-{2ad}&0&0&0&0\\ 0&0&0&0&-2{b_1}&-2b_2d&0&0\\ 0&0&0&0&-2b_2d&-2{b_1d}&0&0\\ 0&0&0&0&0&0&2ab_1&2dab_2\\ 0&0&0&0&0&0&2dab_2&2{dab_1} \end{pmatrix}. \] The discriminant of $\mathrm{Res}_{E/F} D_E$ is trivial in $F^\times/{F^\times}^2$. If $b_1=0,$ then the Hasse-invariant is $$(-d,~a)=-1$$ since $(b_2\sqrt{d},~ a)_E=-1$, where $(-,-)$ (resp. $(-,-)_E$) is the Hilbert symbol defined on $F^\times\times F^\times$ (resp. $E^\times\times E^\times$). If $b_1\neq0,$ then the Hasse-invariant is \[(d,d)(-a,-ad)(-b_1,\frac{N_{E/F}(b)d}{-b_1})(N_{E/F}(b)d,-1)(ab_1,\frac{N_{E/F}(b)d}{ab_1})=(a,N_{E/F}(b))=(a,b)_E=-1, \] because $(a,b)_E=(a,N_{E/F}(b))$ for all $a\in F^\times$ and $b\in E^\times.$ \end{proof} Now let $V$ be the split $2n$-dimensional quadratic space $\mathbb{H}_E^n$ over $E$. There is a basis $\{e_i,e_j' \}_{1\leq i,j\leq n}$ for the quadratic space $V$ satisfying $<e_i,e_j'>=\delta_{ij}$ and the other inner products are zero. Then we fix the basis \[\{e_i,\sqrt{d}e_i,e_j',e_j'/\sqrt{d} \}_{1\leq i,j\leq n} \] for $\mathrm{Res}_{E/F}V.$ It is straightforward to check that the vector space $\mathrm{Res}_{E/F}V$ is isomorphic to the split $4n$-dimensional quadratic space $\mathbb{H}^{2n}$ over $F.$ \subsection{The Structure of Degenerate Principal Series} In this subsection, we follow the notation in \cite{gan2011endoscopy,kudla1996notes}. Let $H_n=\mathrm{GO} (\mathbb{H}^n)$ be the orthogonal similitude group.
Define the quadratic character $\nu$ to be \[\nu(h)=\det(h)\cdot\lambda_V^{-n}(h)\mbox{ for }h\in \mathrm{GO} (\mathbb{H}^n ) \] so that $\nu|_{\mathrm{O}(\mathbb{H}^n )}=\det.$ Define \[\mathrm{GSO}(\mathbb{H}^n )=\ker\nu=\{h\in \mathrm{GO} (\mathbb{H}^n )|\lambda(h)^n=\det(h) \}. \] Assume that $Q_n$ is the standard Siegel parabolic subgroup of $H_n,$ i.e., \[Q_n= \Bigg\{\begin{pmatrix} A^{-1}\\&\lambda A^t \end{pmatrix}\begin{pmatrix} I&X\\&I \end{pmatrix}\big|A\in \mathrm{GL}_n(F), X\in Mat_{n,n}(F) \mbox{ and }X+X^t=0 \Bigg\} \] with modular character $|\det A|_F^{1-n}|\lambda|_F^{-n(n-1)/2}$. Then $Q_n\backslash H_n$ is a projective variety and a homogeneous space equipped with $H_n$-action. Each point on $Q_n\backslash H_n$ corresponds to an isotropic subspace in $\mathbb{H}^n$ of dimension $n.$ Set the degenerate normalized induced representation $I_{Q_n}^{H_n}(s)$ as follows \[I_{Q_n}^{H_n}(s)=\{f:H_n\rightarrow\mathbb{C}|f(xg)=\delta_{Q_n}(x)^{\frac{1}{2}+\frac{s}{n-1}}f(g)\mbox{ for }x\in Q_n, g\in H_n \}. \] Let $W_{r}$ be the symplectic space with a symplectic similitude group $\mathrm{GSp}(W_{r}).$ Set $\mathbf{1}_W$ to be the trivial representation of $\mathrm{GSp}(W_{r}).$ Then the big theta lift $\Theta_r(\mathbf{1}_W)$ to $H_n$ of the trivial representation $\mathbf{1}_W$ is isomorphic to a subrepresentation of $I_{Q_n}^{H_n}(s_0)$ where $$s_0=r-\frac{n-1}{2}.$$ The image of $\Theta_r(\mathbf{1}_W)$ in $I_{Q_n}^{H_n}(s_0)$ is denoted by $R_r(\mathbf{1})$, i.e., \[\Theta_r(\mathbf{1}_W)=R_{r}(\mathbf{1})\subset I_{Q_n}^{H_n}(s_0). \] Let us come back to the $\mathrm{GSp}_4$-cases. Assume that $r=2$ and $n=4$. \begin{prop}\label{degenerateseries} There is an exact sequence of $H_4$-modules \[\xymatrix{0\ar[r]&R_2(\mathbf{1})\ar[r]& I_{Q_4}^{H_4}(1/2)\ar[r]&R_1(\mathbf{1})\otimes\nu\ar[r]&0 }.
\] \end{prop} \begin{proof} Note that $R_2(\mathbf{1})|_{\mathrm{O}_{4,4}(F)}$ is isomorphic to the big theta lift of the trivial representation $\mathbf{1}_W$ from $\mathrm{Sp}_4(F)$ to $\mathrm{O}_{4,4}(F),$ and similarly for the big theta lift $R_1(\mathbf{1}).$ Mackey theory implies that there is only one orbit for the double coset \[Q_4\backslash H_4/\mathrm{O}_{4,4}(F)=(Q_4\cap \mathrm{O}_{4,4}(F))\backslash \mathrm{O}_{4,4}(F)/\mathrm{O}_{4,4}(F) \] which implies $I_{Q_4}^{H_4}(1/2)|_{\mathrm{O}_{4,4}(F)}\cong I_{Q_4\cap \mathrm{O}_{4,4}(F)}^{\mathrm{O}_{4,4}(F)}(1/2)$. Then the sequence is still the same when restricted to the orthogonal group $\mathrm{O}_{4,4}(F).$ The sequence is exact when restricted to the orthogonal group $\mathrm{O}_{4,4}(F)$ due to the structure of degenerate principal series (see \cite[Proposition 7.2]{gan2014formal}). By the construction of the extended Weil representation, the sequence is exact as $H_4$-modules. \end{proof} Similarly, let $P_4=M_4N_4$ be the Siegel parabolic subgroup of $\mathrm{GSp}(W_4)=\mathrm{GSp}_8(F)$ where $M_4\cong\mathrm{GL}_1(F)\times\mathrm{GL}_4(F)$. Let $\mathcal{I}(s)$ be the degenerate normalized induced representation of $\mathrm{GSp}_8(F)$ associated to $P_4$, i.e., \[\mathcal{I}(s)=\{f:\mathrm{GSp}_{8}(F)\rightarrow\mathbb{C}|f(pg)=\delta_{P_4}(p)^{\frac{1}{2}+\frac{s}{5}}f(g)\mbox{ for }p\in P_4,g\in \mathrm{GSp}_{8}(F) \}. \] Then we have \begin{prop} There is an exact sequence of $\mathrm{GSp}_{8}(F)$-modules \[\xymatrix{0\ar[r]& R^{3,3}(\mathbf{1})\ar[r]&\mathcal{I}(1/2)\ar[r]&R^{4,0}(\mathbf{1})\ar[r]&0 }, \] where $\mathcal{I}(s)$ is the degenerate normalized induced representation of $\mathrm{GSp}_8(F)$ and $R^{3,3}(\mathbf{1})$ (resp. $R^{4,0}(\mathbf{1})$) is the big theta lift to $\mathrm{GSp}_{8}(F)$ of the trivial representation of $\mathrm{GO} _{3,3}(F)$ (resp. $\mathrm{GO} _{4,0}(F)$).
\end{prop} Now we use Mackey theory to study $I_{Q_4}^{H_4}(1/2)|_{\mathrm{GO} _{2,2}(E)^\natural}$ which involves the computation for the double coset $Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural$. Denote by $X_4=Q_4\backslash H_4$ the projective variety. \subsubsection{Double cosets} Now let us consider the double coset \[Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural. \] Assume that $V=\mathbb{H}_E^2$ with basis $\{e_i,e_j' \}_{1\leq i,j\leq 2}$ and $<e_i,e_j'>=\delta_{ij}.$ Fix the basis $$\{e_1,\sqrt{d}e_1,e_2,\sqrt{d}e_2,e_1',e_1'/\sqrt{d},e_2',e_2'/\sqrt{d} \}$$ for $V_F=\mathrm{Res}_{E/F}V$. The inner product $\langle\langle-,-\rangle\rangle$ on $V_F$ is given by \[\langle\langle x,y\rangle\rangle:=\frac{1}{2}tr_{E/F}(<x,y>)\] for $x,y\in V$. Let us fix an embedding $i:\mathrm{GO} _{2,2}(E)^\natural\rightarrow \mathrm{GSO}_{4,4}(F)$. The double coset decomposition for the case at hand can be obtained from a more general case. Assume that $\mathbf{V}$ is a symplectic space or a split quadratic space over $E$ of dimension $2n,$ with a non-degenerate bilinear form $B:\mathbf{V}\times \mathbf{V}\rightarrow E .$ Let $U(\mathbf{V})$ be the isometry group, i.e., \[U(\mathbf{V})=\{g\in \mathrm{GL}(\mathbf{V})| B(gx,gy)=B(x,y)\mbox{ for all } x,y\in \mathbf{V} \} \] which is a symplectic group or an orthogonal group. Then $\mathrm{Res}_{E/F}\mathbf{V}$ is a vector space over $F$ of dimension $4n$ with a non-degenerate bilinear form $\frac{1}{2}tr_{E/F}\circ B.$ \begin{lem}\label{orbitdecomp} Let $P$ be a Siegel parabolic subgroup of $U(\mathrm{Res}_{E/F}\mathbf{V})$.
Then each point in the homogeneous space $X=P\backslash U(\mathrm{Res}_{E/F}\mathbf{V})$ corresponds to a $2n$-dimensional maximal isotropic subspace in $\mathrm{Res}_{E/F}\mathbf{V}$ and the finite double cosets $X/U(\mathbf{V})$ can be parametrized by a pair $$(\dim_E E\cdot L,B_L )$$ where $L\subset \mathrm{Res}_{E/F}\mathbf{V}$ is a maximal isotropic subspace with respect to the inner product $\langle\langle-,-\rangle\rangle$ over $F$, $$E\cdot L:=\{e\cdot x|e\in E,x\in L \}$$ is a linear $E$-subspace in $\mathbf{V}$ and $$B_L:L/L_0\times L/L_{0}\rightarrow \sqrt{d}\cdot F$$ is a non-degenerate bilinear form inherited from $\mathbf{V}$, where $$L_0=\{x\in L: B(x,y)=0 \mbox{ for all } y\in L \}.$$ Moreover, if $ L=L_0,$ then $L$ lies in the closed orbit. If $L_0=0,$ then $L$ lies in the open orbit. \end{lem} \begin{proof} Under a suitable basis for $L,$ the bilinear form for $B|_{ L}$ corresponds to a matrix $\sqrt{d}\cdot T$, where $T\in M_{2n}(F).$ Moreover, we can choose $T$ such that it is a diagonal (resp. an anti-diagonal) matrix if $B(x,y)=B(y,x)$ (resp. $B(y,x)=-B(x,y)$). Then $$\dim_E E\cdot L=n+\frac{1}{2}\cdot rank(T)$$ which is invariant under $U(\mathbf{V})$-action. The bilinear form $B_L$ corresponds to a matrix $\sqrt{d}\cdot T',$ i.e., $$T= \begin{pmatrix} 0&0&0\\0& T'&0\\0&0&0 \end{pmatrix}$$ where $T'$ is invertible and $rank (T)=rank (T')$. Assume that there are two isotropic subspaces $L_1$ and $L_2$ satisfying \[\dim_E E\cdot L_1=\dim_E E\cdot L_2=l\mbox{ and }B_{L_1}\cong B_{L_2}.
\] This means that there exists $g\in \mathrm{GL}_l(E)$ such that $g:E\cdot L_1\rightarrow E\cdot L_2$ satisfying $$B_{L_1}(x,y)=B_{L_2}(gx,gy).$$ It is easy to lift $g$ to $g_E\in U(\mathbf{V})$ such that $g_EL_1=L_2.$ In fact, $g=\begin{pmatrix} g_1&0\\0&g_2 \end{pmatrix}$ lies in a subgroup of $\mathrm{GL}_l(E),$ which can be regarded as a Levi subgroup of $U(\mathbf{V}),$ and \[B_L(gx,gy)=B_L(g_2x',g_2y')\] when $x-x',y-y'\in L_0.$ Then $g_E=\begin{pmatrix} g_1\\&g_2\\&& g_1^\ast \end{pmatrix}\in U(\mathbf{V}),$ where $g_1^\ast$ depends on $g_1$ and $\mathbf{V}$. \end{proof} \begin{rem} In fact, there is only one closed orbit in the double coset $P\backslash U(\mathrm{Res}_{E/F}\mathbf{V} )/U(\mathbf{V}).$ \end{rem} Consider the double coset $$Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural.$$ There are several $\mathrm{GO} _{2,2}(E)^\natural$-orbits in $Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural$. By Lemma~\ref{orbitdecomp}, there are two invariants for the orbit $\mathrm{GO} _{2,2}(E)^\natural\cdot L:$ \begin{itemize} \item the dimension $\dim_E(E\cdot L)$ and \item the quadratic form $q_E|_{L}$ up to scaling in $F^\times.$ \end{itemize} By the classification of $4$-dimensional quadratic spaces over $F,$ there are $4$ elements lying in the kernel \[\ker \{H^1(F,\mathrm{O}_4)\rightarrow H^1(E,\mathrm{O}_4)\}, \] which are \begin{itemize} \item the split quaternion algebra $Mat_{2,2}(F)$ with $q(v)=\det(v)$ for $v\in Mat_{2,2}(F);$ \item the quaternion division algebra $D(F)$ with the norm map $N_{D/F};$ \item the non-split $4$-dimensional quadratic space $V_3=E\oplus\mathbb{H}$ with $q(e,x,y)=N_{E/F}(e)-xy$ and \item $V_4=\varepsilon V_3$ with $\varepsilon\in F^\times\setminus N_{E/F}(E^\times).$ \end{itemize} However, we consider the double coset \[Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural\] for the similitude groups and we observe that $V_3$ and $V_4$ are in the same $\mathrm{GO} _{2,2}(E)^\natural$-orbit in $Q_4\backslash
H_4/\mathrm{GO} _{2,2}(E)^\natural$. \begin{prop} Pick a point $L\in X_4/\mathrm{GO} _{2,2}(E)^\natural $ lying in an open orbit. Then the stabilizer of $L$ in $\mathrm{GO} _{2,2}(E)^\natural$ is isomorphic to the similitude group $\mathrm{GO} (L).$ \end{prop} \begin{proof} For $g\in \mathrm{GO} _{2,2}(E)^\natural$ with $g(L)=L,$ we have \[\langle gl_1,gl_2\rangle=\lambda(g)\cdot\langle l_1,l_2\rangle\] and so $\langle\langle gl_1,gl_2\rangle\rangle=\lambda(g)\cdot \langle \langle l_1,l_2\rangle\rangle$. This means $g\in \mathrm{GO} (L).$ Conversely, if $h\in \mathrm{GO} (L,\frac{1}{\sqrt{d}}q_E|_L),$ set $$h_E:x\otimes e\mapsto h(x)\otimes e$$ for $x\otimes e\in L\otimes E\cong L\cdot E=V$. Then $h_E(L)=L$ and \[\langle h_E(x_1\otimes e_1),h_E(x_2\otimes e_2)\rangle=e_1e_2\lambda(h)\langle \langle x_1,x_2\rangle\rangle=\lambda(h)\langle x_1\otimes e_1,x_2\otimes e_2\rangle, \] i.e. $h_E\in \mathrm{GO} _{2,2}(E)^\natural.$ Then we get a bijection between the similitude orthogonal group $\mathrm{GO} (L)$ and the stabilizer of $L$ in $\mathrm{GO} _{2,2}(E)^\natural.$ Observe that the map $h\mapsto h_E$ is a group homomorphism. Then $\mathrm{GO} (L)$ is isomorphic to the stabilizer of $L$ via the map $h\mapsto h_E.$ \end{proof} There are three $F$-rational open orbits, whose stabilizers are $\mathrm{GO} _{2,2}(F),\mathrm{GO} _{4,0}(F)$ and $\mathrm{GO} _{3,1}(F)$ respectively. There is one closed orbit which has stabilizer $$\mathrm{GO} _{2,2}(E)^\natural\cap Q_4=:Q^\natural\cong\Bigg\{\begin{pmatrix} A^{-1}&\ast\\0&\lambda A^t \end{pmatrix}|A\in \mathrm{GL}_2(E),\lambda\in F^\times \Bigg\}.$$ There are two intermediate orbits with representatives $L_1,L_2$ and $\dim_E(E\cdot L_i)=3$.
The stabilizers are isomorphic to $$(\mathrm{GL}_1(E)\times \mathrm{GO} _{1,1}(F))\cdot Mat_{2,2}(F)\mbox{ and } (\mathrm{GL}_1(E)\times \mathrm{GO} (\mathcal{V}_E))\cdot Mat_{2,2}(F),$$ where $\mathcal{V}_E$ is the $2$-dimensional quadratic space over $F$ with nontrivial discriminant which corresponds to $E.$ \begin{rem} For $(g,t)\in \mathrm{GL}_2(E)\times F^\times, $ we set $$\beta((g,t))=(g,\bar{g}\cdot t)\in \mathrm{GL}_2(E)\times \mathrm{GL}_2(E).$$ Then $\beta:\mathrm{GSO}_{3,1}(F)\rightarrow \mathrm{GSO}_{2,2}(E)^\natural$ is an embedding due to the following exact sequences \[\xymatrix{1\ar[r]& E^\times\ar[r]^-{i_1}\ar@{=}[d]&\mathrm{GL}_2(E)\times F^\times\ar[r]\ar[d]^\beta&\mathrm{GSO}_{3,1}(F)\ar[d]\ar[r]&1\\ 1\ar[r]&E^\times\ar[r]^-{i_2}&\mathrm{GL}_2(E)\times\mathrm{GL}_2(E)\ar[r]&\mathrm{GSO}_{2,2}(E)\ar[r]&1 } \] where $i_1(e)=(e,N_{E/F}(e)^{-1})$ and $i_2(e)=(e,e^{-1})$ for $e\in E^\times$. \end{rem} There are several orbits for $X_4/\mathrm{GO} _{2,2}(E)^\natural$. So there is a decreasing filtration of $\mathrm{GO} _{2,2}(E)^\natural$-modules for $I_{Q_4}^{H_4}(s)|_{\mathrm{GO} _{2,2}(E)^\natural}.$ \subsubsection{Filtration} Consider the filtration \[I_{Q_4}^{H_4}(s)= I_2(s)\supset I_1(s)\supset I_0(s)\supset 0\] of the degenerate principal series $I_{Q_4}^{H_4}(s)|_ {\mathrm{GO} _{2,2}(E)^\natural}$ with a sequence of sub-quotients $$I_0(s)=ind_{\mathrm{GO} _{2,2}(F) }^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{4,0}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{3,1}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C},$$ $$I_2(s)/I_1(s)\cong ind_{Q^\natural}^{\mathrm{GO} _{2,2}(E)^\natural}\delta_{Q^\natural}^{s+1}$$ where $Q^\natural$ is the Siegel parabolic subgroup of $\mathrm{GO} _{2,2}(E)^\natural$ with modular character $\delta_{Q^\natural}$ and \[I_1(s)/I_0(s)\cong ind_{(\mathrm{GL}_1(E)\times \mathrm{GO} _{1,1}(F))\cdot N }^{\mathrm{GO} 
_{2,2}(E)^\natural}\delta_Q^{\frac{1}{2}+\frac{s}{3}}\delta_1^{-\frac{1}{2}}\oplus ind_{Q'}^{\mathrm{GO} _{2,2}(E)^\natural }\delta_Q^{\frac{1}{2}+\frac{s}{3}}\delta_2^{-\frac{1}{2}} \] where $Q'=(\mathrm{GL}_1(E)\times \mathrm{GO} (\mathcal{V}_E))\cdot N,~N\cong Mat_{2,2}(F)$ and \[\delta_i\Big(t,h\Big)=|N_{E/F}(t^2) \cdot \lambda_V(h)^{-2}|_F\] for $ t\in \mathrm{GL}_1(E)$ and $h\in\mathrm{GO} _{1,1}(F)$ or $\mathrm{GO} (\mathcal{V}_E)$, where $\mathcal{V}_E$ is the non-split $2$-dimensional quadratic space. \begin{rem} We would like to highlight the fact that on the open orbits related to $I_0(s),$ the group embedding $\mathrm{GO} _{2,2}(F)\hookrightarrow\mathrm{GO} _{2,2}(E)^\natural$ (similar for the other two group embeddings) is not induced from the geometric embedding $i:\mathrm{GO} (L)\hookrightarrow\mathrm{GO} (L\otimes_FE),$ but the composite map $Ad_{h^\delta}\circ i$ of the adjoint map $Ad_{h^\delta}$ and the geometric embedding $i$ where $$h^\delta=\begin{pmatrix} \sqrt{d}\\&1 \end{pmatrix}\in\mathrm{GO} _{2,2}(E).$$ However, it does not affect the results when we consider the distinction problems for the similitude groups. We will show that the results on the open orbits determine the distinction problems $\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)}(I_{Q_4}^{H_4}(1/2),\Sigma) $ when $\Sigma$ is a generic representation. (See \cite[Theorem 2.8]{blanc-delorme}.) \end{rem} Recall that $\mathrm{GSp}_4(E)^\natural=\{g\in\mathrm{GSp}_4(E)|\lambda_W(g)\in F^\times \}.$ When we deal with the case \[\mathrm{Ind}_{P_4}^{\mathrm{GSp}_8(F)}\delta_{P_4}^{\frac{s}{5} }|_{\mathrm{GSp}_4(E)^\natural }, \] where $P_4$ is the Siegel parabolic of $\mathrm{GSp}_{8}(F)$ with modular character $\delta_{P_4}$, the above results still hold. More precisely, set \[\mathcal{I}(s)=\{f:\mathrm{GSp}_8(F)\rightarrow\mathbb{C}|f(xg)=\delta_{P_4}(x)^{\frac{1}{2}+\frac{s}{5}}f(g)\mbox{ for }x\in P_4, g\in \mathrm{GSp}_8(F) \}.
\] There is a filtration \[\mathcal{I} _0(s)\subset\mathcal{I}_1(s)\subset \mathcal{I}_2(s)=\mathcal{I}(s)|_{\mathrm{GSp}_4(E)^\natural } \] of $\mathcal{I}(s)|_{\mathrm{GSp}_4(E)^\natural}$ such that \begin{itemize} \item $\mathcal{I}_0(s)\cong ind_{\mathrm{GSp}_4(F) }^{\mathrm{GSp}_4(E)^\natural }\mathbb{C},$ \item $\mathcal{I}_1(s)/\mathcal{I}_0(s)\cong ind_{M'N' }^{\mathrm{GSp}_4(E)^\natural }\delta_{P_4}^{\frac{1}{2}+\frac{s}{5}}\delta_{M'N'}^{-\frac{1}{2}}$ and \item $\mathcal{I}_2(s)/\mathcal{I}_1(s)\cong ind_{P^\natural }^{\mathrm{GSp}_4(E)^\natural }\delta_{P^\natural}^{\frac{s+1}{3}}, $ \end{itemize} where $P^\natural$ is the Siegel parabolic subgroup of $\mathrm{GSp}_4(E)^\natural$, $M'\cong\mathrm{GL}_1(E)\times \mathrm{GL}_2(F),$ $N'\cong Mat_{1,1}(E)\oplus Mat_{2,2}(F)$ and $$\delta_{M'N'}(t,g)=|N_{E/F}(t)^4\cdot \lambda_W(g)^{-4}|_F $$ for $(t,g)\in\mathrm{GL}_1(E)\times\mathrm{GL}_2(F)$. Here the group embedding $\mathrm{GSp}_4(F)\hookrightarrow\mathrm{GSp}_4(E)^\natural$ in $\mathcal{I}_0(s)$ is the composition map $Ad_{g^\delta}\circ i'$ where $i':\mathrm{GSp}(W_2)\hookrightarrow\mathrm{GSp}(W_2\otimes_FE)$ is the geometric embedding and \[g^\delta=\begin{pmatrix} \sqrt{d}\\&1 \end{pmatrix}\in\mathrm{GSp}_4(E). \] \begin{defn} An irreducible representation $\tau$ of $\mathrm{GSp}_{4}(E)^\natural$ occurs on the boundary of $\mathcal{I}(s)$ if $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_{i+1}(s)/\mathcal{I}_i(s),\tau)\neq0 \mbox{ for } i=0\mbox{ or }1.$$ \end{defn} In \cite{hengfei2017}, we have proved that the tempered representation $\tau$ does not occur on the boundary of $\mathcal{I}(1/2)$. Then we can verify the Prasad conjecture for $\mathrm{GSp}_4$ when $\tau$ is a tempered representation. After discussing with Dmitry Gourevitch, we realized that \cite[Proposition 4.9]{gurevich2015non} can imply the Prasad conjecture for $\mathrm{GSp}_4$ when the $L$-packet $\Pi_{\phi_\tau }$ is generic.
Thus we will give a slightly different proof of Theorem \ref{maingsp(4)theorem} from the one in \cite{hengfei2017}. \subsection{The distinction problem for $\mathrm{GSp}_4$}\label{subsect:proofof1.1} Let us recall what we have obtained. Let $\tau$ be an irreducible representation of $\mathrm{GSp}_4(E).$ Since $\tau|_{\mathrm{Sp}_4(E)}$ is multiplicity-free due to \cite[Theorem 1.4]{adler2006on}, $\tau|_{\mathrm{GSp}_4(E)^\natural}$ is multiplicity-free. Assume that $\tau=\theta(\pi_1\boxtimes\pi_2)$ participates in the theta correspondence with $\mathrm{GSO}_{2,2}(E)$. Then the see-saw identity implies that \[\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C} )\subset \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\Theta_2(\Sigma),\mathbb{C})\cong \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_2(\mathbf{1}),\Sigma ) \] where $R_2(\mathbf{1} )$ is the image of the big theta lift to $H_4$ of the trivial representation of $\mathrm{GSp}_4(F) $ in $I_{Q_4}^{H_4}(1/2)$ and $\Sigma$ is the irreducible representation of $\mathrm{GO} _{2,2}(E)$ such that $\tau=\theta(\Sigma)$. In fact, if $\pi_1\ncong\pi_2$, then $\Sigma=\mathrm{Ind}_{\mathrm{GSO}_{2,2}(E)}^{\mathrm{GO} _{2,2}(E)}(\pi_1\boxtimes\pi_2)$. If $\pi_1\cong\pi_2$, then there are two extensions to $\mathrm{GO} _{2,2}(E)$ of $\pi_1\boxtimes\pi_2$. The representation $\Sigma$ is the unique extension of $\pi_1\boxtimes\pi_2$ which participates in the theta correspondence with $\mathrm{GSp}_4(E)$, denoted by $(\pi_1\boxtimes\pi_2)^+$. \begin{lem} Assume that $\pi_1\boxtimes\pi_2$ is an irreducible representation of $\mathrm{GSO}_{2,2}(E),$ associated with an irreducible representation $\Sigma^+$ of $\mathrm{GO} _{2,2}(E)$ that has a nonzero theta lift to $\mathrm{GSp}_4(E)$.
Then \[\dim \mathrm{Hom}_{\mathrm{GO} (L)}(\Sigma^+,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GSO}(L)}(\pi_1\boxtimes\pi_2,\mathbb{C} )\] where $\mathrm{GO} (L) \hookrightarrow \mathrm{GO} (L\otimes_F E)=\mathrm{GO} _{2,2}(E)$ and the $4$-dimensional quadratic space $L$ is one of the following options: $Mat_{2,2}(F)$, $D(F)$ and $E\oplus\mathbb{H}_F$. \end{lem} \begin{proof} If $\pi_1\neq\pi_2,$ then it follows from Frobenius Reciprocity. If $\pi_1=\pi_2,$ then we consider the following see-saw diagram \[\xymatrix{ \mathrm{GO} _{2,2}(E)^\natural\ar@{-}[rd]\ar@{-}[d] & \mathrm{GSp}_4(F)\ar@{-}[ld]\\ \mathrm{GO} (L)&\mathrm{GSp}_2(E)^\natural\ar@{-}[u] } \] where $\mathrm{GSp}_2(E)^\natural=\{g\in\mathrm{GSp}_2(E)|\lambda_W(g)\in F^\times \}$. Then we have $$\mathrm{Hom}_{\mathrm{GO} (L)}(\Sigma^-,\mathbb{C})=\mathrm{Hom}_{\mathrm{GO} (L)}(\Sigma^+,\nu)=\mathrm{Hom}_{\mathrm{GSp}_2(E)^\natural}(\Theta_2(\nu),\pi_1 )=0,$$ because the big theta lift $\Theta_2(\nu)$ to $\mathrm{GSp}_4(F)$ is zero by the conservation relation. Hence \[\mathrm{Hom}_{\mathrm{GSO}(L) }(\pi_1\boxtimes\pi_2 ,\mathbb{C})=\mathrm{Hom}_{\mathrm{GO} (L)}(\Sigma^+\oplus\Sigma^-,\mathbb{C} )=\mathrm{Hom}_{\mathrm{GO} (L)}(\Sigma^+,\mathbb{C}). \] This finishes the proof. \end{proof} \begin{lem} Given an irreducible admissible representation $\tau$ of $\mathrm{GSp}_4(E)$, we have \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F) }(\tau^g,\mathbb{C} )= \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau^\vee,\mathbb{C}) \] where $\tau^g(x)=\tau(gxg^{-1})$ for $g\in \mathrm{GSp}_4(E).$ \end{lem} \begin{proof}Note that $\tau^g\cong\tau$ and $\tau^\vee\cong\tau^{g_{-1}},$ where $g_{-1}\in\mathrm{GSp}_4(F)$ with similitude $\lambda_W(g_{-1})=-1$. Then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau^\vee,\mathbb{C}). \] \end{proof} \begin{rem} We have a similar statement for the group $\mathrm{GSO}(V)$.
\end{rem} There is another key input for the $\mathrm{GL}_4$-distinction problems in our proof of Theorem \ref{maingsp(4)theorem}. \begin{thm}\cite[Theorem 5.2]{matringe2009distinction} Given a generic representation $\pi$ of $\mathrm{GL}_n(E)$ with a Langlands parameter $\phi_\pi=\triangle_1\oplus\triangle_2\oplus\cdots\oplus\triangle_t$ with $\triangle_i:WD_E\rightarrow \mathrm{GL}_{n_i}(\mathbb{C})$ irreducible and $\sum_{i=1}^t n_i=n,$ then $\pi$ is $\mathrm{GL}_n(F)$-distinguished if and only if there is a reordering of the $\triangle_i$'s and an integer $r$ between $1$ and $\frac{t}{2}$ such that $\triangle_{i+1}^\sigma=\triangle_i^\vee$ for $i=1,3,\cdots,2r-1 $ and $\triangle_i$ is conjugate-orthogonal for $i>2r.$ \end{thm} \begin{lem}\label{GL:period} Let $\pi$ be a square-integrable representation of $\mathrm{GL}_2(E)$. Then $\pi$ is $\mathrm{GL}_2(F)$-distinguished if and only if $\pi$ is $D^\times(F)$-distinguished. If $\pi=\pi(\chi^{-1},\chi^\sigma)$, then $\pi$ is both $\mathrm{GL}_2(F)$-distinguished and $D^\times(F)$-distinguished. Let $\pi_0=\pi(\chi_1,\chi_2 )$ with $\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}$ be an irreducible smooth representation of $\mathrm{GL}_2(E).$ Then $\pi_0 $ is $\mathrm{GL}_2(F)$-distinguished but not $D^\times(F)$-distinguished. \end{lem} \begin{proof} If $\pi$ is square-integrable, then it follows from \cite[Theorem C]{prasad1992gl(2)}. Let $\pi_0=\pi(\chi_1,\chi_2)$. By Mackey theory, we know that \[\dim\mathrm{Hom}_{D^\times(F) }(\pi_0,\mathbb{C} )=\dim\mathrm{Hom}_{E^\times}(\chi_1\chi_2^\sigma,\mathbb{C} )=\begin{cases} 1&\mbox{ if }\chi_1\chi_2^\sigma=\mathbf{1};\\ 0&\mbox{ otherwise.} \end{cases} \] If $\chi_1\neq\chi_2$ and $\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}$, then $\chi_1\chi_2^\sigma\neq\mathbf{1}$. Thus $\pi_0$ is not $D^\times(F)$-distinguished.
Since the Langlands parameter $\phi_{\pi}=\chi^{-1}\oplus\chi^\sigma$ (resp. $\phi_{\pi_0}$) is conjugate-orthogonal in the sense of \cite[\S3]{gan2011symplectic}, $\pi$ (resp. $\pi_0$) is $\mathrm{GL}_2(F)$-distinguished due to \cite[Theorem 6.2]{gan22arithmeticity} or \cite[Theorem 5.2]{matringe2009distinction}. \end{proof} \begin{lem} \label{arnab} Let $\pi$ be an essentially discrete series representation of $\mathrm{GL}_2(E)$. Let $\Pi=J_{P}(\pi|-|_E,\pi)$ be the nongeneric representation of $\mathrm{GL}_4(E)$. Then the following statements are equivalent: \begin{enumerate}[(i)] \item $\Pi$ is both $\mathrm{GL}_4(F)$-distinguished and $(\mathrm{GL}_4(F),\omega_{E/F})$-distinguished; \item $\Pi^\vee\cong\Pi^\sigma$. \end{enumerate} \end{lem} \begin{proof} See \cite[Theorem 6.5]{arnab2018}. \end{proof} \subsubsection{The Langlands correspondence for $\mathrm{GSp}_4$} In this part, we will recall the Langlands correspondence for $\mathrm{GSp}_4$ which has been set up by Gan-Takeda in \cite{gan2011locallanglands}. Let $\Pi(\mathrm{GSp}_4)$ be the set of (equivalence classes of) irreducible smooth representations of $\mathrm{GSp}_4(F)$. Let $\mathrm{Hom}(WD_F,\mathrm{GSp}_4(\mathbb{C}))$ be the set of (equivalence classes of) admissible homomorphisms \[WD_F\longrightarrow \mathrm{GSp}_4(\mathbb{C}). \] \begin{thm}[Gan-Takeda] There is a surjective finite-to-one map \[L:\Pi(\mathrm{GSp}_4)\longrightarrow \mathrm{Hom}(WD_F,\mathrm{GSp}_4(\mathbb{C})) \] with the following properties: \begin{enumerate}[(i)] \item $\tau$ is an (essentially) discrete series representation of $\mathrm{GSp}_4(F)$ if and only if its $L$-parameter $\phi_\tau=L(\tau) $ does not factor through any proper Levi subgroup of $\mathrm{GSp}_4(\mathbb{C})$.
\item For an $L$-parameter $\phi\in \mathrm{Hom}(WD_F,\mathrm{GSp}_4(\mathbb{C}))$, its fiber $\Pi_\phi $ can be naturally parametrized by the set of irreducible characters of the component group \[\pi_0(Z(Im(\phi))/Z_{\mathrm{GSp}_4(\mathbb{C})} ). \] This component group is either trivial or equal to $\mathbb{Z}/2\mathbb{Z}$. When it is $\mathbb{Z}/2\mathbb{Z}$, exactly one of the two representations in $\Pi_\phi$ is generic and it is the one indexed by the trivial character of $\pi_0(Z(Im(\phi))/Z_{\mathrm{GSp}_4(\mathbb{C})} )$. \item The similitude character $\mathrm{sim}(\phi_\tau)$ of $\phi_\tau $ is equal to the central character $\omega_\tau$ of $\tau$. Here $\mathrm{sim}:\mathrm{GSp}_4(\mathbb{C})\longrightarrow \mathbb{C}^\times$ is the similitude character of $\mathrm{GSp}_4(\mathbb{C})$. \item The $L$-parameter of $\tau\otimes(\chi\circ\lambda_W)$ is equal to $\phi_{\tau}\otimes\chi$. Here $\lambda_W:\mathrm{GSp}_4(F)\longrightarrow F^\times$ is the similitude character of $\mathrm{GSp}_4(F)$, and we have regarded $\chi$ as both a character of $F^\times$ and a character of $W_F$ by local class field theory. \end{enumerate} \end{thm} We repeat the statements of Theorem 1.1 below. \begin{thm}\label{localgspperiod} Assume that $\tau$ is an irreducible representation of $\mathrm{GSp}_4(E)$ with a central character $\omega_\tau$ satisfying $\omega_\tau|_{F^\times}=\mathbf{1}.$ \begin{enumerate}[(i)] \item If $\tau=\theta(\Sigma)$ is an irreducible representation of $\mathrm{GSp}_4(E),$ where $\Sigma$ is an irreducible representation of $\mathrm{GO} _{4,0}(E)$, then $\tau$ is not $\mathrm{GSp}_4(F)$-distinguished.
\item Suppose that $\Sigma=(\pi_1\boxtimes\pi_1)^+$ is an irreducible representation of $\mathrm{GO} _{2,2}(E)$ and $\Sigma=\mathrm{Ind}_{\mathrm{GSO}_{2,2}(E)}^{\mathrm{GO} _{2,2}(E)}(\pi_1\boxtimes\pi_2)$ if $\pi_1\neq\pi_2.$ If $\tau=\theta(\Sigma)$ is generic, then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\begin{cases} 2,&\mbox{ if }\pi_i\ncong\pi_0 \mbox{ are both }\mathrm{GL}_2(F)\mbox{-distinguished};\\ 1,&\mbox{ if }\pi_1\ncong\pi_2\mbox{ but }\pi_1^\sigma\cong\pi_2^\vee;\\ 1,&\mbox{ if }\pi_1\cong\pi_2 \mbox{ is }\mathrm{GL}_2(F)\mbox{-distinguished but not }(\mathrm{GL}_2(F),\omega_{E/F})\mbox{-distinguished};\\ 1,&\mbox{ if }\pi_2 \mbox{ is }\mathrm{GL}_2(F)\mbox{-distinguished and }\pi_1\cong\pi_0; \\ 0,&\mbox{the other cases.} \end{cases}\] Here $\pi_0=\pi(\chi_1,\chi_2)$ with $\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}.$ \item Assume that $\tau$ is not in case (i) or (ii), so that $\tau=\theta(\Pi\boxtimes\chi),$ where $\Pi\boxtimes\chi$ is a representation of $\mathrm{GSO}_{3,3}(E)$. If $\tau$ is generic, then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F) }(\tau,\mathbb{C})=\begin{cases} 1,&\mbox{ if }\Pi\mbox{ is }\mathrm{GL}_4(F)\mbox{-distinguished;}\\ 0,&\mbox{ otherwise.} \end{cases} \] \end{enumerate} \end{thm} \begin{proof} \begin{enumerate}[(i)] \item If $\Sigma$ is a representation of $\mathrm{GSO}_{4,0}(E)$, then $\tau=\theta(\Sigma)=\Theta(\Sigma)$ and $$\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\Theta(\Sigma),\mathbb{C})\cong \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural }(\Theta_{W,D',\psi}(\mathbf{1}),\Sigma^+),$$ where $D'=\mbox{Res}_{E/F}D_E=D(F)\oplus\mathbb{H}^2 $ is the 8-dimensional quadratic vector space over $F$ with determinant $1$ and Hasse invariant $-1$ due to Lemma \ref{quadraticrestriction} and $\Theta_{W,D',\psi}(\mathbf{1})$ is the big theta lift to $\mathrm{GO} (D')$ of the trivial representation $\mathbf{1}$.
Note that the first occurrence of the trivial representation is $\dim_F W=4$ in the Witt tower $D\oplus\mathbb{H}^{r}$, which is bigger than $2$. Thus $\Theta_{W,D',\psi}(\mathbf{1})=0.$ Hence, $$\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\Theta(\Sigma),\mathbb{C})=0$$ and so $\tau=\theta(\Sigma)$ is not $\mathrm{GSp}_4(F)$-distinguished. \item By Proposition \ref{degenerateseries}, there is an exact sequence of $H_4$-representations \begin{equation}\label{I(1/2)} \xymatrix{0\ar[r]&R_2(\mathbf{1})\ar[r]&I_{Q_4}^{H_4}(1/2)\ar[r]&\nu\otimes R_1(\mathbf{1})\ar[r]&0 } \end{equation} where $R_i(\mathbf{1} )$ is the big theta lift to $H_4$ of the trivial representation $\mathbf{1}$ of $\mathrm{GSp}_{2i}(F) $. We apply the left exact contravariant functor $\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(-,\Sigma)$ to \eqref{I(1/2)} and get an exact sequence \begin{equation}\label{injection} 0\rightarrow \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_1(\mathbf{1})\otimes\nu,\Sigma)\rightarrow \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma)\rightarrow \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_2(\mathbf{1}),\Sigma). \end{equation} Consider the following double see-saw diagrams $$ \xymatrix{\mathrm{GSp}_4(E)^\natural\ar@{-}[d]\ar@{-}[rd]&H_4\ar@{-}[d]\ar@{-}[ld]\ar@{-}[rd]&\mathrm{GSp}_2(E)^\natural\ar@{-}[ld]\ar@{-}[d] \\\mathrm{GSp}_4(F)&\mathrm{GO} _{2,2}(E)^\natural&\mathrm{GL}_2(F). }$$ Note that $\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_2(\mathbf{1}),\Sigma)\cong\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})$.
Since $\mathrm{GO} _{2,2}(E)^\natural$ is a subgroup of $\mathrm{GSO}_{4,4}(F),$ one has \begin{equation*}\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_1(\mathbf{1})\otimes\nu,\Sigma)= \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_1(\mathbf{1}),\Sigma)\cong \mathrm{Hom}_{\mathrm{GSp}_2(F)}(\Theta_1(\Sigma),\mathbb{C}) .\end{equation*} Here $\Theta_1(\Sigma)$ is the big theta lift to $\mathrm{GSp}_2(E)$ of $\Sigma$ and it is zero unless $\pi_1=\pi_2.$ Then \begin{equation}\label{exact:inequ} \dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})+\dim\mathrm{Hom}_{\mathrm{GSp}_2(F)}(\Theta_2(\Sigma),\mathbb{C})\geq\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma) . \end{equation} Observe that $\mathrm{GO} _{2,2}(E)^\natural$ is the fixed point set of an involution on $H_4$, which is given by the scalar matrix $$h=\sqrt{d}\in \mathrm{GO} _{2,2}(E)^\natural\subset H_4$$ acting on $H_4$ by conjugation. Due to \cite[Theorem 2.5]{olafsson1987fourier}, there exists a polynomial $f$ on $H_4$ such that the complement of the open orbits in the double coset $Q_4\backslash H_4/\mathrm{GO} _{2,2}(E)^\natural$ is the zero set of $f$. Thanks to \cite[Proposition 4.9]{dima2018analytic}, the multiplicity $\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma) $ is at least $\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_0(1/2),\Sigma)$ where the submodule $I_0$ corresponds to the open orbits.
More precisely, $I_0(1/2)\cong ind_{\mathrm{GO} _{4,0}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{2,2}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{3,1}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}$ and \begin{equation}\label{openorbitindentity} \dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma)\geq \dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(ind_{\mathrm{GO} _{4,0}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{2,2}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{3,1}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C},\Sigma ) .\end{equation} Together with \eqref{exact:inequ}, the sum \begin{equation} \begin{split}\label{key:inequality} &\dim\mathrm{Hom}_{\mathrm{GSp}_{4}(F)}(\tau,\mathbb{C})+\dim\mathrm{Hom}_{\mathrm{GSp}_2(F)}(\Theta_2(\Sigma),\mathbb{C})\\ \geq&\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma)\\ \geq&\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(ind_{\mathrm{GO} _{4,0}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{2,2}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}\oplus ind_{\mathrm{GO} _{3,1}(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C},\Sigma ) \\ =&\dim\mathrm{Hom}_{\mathrm{GO} _{4,0}(F)}(\Sigma,\mathbb{C})+ \dim\mathrm{Hom} _{\mathrm{GO} _{2,2}(F)}(\Sigma,\mathbb{C})+\dim\mathrm{Hom}_{\mathrm{GO} _{3,1}(F)}(\Sigma,\mathbb{C}). \end{split} \end{equation} Let us turn the table around. There is an exact sequence of $\mathrm{GSp}_{8}(F)$-representations \[\xymatrix{0\ar[r]&R^{3,3}(\mathbf{1})\ar[r]&\mathcal{I}(1/2)\ar[r]&R^{4,0}(\mathbf{1})\ar[r]&0 } \] where $\mathcal{I}(s)$ is the degenerate principal series of $\mathrm{GSp}_8(F)$ and $R^{m,n}(\mathbf{1})$ is the big theta lift to $\mathrm{GSp}_8(F)$ of the trivial representation $\mathbf{1}$ of $\mathrm{GO} _{m,n}(F)$.
There is only one open orbit in the double coset decomposition $P_4\backslash\mathrm{GSp}_8(F)/\mathrm{GSp}_4(E)^\natural$. In a similar way, thanks to \cite[Theorem 2.5]{olafsson1987fourier} and \cite[Proposition 4.9]{dima2018analytic}, \begin{eqnarray} \begin{split}\label{reverse:ineq} \dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=&\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_0(1/2),\tau)\\ \leq&\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}(1/2),\tau )\\ \leq&\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural }(R^{3,3}(\mathbf{1}),\tau )+\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(R^{4,0}(\mathbf{1}),\tau)\\ =&\dim\mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}(\Theta_{6}^+(\tau),\mathbb{C})+\dim\mathrm{Hom}_{\mathrm{GO} _{4,0}(F)}(\Theta^+_4(\tau),\mathbb{C}) . \end{split} \end{eqnarray} Now we separate them into two cases: $\pi_1\ncong\pi_2$ and $\pi_1\cong\pi_2$. \begin{enumerate}[(A).] \item If $\pi_1\ncong\pi_2,$ then the theta lift to $\mathrm{GSp}_2(E)$ of $\Sigma$ is zero, $$\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(R_1(\mathbf{1})\otimes\nu,\Sigma )=\mathrm{Hom}_{\mathrm{GSp}_2(F)}(\Theta_1(\Sigma),\mathbb{C})=0$$ and $\Sigma=\mathrm{Ind}_{\mathrm{GSO}(2,2)(E)}^{\mathrm{GO} (2,2)(E)}(\pi_1\boxtimes\pi_2).$ There are several subcases: \begin{enumerate}[label=(A\arabic*)] \item If $\pi_i(i=1,2)$ are both $D^\times(F)$-distinguished, which implies that $\phi_{\pi_i}$ are conjugate-orthogonal and so that $\pi_i$ are both $\mathrm{GL}_2(F)$-distinguished, then $\pi_1^\vee\not\cong\pi_2^\sigma.$ Otherwise, $\pi_1^\sigma\cong\pi_1^\vee\cong\pi_2^\sigma,$ which contradicts the assumption $\pi_1\ncong\pi_2.$ Then the inequality \eqref{key:inequality} can be rewritten as \begin{eqnarray}\label{lowbound2} \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})\geq \dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma) \geq 2. 
\end{eqnarray} Flicker \cite{flicker1991ondist} proved that $(\mathrm{GL}_n(E),\mathrm{GL}_n(F))$ is a Gelfand pair, which implies that $$1\geq \dim\mathrm{Hom}_{\mathrm{GSO}_{3,3}(F)}(\Theta_6^+(\tau),\mathbb{C})=\dim\mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}(\Theta^+_6(\tau),\mathbb{C}).$$ Thus \begin{equation}\label{upperbound2} \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})\leq1+1 \end{equation} due to the upper bound \eqref{reverse:ineq}. Then \eqref{lowbound2} and \eqref{upperbound2} imply $$\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=2.$$ \item If $\pi_1=\pi(\chi_1,\chi_2),\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}$ and $\pi_2$ is $\mathrm{GL}_2(F)$-distinguished, then both $\phi_{\pi_1}$ and $\phi_{\pi_2}$ are conjugate-orthogonal, $\pi_1^\vee\ncong\pi_2^\sigma$ and $$\mathrm{Hom}_{\mathrm{GO} _{4,0}(F)}(\Sigma,\mathbb{C})=0=\mathrm{Hom}_{\mathrm{GO} _{3,1}(F)}(\Sigma,\mathbb{C}).$$ Moreover, $\mathrm{Hom}_{\mathrm{GSO}_{3,3}(F)}(\Theta_6^{+}(\tau),\mathbb{C})\neq0.$ Since $$\dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma)\geq\dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(F)}(\Sigma,\mathbb{C})+0=1, $$ the desired equality $\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=1$ follows from \eqref{key:inequality} and \eqref{reverse:ineq}. \item If $\pi_1^\sigma\cong\pi_2^\vee,$ then $$\dim \mathrm{Hom}_{\mathrm{GO} _{3,1}(F)}(\Sigma,\mathbb{C})=1.$$ By the previous arguments, we know that $\mathrm{Hom}_{\mathrm{GO} _{2,2}(F) }(\Sigma,\mathbb{C})=0$ in this case. Therefore $$\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C} )=1.$$ \end{enumerate} In other cases, if $\pi_1^\sigma\ncong \pi_2^\vee$ and either $\phi_{\pi_1}$ or $\phi_{\pi_2}$ is not conjugate-orthogonal, then $$\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=0. $$ If not, then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{ \mathrm{GSO}_{3,3}(F)}(\Theta_6^+(\tau),\mathbb{C})=1.
\] Set $\Pi\boxtimes\chi=\Theta_6^+(\tau)|_{\mathrm{GSO}_{3,3}(E)}$ as a representation of $\mathrm{GSO}_{3,3}(E)$, which is irreducible due to Proposition \ref{big:GSp4GO6}. Then $\Pi$ is $\mathrm{GL}_4(F)$-distinguished and so $\phi_\Pi$ is conjugate-orthogonal. \par We consider the following cases: \begin{itemize} \item If $\phi_{\pi_1}$ is conjugate-orthogonal, then $\phi_{\pi_2}$ is conjugate-orthogonal by \cite[Theorem 5.2]{matringe2009distinction}. \item If $\phi_{\pi_1}$ is irreducible, then $\phi_{\pi_1}$ is conjugate-orthogonal, which will imply that $\phi_{\pi_2}$ is conjugate-orthogonal as well. \item Now suppose that both $\phi_{\pi_1} $ and $\phi_{\pi_2}$ are reducible and that neither $\phi_{\pi_1}$ nor $\phi_{\pi_2}$ is conjugate-orthogonal. Assume that $\phi_{\pi_i}=\chi_{i1}+\chi_{i2}~ (i=1,2)$. Then $$\phi_\Pi=\chi_{11}+\chi_{12}+\chi_{21}+\chi_{22},~\chi_{11}\chi_{12}=\chi_{21}\chi_{22}:E^\times/F^\times\rightarrow\mathbb{C}^\times.$$ Thanks to \cite[Theorem 5.2]{matringe2009distinction}, $\chi_{11}\chi_{21}^\sigma=\mathbf{1}$ and $\chi_{12}\neq\chi_{22} $ but $\chi_{12}|_{F^\times}=\mathbf{1}=\chi_{22}|_{F^\times}.$ Moreover, $\chi_{21}\chi_{22}\cdot(\chi_{21}\chi_{22})^\sigma=\mathbf{1}$ implies \[\chi_{21}^\sigma\chi_{21}=\mathbf{1}. \] Similarly $\chi_{11}^\sigma\chi_{11}=\mathbf{1}$. Thus, $\chi_{21}^\sigma=\chi_{21}^{-1}$ and $\chi_{11}=\chi_{21}.$ This implies that $\chi_{12}=\chi_{22}$ which contradicts the condition $\chi_{12}\neq\chi_{22}.$ \end{itemize} Hence the Langlands parameter $\phi_{\Pi}$ cannot be conjugate-orthogonal. Thus $\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C} )=0$ if $\pi_1^\sigma\ncong\pi_2^\vee$ and either $\phi_{\pi_1}$ or $\phi_{\pi_2}$ is not conjugate-orthogonal.
\item If $\pi_1=\pi_2$, then $\Theta_1(\Sigma)=\pi_1,$ which is obvious except for $\pi_1=\pi(\chi,\chi).$ The exact sequence \eqref{injection} implies the following inequality \begin{equation}\label{lowerinequality} \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C} )\geq \dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2),\Sigma)-\dim \mathrm{Hom}_{\mathrm{GSp}_2(F)}(\pi_1,\mathbb{C}). \end{equation} We separate them into the following cases: \begin{enumerate}[label=(B\arabic*)] \item If $\pi_1$ is $D^\times(F)$-distinguished, then $\dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_0(1/2),\Sigma)=3. $ Again, we consider the upper bound \eqref{reverse:ineq} and the lower bound \eqref{lowerinequality} to obtain the equality $$\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=2.$$ \item If $\pi_1\cong\pi_0=\pi(\chi_1,\chi_2)$ with $\chi_1\neq\chi_2$ and $\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1},$ then $$\dim \mathrm{Hom}_{\mathrm{GO} _{4,0}(F)}(\Sigma,\mathbb{C})=0.$$ In a similar way, we can get $\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=1.$ \item If $\pi_1$ is not $\mathrm{GL}_2(F)$-distinguished but $(\mathrm{GL}_2(F),\omega_{E/F})$-distinguished, then $$\mathrm{Hom}_{\mathrm{GSp}_2(F)}(\pi_1,\mathbb{C})=0 \mbox{ and } \mathrm{Hom}_{\mathrm{GO} _{3,1}(F)}(\Sigma,\mathbb{C})\neq0, $$ which implies that $\dim \mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I_{Q_4}^{H_4}(1/2 ),\Sigma)\geq1=\dim \mathrm{Hom}_{\mathrm{GSO}_{3,3}(F)}(\Theta_6^{+}(\tau),\mathbb{C}). $ Therefore we can deduce that $\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=1.$ \end{enumerate} If $\pi_1=\pi(\chi,\chi),$ then there is an exact sequence $$\xymatrix{\pi_1\ar[r]&\Theta_2(\pi_1\boxtimes\pi_1)\ar[r]&\pi_1\ar[r]& 0}$$ of $\mathrm{GL}_2(E)$-representations, where we cannot deduce $\Theta_2(\pi_1\boxtimes\pi_1)$ directly. Let $N=\big\{\begin{pmatrix} 1&n\\0&1 \end{pmatrix}|n\in F \big\}$ be the subgroup of $\mathrm{GSp}_2(F)$.
Let $\psi_N$ be a nontrivial character of $N$. Now we consider the Whittaker model of $\Theta_1(\pi_1\boxtimes\pi_1)$, \[\dim \mathrm{Hom}_N(\Theta_1(\pi_1\boxtimes\pi_1 ),\psi_N )=\dim\mathrm{Hom}_{\mathrm{PGL}_2(E)}(\pi_1\boxtimes\pi_1,\mathbb{C} )\leq1 \] which implies that $\Theta_1(\Sigma)=\pi_1.$ \end{enumerate} \item If $\tau$ is not in case (i) or (ii), then the first occurrence index of $\tau$ of $\mathrm{GSp}_4(E)$ in the Witt tower $\mathbb{H}_E^r$ is $3$. Observe that $\Theta_6^+(\tau)|_{\mathrm{GSO}_{3,3}(E)}$ is irreducible unless $\tau=\mathrm{Ind}_{Q(Z)}^{\mathrm{GSp}_4(E)}(\chi,\pi)$ with $\chi=|-|_E$. \par Suppose that $\tau\neq\mathrm{Ind}_{Q(Z)}^{\mathrm{GSp}_4(E)}(|-|_E,\pi)$. Consider the double see-saw diagrams \[\xymatrix{\mathrm{GO} _{2,2}(E)^\natural\ar@{-}[d]\ar@{-}[rd]&\mathrm{GSp}_8(F)\ar@{-}[d]\ar@{-}[rd]& \mathrm{GO} _{3,3}(E)^\natural\ar@{-}[d]\ar@{-}[ld]\\ \mathrm{GO} _{4,0}(F)\ar@{-}[ru] &\mathrm{GSp}_4(E)^\natural& \mathrm{GO} _{3,3}(F). } \] By \cite[Page 211]{Kudla1992} and Proposition \ref{degenerateseries}, there are two exact sequences of $\mathrm{GSp}_8(F)$-modules \[\xymatrix{0\ar[r]&R^{3,3}(\mathbf{1})\ar[r]&\mathcal{I}(1/2)\ar[r]&R^{4,0}(\mathbf{1})\ar[r]&0 } \] and \[\xymatrix{0\ar[r]&R^{4,0}(\mathbf{1})\oplus R^{2,2}(\mathbf{1})\ar[r]&\mathcal{I}(-1/2)\ar[r]&R^{5,1}(\mathbf{1})\cap R^{3,3}(\mathbf{1})\ar[r]&0 } \] where $\mathcal{I}(s)$ is the degenerate principal series of $\mathrm{GSp}_8(F)$ and $R^{m,n}(\mathbf{1})$ is the big theta lift to $\mathrm{GSp}_8(F)$ of $\mathbf{1}$ of $\mathrm{GO} _{m,n}(F)$. Assume that $\tau$ is generic and its theta lift to $\mathrm{GO} _{2,2}(E)$ is zero.
Then $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(R^{4,0}(\mathbf{1}),\tau)=\mathrm{Hom}_{\mathrm{GO} _{4,0}(F)}(\Theta_4^+(\tau),\mathbb{C})=0,$$ so that $$\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}(-1/2),\tau)=\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural }(R^{5,1}(\mathbf{1})\cap R^{3,3}(\mathbf{1}),\tau ).$$ Thus \begin{equation}\label{openorbitforGSp4} \begin{split} \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau^\vee,\mathbb{C}) &=\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_0(1/2),\tau )\\ &\leq \dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}(1/2),\tau)\\ &\leq \dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(R^{3,3}(\mathbf{1}),\tau)\\ &=\dim \mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}(\Theta_6^+(\tau),\mathbb{C} )\\ &=\dim \mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}((\Pi\boxtimes\chi)^+,\mathbb{C}) \end{split} \end{equation} where $(\Pi\boxtimes\chi)^\pm$ are two extensions to $\mathrm{GO} _{3,3}(E)$ of $\Pi\boxtimes\chi$. On the other hand, one has \[\mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}((\Pi\boxtimes\chi)^-,\mathbb{C})=\mathrm{Hom}_{\mathrm{GO} _{3,3}(F)}(\Theta_6^+(\tau)\otimes\nu,\mathbb{C})\cong \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\Theta(\nu),\tau)=0. \] Then we have an inequality $$ \dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C}) \leq \dim \mathrm{Hom}_{\mathrm{GSO}_{3,3}(F)}(\Pi\boxtimes\chi,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GL}_4(F)}(\Pi,\mathbb{C}).$$ Now we want to obtain the reverse inequality. Note that $$\xymatrix{ 1\ar[r]& R^{5,1}(\mathbf{1})\cap R^{3,3}(\mathbf{1})\ar[r]& R^{3,3}(\mathbf{1})\ar[r]& R^{2,2}(\mathbf{1})\ar[r]& 1 }$$ is exact due to \cite[Proposition 7.2]{gan2014formal}. 
There is an injection \begin{equation}\label{reverseinequality} \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(R^{3,3}(\mathbf{1}),\tau)\hookrightarrow \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(R^{5,1}(\mathbf{1})\cap R^{3,3}(\mathbf{1}),\tau )=\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}(-1/2),\tau) \end{equation} since the theta lifts to $\mathrm{GO} _{2,2}(E)$ and $\mathrm{GO} _{4,0}(E)$ of $\tau$ are both zero by the assumption. We will show that $\tau$ does not occur on the boundary of $\mathcal{I}(-1/2)$ under the assumptions. If $\tau$ is non-discrete, then $\tau=J_{Q(Z)}(\chi,\pi),\chi\neq\mathbf{1},$ due to \cite[Table 1]{gan2011theta}. Note that $$\mathcal{I}_1(s)/\mathcal{I}_0(s)=ind_{(E^\times \times \mathrm{GSp}_2(F))N'}^{\mathrm{GSp}_4(E)^\natural}\chi' $$ where $N'\cong E\oplus Mat_{2,2}(F)$ and $\chi'(t,g)=|N_{E/F}(t)^{s+\frac{1}{2} }\cdot\lambda(g)^{-2s-3}|_F$. Set $$P'=(\mathrm{GL}_1(E)\times \mathrm{GSp}_2(E)^\natural )\cdot N'.$$ Thanks to the second adjoint theorem due to Bernstein, we have \[ \mathrm{Hom}(\mathcal{I}_1(-1/2)/\mathcal{I}_0(-1/2),\tau )=\mathrm{Hom}_{E^\times\times \mathrm{Sp}_2(E)\times F^\times }(\mathbf{1}\otimes ind_{\mathrm{Sp}_2(F)}^{\mathrm{Sp}_2(E)}\mathbb{C}\otimes|-|_F^{-2},R_{\bar{P'}}(J_{Q(Z)}(\chi,\pi)) ) =0, \] because $R_{\bar{P'}}(J(\chi,\pi))=\chi\otimes\pi+\chi^{-1}\otimes\pi\chi$ and $\chi\neq\mathbf{1}.$ Moreover, the cuspidal supports of $J_{Q(Z)}(\chi,\pi)$ and $\mathcal{I}_2(-1/2)/\mathcal{I}_1(-1/2)$ are disjoint. Therefore $\tau=J_{Q(Z)}(\chi,\pi)$ does not occur on the boundary of $\mathcal{I}(-1/2)$ and so \[\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}(-1/2),\tau)\leq\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_0(-1/2),\tau)=\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C}). \] Note that if $\tau$ is a discrete series representation, then we have $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_{i+1}(-1/2)/\mathcal{I}_i(-1/2),\tau )=0$$ for $i=0,1$. 
If not, then we will get a contradiction. Suppose that $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_1(-1/2)/\mathcal{I}_0(-1/2),\tau)\neq0. $$ Then $\mathrm{Hom}_{\mathrm{GL}_1(E)}(\mathbf{1},R_{\bar{P'}}(\tau))\neq0 $ which contradicts Casselman's criterion \cite{casselman82duke} for the discrete series representation that \[\mathrm{Hom}_{\mathrm{GL}_1(E)}(|-|_E^{s},R_{\bar{P'}}(\tau))\neq0 \] implies $s<0$. Similarly, \[\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_2(-1/2)/\mathcal{I}_1(-1/2),\tau )=\mathrm{Hom}_{\mathrm{GL}_2(E)\times F^\times}(\delta_{P^\natural}^{\frac{1}{6}},R_{\bar{P}^\natural}(\tau) )=0 \] and so \begin{equation}\label{-halfopen}\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural }(\mathcal{I}(-1/2),\tau )\leq \dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural }(\mathcal{I}_0(-1/2),\tau ). \end{equation} Therefore one can combine \eqref{openorbitforGSp4},\eqref{reverseinequality} and \eqref{-halfopen} to obtain that \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathcal{I}_0(-1/2),\tau)=\dim \mathrm{Hom}_{\mathrm{GSO}_{3,3}(F)}(\Theta_6^+(\tau),\mathbb{C}) . \] The right hand side is $1$ if and only if $\Pi$ is $\mathrm{GL}_4(F)$-distinguished. \par If $\tau=\mathrm{Ind}_{Q(Z)}^{\mathrm{GSp}_4(E)}(|-|_E,\pi)$ is irreducible, then $\theta_6(\tau)=J_P(\pi|-|_E,\pi)\boxtimes\omega_{\pi}|-|_E$. It suffices to show that $I_P(\pi|-|_E,\pi)$ is $\mathrm{GL}_4(F)$-distinguished if and only if $J_P(\pi|-|_E,\pi)$ is $\mathrm{GL}_4(F)$-distinguished. It follows from Lemma \ref{arnab}. \end{enumerate} Then we have finished the proof. \end{proof} \begin{rem} In fact, we can show that if $\tau=\theta(\pi_1\boxtimes\pi_2)$ with $\pi_1^\vee\cong\pi_2^\sigma$ is generic, then $\phi_\Pi=\phi_{\pi_1}\oplus\phi_{\pi_2}$ is not only conjugate-orthogonal but also conjugate-symplectic. 
Keeping this fact in mind will be helpful when we verify the Prasad conjecture for $\mathrm{GSp}_4$ in \S\ref{subsect:GSp(4)conj}. \end{rem} \begin{coro} The pair $(\mathrm{GSp}_4(E)^\natural,\mathrm{GSp}_4(F))$ is not a Gelfand pair. \end{coro} For a generic representation $\tau$ of $\mathrm{GSp}_4(E)$ with $\omega_\tau|_{F^\times}=\chi_F^2,$ we may consider the multiplicity $$\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\chi_F ) $$ which is equal to $\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau\otimes\chi_E^{-1},\mathbb{C}) ,$ where $\chi_E|_{F^\times}=\chi_F.$ We will focus on the case $\chi_F=\omega_{E/F}$ when we verify the Prasad conjecture for $\mathrm{GSp}_4$ in \S\ref{subsect:GSp(4)conj}. \section{The $\mathrm{GSp}_{1,1}(F)$-distinguished representations} \label{sect:GSp(1,1)} \subsection{Notation} \begin{itemize} \item $D$ (resp. $D_E$) is a quaternion division algebra over $F$ (resp. $E$) with a standard involution $\ast$. \item $\pi^{D_E}$ is the Jacquet-Langlands lift to $D_E^\times(E)$ of $\pi$ and $\pi^{D_E}\boxtimes\pi^{D_E}$ is a representation of $\mathrm{GSO}_{4,0}(E)$. \item $\mathfrak{W}$ (resp. $\mathfrak{V}$) is a right skew-Hermitian (resp. left Hermitian) $D$-vector space with isometry group $U(\mathfrak{W})$ (resp. $U(\mathfrak{V})$). \item $\mathfrak{U}^\ast$ is the dual $D$-vector space of $\mathfrak{U}$ in $\mathbb{R}es_{R/D}V_R$. \item $\mathfrak{W}\otimes_D\mathfrak{V}$ is a symplectic $F$-vector space. \item $\mathrm{GO} _{3,0}^\ast$ (resp. $\mathrm{GO} _{r,r}^\ast$) is the inner form of $\mathrm{GO} _{3,3}$ (resp. $\mathrm{GO} _{2r,2r}$) defined over $F$. \item $\mathfrak{I}(s)$ (resp. $I(s)$) is the degenerate principal series of $\mathrm{GSp}_{2,2}(F)$ (resp. $\mathrm{GO} _{2,2}^\ast(F)$). \item $\mathrm{GSO}^\ast_{2,0}$ is the inner form of $\mathrm{GSO}_{3,1}$ defined over $F$. 
\item $\mathrm{GO} _{5,1}$ is the pure inner form of $\mathrm{GO} _{3,3}$ defined over $E$ and $\Pi^D\boxtimes\chi$ is a representation of $\mathrm{GSO}_{5,1}(E)$. \item $B_1$ is the minimal parabolic subgroup of $\mathrm{GL}_2(D_E)(E)$. \item $\mathrm{GSp}_{1,0}=D^\times$ (resp. $\mathrm{Sp}_{1,0}$) is the inner form of $\mathrm{GL}_2$ (resp. $\mathrm{SL}_2$). \item $P(Y_D)$ (resp. $\mathfrak{Q}$) is the Siegel parabolic subgroup of $\mathrm{GU}(\mathfrak{V})$ (resp. $\mathrm{GO} _{2,2}^\ast(F)$). \item $\mathfrak{R}^3(\mathbf{1})$ (resp. $\mathfrak{R}^2(\mathbf{1})$) is the big theta lift to $\mathrm{GSp}_{2,2}(F)$ of the trivial representation of $\mathrm{GO} _{3,0}^\ast(F)$ (resp. $\mathrm{GO} _{1,1}^\ast(F)$) and $\mathfrak{R}^{1,j}(\mathbf{1})$ is the big theta lift to $\mathrm{GO} _{2,2}^\ast(F)$ from $\mathrm{GSp}_{1,j}(F)$. \item $\theta_2^-(\tau)$ (resp. $\Theta_{2}^-(\tau)$) is the small (resp. big) theta lift to $\mathrm{GO} _{5,1}(E)$ of $\tau$ of $\mathrm{GSp}_4(E)$. \item $\Theta_{\mathfrak{W},\mathfrak{V},\psi}(\pi)$ is the big theta lift to $\mathrm{GU}(\mathfrak{V})$ of $\pi$ of $\mathrm{GU}(\mathfrak{W})$. \item $\gamma_F$ is the Weil index and $\gamma_F(\psi\circ q)\in\mu_8$ for the character of second degree $x\mapsto \psi(q(x,x))$, where $q$ is a non-degenerate symmetric $F$-bilinear form. \end{itemize} \subsection{Theta lifts for quaternionic unitary groups} In order to study the $\mathrm{GSp}_{1,1}$-distinction problems, we need to introduce the local theta lift for quaternionic unitary groups, following \cite{gan2014inner,gurevich2015non,yamana2011deg}. \subsubsection{Morita equivalence} Let $R=Mat_{2,2}(E)$ be the split quaternion algebra over $E.$ Any left Hermitian (resp. right skew-Hermitian) free $R$-module $(W_R,h_R)$ corresponds to a symplectic (resp. orthogonal) space $(W_E,h_E)$ over $E$ and \[\dim_E W_E=2\cdot\dim_R W_R , ~Aut(W_R,h_R)=Aut(W_E,h_E).\] (See \cite[\S2.1]{gurevich2015non} for more details.) 
\subsubsection{Dual pairs} Let $D$ be the unique nonsplit quaternion algebra over $F,$ with a standard involution $\ast$. Then $D\otimes_F E\cong R.$ There is a $D$-linear map \[tr_{R/D}:R\longrightarrow D \] such that $tr_{R/D}(d)=2d$ for $d\in D.$ Given a $4$-dimensional symplectic space $(\mathcal{W}_2,h_E)$ over $E,$ corresponding to a $2$-dimensional left Hermitian space $(W_R,h_R),$ we set \[h_D(x,y)=\frac{1}{2}tr_{R/D}(h_R(x,y))\in D\] for all $x,y\in W_R$. Then $h_D$ is a nondegenerate Hermitian form on $\mathfrak{V}=\mathbb{R}es_{R/D}W_R$ and $\dim_D \mathfrak{V}=4.$ Given a left Hermitian space $(\mathfrak{V},h_D)$ and a right skew-Hermitian space $(\mathfrak{W},s_D),$ the tensor product space $\mathfrak{W}\otimes_D \mathfrak{V}$ admits a symplectic form defined by \[<w\otimes v,w'\otimes v'>:=\frac{1}{2}tr_{D/F}((w,w')\cdot(v,v')^\ast) . \] This gives an embedding of $F$-groups \[U(\mathfrak{W})\times U(\mathfrak{V})\longrightarrow \mathrm{Sp}(\mathfrak{W}\otimes_D \mathfrak{V}). \] Then we can define the Weil representation $\omega_\psi$ on $U(\mathfrak{W})\times U(\mathfrak{V}),$ using the complete polarization $\mathfrak{V}=Y_D+Y_D^\ast$ of $\mathfrak{V}$. \begin{thm}\cite[Theorem 1.2]{gan2015howe} The Howe duality conjecture holds for the dual pair $U(\mathfrak{W})\times U(\mathfrak{V})$. \end{thm} We extend it to the similitude group $\mathrm{GU}(\mathfrak{W})\times \mathrm{GU}(\mathfrak{V})$ following Roberts. (See \cite[\S 3]{gan2014inner}.) \subsubsection{The see-saw diagram} Let us fix the polarization $W_R=Y_R+Y_R^\ast$. Then $$\mathfrak{V}=\mbox{Res}_{R/D}W_R=Y_D+Y^\ast_D.$$ Consider the following see-saw diagram \[\xymatrix{\mathrm{GU}(\mathfrak{V})\ar@{-}[rrd]\ar@{-}[d] && \mathrm{GO} _{2,2}(E)^\natural\ar@{-}[lld]\ar@{-}[d]\\ \mathrm{GU}(W_R)^\natural&& \mathrm{GO} ^\ast_{1,1}(F) .} \] Here $\mathrm{GU}(W_R)^\natural=\mathrm{GSp}_4(E)^\natural$. 
\begin{prop}\cite[Theorem 8.2]{gurevich2015non} Let $\tau$ be an irreducible representation of $\mathrm{GSp}(\mathcal{W}_2)\cong \mathrm{GU}(W_R).$ Assume that $\pi$ is an irreducible representation of $\mathrm{GO} _{1,1}^\ast(F)$. Then \[\mathrm{Hom}_{\mathrm{GU}(W_R)^\natural }(\Theta_{\mathfrak{W},\mathfrak{V},\psi}(\pi),\tau )=\mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Theta_4^+(\tau),\pi ). \] \end{prop} Assume that $V_R$ is a skew-Hermitian free module over $R$ of rank $2$, associated to the anisotropic $4$-dimensional quadratic space over $E$ given by $(D_E, N_{D_E})$ such that \[ \mathrm{GU}(V_R)\cong\mathrm{GO} _{4,0}(E). \] Then $\mathbb{R}es_{R/D}V_R$ is a $4$-dimensional skew-Hermitian $D$-vector space with trivial discriminant. There is a natural embedding \[\mathrm{SU}(V_R)\cong\mathrm{SO}_{4,0}(E)\hookrightarrow \mathrm{SO}^\ast_{2,2}(F)=\mathrm{SU}(\mathbb{R}es_{R/D} V_R). \] Given a $1$-dimensional Hermitian vector space $\mathfrak{V}_1$ over $D,$ we consider the theta lift from $\mathrm{GU}(\mathfrak{V}_1)=\mathrm{GSp}_{1,0}(F)$ to $\mathrm{GO} ^\ast_{2,2}(F)$ and the theta lift from $\mathrm{GSO}_{4,0}(E)$ to $\mathrm{GU}(R\otimes_D\mathfrak{V}_1)=\mathrm{GL}_2(E)$. Consider the following see-saw diagram \[\xymatrix{\mathrm{GU}(\mathbb{R}es_{R/D}V)\ar@{-}[d]\ar@{-}[rd] & \mathrm{GL}_2(E)^\natural\ar@{-}[d]\ar@{-}[ld]\\ \mathrm{GSO}_{4,0}(E)^\natural& \mathrm{GSp}_{1,0}(F) } \] which is different from the situation in \cite[Theorem 8.2]{gurevich2015non}, since there does not exist a natural polarization in the symplectic $F$-vector space $\mathbb{V}=(\mathbb{R}es_{R/D}V_R)\otimes_D \mathfrak{V}_1.$ Assume that $\mathbb{V}=\mathbb{X}\oplus\mathbb{Y}$ is a polarization. 
Set the group \[\mathrm{Mp}(\mathbb{V})_{\mathbb{Y}}=\mathrm{Sp}(\mathbb{V})\times\mathbb{C}^\times \] with group law \[(g_1,z_1)(g_2,z_2)=(g_1g_2,z_1z_2\cdot z_{\mathbb{Y}}(g_1,g_2)) \] where $z_{\mathbb{Y}}(g_1,g_2)=\gamma_F(\frac{1}{2}\psi\circ q(\mathbb{Y},g_2^{-1}\mathbb{Y},g_1\mathbb{Y} ))$ is a $2$-cocycle (called Rao cocycle) associated to $\mathbb{Y}$ and $q(\mathbb{Y},g_2^{-1}\mathbb{Y},g_1\mathbb{Y})$ is the Leray invariant. (See \cite[\S I.3]{kudla1996notes}.) Suppose that $\mathbb{V}=\mathbb{X}'\oplus\mathbb{Y}'$ is another polarization of $\mathbb{V}.$ There is an isomorphism \[\mathcal{S}(\mathbb{X})\cong\mathcal{S}(\mathbb{X}'). \] Given $\varphi\in \mathcal{S}(\mathbb{X})$ and $\varphi'\in\mathcal{S}(\mathbb{X}'),$ due to \cite[Lemma 3.3]{ichino2015periods}, we have \[\varphi(x)=\int_{\mathbb{Y}\cap\mathbb{Y}'\backslash\mathbb{Y} }\psi(\frac{1}{2}<x',y'>-\frac{1}{2}<x,y> )\varphi'(x')dy \] where $x'\in\mathbb{X}'$ and $y'\in\mathbb{Y}'$ are given by $x'+y'=x+y\in\mathbb{V}.$ \begin{lem}[Local Siegel-Weil identity] Assume that $\pi$ is an irreducible discrete series representation of $\mathrm{GL}_2(E)$ so that the big theta lift $\Theta(\pi)$ to $\mathrm{GSO}_{4,0}(E)$ is isomorphic to $\pi^{D_E}\boxtimes\pi^{D_E}$, where $\pi^{D_E} $ is the Jacquet-Langlands lift to $D_E^\times(E)$ of $\pi.$ Let $\varrho$ be an irreducible representation of $\mathrm{GSp}_{1,0}(F)$. Then \[\dim \mathrm{Hom}_{\mathrm{GSO}_{4,0}(E)^\natural }(\Theta(\varrho),\pi^{D_E}\boxtimes\pi^{D_E})=\dim \mathrm{Hom}_{\mathrm{GSp}_{1,0}(F)}(\pi,\varrho ) \] where $\Theta(\varrho)$ is the big theta lift to $\mathrm{GO} ^\ast_{2,2}(F)$ of $\varrho$. \end{lem} \begin{proof} It suffices to show that two splittings of $\mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_{1,0}(F)$ in $\mathrm{Mp}(\mathbb{V})$ are compatible. Let us fix two polarizations $\mbox{Res}_{R/D}V_R=\mathfrak{U}\oplus \mathfrak{U}^\ast$ and $R\otimes_D\mathfrak{V}_1= X\oplus Y$. 
Then \[\mathbb{V}=\mathbb{X}\oplus\mathbb{Y}= (\mathfrak{U}\otimes_D \mathfrak{V}_1)\oplus (\mathfrak{U}^\ast\otimes_D \mathfrak{V}_1)\mbox{ and } \mathbb{V}=\mathbb{X}'\oplus\mathbb{Y}'=(D_E\otimes_E X)\oplus (D_E\otimes_E Y). \] Choose a fixed element $h_0\in \mathrm{Sp}(\mathbb{V})$ such that $$\mathbb{X}'=h_0\mathbb{X}\mbox{ and } \mathbb{Y}'=h_0\mathbb{Y}.$$ By \cite[Appendix B.4]{ichino2015periods}, there is an isomorphism $\alpha_0:\mathrm{Mp}(\mathbb{V})_{\mathbb{Y}'}\rightarrow \mathrm{Mp}(\mathbb{V})_{\mathbb{Y}}$ via $$(h,z)\mapsto(\alpha_0(h),z)$$ where $ \alpha_0(h)=h_0^{-1}\cdot h\cdot h_0$ for all $h\in \mathrm{Sp}(\mathbb{V}). $ Moreover, \[z_{\mathbb{Y}'}(h_1,h_2 )=z_{\mathbb{Y}}(\alpha_0(h_1),\alpha_0(h_2) ). \] Now we fix the splitting $i_\mathbb{Y}:\mathrm{O}^\ast_{2,2}(F)\times \mathrm{Sp}_{1,0}(F)\hookrightarrow \mathrm{Mp}(\mathbb{V})_{\mathbb{Y}}$ and \[i_{\mathbb{Y}'}:\mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_2(E)\hookrightarrow \mathrm{Mp}(\mathbb{V})_{\mathbb{Y}'} \] where the splitting $i_\mathbb{Y}(y,z)=((y,z),\beta_\mathbb{Y}(z))$ is defined in \cite[Theorem 3.1]{kudla1994splitting}. We will show that $ i_{\mathbb{Y}}(h)=\alpha_0\circ i_{\mathbb{Y}'}(h)$ for all $h=(y,z)\in \mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_{1,0}(F)$. Consider \[\xymatrix{\mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_{1,0}(F)\ar@{^{(}->}[r]\ar@{=}[d] & \mathrm{O}^\ast_{2,2}(F)\times \mathrm{Sp}_{1,0}(F)\ar[r]^-{i_{\mathbb{Y}}} & \mathrm{Mp}(\mathbb{V})_{\mathbb{Y}} \\ \mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_{1,0}(F)\ar@{^{(}->}[r] & \mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_2(E)\ar[r]^-{i_{\mathbb{Y}'}}& \mathrm{Mp}(\mathbb{V})_{\mathbb{Y}'}.\ar[u]^-{\alpha_0} } \] Set $i_\mathbb{Y}(h)=(h,\beta_\mathbb{Y}(h))$. Then $\beta_\mathbb{Y}(z)=1$ for all $z\in \mathrm{Sp}_{1,0}(F)$. 
Similarly, we have $$\beta_{\mathbb{Y}'}(y)=1$$ for all $y\in \mathrm{SO}_{4,0}(E).$ In order to show that \[\beta_\mathbb{Y}(h)=\beta_{\mathbb{Y}'}(h)\] for all $h=(y,z)\in \mathrm{SO}_{4,0}(E)\times \mathrm{Sp}_{1,0}(F)$, we will show that $\beta_{\mathbb{Y}}(y)=1=\beta_{\mathbb{Y}'}(z).$ \begin{itemize} \item If $y\in \mathrm{SO}_{4,0}(E)\subset \mathrm{O}^\ast_{2,2}(F)=\bigsqcup_{i=0}^2 \mathfrak{P}\omega_i\mathfrak{P},$ say $y\in \mathfrak{P}\omega_{i}\mathfrak{P},$ where $\mathfrak{P}$ is the Siegel parabolic subgroup of $\mathrm{O}^\ast_{2,2}(F),\omega_0=\mathbf{1}_4$ (the identity matrix in $\mathrm{O}_{2,2}^\ast(F)$),$$\omega_1=\begin{pmatrix} &&1\\&1\\1&\\&&&1 \end{pmatrix}\mbox{ and }\omega_2=\begin{pmatrix} &&1\\&&&1\\1\\&1 \end{pmatrix},$$ then $\beta_{\mathbb{Y}}(y)=(-1)^i.$ Since $\omega_1$ switches a pair of vectors $e_1$ and $e_1'$ in a basis $\{e_1,e_2,e_1',e_2' \}$, which corresponds to an element $h\in \mathrm{O}_{4,0}(E)$ with determinant $-1,$ where $\mathfrak{P}$ stabilizes the maximal isotropic subspace $\{e_1,e_2 \},$ it follows that \[\mathrm{SO}_{4,0}(E)\cap \mathfrak{P}\omega_1\mathfrak{P}=\emptyset,\] i.e., $\beta_\mathbb{Y}(y)=1.$ \item If $z\in \mathrm{Sp}_{1,0}(F)$ and so $z=g\in \mathrm{SL}_2(E),$ then $\beta_{\mathbb{Y}'}(z)=\gamma_F(x(g),\frac{1}{2}\psi )^4\cdot\gamma_F(\frac{1}{2}\psi\circ N_{D_E} )^4=1$, where \[x(g)=\begin{cases} N_{E/F}(a_{21})\pmod{{F^\times}^2},&\mbox{ if }g=\begin{pmatrix} a_{11}&a_{12}\\a_{21}&a_{22} \end{pmatrix}\mbox{ with }a_{21}\neq0;\\ N_{E/F}(a_{22})\pmod{{F^\times}^2} ,&\mbox{otherwise.} \end{cases} \] \end{itemize} Therefore we have finished the proof. \end{proof} \begin{rem}\label{counterforseesaw} From the proof above, we can see that the see-saw identity does not hold if one replaces $\mathrm{SO}_{4,0}(E)$ by $\mathrm{O}_{4,0}(E)$ in this case. \end{rem} Let $V$ be a free $R$-module of rank $2$ corresponding to the quadratic space $\mathbb{H}_E^2$ by the Morita equivalence. 
Then $\mathbb{R}es_{R/D}V$ is a skew-Hermitian $D$-vector space of dimension $4$. \begin{lem} Let $\Sigma$ be an irreducible representation of $\mathrm{GO} _{2,2}(E)$. Let $\varrho$ be a representation of $\mathrm{GSp}_{1,j}(F)$ for $j=0$ or $1$. Then \[\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(\Theta(\varrho),\Sigma)=\dim\mathrm{Hom}_{\mathrm{GSp}_{1,j}(F)}(\Theta_{1+j}(\Sigma\cdot\nu^{1+j}),\varrho) \] where $\nu$ is the nontrivial character of $\mathrm{GO} _{2,2}(E)/\mathrm{GSO}_{2,2}(E)$ and $\nu|_{\mathrm{O}_{2,2}(E)}=\det$. \end{lem} \begin{proof} Consider the following see-saw diagram \[\xymatrix{ \mathrm{GO} _{2,2}^\ast(F)\ar@{-}[rd]\ar@{-}[d] &\mathrm{GSp}_{2+2j}(E)^\natural\ar@{-}[d]\ar@{-}[ld] \\ \mathrm{GO} _{2,2}(E)^\natural&\mathrm{GSp}_{1,j}(F). } \] Assume that $\mathfrak{W}=\mathbb{R}es_{R/D}V$. Let us fix the polarization $\mathfrak{W}=\mathfrak{U}+\mathfrak{U}^\ast$ and $\mathbb{H}_E^2=Y+Y^\ast$, where $Y^\ast$ is the dual space of $Y$. Let $\mathfrak{V}$ be a Hermitian $D$-vector space with isometry group $\mathrm{GSp}_{1,j}(F)$. Then there exists a natural polarization \[\mathfrak{W}\otimes_D\mathfrak{V}=\mathfrak{U}\otimes_D\mathfrak{V}+\mathfrak{U}^\ast\otimes_D\mathfrak{V}. \] Similarly, $\mathbb{H}_E^2\otimes_E \mathcal{W}_{1+j}=Y\otimes_E\mathcal{W}_{1+j}+Y^\ast\otimes_E\mathcal{W}_{1+j}, $ where $\mathcal{W}_r$ is the symplectic vector space over $E$ of dimension $2r$. Set $\mathbb{Y}=\mathfrak{U}^\ast\otimes_D \mathfrak{V}$ and $\mathbb{Y}'=Y^\ast\otimes_E\mathcal{W}_{1+j}$. Then we have the splittings $i_{\mathbb{Y}}$ and $i_{\mathbb{Y}'}$ defined in \cite[Theorem 3.1]{kudla1994splitting}. For instance, $i_{\mathbb{Y}'}(y,z)=((y,z),\beta_{\mathbb{Y}'}(y) )$ for $(y,z)\in\mathrm{O}_{2,2}(E)\times\mathrm{Sp}_{2+2j}(E) $ and \[ i_{\mathbb{Y}}(y,z)=((y,z),\beta_{\mathbb{Y}}(y) )\in \mathrm{Mp}(\mathfrak{W}\otimes_D\mathfrak{V} )_\mathbb{Y} \] for $y\in \mathrm{O}_{2,2}^\ast(F)$ and $z\in\mathrm{Sp}_{1,j}(F)$. 
Note that $\beta_{\mathbb{Y}'}(y)=1$ for $y\in\mathrm{O}_{2,2}(E) $ and $$\beta_{\mathbb{Y}}(y)=(-1)^{(1+j)i}$$ if $y\in\mathfrak{P}\omega_i\mathfrak{P} $ where $\mathrm{O}^\ast_{2,2}(F)=\bigcup_i \mathfrak{P}\omega_i\mathfrak{P}$ and $\mathfrak{P}$ is the Siegel parabolic subgroup of $\mathrm{O}_{2,2}^\ast(F)$. Thus \[\beta_{\mathbb{Y}}(h)=\beta_{\mathbb{Y}'}(h)\cdot (\nu(h))^{1+j} \] for $h\in\mathrm{O}_{2,2}(E)$. Hence \begin{align*} \dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(\Theta(\varrho),\Sigma)&=\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural\times\mathrm{GSp}_{1,j}(F) }(\omega_{\psi,\mathbb{Y}},\Sigma\otimes\varrho) \\ &=\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural\times\mathrm{GSp}_{1,j}(F) }(\omega_{\psi,\mathbb{Y}'},\Sigma\cdot\nu^{1+j}\otimes\varrho )\\ &=\dim\mathrm{Hom}_{\mathrm{GSp}_{1,j}(F)}(\Theta_{1+j}(\Sigma\cdot\nu^{1+j}),\varrho ) \end{align*} where $\omega_{\psi,\mathbb{Y}}$ (resp. $\omega_{\psi,\mathbb{Y}'}$) is the Weil representation on $\mathrm{Mp}(\mathfrak{W}\otimes_D\mathfrak{V})$ emphasizing the splitting $\mathbb{Y}+\mathbb{Y}^\ast$ (resp. $\mathbb{Y}'+{\mathbb{Y}'}^\ast$). This finishes the proof. \end{proof} \subsubsection{Degenerate principal series} Let us fix the complete polarization \[\mathfrak{V}=Y_D+Y_D^\ast. \] Suppose $\dim_D\mathfrak{V}=4$. Assume that $\mathfrak{I}(s)$ is the degenerate principal series of $\mathrm{GU}(\mathfrak{V})=\mathrm{GSp}_{2,2}(F)$ associated to a Siegel parabolic subgroup $P(Y_D)$, i.e., \[\mathfrak{I}(s)=\{f:\mathrm{GU}(\mathfrak{V})\rightarrow \mathbb{C}|~f(pg)=\delta_{P(Y_D)}(p)^{\frac{1}{2}+\frac{s}{5}}f(g)\mbox{ for all } p\in P(Y_D),g\in \mathrm{GU}(\mathfrak{V}) \} \] where $\delta_{P(Y_D)}$ is the modular character. Similar to Proposition \ref{degenerateseries}, we have \begin{lem} \label{GSp:GU} Assume that $\mathfrak{R}^{3}(\mathbf{1})$ is the big theta lift to $\mathrm{GU}(\mathfrak{V})$ of the trivial representation of $\mathrm{GO} _{3,0}^\ast(F)$. 
Then there is an exact sequence \[\xymatrix{0\ar[r]& \mathfrak{R}^{3}(\mathbf{1})\ar[r]& \mathfrak{I}(\frac{1}{2})\ar[r]& \mathfrak{R}^{2}(\mathbf{1})\ar[r]&0 } \] where $\mathfrak{R}^{2}(\mathbf{1})$ is the big theta lift to $\mathrm{GU}(\mathfrak{V})$ of the trivial representation of $\mathrm{GO} ^\ast_{1,1}(F)$. \end{lem} \begin{proof} By \cite[Theorem 1.4]{yamana2011deg}, we may give a similar proof as in Proposition \ref{degenerateseries}. So we omit it here. \end{proof} \subsubsection{Double cosets} Assume that $P(Y_D)$ is the Siegel parabolic subgroup of $\mathrm{GU}(\mathfrak{V})=\mathrm{GSp}_{2,2}(F)$. Then the homogeneous space $X_D=P(Y_D)\backslash \mathrm{GSp}_{2,2}(F)$ corresponds to the set of maximal isotropic subspaces in $ \mathfrak{V}$. We consider the double coset $X_D/\mathrm{GU}(W_R)^\natural=X_D/\mathrm{GSp}_4(E)^\natural,$ similar to Lemma \ref{orbitdecomp}. \begin{prop} In the double cosets $X_D/\mathrm{GSp}_4(E)^\natural,$ there are \begin{itemize} \item one closed orbit with stabilizer $P(Y_D)\cap \mathrm{GSp}_4(E)^\natural, $ \item one open orbit with stabilizer $\mathrm{GU}_2(D)(F)=\mathrm{GSp}_{1,1}(F)\subset \mathrm{GSp}_4(E)^\natural$ and \item one intermediate orbit with a representative \[L=Dr(\sqrt{d}e+f)+D(e-\frac{1}{\sqrt{d}}f )\in X_D,\] which is a non-free $R$-module with stabilizer $(\mathrm{GL}_1(E)\times \mathrm{GSp}_{1,0}(F))\cdot N,N\cong E\oplus D$, where $r=\begin{pmatrix} 1&0\\0&0 \end{pmatrix}=r^2\in R $ and $W_R=Re+Rf$ with $h_R(e,f)=1$. \end{itemize} \end{prop} \begin{lem}\label{degpos1/2} Let $\tau$ be an irreducible representation of $\mathrm{GU}(W_R)^\natural=\mathrm{GSp}_4(E)^\natural$ and $\mathrm{GSp}_4(E)^\natural\hookrightarrow \mathrm{GSp}_{2,2}(F) $ be a natural embedding. Then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural }(\mathfrak{I}(1/2),\tau )\geq \dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau^\vee,\mathbb{C} ). 
\] \end{lem} \begin{proof} Note that there are three orbits for $P(Y_D) \backslash \mathrm{GSp}_{2,2}(F)/\mathrm{GSp}_4(E)^\natural$. There is a filtration for $\mathfrak{I}(1/2)|_{\mathrm{GSp}_4(E)^\natural}$ as follows \[ind_{\mathrm{GSp}_{1,1}(F)}^{\mathrm{GSp}_4(E)^\natural }\mathbb{C}=\mathfrak{I}_0(1/2)\subset\mathfrak{I}_1(1/2)\subset \mathfrak{I}_2(1/2)=\mathfrak{I}(1/2)|_{\mathrm{GSp}_4(E)^\natural } \] where $\mathfrak{I}_2(1/2)/\mathfrak{I}_1(1/2)\cong ind_{P^\natural}^{\mathrm{GSp}_4(E)^\natural }\delta_{P^\natural}^{\frac{1}{2}} $ and $\mathfrak{I}_1(1/2)/\mathfrak{I}_0(1/2)\cong ind_{MN}^{\mathrm{GSp}_4(E)^\natural }\delta_{P(Y_D)}^{\frac{3}{5}}\delta_3^{-\frac{1}{2}} ,$ where \[M\cong \mathrm{GL}_1(E)\times \mathrm{GSp}_{1,0}(F),N\cong D\oplus E\mbox{ and }\delta_3(t,x)=|N_{E/F}(t)^4\cdot \lambda(x)^{-4} |_F \] for $(t,x)\in M$. There exists an involution on $\mathrm{GSp}_{2,2}(F)$ such that the fixed points coincide with $\mathrm{GSp}_4(E)^\natural$. So applying \cite[Theorem 2.5]{olafsson1987fourier} and \cite[Proposition 4.9]{dima2018analytic}, we obtain the following inequality \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}(1/2),\tau )\geq\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_0(1/2),\tau )=\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau^\vee,\mathbb{C}). \] It finishes the proof. \end{proof} \subsection{The distinction problem for $\mathrm{GSp}_{1,1}$} Let $\mathrm{GU}_2(D)=\mathrm{GSp}_{1,1}$ be the inner form of $\mathrm{GSp}_4$ defined over $F,$ whose $E$-points coincide with $\mathrm{GSp}_4(E).$ Assume that $\tau$ is an irreducible representation of $\mathrm{GSp}_4(E)$ with $\omega_\tau|_{F^\times}=\mathbf{1}$. In this subsection, we will study the multiplicity \[\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C} ). \] \begin{thm}Let $\tau$ be a representation of $\mathrm{GSp}_4(E)$ such that $\Pi_{\phi_{\tau}}$ is generic. 
Then \begin{enumerate}[(i)] \item If $\tau=\theta(\pi_1\boxtimes\pi_2)$ is a nongeneric tempered representation of $\mathrm{GSp}_4(E),$ where $\pi_1\boxtimes\pi_2$ is an irreducible smooth representation of $\mathrm{GSO}_{4,0}(E),$ then $\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=1$ if and only if one of the following holds: \begin{itemize} \item $\pi_1\ncong\pi_2$ but $\pi_1^\vee\cong\pi_2^\sigma$; \item $\pi_1\cong\pi_2$ are both $(D^\times(F),\omega_{E/F})$-distinguished. \end{itemize} \item If $\tau=\theta(\pi_1\boxtimes\pi_2)=\theta(\pi_2\boxtimes\pi_1)$ is generic, then \[\dim\mathrm{Hom}_{ \mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\begin{cases} 2,&\mbox{if }\pi_1=\pi_2=\pi(\chi^{-1},\chi^\sigma);\\ 1,&\mbox{if }\pi_1=\pi_2\mbox{ are square-integrable and }D^\times(F)\mbox{-distinguished};\\ 1,&\mbox{if }\pi_1\mbox{ is }D^\times(F)\mbox{-distinguished and }\pi_2=\pi_0;\\ 2,&\mbox{if }\pi_1\neq\pi_2\mbox{ are both }D^\times(F)\mbox{-distinguished};\\ 0,&\mbox{the other cases}. \end{cases} \] Here $\pi_0=\pi(\chi_1,\chi_2 )$ with $\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=\mathbf{1}.$ Note that these conditions are mutually exclusive. \item Assume that $\tau$ is not as in case (i) or (ii), so that $\tau=\theta(\Pi^{D}\boxtimes\chi )$ is generic, where $\Pi^{D}\boxtimes\chi$ is an irreducible representation of $\mathrm{GSO}_{5,1}(E)$. Then $\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=1$ if and only if one of the following holds: \begin{itemize} \item $\phi_\Pi$ is irreducible and conjugate-orthogonal or \item $\phi_\Pi=\phi_\rho+\phi_\rho\mu$ with $\rho^\sigma\cong \rho^\vee\mu^{-1}$ \end{itemize} where $\Pi=JL(\Pi^{D})$ is the Jacquet-Langlands lift to $\mathrm{GL}_4(E)$ of $\Pi^D$. \end{enumerate}\label{innerformperiod} \end{thm} \begin{proof} The proof is very similar to the proof of Theorem \ref{localgspperiod}. 
\begin{enumerate}[(i)] \item Assume that $V_R$ is a skew-Hermitian free module over $R$ of rank $2$, corresponding to $D_E$ by the Morita equivalence. Then $\mathbb{R}es_{R/D}V_R$ is a $4$-dimensional skew-Hermitian vector space over $D$ with trivial discriminant. Fix a polarization $\mathbb{R}es_{R/D}V=\mathfrak{U}\oplus \mathfrak{U}^\ast$. Consider the following diagram \[\xymatrix{\mathrm{GSp}_4(E)^\natural\ar@{-}[d]\ar@{-}[rd] & \mathrm{GO} _{2,2}^\ast(F)\ar@{-}[ld]\ar@{-}[d]\ar@{-}[rd] & \mathrm{GL}_2(E)^\natural\ar@{-}[ld]\ar@{-}[d] \\ \mathrm{GSp}_{1,1}(F)& \mathrm{GO} _{4,0}(E)^\natural& \mathrm{GSp}_{1,0}(F). } \] There is an exact sequence of $\mathrm{GO} _{2,2}^\ast(F)$-representations \[\xymatrix{0\ar[r]& \mathfrak{R}^{1,1}(\mathbf{1})\ar[r]& I(\frac{1}{2})\ar[r]& \mathfrak{R}^{1,0}(\mathbf{1})\ar[r]&0, } \] where $I(s)$ is the degenerate principal series of $\mathrm{GO} ^\ast_{2,2}(F)$ and $\mathfrak{R}^{1,j}(\mathbf{1})$ is the theta lift to $\mathrm{GO} _{2,2}^\ast(F)$ of the trivial representation of $\mathrm{GSp}_{1,j}(F)$. Set $\tau=\Theta_{2}(\Sigma^+)$, where \[\Sigma^+=\begin{cases} \mathrm{Ind}_{\mathrm{GSO}_{4,0}(E)}^{\mathrm{GO} _{4,0}(E)}(\pi_1\boxtimes\pi_2),&\mbox{ if }\pi_1\ncong\pi_2;\\ (\pi_1\boxtimes\pi_1)^+,&\mbox{ if }\pi_1\cong\pi_2. \end{cases} \] Note that $\mathrm{GO} _{4,0}(E)$ is an anisotropic group. 
Using the contravariant exact functor $$\mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(-,\Sigma^+),$$ we obtain the short exact sequence \[0\rightarrow\mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(\mathfrak{R}^{1,0}(\mathbf{1} ),\Sigma^+)\rightarrow \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(I(\frac{1}{2}),\Sigma^+ )\rightarrow \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(\mathfrak{R}^{1,1}(\mathbf{1}),\Sigma^+ )\rightarrow0 .\] With the see-saw identities, we have \begin{equation}\label{nongenericquaternion} 0\rightarrow \mathrm{Hom}_{\mathrm{GSp}_{1,0}(F)}(\Theta_1(\Sigma^+\otimes\nu),\mathbb{C} )\rightarrow \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(I(\frac{1}{2}),\Sigma^+)\rightarrow \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})\rightarrow 0 , \end{equation} where $\Theta_{1}(\Sigma^+\otimes\nu)$ is the big theta lift to $\mathrm{GL}_2(E)$ of $\Sigma^+\otimes\nu$. There are no $F$-rational points on the non-identity connected component of $\mathrm{GO} _{2,2}^\ast$ \cite[Page 21-22]{moeglin1987correspondances}, so that $$\mathrm{GO} ^\ast_{2,2}(F)=\mathrm{GSO}^\ast_{2,2}(F)=\mathfrak{Q}\cdot \mathrm{GO} _{4,0}(E)^\natural,$$ where $\mathfrak{Q}$ is the Siegel parabolic subgroup of $\mathrm{GO} _{2,2}^\ast(F)$. 
Then \begin{equation}\label{quaternionicopenorbit} \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(I(\frac{1}{2}),\Sigma^+ )=\mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(ind_{\mathrm{GO} ^\ast_{2,0}(F)}^{\mathrm{GO} _{4,0}(E)^\natural }\mathbb{C},\Sigma^+ )=\mathrm{Hom}_{\mathrm{GO} ^\ast_{2,0}(F)}(\Sigma^+,\mathbb{C}) .\end{equation} Here $\mathrm{GSO}^\ast_{2,0}(F)$ sits in the following exact sequence \[\xymatrix{1\ar[r]& E^\times\ar@{=}[d]\ar[r]^-i& D_E^\times(E) \times F^\times\ar[r]\ar[d]&\mathrm{GSO}^\ast_{2,0}(F)\ar@{^{(}->}[d]\ar[r]&1\\ 1\ar[r]&E^\times\ar[r]& D_E^\times(E)\times D_E^\times(E)\ar[r]&\mathrm{GSO}_{4,0}(E)\ar[r]&1 } \] where $i(e)=(e,N_{E/F}(e)^{-1})$ and the embedding $\mathrm{GSO}^\ast_{2,0}(F)\hookrightarrow \mathrm{GSO}_{4,0}(E)$ is given by $$(x,t)\mapsto(x,t\cdot x^\sigma )$$ for $x\in D_E^\times(E)$ and $t\in F^\times$. The $\sigma$-action on $D_E^\times(E)$ is induced from the isomorphism $D_E(E)\cong D_E(E)\otimes_E(E,\sigma).$ There are two subcases: \begin{itemize} \item If $\pi_1\ncong\pi_2,$ then $\pi_1\boxtimes\pi_2$ does not participate in theta correspondence with $\mathrm{GL}_2(E)$. The short exact sequence \eqref{nongenericquaternion} implies that \begin{equation}\label{nonsplitquaternionperiod} \dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(I(\frac{1}{2}),\Sigma^+)=\dim\mathrm{Hom}_{\mathrm{GSO}_{2,0}^\ast(F)}(\pi_1\boxtimes\pi_2,\mathbb{C}) . \end{equation} Hence one can get $$\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{D_E^\times(E)}(\pi_2^\vee,\pi_1^\sigma )$$ where $\pi_1^\sigma=JL^{-1}(JL(\pi_1)^\sigma). $ \item If $\pi_1=\pi_2,$ then the short exact sequence \eqref{nongenericquaternion} implies that \[\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GO} _{4,0}(E)^\natural}(I(\frac{1}{2}),\Sigma^+) \] because $\Theta_{1}(\Sigma^+\otimes\nu)=0$. 
Note that \[\dim\mathrm{Hom}_{ \mathrm{GSO}^\ast_{2,0}(F)}(\pi_1\boxtimes\pi_1,\mathbb{C} )=\dim\mathrm{Hom}_{\mathrm{GO} _{2,0}^\ast(F)}(\Sigma^+,\mathbb{C})+\dim\mathrm{Hom}_{ \mathrm{GO} ^\ast_{2,0}(F)}(\Sigma^+\otimes\nu,\mathbb{C}) . \] In a similar way, we have $\dim\mathrm{Hom}_{\mathrm{GO} _{2,0}^\ast(F)}(\Sigma^+\otimes\nu,\mathbb{C} )=\dim\mathrm{Hom}_{\mathrm{GSp}_{1,0}(F)}(\pi_1,\mathbb{C}). $ Therefore, if $\pi_1$ is $D^\times(F)$-distinguished, then $\pi_1^\sigma\cong\pi_1^\vee $ and so \[\dim\mathrm{Hom}_{\mathrm{GSO}_{2,0}^\ast(F)}(\pi_1\boxtimes\pi_1,\mathbb{C} )=1=\dim\mathrm{Hom}_{\mathrm{GO} _{2,0}^\ast(F)}(\Sigma^+\otimes\nu,\mathbb{C} ) . \] Then $\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim\mathrm{Hom}_{ \mathrm{GO} ^\ast_{2,0}(F)}(\Sigma^+,\mathbb{C})=0 $ if $\pi_1$ is $D^\times(F)$-distinguished. Furthermore, $\tau$ is $\mathrm{GSp}_{1,1}(F)$-distinguished if and only if $\pi_1^\vee\cong\pi_1^\sigma$ which is not $D^\times(F)$-distinguished. Thus $\tau$ is $\mathrm{GSp}_{1,1}(F)$-distinguished if and only if $\pi_1$ is $(D^\times(F),\omega_{E/F})$-distinguished, in which case $\phi_{\pi_1}$ is conjugate-symplectic. \end{itemize} (Similarly, one can show that $$\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\omega_{E/F})=\dim \mathrm{Hom}_{D_E^\times(E)}(\pi_2^\vee,\pi_1^\sigma)-\dim \mathrm{Hom}_{D^\times(F)}(\Theta_1(\Sigma^+\otimes\nu),\omega_{E/F}).$$ Here we use the fact $$\omega_{E/F}\circ\lambdabda_V |_{\mathrm{GO} ^\ast_{2,0}(F)}=\mathbf{1}.$$ Hence $\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\omega_{E/F})=1$ if and only if either $\pi_1=\pi_2$ are both $D^\times(F)$-distinguished or $\pi_1\ncong\pi_2$ but $\pi_1^\vee=\pi_2^\sigma$. It will be useful when we verify the Prasad conjecture for $\mathrm{PGSp}_4$ in \S\mathrm{Re}f{secpgsp}.) \item We will use a similar argument. Assume that $V_R$ corresponds to $\mathbb{H}_E^2$ by the Morita equivalence. 
Then via the see-saw diagrams \[\xymatrix{\mathrm{GO} _{5,1}(E)^\natural\ar@{-}[d]\ar@{-}[rd] & \mathrm{GSp}_{2,2}(F)\ar@{-}[ld]\ar@{-}[d]\ar@{-}[rd] & \mathrm{GO} _{2,2}(E)^\natural\ar@{-}[ld]\ar@{-}[d] \\ \mathrm{GO} ^\ast_{3,0}(F)& \mathrm{GSp}_4(E)^\natural&\mathrm{GO} ^\ast_{1,1}(F) } \] we have $\theta_2^-(\tau)=0$. So Lemma \mathrm{Re}f{GSp:GU} implies that \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}(\frac{1}{2}),\tau)=\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{R}^2(\mathbf{1}),\tau) = \dim \mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Theta_{4}^+(\tau),\mathbb{C} ) ,\] where $\mathfrak{I}(s)$ is the degenerate principal series of $\mathrm{GSp}_{2,2}(F)$. Due to Lemma \mathrm{Re}f{degpos1/2}, $$\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})\leq\dim\mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Theta_{4}^+(\tau),\mathbb{C}) .$$ We want to get the reverse inequality. Consider the following diagrams \[\xymatrix{ \mathrm{GSp}_4(E)^\natural\ar@{-}[d]\ar@{-}[rd] & \mathrm{GO} _{2,2}^\ast(F)\ar@{-}[rd]\ar@{-}[d]\ar@{-}[ld]& \mathrm{GL}_2(E)^\natural\ar@{-}[d]\ar@{-}[ld] \\ \mathrm{GSp}_{1,1}(F)& \mathrm{GO} _{2,2}(E)^\natural& \mathrm{GSp}_{1,0}(F). } \] There is an exact sequence of $\mathrm{GO} _{2,2}^\ast(F)$-representations \[\xymatrix{0\ar[r]& \mathfrak{R}^{1,0}(\mathbf{1})\ar[r]& I(-\frac{1}{2})\ar[r]& \mathfrak{R}^{1,1}(\mathbf{1})\ar[r]&0. } \] Note that $\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(\mathfrak{R}^{1,0}(\mathbf{1}),\Sigma^+ )=\dim\mathrm{Hom}_{\mathrm{GSp}_{1,0}(F)}(\Theta_{1}(\Sigma^+\otimes\nu),\mathbb{C})=0 . 
$ Thanks to \cite[Theorem 2.5]{olafsson1987fourier} and \cite[Proposition 4.9]{dima2018analytic}, we have \begin{align*} \dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})&=\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(\mathfrak{R}^{1,1}(\mathbf{1}),\Sigma^+)\\ &=\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(I(-\frac{1}{2}),\Sigma^+)\\ &\geq\dim\mathrm{Hom}_{\mathrm{GO} _{2,2}(E)^\natural}(ind_{\mathrm{GO} _{1,1}^\ast(F)}^{\mathrm{GO} _{2,2}(E)^\natural}\mathbb{C}, \Sigma^+ )\\ &=\dim\mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Sigma^+,\mathbb{C}). \end{align*} Therefore $\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\mathbb{C})=\dim\mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Theta_4^+(\tau),\mathbb{C}) $ unless $\Theta_{4}^+(\tau)$ is reducible. There is no $F$-rational points on the non-identity connected component of $\mathrm{GO} _{1,1}^\ast$ \cite[Page 21-22]{moeglin1987correspondances}, so that \[\mathrm{GO} _{1,1}^\ast(F)=\mathrm{GSO}_{1,1}^\ast(F). \] Assume that $\pi_1\ncong\pi_2$. Since $$\mathrm{GO} _{1,1}^\ast(F)=\mathrm{GSO}^\ast_{1,1}(F)\cong\frac{ \mathrm{GL}_2(F)\times D^\times(F)}{\{(t,t^{-1}) \}},$$ one can obtain that for $\pi_1\neq\pi_2$, $\Theta_{4}^+(\tau)=\mathrm{Ind}_{\mathrm{GSO}(2,2)(E)}^{\mathrm{GO} (2,2)(E)}(\pi_1\boxtimes\pi_2)$ and \begin{equation}\label{quaternionsum} \mathrm{Hom}_{\mathrm{GO} ^\ast_{1,1}(F) }(\Sigma^+,\mathbb{C} )=\mathrm{Hom}_{\mathrm{GO} ^\ast_{1,1}(F)}(\pi_1\boxtimes\pi_2,\mathbb{C}) \oplus \mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\pi_2\boxtimes\pi_1,\mathbb{C} ). \end{equation} There are two subcases: \begin{itemize} \item If $\pi_i~(i=1,2)$ are both $D^\times(F)$-distinguished, then \eqref{quaternionsum} implies that \[\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GO} ^\ast_{1,1}(F)}(\Sigma^+,\mathbb{C} )=2. 
\] \item If $\pi_1$ is $D^\times(F)$-distinguished and $\pi_2=\pi(\chi_1,\chi_2)$ with $\chi_1\neq\chi_2,\chi_1|_{F^\times}=\chi_2|_{F^\times}=1,$ then $\pi_2$ is $\mathrm{GL}_2(F)$-distinguished but not $D^\times(F)$-distinguished. So \eqref{quaternionsum} implies that \[\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=1. \] \end{itemize} If $\pi_1=\pi_2$ are both square-integrable representations, then \[\mathrm{Hom}_{\mathrm{GO} ^\ast_{1,1}(F)}(\Sigma^+,\mathbb{C})=\mathrm{Hom}_{\mathrm{GSO}^\ast_{1,1}(F)}(\pi_1\boxtimes\pi_1,\mathbb{C})=\begin{cases} 1,&\mbox{ if }\pi_1\mbox{ is }D^\times(F)\mbox{-distinguished};\\ 0,&\mbox{ otherwise.} \end{cases} \] If $\pi_1=\pi_2=\pi(\chi^{-1},\chi^\sigma),$ then $\Theta_{4}^+(\tau)$ is reducible. We will show that $\tau=I_{Q(Z)}(\mathbf{1},\pi_1)$ does not occure on the boundary of $\mathfrak{I}(\frac{1}{2})$ and so that $$\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim\mathrm{Hom}_{\mathrm{GO} _{1,1}^\ast(F)}(\Theta_{4}^+(\tau),\mathbb{C}). $$ There is a filtration \[ind_{\mathrm{GSp}_{1,1}(F)}^{\mathrm{GSp}_4(E)^\natural}\mathbb{C}=\mathfrak{I}_0(s)\subset\mathfrak{I}_1(s)\subset\mathfrak{I}_2(s)=\mathfrak{I}(s)|_{\mathrm{GSp}_4(E)^\natural} \] of $\mathfrak{I}(s)|_{\mathrm{GSp}_4(E)^\natural }$ such that $\mathfrak{I}_2(s)/\mathfrak{I}_1(s)=ind_{P^\natural}^{\mathrm{GSp}_4(E)^\natural}\delta_{P^\natural}^{\frac{s+1}{3}}$ and $$\mathfrak{I}_1(s)/\mathfrak{I}_0(s)=ind_{MN}^{\mathrm{GSp}_4(E)^\natural}\delta_{P(Y_D)}^{\frac{1}{2}+\frac{s}{5}}\delta_3^{-\frac{1}{2}}$$ where $\delta_3(t,x)=|N_{E/F}(t)^4\lambdabda(d)^{-4} |_F$ for $(t,x)\in M=\mathrm{GL}_1(E)\times\mathrm{GSp}_{1,0}(F)$. 
If \[\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_1(\frac{1}{2})/\mathfrak{I}_0(\frac{1}{2}),\tau ) \neq0,\] then \[\mathrm{Hom}_{\mathrm{GL}_1(E) }(|-|_E,R_{\bar{P}''}(I_{Q(Z)}(\mathbf{1},\pi_1)) )\neq 0, \] which is impossible, where $P''=(\mathrm{GL}_1(E)\times\mathrm{GL}_2(E)^\natural)\ltimes N$ is a parabolic subgroup of $\mathrm{GSp}_4(E)^\natural$ and $R_{\bar{P}''}$ denotes the Jacquet funtor associate to the parabolic opposite to $P''$. So $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_1(\frac{1}{2})/\mathfrak{I}_0(\frac{1}{2}),\tau ) =0.$$ It is quite straightforward to see that $$\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(ind_{P^\natural}^{\mathrm{GSp}_4(E)^\natural}\delta_{P^\natural}^{\frac{1}{2}},I_{Q(Z)}(\mathbf{1},\pi_1))=0.$$ Hence $\tau=I_{Q(Z)}(\mathbf{1},\pi_1)$ does not occur on the boundary of $\mathfrak{I}(\frac{1}{2})$. \par The big theta lift to $\mathrm{GSO}_{2,2}(E)$ of $\tau$ of $\mathrm{GSp}_4(E)$ is $$\mathrm{Ext}^1_{\mathrm{GSO}(2,2)(E)}(\pi_1\boxtimes\pi_1,\pi_1\boxtimes\pi_1 ).$$ From the following see-saw pairs diagram \[\xymatrix{\mathrm{GSO}_{5,1}(E)^\natural\ar@{-}[d]\ar@{-}[rd] & \mathrm{GSp}_{2,2}(F)\ar@{-}[ld]\ar@{-}[d]\ar@{-}[rd] & \mathrm{GSO}_{2,2}(E)^\natural\ar@{-}[ld]\ar@{-}[d] \\ \mathrm{GO} ^\ast_{3,0}(F)& \mathrm{GSp}_4(E)^\natural&\mathrm{GO} ^\ast_{1,1}(F) } \] one can use the fact $\theta_2^{-}(\tau)=0$ to obtain that \[\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim\mathrm{Hom}_{\mathrm{GSO}_{1,1}^\ast(F)}(\mathrm{Ext}_{\mathrm{GSO}_{2,2}(E)}^1(\pi_1\boxtimes\pi_1,\pi_1\boxtimes\pi_1 ),\mathbb{C} )=2. \] \item Assume that $\theta_4^{+}(\tau)=0$. Note that $0\rightarrow\mathfrak{R}^2(\mathbf{1})\rightarrow \mathfrak{I}(-\frac{1}{2})\rightarrow \mathfrak{R}^3(\mathbf{1})\rightarrow0$ is exact. 
Then we can use the same method appearing in (ii) to show that \begin{align*}\dim \mathrm{Hom}_{\mathrm{GO} _{3,0}^\ast(F) }(\Theta_2^-(\tau),\mathbb{C} )&=\dim \mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{R}^{3}(\mathbf{1}),\tau)\\ &=\dim \mathrm{Hom}_{\mathrm{GSp}_{4}(E)^\natural}(\mathfrak{I}(-\frac{1}{2}),\tau)\\ &\geq\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C}) . \end{align*} We will show that $\tau$ does not occur on the boundary of $\mathfrak{I}(-\frac{1}{2})$ in this case. Then $$\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}(-\frac{1}{2}),\tau)\leq \dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_0(-\frac{1}{2}),\tau ) =\dim\mathrm{Hom}_{\mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C}) $$ and so \[\dim\mathrm{Hom}_{ \mathrm{GSp}_{1,1}(F)}(\tau,\mathbb{C})=\dim\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}(-\frac{1}{2}),\tau). \] In order to show that $\tau$ does not occur on the boundary of $\mathfrak{I}(-\frac{1}{2})$, we separate them into two cases. \begin{itemize} \item If $\tau=I_{Q(Z)}(\chi,\pi)$ with $\chi\neq\mathbf{1}$, then $$\mathrm{Hom}_{\mathrm{GSp}_{4}(E)^\natural}(\mathfrak{I}_2(-\frac{1}{2})/\mathfrak{I}_1(-\frac{1}{2}),\tau )=\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(ind_{P^\natural}^{\mathrm{GSp}_4(E)^\natural}\delta_{P^\natural}^{\frac{1}{6}},\tau ) =0. $$ If $\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_1(-\frac{1}{2})/\mathfrak{I}_0(-\frac{1}{2}),\tau )\neq 0 $, then $\mathrm{Hom}_{\mathrm{GL}_1(E)}(\mathbf{1},R_{\bar{P}''}(\tau) )\neq0 $ which is impossible since $R_{\bar{P}''}(\tau)=\chi\otimes\pi\oplus \chi^{-1}\otimes\pi\chi $ and $\chi\neq\mathbf{1}$, where $P''=(\mathrm{GL}_1(E)\times \mathrm{GL}_2(E)^\natural )\rtimes N$. 
\item If $\pi$ is square-integrable, then $\mathrm{Hom}_{\mathrm{GL}_1(E)}(\mathbf{1},R_{\bar{P}''}(\tau) )=0 $ due to the Casselman criterion in \cite{casselman82duke} for a discrete series representation that $\mathrm{Hom}_{\mathrm{GL}_1(E)}(|-|^s_E,R_{\bar{P}''}(\tau))\neq0$ implies that $s<0$. Hence $\mathrm{Hom}_{\mathrm{GSp}_4(E)^\natural}(\mathfrak{I}_1(-\frac{1}{2})/\mathfrak{I}_0(-\frac{1}{2}),\tau)=0 $. In a similar way, \[\mathrm{Hom}_{\mathrm{GSp}_{4}(E)^\natural}(\mathfrak{I}_2(-\frac{1}{2})/\mathfrak{I}_1(-\frac{1}{2}),\tau )=\mathrm{Hom}_{\mathrm{GL}_2(E)\times F^\times }(\delta_{P^\natural}^{\frac{1}{6}},R_{\bar{P}^\natural}(\tau))=0. \] \end{itemize} Hence $\tau$ does not occur on the boundary of $\mathfrak{I}(-\frac{1}{6})$. Moreover, $\Theta_{2}^-(\tau)=\Pi^D\boxtimes\chi$ is irreducible. Then there exists an identity \[\dim \mathrm{Hom}_{\mathrm{GSp}_{1,1}(F) }(\tau,\mathbb{C})=\dim \mathrm{Hom}_{\mathrm{GO} ^\ast_{3,0}(F)}(\Pi^{D}\boxtimes\chi,\mathbb{C})=\dim \mathrm{Hom}_{D_4^\times(F)}(\Pi^{D},\mathbb{C}), \] where $D_4$ is the division algebra over $F$ with invariance $\frac{1}{4}\in\mathbb{Q}/\mathbb{Z}$. So \begin{itemize} \item If $\Pi=JL(\Pi^D)$ is a square-integrable representation of $\mathrm{GL}_4(E)$, then \cite[Theorem 1]{beuzart2017distinguished} and \cite[Theorem 5.2]{matringe2009distinction} imply that \[\dim\mathrm{Hom}_{\mathrm{GL}_4(F)}(\Pi,\omega_{E/F})=\dim\mathrm{Hom}_{D_4^\times(F)}(\Pi^D,\omega_{E/F}) =\begin{cases} 1,&\mbox{\mbox{ if }}\phi_{\Pi}\mbox{ is conjugate-symplectic};\\ 0,&\mbox{ otherwise.} \end{cases} \] So $\dim\mathrm{Hom}_{D_4^\times(F)}(\pi^D,\mathbb{C})=1$ if and only if $\phi_{\Pi}$ is conjugate-orthogonal. 
\item If $\Pi^{D}$ is an induced representation $\pi(\rho_D,{(\rho_D)}^\vee\otimes\mu)$ with $\mu\neq\omega_{\rho_D}$, then we use the orbit decomposition $B_1\backslash \mathrm{GL}_2(D_E)(E)/\mathrm{GL}_1(D_4)(F)$ and Mackey theory to get that \[\mathrm{Hom}_{D_4^\times(F)}(\pi^{D},\mathbb{C} )=\mathrm{Hom}_{D^\times_E(E) }(\rho_D^\sigma\otimes\rho_D^\vee\cdot \mu,\mathbb{C} )=\mathrm{Hom}_{D_E^\times(E)}(\rho_D^\sigma,\rho_D\cdot\mu^{-1})=\begin{cases} 1,&\mbox{ if }\rho_D^\sigma\cong \rho_D\mu^{-1};\\ 0,&\mbox{ otherwise.} \end{cases} \] In this case, $\rho^\sigma=\rho\mu^{-1}$ where $\rho=JL(\rho_D)$ is the Jacquet-Langlands lift to $\mathrm{GL}_2(E)$ and $\phi_\Pi=\phi_\rho\oplus\phi_{\rho}^\vee\cdot\mu,$ which is conjugate-orthogonal due to \cite[Theorem 5.2]{matringe2016distinction}. \end{itemize} \end{enumerate} Then we are done. \end{proof} \section{The Prasad conjecture for $\mathrm{GSp}_4$} \subsection{The Prasad conjecture} \label{subsect:prasad} In this subsection, we give a brief introduction to the Prasad conjecture, i.e. \cite[Conjecture 2]{prasad2015arelative}. One may refer to \cite[\S 16]{prasad2015arelative} for more details. \par Let $\mathbf{G}$ be a quasi-split reductive group defined over a local field $F$ with characteristic zero. Let $W_F$ be the Weil group of $F$ and $WD_F$ be the Weil-Deligne group of $F$. Let $E$ be a quadratic extension over $F$. Dipendra Prasad introduces a quadratic character $\chi_\mathbf{G}$ in \cite[\S 10]{prasad2015arelative} and another quasi-split reductive group $G^{op}$ defined over $F$ in \cite[\S 9]{prasad2015arelative}. Then there is a relation between the fibers of the base change map \[\Phi:\mathrm{Hom}(WD_F,{}^LG^{op})\longrightarrow\mathrm{Hom}(WD_E,{}^LG^{op})\] from the Galois side and the $\chi_\mathbf{G}$-distinction problems for $\mathbf{G}(E)/\mathbf{G}(F)$ from the automorphic side. \par More precisely, assume the Langlands-Vogan conjecture in \cite{vogan1993local}. 
Given an irreducible representation $\pi$ of $\mathbf{G}(E)$ with an enhanced $L$-parameter $(\phi_\pi,\lambda)$, where $\lambda$ is a character of the component group $\pi_0(Z(\phi_{\pi}))$ and the $L$-packet $\Pi_{\phi_{\pi}}$ is generic, we have \[\sum_{\alpha}\dim\mathrm{Hom}_{G_\alpha(F)}(\pi,\chi_\mathbf{G})=\sum_{i}m(\lambda,\tilde{\phi}_i)\deg\Phi(\tilde{\phi}_i)/d_0(\tilde{\phi}_i) \] where \begin{itemize} \item $\alpha\in H^1(W_F,\mathbf{G})$ runs over all pure inner forms of $\mathbf{G}$ satisfying $G_\alpha(E)=\mathbf{G}(E);$ \item $\tilde{\phi}_i\in \mathrm{Hom}(WD_F,{}^LG^{op})$ runs over all parameters of ${}^LG^{op}$ satisfying $\tilde{\phi}_i|_{WD_E}=\phi_\pi;$ \item $m(\lambda,\tilde{\phi})=\dim\mathrm{Hom}_{\pi_0(Z(\tilde{\phi}))}(\mathbf{1},\lambda) $ is the multiplicity of the trivial representation contained in the restricted representation $\lambda|_{\pi_0(Z(\tilde{\phi}) )}$ ; \item $d_0(\tilde{\phi})=|Coker\{\pi_0(Z(\tilde{\phi}))\longrightarrow\pi_0(Z(\phi_\pi))^{\mathrm{Gal}(E/F)} \}|.$ \end{itemize}
There is a perfect pairing \[H^1(\mathrm{Gal}(E/F), Z(\hat{G}^{op}))\times H^1(\mathrm{Gal}(E/F),\mathbf{G}(E))\longrightarrow\mathbb{Q}/\mathbb{Z} \] when Dipendra Prasad studies the character twists in \cite[\S 13]{prasad2015arelative}. Set $\Omega_\mathbf{G}(E)=H^1(\mathrm{Gal}(E/F),Z(\hat{G}^{op})).$ Given a parameter $\tilde{\phi}\in H^1(W_F,\hat{G}^{op}),$ we consider the stabilizer $\Omega_\mathbf{G}(\tilde{\phi},E)\subset \Omega_\mathbf{G}(E)$ under the pairing \[H^1(W_F,Z(\hat{G}^{op}))\times H^1(W_F,\hat{G}^{op})\longrightarrow H^1(W_F,\hat{G}^{op}). \] Set $$A_\mathbf{G}(\tilde{\phi})\subset H^1(\mathrm{Gal}(E/F),\mathbf{G}(E))\cong\Omega_\mathbf{G}(E)^\vee $$ to be the annihilator of the stabilizer $\Omega_\mathbf{G}(\tilde{\phi},E).$ Then there is another perfect pairing \[\Omega_\mathbf{G}(E)/\Omega_\mathbf{G}(\tilde{\phi},E)\times A_\mathbf{G}(\tilde{\phi})\longrightarrow\mathbb{Q}/\mathbb{Z}, \] meaning that in the orbit $\Omega_\mathbf{G}(E)/\Omega_\mathbf{G}(\tilde{\phi},E)$ of character twists of $\tilde{\phi}$ (which go to a particular parameter under the basechange to $E$) there are exactly as many parameters as there are certain pure inner forms of $\mathbf{G}$ over $F$ which trivialize after basechange to $E.$ \par Consider \[F(\phi_\pi)=\{\tilde{\phi}:WD_F\longrightarrow {}^LG^{op}|~\tilde{\phi}|_{WD_E}=\phi_\pi \}=\sqcup_{i=1}^r\mathcal{O}(\tilde{\phi}_i). 
\] Each orbit $\mathcal{O}(\tilde{\phi}_i)$ of $\Omega_\mathbf{G}(E)$-action on $F(\phi_\pi)$ is associated to a coset $\mathcal{C}_i$ of $A_\mathbf{G}(\tilde{\phi}_i,E)$ in $H^1(\mathrm{Gal}(E/F),\mathbf{G}(E))$ defining a set of certain pure inner forms $G_\alpha$ of $\mathbf{G}$ over $F$ such that $G_\alpha(E)=\mathbf{G}(E).$ Then \[\dim \mathrm{Hom}_{G_{\alpha(F)}}(\pi,\omega_\mathbf{G})=\sum_{i=1}^r m(\lambdabda,\tilde{\phi}_i)\cdot 1_{\mathcal{C}_i}(G_\alpha)/ d_0(\tilde{\phi}_i) ,\] where \begin{itemize} \item $1_{\mathcal{C}_i}$ is the characteristic function of the coset $\mathcal{C}_i;$ \item $m(\lambdabda,\tilde{\phi} )$ is the multiplicity for the trivial representation contained in the restricted representation $\lambdabda|_ {\pi_0(Z(\tilde{\varphi}))},$ which may be zero; \item $d_0(\tilde{\phi}) = |Coker\{\pi_0(Z(\tilde{\phi})) \longrightarrow\pi_0(Z(\phi_\pi))^{\mathrm{Gal}(E/F)}\} | $. \end{itemize} \subsection{The Prasad conjecture for $\mathrm{GL}_2$} Before we give the proof of Theorem \mathrm{Re}f{thm1.2}, let us recall the Prasad conjecture for $\mathbf{G}=\mathrm{GL}_2=\mathrm{GSp}_2.$ Set $\mathbf{G}=\mathrm{GL}_2.$ Then $\chi_\mathbf{G}=\omega_{E/F}$ and $G^{op}=\mathrm{U(2,E/F)}$ is the quasi-split unitary group, where $E$ is a quadratic field extension over a p-adic field $F$. Denote $$^LG^{op}=\mathrm{GL}_2(\mathbb{C})\rtimes<\sigma>,$$ where $\sigma$-action on $\mathrm{GL}_2(\mathbb{C})$ is given by \[\sigma(g)=\omega_0 {(g^t)}^{-1}\omega_0^{-1}=g\cdot\det(g)^{-1},\]$\omega_0=\begin{pmatrix} &1\\-1 \end{pmatrix} \mbox{ and }g\in \mathrm{GL}_2(\mathbb{C}) $, $g^t$ denotes its transpose matrix. Given an irreducible representation $\pi$ of $\mathrm{GL}_2(E)$ with $\phi=\phi_\pi$ irreducible (for simplicity), there is no other pure inner form for $\mathrm{GL}_2$. 
Then \[\dim \mathrm{Hom}_{\mathrm{GL}_2(F)}(\pi,\omega_{E/F})=|F(\phi)|, \] where $F(\phi)=\{\tilde{\phi}:WD_F\longrightarrow {}^LG^{op}|~\tilde{\phi}|_{WD_E}=\phi \}$ and $|F(\phi)|$ denotes its cardinality. \begin{prop} \label{conj:GL(2)} The following statements are equivalent: \begin{enumerate}[(i)] \item $\dim \mathrm{Hom}_{\mathrm{GL}_2(F)}(\pi,\omega_{E/F})=1;$ \item the Langlands parameter $\phi$ is conjugate-symplectic; \item there is only one extension $\tilde{\phi}\in F(\phi).$ \end{enumerate} \end{prop} \begin{proof} We only prove the direction (ii)$\mathbb{R}ightarrow$(iii) and the rest follows from Flicker's results \cite{flicker1991ondist}. If $\phi$ is conjugate-symplectic, then $$\phi^s=\phi^\vee=\phi(\det\phi)^{-1},$$ where $s\in W_F\setminus W_E $ is fixed. There exists $A\in \mathrm{GL}_2(\mathbb{C})$ such that \[\phi(sts^{-1})=\phi^s(t)= A\cdot\phi(t)\det(\phi(t))^{-1}\cdot A^{-1}\] for all $t\in WD_E$. Pick $a\in\mathbb{C}^\times$ such that $a^2\cdot\det A=1,$ so that $aA\in \mathrm{SL}_2(\mathbb{C}).$ Set \[\tilde{\phi}(s)=aA\cdot \sigma\] and $\tilde{\phi}(t)=\phi(t)$ for $t\in WD_E$. 
Then $$\tilde{\phi}(sts^{-1})=\tilde{\phi}(s)\cdot \phi(t)\cdot\tilde{\phi}(s)^{-1}$$ and $\tilde{\phi}(s^2)=\phi(s^2)=(\tilde{\phi}(s))^2$ due to the sign of $\phi.$ Therefore $\tilde{\phi}\in F(\sigma).$ If there are two extensions $\tilde{\phi}_i$ with $A_i\in \mathrm{SL}_2(\mathbb{C})$ such that $\tilde{\phi}_i|_{WD_E}=\phi,$ then $A_1A_2^{-1}\in Z(\phi)\cong\mathbb{C}^\times$ by Schur's lemma, so that $\phi_1=\phi_2.$ \end{proof} \begin{rem} This method will appear again when we study the Prasad conjecture for $\mathbf{G}=\mathrm{GSp}_4$ in \S\mathrm{Re}f{7.4.1} The key idea is to choose a proper element $A$ such that the lift $\tilde{\phi}$ satisfies $\tilde{\phi}(s)=A\cdot\sigma$ and $\tilde{\phi}|_{WD_E}=\phi.$ \end{rem} \subsection{The Prasad conjecture for $\mathrm{GSp}_4$}\label{subsect:GSp(4)conj} The aim of this subsection is to verify the Prasad conjecture for $\mathrm{GSp}_4$. Now we consider the generic representation $\tau=\theta(\Pi\boxtimes\chi)$ of $\mathrm{GSp}_4(E)$, with $\phi_\Pi$ conjugate-symplectic and $\chi|_{F^\times}=1.$ Note that the Langlands parameter $\phi_\Pi=i\circ\phi_\tau,$ where \[i:\mathrm{GSp}_4(\mathbb{C})\rightarrow\mathrm{GL}_4(\mathbb{C}) \] is the embedding between $L$-groups. Moreover, $\chi$ is the similitude character of $\phi_\tau.$ If $\phi_{\Pi}$ is conjugate-symplectic (resp. conjugate-orthogonal), we say that $\phi_\tau$ is conjugate-symplectic (resp. conjugate-orthogonal). \begin{lem} Assume that $\tau=\theta(\Pi\boxtimes\chi)$ is a generic representation of $\mathrm{GSp}_4(E)$ and $\omega_\tau|_{F^\times}=\mathbf{1}$. Then $\tau$ is $(\mathrm{GSp}_4(F),\omega_{E/F})$-distinguished if and only if $\phi_{\Pi}$ is conjugate-symplectic. \end{lem} \begin{proof} Due to Theorem \mathrm{Re}f{localgspperiod}, the following are equivalent: \begin{itemize} \item $\tau$ is $\mathrm{GSp}_4(F)$-distinguished; \item $\Pi$ is $\mathrm{GL}_4(F)$-distinguished; \item $\phi_{\Pi}$ is conjugate-orthogonal. 
\end{itemize} Fix a character $\chi_E$ of $E^\times$ such that $\chi_E|_{F^\times}=\omega_{E/F}$. Then $\tau$ is $(\mathrm{GSp}_4(F),\omega_{E/F})$-distinguished if and only if $\tau\otimes\chi_E\circ\lambdabda_W$ is $\mathrm{GSp}_4(F)$-distinguished, which is equivalent to that $\phi_\Pi\otimes\chi_E$ is conjugate-orthogonal. Note that $\chi_E^{-1}$ is conjugate-symplectic. Hence $\tau$ is $(\mathrm{GSp}_4(F),\omega_{E/F})$-distinguished if and only if $\phi_{\Pi}$ is conjugate-symplectic. \end{proof} Recall that if $\mathbf{G}=\mathrm{GSp}_{2n}$, then $\chi_\mathbf{G}=\omega_{E/F}$ and $$G^{op}(F)=\{g\in \mathrm{GSp}_{2n}(E)|\sigma(g)=\theta(g)\} $$ where $\theta(g)=\lambdabda_W (g)^{-1}g $ is the involution. Note that the $\sigma$-actions on $\mathrm{GSp}_4(E)$ and $\mathrm{GSp}_4(\mathbb{C})$ are totally different. (Hope that it does not confuse the reader.) Observe that $H^1(\mathrm{Gal}(E/F),Z(\hat{G}^{op})^{W_E})=1,$ which corresponds to the fact that the pure inner form of $\mathrm{GSp}_{2n}$ is trivial. According to Theorem \mathrm{Re}f{localgspperiod}, we will divide the proof of Theorem \mathrm{Re}f{thm1.2} into four parts: \begin{itemize} \item $\phi_\tau $ is irreducible; \item $\phi_{\tau}=\rho\oplus\rho\nu$ with $\nu\neq\mathbf{1}$; \item the endoscopic case $\phi_\tau=\phi_{\pi_1}\oplus\phi_{\pi_2} $ and $\tau$ is generic; \item$ \phi_\tau=\phi_{\pi_1}\oplus\phi_{\pi_2} $ and $\tau$ is nongeneric. \end{itemize} See \S\mathrm{Re}f{7.4.1}--\S\mathrm{Re}f{7.4.4}. \subsubsection{The irreducible $L$-parameter $\phi_\tau $}\label{7.4.1} Given a conjugate-symplectic $L$-parameter $\phi=\phi_\tau,$ which is irreducible, we want to extend $\phi$ to $$\tilde{\phi}:WD_F\longrightarrow {}^LG_0= \mathrm{GSp}_4(\mathbb{C})\rtimes<\sigma>,$$ where $\sigma$ acts on $\mathrm{GSp}_4(\mathbb{C})$ by \[\sigma(g)=g\cdot\text{sim}(g)^{-1}. 
\] Let $s\in W_F\setminus W_E.$ The parameter $\phi$ is conjugate-symplectic, so that $\phi^\vee=\phi^s$ and $\phi^\vee=\phi\chi^{-1}$. Hence there exists an element $A\in \mathrm{GSp}_4(\mathbb{C})$ such that \begin{equation}\label{conjdual} \phi(sts^{-1})=\phi^s(t)=A\cdot \phi(t)\chi^{-1}(t) \cdot A^{-1} \end{equation} for all $t\in WD_E$. Pick $a\in\mathbb{C}^\times$ such that $a^2=\text{sim} (A)^{-1}$. Then $aA\in \mathrm{Sp}_4(\mathbb{C}).$ Set \[\tilde{\phi}(s)=aA\cdot\sigma\mbox{ and }\tilde{\phi}(t)=\phi(t)\] for $t\in WD_E$. Then $\phi(sts^{-1})=A\phi(t)\chi^{-1}(t)A^{-1}=\tilde{\phi}(s)\cdot\phi(t)\cdot\tilde{\phi}(s)^{-1}$. Moreover, we will show that \[\tilde{\phi}(s^2)=\phi(s^2)=(\tilde{\phi}(s))^2. \] Then $\tilde{\phi}\in\mathrm{Hom}(WD_F, {}^LG_0)$ and $\tilde{\phi}|_{WD_E}=\phi.$ \par Assume that $<-,->$ is the $WD_E$-equivariant bilinear form associated to $$\phi_\tau:WD_E \rightarrow \mathrm{GSp}_4(\mathbb{C})=\mathrm{GSp}(V,<-,->).$$ Set $$B(v,w)=<v,A^{-1}w>$$ for $v,w\in V. $ Then \eqref{conjdual} implies that \begin{align*} B(\phi(t)v,\phi(sts^{-1})w)&=<\phi(t)v,\phi(t)\chi^{-1}(t)A^{-1}w>\\ &=\chi(t)\cdot<v,\chi^{-1}(t)A^{-1}w>\\ &=B(v,w). \end{align*} So $B$ is a conjugate-self-dual bilinear form on $\phi$ and hence it has sign $-1$ by Schur's lemma, i.e., $$-B(w,v)=B(v,\phi(s^2)w).$$ Therefore we have \begin{align*} <v,w>&=-<w,v>\\ &=-B(w,Av)\\ &=B(Av,\phi(s^2)w)\\ &=<Av,A^{-1}\phi(s^2)w>\\ &=<v,a^{-2}A^{-2}\phi(s^2)w> \end{align*} and so $\phi(s^2)=a^2A^2=(\tilde{\phi}(s))^2.$ \begin{prop} Assume that $\tau=\theta(\Pi\boxtimes\chi)$ with $\phi_\Pi$ irreducible. 
Then there exists at most one extension $\tilde{\phi}:WD_F\longrightarrow {}^LG_0$ such that $\tilde{\phi}|_{WD_E}=\phi_\tau.$ \end{prop} \begin{proof} If there are two extensions $\tilde{\phi}_i(i=1,2)$ associated to $A_i\in \mathrm{Sp}_4(\mathbb{C})$ satisfying \[\tilde{\phi}_i(sts^{-1})=\tilde{\phi}_i(s)\cdot\phi(t)\cdot\tilde{\phi}_i(s)^{-1}\] for all $t\in WD_E$, then $A_1A_2^{-1}$ commutes with $\phi.$ So $A_1A_2^{-1}$ is a scalar by Schur's Lemma. Hence $\tilde{\phi}_1=\tilde{\phi}_2.$ \end{proof} Hence, if $\tau=\theta(\Pi\boxtimes\chi)$ with $\phi_\Pi$ irreducible and conjugate-symplectic, then there is one extension $\tilde{\phi}\in F(\phi_\tau)$ and \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F})=1. \] If $\phi=\phi_\tau$ is conjugate-symplectic and reducible, then there are several cases. \subsubsection{$\phi_\tau=\rho+{\rho}\nu $ with $\nu\neq\mathbf{1}$ and $\rho $ irreducible } If $\phi_\Pi=\rho+\rho\nu$ with $\rho$ irreducible and $\chi=\nu\cdot\det\rho$ conjugate-orthogonal, thanks to \cite[Theorem 5.2]{matringe2009distinction}, there are two subcases: \begin{itemize} \item $\rho$ and $\rho\nu$ are both conjugate-symplectic or \item $\rho^s=\rho^\vee\nu^{-1}.$ \end{itemize} \begin{enumerate}[(i)] \item If $\rho$ and $\rho\nu$ are both conjugate-symplectic, then $\nu$ is conjugate-orthogonal and there exist $$\tilde{\rho}_i:WD_F\longrightarrow \mathrm{GL}_2(\mathbb{C})\rtimes<\sigma>$$ such that $\tilde{\rho}_1|_{WD_E}=\rho,~ \tilde{\rho}_2|_{WD_E}=\rho\nu$ and $\tilde{\rho}_i(s)=A_i\cdot\sigma$ for $A_i\in\mathrm{SL}_2(\mathbb{C})$ due to Proposition \mathrm{Re}f{conj:GL(2)}. Note that $\rho$ is irreducible. Then for $t\in WD_E$, \[\tilde{\rho}_1^s(t)\nu^s(t)=\tilde{\rho}_2^s(t)=A_2\rho^\vee(t)A_2^{-1}\cdot \nu^{-1}(t) \] and so $A_1\rho^\vee(t)A_1^{-1}=A_2\rho^\vee(t)A_2^{-1}$ which implies $A_1A_2^{-1}\in\mathbb{C}^\times$. So $A_1=A_2$ or $-A_2$. 
Set \[\tilde{\phi}(s)= \begin{pmatrix} &A_1\\-A_1 \end{pmatrix}\cdot\sigma\in \mathrm{Sp}_4(\mathbb{C})\rtimes<\sigma>\mbox{ and }\tilde{\phi}(t)=\begin{pmatrix} \rho(t)\\&\rho(t)\nu(t) \end{pmatrix} \] for $t\in WD_E$. Then $\tilde{\phi}\in F(\phi)$ is the unique extension of $\phi_\tau$. \item If $\rho^s\cong \rho^\vee\nu^{-1},$ then $\phi_\tau^s\cong\phi_\tau^\vee=(\det\rho)^{-1}\cdot\nu^{-1}\phi_\tau=\rho^\vee\nu^{-1}+\rho^\vee$ so that \[\rho^\vee\cong \rho^s\nu^s\cong \rho^s\nu. \] Set $\rho^s(t)\nu(t)=(\det\rho(t))^{-1}\cdot A\rho(t)A^{-1}$ for $t\in WD_E$. Note that $\rho$ is irreducible. Then $$A^{-2}\rho(s^2)\in\mathbb{C}^\times \mbox{ and } \det\rho^s\cdot\det\rho\cdot\nu^2=\mathbf{1}, $$ which implies that $\nu=\nu^s.$ Since \[\rho(s^2)\nu(s^2)=A\rho(s^2)A^{-1}(\det \rho(s^2))^{-1}=\rho(s^2)(\det\rho(s^2))^{-1}, \] we obtain $\nu(s^2)\cdot \det(\rho(s^2))=1$. Set \[\tilde{\phi}(s)=\begin{pmatrix} A\\&A\cdot\det(A^{-1}) \end{pmatrix}\cdot \sigma\mbox{ and }\tilde{\phi}(t)=\begin{pmatrix} \rho(t)\\&\rho(t)\nu(t) \end{pmatrix} \] for $t\in WD_E$. Then $\tilde{\phi}(sts^{-1})=\tilde{\phi}(s)\cdot \phi(t)\cdot\tilde{\phi}(s)^{-1}$ and $\tilde{\phi}|_{WD_E}=\phi.$ \end{enumerate} \subsubsection{Endoscopic case} If $\phi_\tau=\rho_1+\rho_2$ is the endoscopic case, then $\det\rho_1=\det\rho_2$ are both conjugate-orthogonal. There are several subcases. Assume that $\tau=\theta(\pi_1\boxtimes\pi_2)$ is generic, $\rho_i=\phi_{\pi_i}(i=1,2)$ and $\rho_0=\chi_1+\chi_2,$ with $\chi_1\neq\chi_2$ and $\chi_1|_{F^\times}=\chi_2|_{F^\times}=\omega_{E/F}.$ Assume that $\rho_1\neq\rho_2.$ Then \begin{enumerate}[(i)] \item If $\rho_1$ and $\rho_2$ are both conjugate-symplectic and $\rho_i\neq\rho_0~(i=1,2),$ so that both $\pi_1$ and $\pi_2$ are $(D^\times(F),\omega_{E/F})$-distinguished, then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F})=2. 
\] Thanks to Proposition \mathrm{Re}f{conj:GL(2)}, there exist $\tilde{\rho}_1$ and $\tilde{\rho}_2$ of $\mathrm{U(2,E/F)}$ such that $\tilde{\rho}_i|_{WD_E}=\rho_i.$ So there are two lifts $\tilde{\phi}_1=\tilde{\rho}_1+\tilde{\rho}_2$ and $\tilde{\phi}_2=\tilde{\rho}_1\omega_{E/F}+\tilde{\rho}_2$ such that $\tilde{\phi}_i|_{WD_E}=\phi.$ \par If $\rho_1$ and $\rho_2$ are both irreducible, then every lift of $\phi$ should be of the form \[s\mapsto\begin{pmatrix} \lambdabda_1\tilde{\rho}_1(s)&\\&\lambdabda_2\tilde{\rho}_2(s) \end{pmatrix}\in\mathrm{GSp}_4(\mathbb{C})\rtimes<\sigma>\] with $\lambdabda_i^2=1. $ It is known that $\tilde{\phi}=-\tilde{\phi}$ as parameters of ${}^LG_0.$ \par If $\rho_1=\chi^{-1}+\chi^s,$ then the centralizer $Z_{\mathrm{GL}_2(\mathbb{C})}(\rho_1)$ is $\mathbb{C}^\times\times\mathbb{C}^\times$ or $\mathrm{GL}_2(\mathbb{C}).$ Moreover, \[\tilde{\rho}_1(s)=a\begin{pmatrix} 1\\&\chi(s^2) \end{pmatrix}\cdot \sigma\mbox{ with }a^2\chi(s^2)=1.\] In this case, $\tilde{\rho}_1+\tilde{\rho}_2\neq\tilde{\rho}_1\omega_{E/F}+\tilde{\rho}_2$, which will be a different story if $\rho_1=\rho_0.$ \item If $\rho_1= \rho_0$ and $\rho_2$ is conjugate-symplectic, then $\tilde{\rho}_1(s)=\begin{pmatrix} &1\\-1 \end{pmatrix}\cdot\sigma$ and $\tilde{\phi}_1= \tilde{\phi}_2.$ \item If $\rho_1^\vee= \rho_2^s,$ then there exist $A\in \mathrm{SL}_2(\mathbb{C})$ such that \[A^{-1}\rho_1^\vee(t)A=\rho_2^s(t) \] for $t\in WD_E$. Set $\tilde{\phi}(s)=\begin{pmatrix} & A\rho_2(s^2) \\mathbb{A}^{-1} \end{pmatrix}\cdot\sigma$. 
Then $\tilde{\phi}(sts^{-1})=\tilde{\phi}(s)\cdot\tilde{\phi}(t)\cdot\tilde{\phi}(s^{-1}).$ \end{enumerate} Now we assume $\rho_1=\rho_2.$ \begin{enumerate}[(i)] \item If $\rho_1$ is conjugate-symplectic but $\rho_1\neq\rho_0$, then $\tilde{\phi}_1=\tilde{\rho}_1+\tilde{\rho}_1$ and $\tilde{\phi}_2=\tilde{\rho}_1+\tilde{\rho}_1\omega_{E/F}.$ \item If $\rho_1=\rho_0,$ there is only one lift $\tilde{\phi}=\tilde{\rho}_1+\tilde{\rho}_1.$ \item If $\rho_1$ is not conjugate-symplectic but conjugate-orthogonal, set $$\tilde{\phi}(s)=\begin{pmatrix} &A\\mathbb{A} \end{pmatrix}\cdot\sigma\in \mathrm{Sp}_4(\mathbb{C})\rtimes<\sigma> $$ where $A\in \mathrm{SL}_2(\mathbb{C})$ satisfies $A\rho_1^\vee(t)A^{-1}=\rho_1^s(t) .$ Let us verify $$\phi(s^2)=\tilde{\phi}(s^2)=\tilde{\phi}(s)^2 $$ i.e., $A^2=\rho_1(s^2)$. \begin{itemize} \item If $\rho_1$ is irreducible, then $A^{-2}\rho_1(s^2)\in\mathbb{C}^\times$. Note that $\rho_1$ is conjugate-orthogonal. Then $A^{-2}\rho_1(s^2)=1$, i.e. $A^2=\rho_1(s^2)$. \item If $\rho_1=\mu_1+\mu_2$ with $\mu_1\mu_2^s=\mathbf{1}$, then $\rho_1$ is conjugate-symplectic. \item If $\rho_1=\mu_1+\mu_2$ with $\mu_1\neq\mu_2$ and $\mu_1|_{F^\times}=\mu_2|_{F^\times}=\mathbf{1}$, then $A=1$ and $A^2=1=\rho_1(s^2)$. \end{itemize} \end{enumerate} \subsubsection{Non-generic tempered} \label{7.4.4} Let $\tau $ be an irreducible nongeneric tempered representation of $\mathrm{GSp}_4(E)$ and $\tau=\theta(\pi_1\boxtimes \pi_2 ),$ where each $\pi_i$ is an irreducible representations of $D_E^\times(E)$. If the enhanced $L$-parameter of $\tau$ is $(\phi_\tau,\lambdabda)$, where $\phi_\tau=\rho_1+\rho_2$, $\rho_i=\phi_{\pi_i}$ and $\lambdabda$ is a nontrivial character of the component group $\pi_0(Z_{\phi_\tau}/Z_{\mathrm{GSp}_{4}(\mathbb{C})})$, then \[\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F} )=0. 
\] On the Galois side, if $\phi_\pi=\rho_1+\rho_2$, then for arbitrary parameter $\tilde{\phi}$ satisfying $\tilde{\phi}|_{WD_E}=\phi_\tau,$ the restricted representation $\lambdabda|_{\pi_0(Z(\tilde{\phi})) }$ does not contain the trivial character $\mathbf{1},$ i.e. \[m(\lambdabda,\tilde{\phi})=0. \] Finally we can prove Theorem \mathrm{Re}f{thm1.2}. \begin{proof}[Proof of Theorem \mathrm{Re}f{thm1.2}] It is obvious if $\tau$ is a non-generic tempered representation of $\mathrm{GSp}_4(E).$ Since the Levi subgroup of a parabolic subgroup in $\mathrm{GSp}_4$ are $\mathrm{GL}$-type, \cite[Lemma 14]{prasad2015arelative} implies that $\deg\Phi(\tilde{\phi})=1$ in our case. By the above discussions, we know that if $\tau$ is generic, then $\dim\mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F} )$ equals to the number of inequivalent lifts $|F(\phi_{\tau})|. $ \end{proof} \section{Proof of Theorem \mathrm{Re}f{prasadfordisc}}\label{secpgsp} This section focuses on the Prasad conjecture for $\mathrm{PGSp}_4$. Let $\tau$ be a representation of $\mathrm{PGSp}_4(E),$ i.e., a representation $\tau$ of $\mathrm{GSp}_4(E)$ with trivial central character. If the multiplicity \[\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F} )=\dim \mathrm{Hom}_{\mathrm{GSp}_4(F)}(\tau,\omega_{E/F} ) \] is nonzero, then we say $\tau$ is $(\mathrm{PGSp}_4(F),\omega_{E/F})$-distinguished. Let $\mathrm{PGSp}_{1,1}=\mathrm{PGU}_2(D)$ be the pure inner form of $\mathrm{PGSp}_4$ defined over $F$. Similarly, \[\dim\mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=\dim\mathrm{Hom}_{ \mathrm{GSp}_{1,1}(F)}(\tau,\omega_{E/F}). \] \subsection{Notation} \begin{itemize} \item $\tau,\pi^{++},\pi^{--},\pi^+$ and $\pi^-$ are representations of $\mathrm{PGSp}_4(E)$. \item $s\in W_F\setminus W_E$ and $\phi_{\tau}^s(t)=\phi_{\tau}(sts^{-1}) $ for $t\in WD_E$. \item $S_\phi=\pi_0(Z(\phi))$ is the component group associated to $\phi$. 
\item $\tau':WD_F\longrightarrow\mathrm{Sp}_4(\mathbb{C})$ and $\tau_i'$ are Langlands parameters of $\mathrm{PGSp}_4(F)$. \item $\mathcal{C}_i$ is a coset of $A_G(\tau_i')$ in $H^1(W_F,\mathrm{PGSp}_4)$ and $1_{\mathcal{C}_i}$ denotes its characteristic function. \item $\mathrm{PGSp}_{1,1}$ (resp. $PD^\times$) is the pure inner form of $\mathrm{PGSp}_4$ (resp. $\mathrm{PGL}_2$) defined over $F$. \end{itemize} \subsection{The Prasad conjecture for $\mathrm{PGL}_2$} If $\mathbf{G}=\mathrm{PGL}_2$, then $\chi_{\mathbf{G}}=\omega_{E/F} $ and $G^{op}=\mathrm{PGL}_2$. \begin{thm} Let $\pi$ be a generic representation of $\mathrm{PGL}_2(E)$. Then the following are equivalent: \begin{enumerate}[(i)] \item $\dim\mathrm{Hom}_{\mathrm{PGL}_2(F)}(\pi,\omega_{E/F}) =1 $; \item the Langlands parameter $\phi_{\pi}$ is conjugate-symplectic; \item there exists a parameter $\tilde{\phi}:WD_F\rightarrow\mathrm{SL}_2(\mathbb{C})$ such that $\tilde{\phi}|_{WD_E}=\phi_{\pi}$; \item $\pi$ is $(PD^\times(F),\omega_{E/F} )$-distinguished or $\pi=\pi(\chi_E,\chi_E^{-1})$ with $\chi_E|_{F^\times}=\omega_{E/F}$ and $\chi_E^2\neq\mathbf{1}$. \end{enumerate} \end{thm} \begin{proof} See \cite[Theorem 6.2]{gan22arithmeticity} and \cite[the Main Theorem (Local)]{hengfei2016new}. \end{proof} \subsection{The Prasad conjecture for $\mathrm{PGSp}_4$} Recall that if $\mathbf{G}=\mathrm{PGSp}_4$, then $\hat{\mathbf{G}}=\mathrm{Spin}_5(\mathbb{C})\cong \mathrm{Sp}_4(\mathbb{C}),~G^{op}=\mathrm{PGSp}_4$ and $\chi_\mathbf{G}=\omega_{E/F}.$ Let $\tau$ be a representation of $\mathrm{PGSp}_4(E)$ with enhanced $L$-parameter $(\phi_\tau,\lambdabda_\tau )$. Assume that the $L$-packet $\Pi_{\phi_\tau}$ is generic. 
The Prasad conjecture for $\mathrm{PGSp}_4$ implies the following: \begin{enumerate}[(i)] \item If $\tau$ is $(\mathrm{PGSp}_4(F),\omega_{E/F})$-distinguished, then \begin{itemize} \item $\Pi_{\phi^s_\tau}=\Pi_{\phi^\vee_\tau} ,$ an equality of $L$-packets and \item $\phi_\tau={\tau'}|_{WD_E} $ for some parameter ${\tau' }:WD_F\longrightarrow \mathrm{Sp}_4(\mathbb{C}).$ \end{itemize} \item If $\tau$ is generic and there exists $\tau':WD_F\longrightarrow\mathrm{Sp}_4(\mathbb{C})$ such that $\tau'|_{WD_E}=\phi_\tau $, then $\tau$ is $(\mathrm{PGSp}_4(F),\omega_{E/F})$-distinguished. \item Assume that $\phi_\tau={\tau'}|_{WD_E}$ for some parameter ${\tau'}:WD_F\longrightarrow \mathrm{Sp}_4(\mathbb{C})$. If $\tau$ is a discrete series representation, then we set \[F(\phi_\tau)=\{\tau':\tau'|_{WD_E}=\phi_\tau \}=\bigcup_i \mathcal{O}(\tau_i') \] where $\mathcal{O}(\tau'_i)=\{\tau'_i,~\omega_{E/F}\cdot \tau'_i \},$ which may be a singleton. Given a parameter $\tau_i':W_F\longrightarrow \mathrm{Sp}_4(\mathbb{C})$ with $\phi_\tau$ its restriction to $WD_E$ and $\tau_i'\cdot\omega_{E/F}=\tau_i',$ there exists an element $g_i\in Z(\phi_\tau)$ such that \[(\tau_i'\cdot\omega_{E/F})(x)=g_i\tau_i'(x)g_i^{-1}\] for all $x\in WD_F$ and so $g_i$ normalizes $Z(\tau_i').$ Then $ \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F} )\neq0$ if $\lambdabda_\tau(g_i)=1$ and $\mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})\neq0$ if $\lambdabda_\tau(g_i)=-1.$ In this case, $A_G(\tau_i')\subset H^1(W_F,\mathrm{PGSp}_4)$ is trivial and \[\mathcal{C}_i=\begin{cases} \{\mathrm{PGSp}_4\},& \mbox{ if }\lambdabda(g_i)=1,\\ \{\mathrm{PGSp}_{1,1} \},&\mbox{ if }\lambdabda(g_i)=-1. \end{cases} \] If $\tau_i'\neq\tau_i'\omega_{E/F},$ then $A_G(\tau_i')=H^1(F,\mathrm{PGSp}_4)$ and $\mathcal{C}_i=\{\mathrm{PGSp}_4,\mathrm{PGSp}_{1,1} \}.$ Set $G_\alpha$ to be $\mathrm{PGSp}_4$ or $\mathrm{PGSp}_{1,1}$. 
Then \[\dim \mathrm{Hom}_{G_\alpha(F)}(\tau,\omega_{E/F} )=\sum_im(\lambdabda,\tau_i') 1_{\mathcal{C}_i}(G_\alpha)/d_0(\tau'_i) ,\] where $m(\lambdabda,\tau_i')$ is the multiplicity of the trivial representation $\mathbf{1}$ contained in the restricted representation $\lambdabda|_{\pi_0(Z(\tau'_i))}.$ \item If $\Pi_{\phi_{\tau}}$ is generic, then we have \eqref{equaforsum}, i.e., \[\dim\mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})+\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=\sum_{\varphi\in F(\phi_\tau)}m(\lambdabda,\varphi)\cdot\frac{\deg\Phi(\varphi)}{d_0(\varphi)}. \] \end{enumerate} Let us start to verify the Prasad conjecture for $\mathrm{PGSp}_4$. \begin{thm} Let $\tau$ be a generic representation of $\mathrm{PGSp}_4(E)$. Then $\tau$ is $(\mathrm{PGSp}_4(F),\omega_{E/F})$-distinguished if and only if there exists a parameter $\tau':WD_F\rightarrow\mathrm{Sp}_4(\mathbb{C})$ such that $\tau'|_{WD_E}=\phi_{\tau}$. \end{thm} \begin{proof} Assume that $\tau=\theta(\Pi\boxtimes\chi)$ with $\chi=\mathbf{1}$. Fix $s\in W_F\setminus W_E. $ \begin{enumerate}[(i)] \item If $\tau$ is $(\mathrm{PGSp}_4(F),\omega_{E/F})$-distinguished, then $\phi_{\Pi}$ is conjugate-symplectic and so $\Pi_{\phi_{\tau}^s}=\Pi_{\phi^\vee_\tau}=\Pi_{\phi_\tau}$. If $\phi_{\Pi}$ is irreducible, then we can repeat the process in \S\mathrm{Re}f{7.4.1} to obtain that there exists a parameter $\tau':WD_F\rightarrow\mathrm{Sp}_4(\mathbb{C}) $ such that $\tau'|_{WD_E}=\phi_\tau $. If $\phi_{\Pi}=\rho_1\oplus\rho_2$ is reducible and $\rho_1$ is irreducible, then either $\rho_1^s=\rho_2^\vee$ or both $\rho_1$ and $\rho_2$ are conjugate-symplectic. \begin{itemize} \item If $\rho_1^s=\rho_2^\vee$, then there are two subcases. If $\rho_2^\vee=\rho_2$, then $\rho_1^s=\rho_2$. Set $\tau'=\mathrm{Ind}_{WD_E}^{WD_F}\rho_1$ if $\rho_1\neq\rho_2$. 
If $\rho_1=\rho_2=\rho_2^\vee$, then $\rho_1^s=\rho_1$ and so there exists a parameter $\tilde{\rho}_1:WD_F\rightarrow\mathrm{GL}_2(\mathbb{C})$ such that $\tilde{\rho}_1|_{WD_E}=\rho_1$. Set $\tau'=\tilde{\rho}_1\oplus\tilde{\rho}_1^\vee$. If $\rho_2^\vee\neq \rho_2$, then $\rho_2^\vee=\rho_1$. Thus $\rho_1^s=\rho_1$ and $\tau'=\tilde{\rho}_1\oplus\tilde{\rho}_1^\vee$. \item If both $\rho_1$ and $\rho_2$ are conjugate-symplectic, then \[\tau'=\begin{cases} \mathrm{Ind}_{WD_E}^{WD_F}\rho_1,&\mbox{ if }\rho_1^s=\rho_2\neq\rho_1; \\ \tilde{\rho}_1\oplus\tilde{\rho}_1^\vee,&\mbox{ if }\rho_1^s=\rho_1. \end{cases} \] \end{itemize} If neither $\rho_1$ nor $\rho_2$ is irreducible, then $\phi_{\tau}$ belongs to the endoscopic case. Thanks to Theorem \mathrm{Re}f{localgspperiod}(ii), either $\rho_1^s=\rho_2^\vee$ or both $\rho_1$ and $\rho_2$ are conjugate-symplectic. The argument is similar and we omit it here. Therefore, there exists $\tau':WD_F\rightarrow\mathrm{Sp}_4(\mathbb{C})$ such that $\tau'|_{WD_E}=\phi_{\tau}$. \item Conversely, if there exists $\tau':WD_F\rightarrow\mathrm{Sp}_4(\mathbb{C})$ such that $\tau'|_{WD_E}=\phi_\tau $, then it suffices to show that $\phi_{\Pi}$ is conjugate-symplectic. Because the nongeneric member in the $L$-packet $\Pi_{\phi_{\tau}}$ is not $(\mathrm{GSp}_4(F),\omega_{E/F})$-distinguished due to Theorem \mathrm{Re}f{localgspperiod}(i) if $\Pi_{\phi_{\tau}}$ contains more than one representation. Assume that \[\phi_\tau:WD_E\longrightarrow\mathrm{Sp}(V,<-,->)=\mathrm{Sp}_4(\mathbb{C}) \] and $$\phi_{\Pi}=i\circ\phi_\tau:WD_E\longrightarrow\mathrm{GL}(V) $$ where $i:\mathrm{Sp}_4(\mathbb{C})\rightarrow\mathrm{GL}(V)$ is the embedding between the $L$-groups. Then we set $$B(m,n)=<m,\tau'(s)^{-1}n>$$ for $m,n\in V$. It is easy to check that $B(\phi_{\Pi}(t)m,\phi_{\Pi}^s(t)n )=B(m,n)$ and \[B(m,\phi_{\Pi}(s^2)n )=<m,\tau'(s)n>=-<\tau'(s)n,m>=-<n,\tau'(s)^{-1}m>=-B(n,m). 
\] Therefore, the bilinear form $B$ on $V$ implies that $\phi_{\Pi}$ is conjugate-symplectic. \end{enumerate} We have finished the proof. \end{proof} \subsection{The proof of Theorem \mathrm{Re}f{prasadfordisc}}Before we give the proof of Theorem \mathrm{Re}f{prasadfordisc}, we will use the results in Theorem \mathrm{Re}f{localgspperiod} and Theorem \mathrm{Re}f{innerformperiod} to study the equality \eqref{equaforsum} in detail. According to the Langlands parameter $\phi_\tau $, we divive them into three cases: \begin{itemize} \item the endoscopic case, \item the discrete series but non-endoscopic case and \item $\phi_\tau=\rho+\rho\nu $ with $\nu\neq\mathbf{1}$ and $\nu\det\rho=\mathbf{1}$. \end{itemize} Set $S_\phi=\pi_0(Z(\phi))$ to be the component group. We identify the characters of $W_F$ and the characters of $F^\times$ via the local class field theory. \subsubsection{ Endoscopic case} Given $\phi_\tau=\phi_{1}\oplus\phi_{2},$ there are two cases: $\phi_1=\phi_2$ and $\phi_1\neq\phi_2.$ \begin{enumerate}[(A)] \item If $\phi_1=\phi_2=\rho$ are irreducible, then the L-packet $\Pi_{\phi_\tau}=\{\pi^+,\pi^- \}$ and $S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}$, where $\pi^-$ (resp. $\pi^+$) is a nongeneric (resp. generic) representation of $\mathrm{PGSp}_4(E)$. There are two subcases: \begin{enumerate}[label=(A\arabic*)] \item If $\rho$ is conjugate-orthogonal, then \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^+,\omega_{E/F} )=0=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^-,\omega_{E/F}) \] and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^-,\omega_{E/F})=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^+,\omega_{E/F}). 
\] On the Galois side, there is only one extension $\tilde{\phi}=\bar{\rho}\oplus\bar{\rho}\cdot\omega_{E/F}$ with $$\deg\Phi(\tilde{\phi})=2\mbox{ and } S_{\tilde{\phi}}=\{\mathbf{1}\}\rightarrow S_{\phi_\tau},$$ where $\bar{\rho}:WD_F\rightarrow \mathrm{GL}_2(\mathbb{C})\times W_F$ with $\det\bar{\rho}=\omega_{E/F}.$ Note that $\tilde{\phi}=\tilde{\phi}\cdot\omega_{E/F}$. Then $\pi^+$ supports a period on the trivial pure inner form and $\pi^-$ supports a period on a nontrivial pure inner form. \item If $\rho$ is conjugate-symplectic, then \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^-,\omega_{E/F})=0=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^-,\omega_{E/F}) \] and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^+,\omega_{E/F} )=1,~\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^+,\omega_{E/F} )=2. \] In this case, $\rho$ has two extensions $\bar{\rho} $ and $\bar{\rho}\cdot\omega_{E/F},$ where $\bar{\rho}:WD_F\longrightarrow \mathrm{SL}_2(\mathbb{C}).$ There are three choices for the extension $\tilde{\phi}:WD_F\longrightarrow \mathrm{Sp}_4(\mathbb{C})$ with $\deg\Phi(\tilde{\phi})=1 $ \begin{itemize} \item $\tilde{\phi}^{++}=\bar{\rho}\oplus\bar{\rho}$ with $S_{\tilde{\phi}^{++}}=\mathbb{Z}/2\mathbb{Z}\cong S_{\phi_\tau};$ \item $\tilde{\phi}^{+-}=\bar{\rho}\oplus\bar{\rho}\cdot\omega_{E/F}$ with $S_{\tilde{\phi}^{+-}}=\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}\longrightarrow S_{\phi_\tau} $(sum map); \item $\tilde{\phi}^{--}=\bar{\rho}\cdot\omega_{E/F}\oplus \bar{\rho}\cdot\omega_{E/F}$ with $S_{\tilde{\phi}^{--}}=\mathbb{Z}/2\mathbb{Z}\cong S_{\phi_\tau}.$ \end{itemize} The parameters $\tilde{\phi}^{++}$ and $\phi^{--}$ are in the same orbit under the twisting by $\omega_{E/F},$ which corresponds to both pure inner forms. The parameter $\tilde{\phi}^{+-}$ is fixed under twisting by $\omega_{E/F},$ which supports a period on the trivial pure inner form. 
\item If $\rho$ is not conjugate-self-dual, then both the Galois side and the automorphic side are $0$. \end{enumerate} \item\label{B} If $\phi_1\neq\phi_2$ are both irreducible, then the L-packet of $\mathrm{PGSp}_4$ is $\Pi_{\phi_\tau}=\{\pi^{++},\pi^{--} \}$ and $$S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}.$$ \begin{enumerate}[label=(B\arabic*)] \item If $\phi_1$ and $\phi_2$ both extend to $L$-parameters of $\mathrm{PGL}_2(F),$ i.e. both are conjugate-symplectic, then one has $\phi_1^s\neq\phi_2,$ $$ \dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^{++},\omega_{E/F})=2=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^{++},\omega_{E/F})$$ and $$\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^{--},\omega_{E/F})=0=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^{--},\omega_{E/F}).$$ On the Galois side, there are also four ways of extending $\phi_\tau.$ For each such extension $\tilde{\phi},$ one has $\deg\Phi(\tilde{\phi})=1$ and the equality of component group \[S_{\tilde{\phi}}=S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}. \] So only the representation $\pi^{++}$ in the L-packet can support a period. And there are $2$ orbits in $F(\phi_\tau)$ under twisting by $\omega_{E/F},$ each of size $2.$ \item If $\phi_1$ and $\phi_2$ do not extend to $L$-parameters of $\mathrm{PGL}_2(F),$ but $\phi_1^s=\phi_2=\phi_2^\vee,$ then $$\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^{++},\omega_{E/F})=0=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^{--},\omega_{E/F})$$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\pi^{--},\omega_{E/F})=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\pi^{++},\omega_{E/F}) \] There is a unique way of extending $\phi_\tau=\phi_1\oplus\phi_2$ to $\tilde{\phi}:WD_F\rightarrow \mathrm{Sp}_4(\mathbb{C})$. 
Namely, $\tilde{\phi}=\mathrm{Ind}_{WD_E}^{WD_F}\phi_1$ is an irreducible $4$-dimensional symplectic representation, with a component group \[S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}\hookrightarrow S_{\phi_\tau}(\mbox{diagonal embedding}). \] And $S_{\phi_{\tau}}^{\mathrm{Gal}(E/F)}=S_{\tilde{\phi}}$. So $\pi^{++}$ supports a period on the trivial pure inner form and $\pi^{--}$ supports a period on the nontrivial pure inner form. \end{enumerate} \item If $\phi_1=\chi_{1}\oplus\chi_{1}^{-1}$ is reducible, then there is only one element in the L-packet, i.e. $|\Pi_{\phi_\tau}|=1.$ There are two cases: $\phi_1=\phi_2$ and $\phi_1\neq\phi_2.$ \begin{enumerate}[label=(C\arabic*) ] \item If $\phi_1=\phi_2,$ there are three subcases. \begin{enumerate}[label=(C1.\roman*)] \item If $\chi_{1}=\chi_{1}^s=\chi_F|_{W_E},$ then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=2= \dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F} ) .\] \begin{itemize} \item If $\chi_F^2\neq\omega_{E/F},$ then there are two ways to extend $L$-parameters of $\mathrm{PGL}_2(F),$ denoted by $\bar{\rho}$ and $\bar{\rho}\cdot\omega_{E/F}$. Thus there are $3$ ways of extending $\phi_\tau,$ which are $\tilde{\phi}^{++},\tilde{\phi}^{--}$ and $\tilde{\phi}^{+-}.$ Moreover, $\deg\Phi(\tilde{\phi}^{++})=1=\deg\Phi(\tilde{\phi}^{--})$ and $\deg\Phi(\tilde{\phi}^{+-})=2.$ \item If $\chi_F^2=\omega_{E/F},$ then there is only one way to extend $\phi_\tau.$ Denote it by $\tilde{\phi}.$ Then $$\deg\Phi(\tilde{\phi})=4.$$ \end{itemize} \item If $\chi_{1}\neq\chi_{1}^{-1}$ but $\chi_{1}|_{F^\times}=\omega_{E/F},$ then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F} )=0\mbox{ and }\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})=1. 
\] There is only one way to extend $\phi_1,$ denoted by $$\bar{\rho}=\mathrm{Ind}_{WD_E}^{WD_F}\chi_{1}:WD_F\rightarrow\mathrm{SL}_2(\mathbb{C}).$$ Then $\tilde{\phi}=\bar{\rho}\oplus\bar{\rho}$ with $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}$ and $\deg\Phi(\tilde{\phi})=1.$ Note that $\tilde{\phi}\cdot\omega_{E/F}=\tilde{\phi}$. Then $\tilde{\phi}$ supports a period on the trivial pure inner form. \item If $\chi_{1}\neq\chi_{1}^{-1}$ but $\chi_{1}|_{F^\times}=\mathbf{1}$, then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F} )=0\mbox{ and }\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})=1. \] On the Galois side, there is only one choice $\tilde{\phi}=\bar{\rho}\oplus\bar{\rho}$ and $S_{\tilde{\phi}}=\mathbf{1},$ where $$\bar{\rho}=\mathrm{Ind}_{WD_E}^{WD_F}\chi_{1}:WD_F\rightarrow \mathrm{GL}_2(\mathbb{C})$$ with $\det\rho=\omega_{E/F}.$ Since $\tilde{\phi}=\tilde{\phi}\cdot\omega_{E/F},$ it picks up only the trivial pure inner form. \end{enumerate} \item If $\phi_1\neq\phi_2,$ there are several subcases: \begin{enumerate}[label=(C2.\roman*)] \item If $\chi_{1}=\chi_{1}^s=\chi_F|_{W_E}$ and $\phi_2$ is irreducible and conjugate-symplectic , then $S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}$ and $$\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=2=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F} ).$$ \begin{itemize} \item If $\chi_F^2\neq\omega_{E/F},$ then there are four ways of extending ${\phi_\tau}$ and for each such extension $\tilde{\phi},$ one has $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}\cong S_{\phi_\tau}.$ There are two orbits under the twisting by $\omega_{E/F},$ each of size $2.$ \item If $\chi_F^2=\omega_{E/F},$ then there are two ways of extending $\phi_\tau.$ For each such extension $\tilde{\phi},$ one has $\deg\Phi(\tilde{\phi})=2.$ There is one orbit under the twisting by $\omega_{E/F}.$ \end{itemize} In this case, the identity \begin{equation}\label{stableequation} 
\dim\mathrm{Hom}_{G_\alpha(F)}(\tau,\chi_G)=\sum_i m(\lambdabda,\tilde{\phi}_i)\mathbf{1}_{\mathcal{C}_i}(G_\alpha)\cdot\frac{\deg\Phi(\tilde{\phi}_i)}{d_0(\tilde{\phi}_i)} \end{equation} holds for $G_\alpha=\mathrm{PGSp}_4$ and $\mathrm{PGSp}_{1,1}.$ \item If $\chi_{1}=\chi_{1}^s=\chi_F|_{W_E}$ and $\chi_{2}=\chi_{2}^s=\chi_F'|_{W_E},$ where $\phi_2=\chi_{2}\oplus\chi_{2}^{-1},$ then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=2=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F} ). \] \begin{itemize} \item If neither $\chi_F^2$ nor ${\chi_F'}^2$ equals $\omega_{E/F},$ then there are four ways of extending $\phi_{\tau}.$ There are two orbits under the twisting by $\omega_{E/F},$ each of size $2.$ \item If $\chi_F^2=\omega_{E/F}$ and $\chi_F'^2\neq\omega_{E/F},$ then there are two ways to extend $\phi_\tau$ and for each such extension $\tilde{\phi},$ one has $S_{\tilde{\phi}}=1=S_{\phi_\tau}$ and $\deg\Phi(\tilde{\phi})=2.$ There is one orbit under the twisting by $\omega_{E/F},$ which corresponds to both pure inner forms. \item If $\chi_F^2=\chi_F'^2=\omega_{E/F},$ then there is only one way to extend $\phi_\tau.$ For this extension $\tilde{\phi},$ one has $\deg\Phi(\tilde{\phi})=4.$ \end{itemize} \item If $\chi_{1}\neq\chi_{1}^{-1}$ but $\chi_{1}$ is conjugate-symplectic, and $\phi_2$ is irreducible and conjugate-symplectic, then $S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F} )=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F}). 
\] There are two extensions $\tilde{\phi}=\bar{\rho}_1\oplus\bar{\rho}_2$ or $\bar{\rho}_1\oplus\bar{\rho}_2\omega_{E/F}$ with $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z},$ where $\bar{\rho}_i:WD_F\rightarrow \mathrm{SL}_2(\mathbb{C})$ satisfies $\bar{\rho}_i|_{WD_E}=\phi_i.$ Here the map $S_{\tilde{\phi}}\rightarrow S_{\phi_\tau}$ is given by $$(x,y)\mapsto x+y.$$ There is one orbit under the twisting by $\omega_{E/F},$ which corresponds to both pure inner forms. \item If $\chi_{1}\neq\chi_{1}^{-1}$ but $\chi_{1}$ is conjugate-symplectic, and $\chi_{2}=\chi_{2}^s=\chi_F'|_{W_E}$ where $\phi_2=\chi_{2}\oplus\chi_{2}^{-1},$ then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F} )=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F}). \] \begin{itemize} \item If $\chi_F'^2\neq\omega_{E/F},$ then there are two ways to extend $\phi_\tau$. Set $\tilde{\phi}=\bar{\rho}_1\oplus\bar{\rho}_2$ or $\bar{\rho}_1\oplus\bar{\rho}_2\omega_{E/F}$ with $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z} .$ There is one orbit under the twisting by $\omega_{E/F},$ which corresponds to both pure inner forms. \item If $\chi_F'^2=\omega_{E/F},$ there is one way to extend $\phi_{\tau}$. Set $\tilde{\phi}=\bar{\rho}_1\oplus\chi_F'\oplus\chi_F'\omega_{E/F}.$ And \[\deg\Phi(\tilde{\phi})=2. \] Note that the identity \eqref{stableequation} fails in this case while the identity \eqref{equaforsum} still holds. \end{itemize} \item If $\phi_1$ and $\phi_2$ are reducible and four different characters $\chi_{1},\chi_{1}^{-1},\chi_2$ and $\chi_2^{-1}$ satisfy $$\chi_{1}|_{F^\times}=\omega_{E/F}=\chi_2|_{F^\times},$$ then $S_{\phi_\tau}=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=0,\] $\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})=1. $ There is only one extension $\tilde{\phi}=\bar{\rho}_1\oplus\bar{\rho}_2$ with $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}. 
$ Since $\tilde{\phi}=\tilde{\phi}\cdot\omega_{E/F},$ it picks up the trivial pure inner form. \item If $\phi_1^s=\phi_2^\vee=\phi_2$ and $\phi_1$ is not conjugate-symplectic, then $S_{\phi_\tau }=1$ and \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=0,\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F}) =1.\] There is only one extension $$\tilde{\phi}= \mathrm{Ind}_{WD_E}^{WD_F}\phi_1:WD_F\rightarrow \mathrm{Sp}_4(\mathbb{C})$$ with the component group $S_{\tilde{\phi}}=\mathbb{Z}/2\mathbb{Z}.$ Since $\tilde{\phi}=\tilde{\phi}\cdot\omega_{E/F},$ it picks up the trivial pure inner form. \end{enumerate} It is easy to check that the identity \eqref{equaforsum} holds when $\Pi_{\phi_{\tau}}$ is generic, i.e., \[\dim\mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})+\dim\mathrm{Hom}_{ \mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=\sum_{\tilde{\phi}\in F(\phi_\tau)}m(\lambdabda,\tilde{\phi})\cdot\frac{\deg\Phi(\tilde{\phi})}{d_0(\tilde{\phi})}.\] \end{enumerate} \end{enumerate} \subsubsection{ Discrete and non-endoscopic case}\label{subsec:non-end} Assume that $\phi_\tau $ is irreducible and so $\Pi_{\phi_{\tau}}$ is a singleton. Given a parameter $\phi_\tau,$ which is non-endoscopic, the theta lift $\Theta_4^+(\tau)$ from $\mathrm{PGSp}_4(E)$ to $\rm PGSO_{2,2}(E)$ is zero. If $\phi_\tau$ is conjugate-symplectic, then \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F}) .\] There are two extensions $\tilde{\phi}$ and $\tilde{\phi}\cdot\omega_{E/F}$ with a component group $S_{\tilde{\phi}}=S_{\phi_\tau}=\mathbb{Z}/2\mathbb{Z}. $ There is one orbit under the twisting by $\omega_{E/F},$ which corresponds to both pure inner forms. 
\subsubsection{ Generic but neither discrete nor endoscopic case} If $\phi_\tau=\rho\oplus\rho\nu,\det\rho=\nu^{-1}\neq\mathbf{1},$ then $S_{\phi_\tau}=1.$ There are two cases: \begin{itemize} \item If $\phi_\tau$ is conjugate-symplectic and $\rho^s=\rho$, then \[\dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=1=\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F}) .\] There are two extensions $\tilde{\phi}=\tilde{\rho}+\tilde{\rho}^\vee$ and $\tilde{\phi}\cdot\omega_{E/F}$ where $\tilde{\rho}:WD_F\rightarrow \mathrm{GL}_2(\mathbb{C})$ satisfies $\tilde{\rho}|_{WD_E}=\rho.$ \item If $\phi_\tau$ is conjugate-symplectic and $\rho^s\neq\rho,$ then \[\dim \mathrm{Hom}_{\mathrm{PGSp}_4(F)}(\tau,\omega_{E/F})=1\mbox{ and } \dim \mathrm{Hom}_{\mathrm{PGSp}_{1,1}(F)}(\tau,\omega_{E/F})=0. \] There is only one extension $\tilde{\phi}=\mathrm{Ind}_{WD_E}^{WD_F}\rho$ such that $\tilde{\phi}|_{WD_E}=\phi_\tau.$ \end{itemize} \begin{proof} [Proof of Theorem \mathrm{Re}f{prasadfordisc}] It follows from the discussions in the endoscopic cases \mathrm{Re}f{B} in \S7.4.1 and the discrete and non-endoscopic case in \S\mathrm{Re}f{subsec:non-end}. \end{proof} \subsection{Further discussion} Let $E$ be a quadratic extension over a non-archimedean field $F.$ Let $\mathbf{G}$ be a quasi-split reductive group defined over $F.$ Let $\tau$ be an irreducible representation of $\mathbf{G}(E)$ with an enhanced $L$-parameter $(\phi_\tau,\lambdabda).$ Assume that $F(\phi_\tau)=\cup_i\mathcal{O}(\tilde{\phi}_i)$ where $\tilde{\phi}_i|_{WD_E}=\phi_\tau.$ \par If for each orbit $\mathcal{O}(\tilde{\phi}_i),$ the coset $\mathcal{C}_i\subset H^1(W_F,\mathbf{G})$ contains all pure inner forms satisfying $G_\alpha(E)=\mathbf{G}(E),$ then $\phi_\tau$ is called a '$full$' L-parameter of $\mathbf{G}(E)$, in which case $1_{\mathcal{C}_i}(G_\alpha)\equiv1$ in \eqref{stableequation}. 
\par Assume that $\tau$ belongs to a generic L-packet with Langlands parameter $\phi_\tau:WD_E\rightarrow {}^L\mathbf{G}$ and that $\phi_\tau$ is '$full$'. Then there is a conjectural identity \begin{equation}\label{conjectureiden}\dim\mathrm{Hom}_{G_{\alpha}}(\tau,\chi_\mathbf{G})=\sum_im(\lambdabda,\tilde{\phi}_i)\cdot\frac{\deg\Phi_\ast(\tilde{\phi}_i)}{d_0(\tilde{\phi}_i)} \end{equation} for any pure inner form $G_\alpha\in H^1(W_F,\mathbf{G})$ satisfying $G_\alpha(E)=\mathbf{G}(E).$ \par If $H^1(W_F,\mathbf{G})$ is trivial, then any $L$-parameter $\phi_\tau$ is '$full$'. So the conjectural identity \eqref{conjectureiden} holds for $\mathbf{G}=\mathrm{GL}_2.$ In fact, it holds for $\mathbf{G}=\mathrm{PGL}_2$ as well. \end{document}
\begin{document} \title{\bf A generalization of the Littlewood-Paley inequality for the fractional Laplacian $(-\Delta)^{\alpha/2}$} \date{} \author{Ildoo Kim \footnote{Department of Mathematics, Korea University, 1 Anam-dong, Sungbuk-gu, Seoul, South Korea 136-701, \,\, [email protected].}\quad \hbox{\rm and} \quad Kyeong-Hun Kim\footnote{Department of Mathematics, Korea University, 1 Anam-dong, Sungbuk-gu, Seoul, South Korea 136-701, \,\, [email protected]. }} \maketitle \begin{abstract} We prove a parabolic version of the Littlewood-Paley inequality for the fractional Laplacian $(-\Delta)^{\alpha/2}$, where $\alpha\in (0,2)$. \vspace*{.125in} \noindent {\it Keywords: Littlewood-Paley inequality, Fractional Laplacian.} \vspace*{.125in} \noindent {\it AMS 2000 subject classifications:} 42B25, 26D10, 60H15. \end{abstract} \section{Introduction} Let $T_{2,t}$ be the semigroup corresponding to the heat equation $u_t=\Delta u$ (see (\ref{06.09.1})). The classical Littlewood-Paley inequality says for any $p\in (1,\infty)$ and $f\in L_p(\bR^d)$, \begin{equation} \label{LP} \int_{\bR^d} \left(\int^{\infty}_0 |\nabla T_{2,t} f|^2dt\right)^{p/2} dx\leq N(p)\|f\|^p_p. \end{equation} In \cite{kr94} and \cite{Kr01} Krylov extended (\ref{LP}) by proving the following parabolic version in which $H$ is a Hilbert space. \begin{thm} \label{krylov} Let $H$ be a Hilbert space, $p\in [2,\infty), -\infty\leq a<b\leq \infty$, $f\in L_p((a,b)\times \bR^d,H)$. Then \begin{equation} \label{eqn krylov} \int_{\bR^d}\int^b_a\left(\int^t_a|\nabla T_{2,t-s}f|^2_{H}\,ds\right)^{p/2}\,dtdx\leq N(p) \int_{\bR^d}\int^b_a |f|^p_{H}\,dtdx. \end{equation} \end{thm} Let $\alpha\in (0,2)$.
The main goal of this article is to prove (\ref{eqn krylov}) with $\partial^{\alpha/2}_x$ and $T_{\alpha,t}$ in place of $\nabla$ and $T_{2,t}$ respectively, where $T_{\alpha,t}$ is the semigroup corresponding to the equation $u_t=-(-\Delta)^{\alpha/2}u$. That is, we prove \begin{thm} \label{1.1} Let $H$ be a Hilbert space, $p\in [2,\infty), -\infty\leq a<b\leq \infty$, and $f$ be an $H$-valued function of $(t,x)$. Then \begin{equation} \label{999} \int_{\bR^d}\int^b_a\left [\int^t_a|\partial^{\alpha/2}_xT_{\alpha,t-s}f(s,\cdot)(x)|^2_{H} ds\right]^{p/2}dxdt\leq N(\alpha,p)\int_{\bR^d}\int^b_a |f|^p_{H}\,dtdx. \end{equation} \end{thm} If $f(t,x)=f(x)$, then (\ref{999}) easily leads to the Littlewood-Paley inequality (\ref{LP}) with $\partial^{\alpha/2}_x$ and $T_{\alpha,t}$ in place of $\nabla$ and $T_{2,t}$ (see Remark \ref{remark 4}). Our motivation is as follows. For several decades, the fractional Laplacian and partial differential equations with the fractional Laplacian have been studied by many authors, see for instance \cite{CS} and \cite{Ste70}. Motivated by this, we were tempted to construct an $L_p$-theory of stochastic partial differential equations of the type \begin{equation} \label{eqn 0} du=-(-\Delta)^{\alpha/2} u \,dt +\sum_{k=1}^{\infty}f^kdw^k_t, \quad u(0,x)=0. \end{equation} Here $f=(f^1,f^2,\cdots)$ is an $\ell_2$-valued random function of $(t,x)$, and $w^k_t$ are independent one-dimensional Wiener processes.
It turns out that if $f=(f^1,f^2,\cdots)$ satisfies certain measurability condition, the solution of this problem is given by \begin{equation} \label{eqn 222} u(t,x)=\sum_{k=1}^{\infty}\int^t_0 T_{\alpha,t-s}f^k(s,\cdot)(x) dw^k_s, \end{equation} and by Burkholder-Davis-Gundy inequality (see \cite{kr95}), we have \begin{equation} \label{eqn 333} \bE \int^T_0\|\partial^{\alpha/2}_xu(t,\cdot)\|^p_{L_p}dt \leq N(p)\, \bE \int^T_0\int_{\bR^d}\left[\int^t_0|\partial^{\alpha/2}_xT_{\alpha,t-s}f(s,\cdot)(x)|^2_{\ell_2} ds\right]^{p/2}dxdt. \end{equation} Actually if $f$ is not random, then the reverse inequality also holds. Thus to prove $\partial^{\alpha/2}_xu\in L_p$ and to get a legitimate start of the $L_p$-theory of SPDEs of type (\ref{eqn 0}), one has to estimate the right-hand side of (\ref{eqn 333}). Later, we will see that (\ref{999}) implies that for any solution $u$ of equation (\ref{eqn 0}), \begin{equation} \label{for fun} \bE \int^T_0\|u(t,\cdot)\|^p_{H^{\alpha/2}_p}dt \leq N(\alpha,p,T)\bE\int^T_0 \||f|_{\ell_2}\|^p_{L_p}ds, \end{equation} where $\|u\|_{H^{\alpha/2}_p}:=\|(1-{\mathbb D}elta)^{\alpha/4}u\|_{L_p}$. As usual $\bR^{d}$ stands for the Euclidean space of points $x=(x^{1},...,x^{d})$, $B_r(x) := \{ y\in \bR^d : |x-y| < r\}$ and $B_r :=B_r(0)$. For $\beta \in (0,1)$, and functions $u(x)$ we set $$ \nabla_x u=(\frac{\partial}{\partial x^1}u,\cdots,\frac{\partial}{\partial x^d}u), \quad \partial_x^{\beta}u(x)=\cF^{-1}(|\xi|^{\beta}\hat{u}(\xi))(x) $$ where $\cF(f)(\xi)=\hat{f}(\xi):=\frac{1}{(2\pi)^d}\int_{\bR^d}e^{-i\xi\cdot x}f(x)dx$ is the Fourier transform of $f$. If we write $N=N(...)$, this means that the constant $N$ depends only on what are in parenthesis. \section{Main Result} In this section we introduce a slightly extended version of Theorem \ref{1.1}. 
Fix $\alpha \in (0,2)$ and let $p_{\alpha}(t,x)=p(t,x)$, where $t>0$, denote the Fourier inverse transform of $e^{-(2\pi)^\alpha t|\xi|^\alpha}$, that is, $$ p(t,x):= \int_{\bR^d} e^{i \xi \cdot x }e^{-(2\pi)^\alpha t|\xi|^\alpha} d\xi $$ and $p(x) := p(1,x)$. For a suitable function $h$ and $t>0$, define \begin{equation} \label{06.09.1} T_th(x) := (p(t,\cdot) * h(\cdot))(x):=\int_{\bR^d} p(t,x-y)h(y)dy, \end{equation} $$ ({-{\mathbb D}elta})^{\frac{\beta}{2}}h(x) :=\partial_x^{\beta} h:= \cF^{-1}(|\xi|^{\beta} \cF(h)(\xi))(x). $$ Then, for $\beta > 0$, \begin{eqnarray} \partial^{\beta}_x T_th(x) &=&\cF^{-1}( |\xi|^{\beta} e^{-(2\pi)^\alpha t|\xi|^\alpha} \hat{h}(\xi))\nonumber\\ &=&\int_{\bR^d} e^{i\xi \cdot x} |\xi|^{\beta} e^{-(2\pi)^\alpha |t^{1/\alpha}\xi|^{ \alpha}} d\xi \ast h(x)\nonumber\\ &=&t^{-d/\alpha}\int_{\bR^d} e^{i \xi \cdot t^{-1/{\alpha}} x} |t^{-1/\alpha}\xi|^{\beta} e^{-(2\pi)^\alpha |\xi|^\alpha} d\xi \ast h(x)\nonumber\\ &=&t^{-\beta/\alpha}\cdot t^{-d/\alpha} \phi_\beta(x /t^{1/\alpha}) \ast h(x) \label{eqn 06.12.1}, \end{eqnarray} where $$ \phi_\beta(x) := \int_{\bR^d} |\xi|^{\beta} e^{i \xi \cdot x} e^{-(2\pi)^\alpha |\xi|^{\alpha}} d\xi= ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x). $$ The following two lemmas are crucial in this article and are proved in section \ref{section a priori}. \begin{lemma} \label{lem3} Denote $\hat{\phi}_\beta(\xi)=\cF(\phi_\beta)(\xi)$, then there exists a constant $N=N(d,\alpha,\beta )>0$ such that $$ |\hat{\phi}_\beta(\xi)| \leq N|\xi|^\beta, \quad |\xi| |\hat{\phi}(\xi)| \leq N, $$ $$ |\phi_\beta(x)|\leq N \left( \frac{1}{|x|^{d+\beta}} \wedge 1 \right)\quad \text{and} \quad |\nabla \phi_\beta(x)| \leq N \left( \frac{1}{|x|^{d+1+\beta}} \wedge 1 \right). 
$$ \end{lemma} \begin{lemma} \label{main lem} For each $\alpha\in (0,2)$ and $\beta > 0$, there exists a continuously differentiable function $\overline{\phi}_\beta(\rho)$ defined on $[0,\infty)$ such that for some positive constant $K$ which depends on $d,\alpha ,\beta$, $$ |\phi_\beta(x)| + |\nabla \phi_\beta(x)| + |x||\nabla \phi_\beta(x)| \leq \overline{\phi}_\beta(|x|), \quad \int_0^\infty |\overline{\phi}_\beta'(\rho)|~d\rho \leq K, $$ $$ \overline{\phi}_\beta(\infty)=0, \quad \int_r^\infty |\overline{\phi}_\beta'(\rho)|\rho^d~d\rho \leq \frac{K}{r^{\beta}} \quad \quad \forall r \geq (10)^{-1/\alpha}. $$ \end{lemma} To make our inequality slightly extended, we consider convolutions (see (\ref{eqn 06.12.1})) with more general functions. Let $\psi(x)$ be a $C^1(\bR^d)$ function such that $|\hat{\psi}(\xi)| \leq K |\xi|^{\nu}$ for some $\nu >0 $, $|\xi|^\lambda |\hat{\psi}(\xi)| \leq K$ for some $\lambda >0$, and assume that for some $\delta \geq \frac{\alpha}{2}$, there exists a continuously differentiable function $\overline{\psi}$ satisfying $$ |\psi(x)|+|\nabla \psi(x)| + |x||\nabla \psi(x)| \leq \overline{\psi}(|x|), \quad \int_0^\infty |\overline{\psi}'(\rho)| ~d\rho \leq K, \quad \overline{\psi}(\infty)=0 $$ and \begin{equation} \label{eqn 06.08} \int_r^\infty |\overline{\psi}'(\rho)|\rho^d ~d\rho \leq (K/r^\delta), \quad \forall r \geq (10)^{-1/\alpha}. \end{equation} By Lemma \ref{lem3} and Lemma \ref{main lem}, we know $\phi_{\alpha/2}$ satisfies all the above assumptions. Define $$ \Psi_th(x):=t^{-d/\alpha} \psi(\cdot /t^{1/\alpha}) \ast h(\cdot )(x). $$ For $f \in C_0^\infty (\bR^{d+1},H),~ t > a \geq -\infty$, and $x \in \bR^d$, we define $$ \cG_af(t,x):=[\int_a^t | \Psi_{t-s}f(s, \cdot)(x)|^2_H ~\frac{ds}{t-s}]^{1/2}, \quad \cG=\cG_{-\infty}. $$ Here is our main result. The proof is given in section \ref{proof of theorem}. 
\begin{thm} \label{main theorem} Let $p \in [2,\infty)$, $-\infty \leq a < b \leq \infty$ and $f \in C_0^\infty((a,b) \times \bR^d , H). $ Then \begin{eqnarray} \label{4022} \int_{\bR^d} \int_a^b [\cG_a f(t,x)]^p ~dt dx \leq N \int_{\bR^d} \int_a^b |f(t,x)|^p_H~dtdx, \end{eqnarray} where the constant $N$ depends only on $d,p,\alpha,\nu,\lambda,\delta$ and $K$. \end{thm} \begin{remark} \label{remark 22} Take $\psi = \phi_{\alpha/2}$, $\nu=\delta=\alpha/2$, $\lambda=1$, $a=0$ and $b=T$, then (\ref{4022}) implies \begin{equation} \label{eqn 6.10.5} \int_{\bR^d} \int_0^T [\int_0^t|\partial^{\alpha/2}_xT_{\alpha,t-s}f(s, \cdot)(x)|^2_H ds]^{p/2} ~dt dx \leq N \int_{\bR^d} \int_0^T |f(t,x)|^p_H~dtdx. \end{equation} \end{remark} \begin{remark} \label{remark 4} Note that inequality (\ref{999}) with $\partial^{\alpha/2}_x$ and $T_{\alpha,t}$ in place of $\nabla$ and $T_{2,t}$ is an easy consequence of (\ref{eqn 6.10.5}). Indeed, take $T=2$ and $f(t,x)=f(x)$. The left-hand side of (\ref{eqn 6.10.5}) is not less than $$ \int_{\bR^d}\int^2_1[\int^1_0|\partial^{\alpha/2}_xT_{\alpha,s}f(x)|^2_H\,ds]^{p/2}\,dtdx =\int_{\bR^d}[\int^1_0|\partial^{\alpha/2}_xT_{\alpha,s}f(x)|^2_H\,ds]^{p/2}\,dx. $$ Thus it follows that $$ \int_{\bR^d}[\int^1_0|\partial^{\alpha/2}_xT_{\alpha,s}f(x)|^2_H\,ds]^{p/2}\,dx\leq N\int_{\bR^d}\|f\|^p_{H}\,dx, $$ and the self-similarity $(\partial^{\alpha/2}_xT_{\alpha,s}f(c\,\cdot))(x)=c^{\alpha/2}(\partial^{\alpha/2}_xT_{\alpha,c^{\alpha}s}f)(cx)$ allows one to replace the upper limit $1$ by infinity with the same constant $N$. \end{remark} \section{Preliminary estimates on $({-{\mathbb D}elta})^{\beta/2}p(t,x)$} \label{section a priori} In this section we study the upper bound of $|({-{\mathbb D}elta})^{\beta/2}p(t,x)|$ and $|\nabla({-{\mathbb D}elta})^{\beta/2}p(t,x)|$, and then we prove Lemma \ref{lem3} and Lemma \ref{main lem}. 
Actually the arguments in this section allow one to get the upper bound of $|D^m({-{\mathbb D}elta})^{\beta/2}p(t,x)|$ for any $m\geq 0$. \begin{lemma} \label{lem1} There exists a constant $N=N(d,\alpha ,\beta)>0$ such that \begin{equation} \label{eqn 2.1} |({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| \leq \frac{N}{|x|^{d+\beta}}. \end{equation} \end{lemma} \noindent{\bf Proof.} See \cite{fe} for $d=1$ and \cite{kol} for $d \geq 2$. Actually in \cite{fe}, (\ref{eqn 2.1}) is given only for $\beta=0$. Also in \cite{kol}, $(-{\mathbb D}elta)^{\frac{\beta}{2}}p(x)$ is estimated in terms of power series (Proposition 2.2), however the series does not converge if $\alpha >1$. For these reasons, we give a detailed proof. Also some inequalities obtained in this proof will be used in the proof of Lemma \ref{lem2}. For $d=1$, since $|\xi|$ is an even function, we have \begin{eqnarray} ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) \notag &=&\int_{\bR} |\xi|^{\beta}e^{i \xi x }e^{-(2\pi)^\alpha |\xi|^\alpha} d\xi\\ \notag &=&2 \text{Re} \int_{0}^\infty \xi^{\beta}e^{i \xi x }e^{-(2\pi)^\alpha \xi^\alpha} d\xi\\ \label{2.2}&=&2 \text{Re} \frac{1}{x^{1+\beta}}\int_{0}^\infty \xi^{\beta}e^{i \xi }e^{-(2\pi)^\alpha (\xi/x)^\alpha} d\xi. \end{eqnarray} Assume $0 < \alpha \leq 1$. Consider the integrand as a function of the complex variable $\xi$. Since the integrand in (\ref{2.2}) is analytic in the complement of the non-positive real half line and is continuous at zero, if we take principal branch cut, for $N>0$, the path integration is zero on the closed path $$ \gamma_N(t) := \begin{cases} t \quad & \text{if}~ 0 \leq t \leq N \\ N+i(t-N) \quad & \text{if} ~N \leq t \leq 2N \\ 3N-t+iN \quad & \text{if} ~2N \leq t \leq 3N \\ i(4N-t) \quad & \text{if}~ 3N \leq t \leq 4N. 
\end{cases} $$ By letting $N \to \infty$, one can move the path of integration to the positive imaginary axis, and gets (note $|e^{-(2\pi)^\alpha (i\xi/x)^\alpha}| \leq 1$) $$ |({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| \label{2.31} = \left|2 \text{Re} \frac{1}{x^{1+\beta}}\int_{0}^\infty (i\xi)^{\beta}e^{- \xi } e^{-(2\pi)^\alpha (i\xi/x)^\alpha} i\, d\xi\right| \leq \frac{2}{|x|^{1+\beta}}\int^{\infty}_0 \xi^{\beta}e^{-\xi}d\xi \leq \frac{N}{|x|^{1+\beta}}. $$ If $1 < \alpha <2$, we use another closed path $$ \gamma_N(t) := \begin{cases} t \quad & \text{if}~ 0 \leq t \leq N \cos \frac{\pi}{2\alpha} \\ N \cos \frac{\pi}{2\alpha} +i\sin\frac{\pi}{2\alpha}(\frac{t}{\cos \frac{\pi}{2\alpha}}-N) \quad & \text{if} ~N\cos \frac{\pi}{2\alpha} \leq t \leq 2N\cos \frac{\pi}{2\alpha} \\ (3N-\frac{t}{\cos \frac{\pi}{2\alpha}})e^{i\frac{\pi}{2\alpha}} \quad & \text{if}~ 2N\cos \frac{\pi}{2\alpha} \leq t \leq 3N\cos \frac{\pi}{2\alpha}. \end{cases} $$ Thanks to the path integration on the above path, which looks like formally replacing $\xi$ by $\xi e^{i\frac{\pi}{2\alpha}}$, we get (since $|e^{-(2\pi)^\alpha (\xi e^{i\frac{\pi}{2\alpha}}/x)^\alpha}|= 1$) \begin{eqnarray*} |({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| &\leq& |2 \text{Re} \frac{1}{x^{1+\beta}}\int_{0}^\infty (\xi e^{i\frac{\pi}{2\alpha}})^{\beta}e^{ i\xi e^{i\frac{\pi}{2\alpha}} } e^{-(2\pi)^\alpha (\xi e^{i\frac{\pi}{2\alpha}}/x)^\alpha} e^{i\frac{\pi}{2\alpha}}\,d\xi| \\ &\leq & \frac{2}{|x|^{1+\beta}}\int^{\infty}_0 \xi^{\beta}e^{-\xi \sin \frac{\pi}{2\alpha}}\,d\xi \leq\frac{N}{|x|^{1+\beta}}. \end{eqnarray*} Next, let $d \geq 2$. 
Since the function $({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) $ is radial, we may assume $x= (|x|,0,\ldots,0)$, and if we denote the surface of the $d$-dimensional unit ball by $S^{d-1}$ and the surface measure by $d\sigma$, then from the spherical coordinate we have \begin{eqnarray} \notag ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) \notag &=&\int_{\bR^d} |\xi|^{\beta}e^{i \xi^1 |x| }e^{-(2\pi)^\alpha |\xi|^\alpha} d\xi\\ \notag &=&\int_{\bR^d} |\xi|^{\beta}\cos( \xi^1 |x| )e^{-(2\pi)^\alpha |\xi|^\alpha} d\xi\\ \notag &=&\int_{0}^\infty r^{\beta+d-1} \int_{S^{d-1}} \cos( r \sigma^1 |x| )e^{-(2\pi)^\alpha |r |^\alpha} d\sigma dr. \end{eqnarray} Furthermore we can express $\sigma \in S^{d-1}$ as $\sigma = (\cos\theta, \phi \sin \theta)$ with $\theta \in [0,\pi]$ and $\phi \in S^{d-2}$, and get \begin{eqnarray*} \notag ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) \notag &=&\int_{0}^\infty r^{\beta+d-1} \int_0^{\pi} \sin^{d-2} (\theta) \int_{S^{d-2}} \cos( r \cos\theta |x| )e^{-(2\pi)^\alpha |r |^\alpha} d\phi d\theta dr\\ \notag &=&A_{d-2}\int_{0}^\infty r^{\beta+d-1} \int_0^{\pi} \sin^{d-2} (\theta) \cos( r \cos\theta |x| ) e^{-(2\pi)^\alpha |r |^\alpha} d\theta dr, \end{eqnarray*} where $A_{d-2}$ is the area of $S^{d-2}$ and $A_0:=1$. By the changes of variables $r|x| \to r$ and $t = \cos \theta$, \begin{eqnarray} ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) \notag &=&A_{d-2} \frac{1}{|x|^{\beta+d}} \int_{0}^\infty r^{\beta+d-1} \int_0^{\pi} \sin^{d-2} (\theta) \cos( r \cos\theta ) e^{-(2\pi)^\alpha (r/|x|)^\alpha} d\theta dr \nonumber\\ &=&A_{d-2} \frac{1}{|x|^{\beta+d}} \int_{0}^\infty r^{\beta+d-1} \int_{-1}^{1} \cos( r t ) e^{-(2\pi)^\alpha (r/|x|)^\alpha} (1-t^2)^{(d-3)/2} dt dr \label{2.3}. \end{eqnarray} To proceed further, we use the Bessel function $J_n(z)$ and the Whittaker function $W_{0,n}(z)$. 
For any complex $z$ that is not negative real and any real $n > -\frac{1}{2}$, define $$ J_n(z) := \frac{(\frac{1}{2}z)^n}{\Gamma(n+\frac{1}{2})\sqrt{\pi}} \int_{-1}^1 (1-t^2)^{n-1/2} \cos (zt) dt, $$ \begin{equation} \label{5141} W_{0,n}(z):= \frac{e^{-z/2}}{\Gamma(n+\frac{1}{2})} \int_0^\infty [t(1+t/z)]^{n-1/2}e^{-t}dt \end{equation} where $\arg z$ is understood to take its principal value, that is, $|\arg z| < \pi$. It is known (see, for instance, \cite{whit} p.346, p.360 and \cite{wang} p.314) that the two functions are related by the formula $$ J_n(z) = \frac{1}{\sqrt{2\pi z}} \left( \exp \{ \frac{1}{2}(n+ \frac{1}{2}) \pi i\} W_{0,n}(2 i z) +\exp \{ -\frac{1}{2}(n+ \frac{1}{2}) \pi i\} W_{0,n}(-2 i z) \right). $$ In particular, if $z$ is a positive real number, \begin{equation} \label{2.4} J_n(z) = 2 \text{Re}\left[ \frac{1}{\sqrt{2\pi z}} \exp \{ \frac{1}{2}(n+\frac{1}{2})\pi i \} W_{0,n}(2 i z) \right]. \end{equation} We also know (see, for instance, \cite{whit} p. 343) \begin{eqnarray} \label{2.5} W_{0,n}(z) = e^{-\frac{1}{2}z} \{ 1+ O(z^{-1})\}. \end{eqnarray} Due to (\ref{5141}) and (\ref{2.4}), from (\ref{2.3}) we have \begin{eqnarray} \notag && ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)\\ \notag &=& \frac{A_{d-2}}{|x|^{\beta+d}} \int_{0}^\infty r^{\beta+d-1} \int_{-1}^{1} \cos( r t ) e^{-(2\pi)^\alpha (r/|x|)^\alpha} (1-t^2)^{(d-3)/2} dt dr\\ &=& \frac{A_{d-2}}{|x|^{\beta+d}} \int_{0}^\infty r^{\beta + d/2} 2^{d/2-1}\Gamma(\frac{1}{2}(d-1)) \sqrt{\pi}J_{(d/2)-1}(r) e^{-(2\pi)^\alpha (r/|x|)^\alpha} dr\label{eqn 6.4.5}\\ &=& \frac{N(d)}{|x|^{\beta+d}} \text{Re} \int_{0}^\infty r^{\beta + (d-1)/2} \exp \{ \frac{1}{2}(\frac{d}{2}-\frac{1}{2})\pi i \} W_{0,(d/2)-1}(2 i r) e^{-(2\pi)^\alpha (r/|x|)^\alpha} dr, \label{2.66} \end{eqnarray} where $N(d):=2^{(d-1)/2}A_{d-2}\Gamma(\frac{1}{2}(d-1))$. 
From definition (\ref{5141}) one easily checks that the integrand in (\ref{2.66}) is analytic in the complement of the non-positive real half line and is continuous at zero. Let $0<\alpha \leq 1$. Remembering (\ref{2.5}) and doing the path integration on an appropriate closed path, as in the case $d=1$, we can change the path of integration in (\ref{2.66}) from the positive real half line to the negative imaginary half line. Taking this new path of integration, that is to say, formally replacing $r$ by $-ir$, one gets (note $|e^{-(2\pi)^\alpha (-ir/|x|)^\alpha}|\leq 1$) \begin{eqnarray*} \notag && |({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)|\\ \notag &=& \frac{N(d)}{|x|^{\beta+d}} \left|\text{Re} \int_{0}^\infty (-ir)^{\beta + (d-1)/2} \exp \{ \frac{1}{2}(\frac{d}{2}-\frac{1}{2})\pi i \} W_{0,(d/2)-1}(2 r) e^{-(2\pi)^\alpha (-ir/|x|)^\alpha} i\, dr \right|\\ \notag &\leq& \frac{N}{ |x|^{\beta+d}} \int_{0}^\infty r^{\beta + (d-1)/2} W_{0,(d/2)-1}(2 r) dr \leq \frac{N}{ |x|^{\beta+d}}. \end{eqnarray*} Let $1 <\alpha <2$. Then $|e^{-i re^{-i\frac{\pi}{2\alpha}}}|\leq e^{\frac{-r}{2}}$, and thus $$ |W_{0,(d/2)-1}(2 ire^{-i\frac{\pi}{2\alpha}})|\leq \frac{e^{\frac{-r}{2}}}{\Gamma(d/2-1/2)}\int_0^\infty \left|[t(1+t/(2 ire^{-i\frac{\pi}{2\alpha}}))]^{(d-3)/2} e^{-t}\right|dt. $$ Note that if $d\geq 3$ then $$|1+t/(2 ire^{-i\frac{\pi}{2\alpha}})|^{(d-3)/2}\leq |1+t/r|^{(d-3)/2}, $$ and if $d=2$ then $$|1+t/(2 ire^{-i\frac{\pi}{2\alpha}})|^{-1/2}\leq (1+t\sin\frac{\pi}{2\alpha}/(2r))^{-1/2}\leq 2(1+t/r)^{-1/2}. $$ It follows that for any $r>0$, we have $|W_{0,(d/2)-1}(2 ire^{-i\frac{\pi}{2\alpha}})|\leq 2W_{0,(d/2)-1}(r)$. We do the path integration on a different closed path and change the path of integration in (\ref{2.66}) from the positive real half line to the half line $\{r e^{-i\frac{\pi}{2\alpha}}: r > 0\}$. 
Taking this new path of integration, that is to say, formally replacing $r$ by $re^{-i\frac{\pi}{2\alpha}}$, one gets (note $|e^{-(2\pi)^\alpha (re^{-i\frac{\pi}{2\alpha}}/|x|)^\alpha}|=1$) \begin{eqnarray*} |({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| &\leq& \frac{N}{|x|^{\beta+d}}\int_{0}^\infty \left| (re^{-i\frac{\pi}{2\alpha}})^{\beta + (d-1)/2} W_{0,(d/2)-1}(2 ire^{-i\frac{\pi}{2\alpha}}) e^{-(2\pi)^\alpha (re^{-i\frac{\pi}{2\alpha}}/|x|)^\alpha}\right|\,dr\\ &\leq& \frac{N}{ |x|^{\beta+d}} \int_{0}^\infty r^{\beta + \frac{d-1}{2}} W_{0,(d/2)-1}( r) dr \leq \frac{N}{ |x|^{\beta+d}}. \end{eqnarray*} The lemma is proved. { $\Box$ } \begin{remark} \label{remark 6.4} In the proof of Lemma \ref{lem1} (see (\ref{2.2}) and (\ref{eqn 6.4.5})) we proved that for any $\beta\geq 0$, \begin{equation} \label{eqn 6.4.3} \left|\int^{\infty}_0 \xi^{\beta}e^{i\xi}e^{-(2\pi)^\alpha (\xi/x)^\alpha} d\xi \right|<N, \quad \text{when}\,\,d=1, \end{equation} \begin{equation} \label{eqn 6.4.4} \left|\int_{0}^\infty r^{\beta + d/2} J_{(d/2)-1}(r) e^{-(2\pi)^\alpha (r/|x|)^\alpha} dr\right|<N, \quad \text{when}\,\, d\geq 2, \end{equation} where $N=N(\alpha,\beta,d)>0$ is independent of $x$. \end{remark} \begin{remark} \label{remark 6.14} Even though (\ref{eqn 2.1}) is enough for our need, we believe it is not sharp. Actually it is known (see \cite{B}) that if $\beta=0$, then $$ p(t,x) \sim \left(\frac{t}{|x|^{d+\alpha}} \wedge t^{-d/\alpha}\right). $$ \end{remark} \begin{lemma} \label{lem2} There exists a constant $N=N(d,\alpha , \beta)>0$ such that \begin{equation} \label{4021} |\nabla ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x) | \leq N(\frac{1}{|x|^{\beta+d+1}} \vee \frac{1}{|x|^{\beta+d+\alpha+1}}). \end{equation} \end{lemma} \noindent{\bf Proof.} Let $d=1$. 
By (\ref{eqn 6.4.3}), \begin{eqnarray*} | \frac{d}{dx}({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| &=&|\int_{\bR} i \xi |\xi|^{\beta }e^{i \xi x }e^{-(2\pi)^\alpha |\xi|^\alpha} d\xi|\\ &\leq & \frac{1}{|x|^{\beta+2}}|\int_{\bR} \xi |\xi|^{\beta }e^{i \xi }e^{-(2\pi)^\alpha |\xi/x|^\alpha} d\xi|\\ &=&\frac{2}{|x|^{\beta+2}}| \text{Im} \int_{0}^\infty \xi^{1+\beta }e^{i \xi }e^{-(2\pi)^\alpha |\xi/x|^\alpha} d\xi| \leq \frac{N}{|x|^{\beta+2}}. \end{eqnarray*} Let $d\geq 2$. From (\ref{eqn 6.4.5}) and the inequality $$ |\frac{\partial}{\partial x_i} ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| =|\frac{\partial}{\partial |x|} ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)\frac{\partial |x|}{\partial x_i}|\leq |\frac{\partial}{\partial |x|} ({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)| $$ it easily follows that \begin{eqnarray*} &&|\nabla({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)|\\ &\leq& \frac{N_1}{|x|^{\beta+d+1}} | \int_{0}^\infty (r)^{\beta + d/2} J_{d/2-1}(r)e^{-(2\pi)^\alpha (r/|x|)^\alpha} dr|\\ &&+\frac{N_2}{|x|^{\beta+d+\alpha+1}}| \int_{0}^\infty (r)^{\beta + d/2+\alpha} J_{d/2-1}(r)e^{-(2\pi)^\alpha (r/|x|)^\alpha} dr|. \end{eqnarray*} Thus by (\ref{eqn 6.4.4}), $$ |\nabla({-{\mathbb D}elta})^{\frac{\beta}{2}}p(x)|\leq N(\frac{1}{|x|^{\beta+d+1}} \vee \frac{1}{|x|^{\beta+d+\alpha+1}}). $$ The lemma is proved. { $\Box$ } ({\bf{Proof of Lemma \ref{lem3}}}) The first two assertions come from the fact $$ \cF(\phi_\beta(x))(\xi) =\cF( \int_{\bR^d} |\eta|^{\beta} e^{i\eta \cdot x} e^{-(2\pi )^\alpha |\eta|^{\alpha}}~d\eta)(\xi) =|\xi|^{\beta} e^{-(2\pi )^\alpha |\xi|^{\alpha}}. $$ Next, observe that $$ |\phi_\beta(x)|=|(-{\mathbb D}elta)^{\frac{\beta}{2}}p(x)| = |\int_{\bR^d}|\xi|^{\beta}e^{i\xi \cdot x} e^{-(2\pi)^\alpha |\xi|^\alpha}d\xi| \leq \int_{\bR^d}|\xi|^{\beta} e^{-(2\pi)^\alpha |\xi|^\alpha}d\xi<\infty. $$ Similarly, $$ |\nabla \phi_\beta(x)|\leq \int_{\bR^d}|\xi|^{\beta+1} e^{-(2\pi)^\alpha |\xi|^\alpha}d\xi<\infty. 
$$ Therefore, by Lemma \ref{lem1} and Lemma \ref{lem2}, there exists a constant $N(d,\alpha,\beta)>0$ such that \begin{eqnarray*} &|\phi_\beta (x)|\leq N \left( \frac{1}{|x|^{d+\beta}} \wedge 1 \right), \quad |\nabla \phi_\beta(x)| \leq N \left( \frac{1}{|x|^{d+1+\beta}} \wedge 1 \right). \end{eqnarray*} The lemma is proved. { $\Box$ } {\bf{(Proof of Lemma \ref{main lem})}}\\ By the inequalities in Lemma \ref{lem3}, we have \begin{eqnarray*} |\phi_\beta(x)| + |\nabla \phi_\beta(x)| + |x||\nabla \phi_\beta(x)| \leq N \left(\frac{1}{|x|^{d+\beta}} \wedge 1 \right). \end{eqnarray*} Define $$ \overline{\phi}_\beta(\rho)=\begin{cases} &\frac{N}{\rho^{d+\beta}} \quad \text{if} ~\rho \geq (10)^{-1/\alpha} \\ &N \cdot (10)^{(d+\beta)/\alpha}e^{-(d+\beta)((10)^{1/\alpha}\rho-1)} \quad \text{if}~ \rho < (10)^{-1/\alpha}. \end{cases} $$ Then, $\overline{\phi}_\beta$ is continuously differentiable on $[0,\infty)$ such that $$ \overline{\phi}_\beta(\infty)=0, \quad |\phi_\beta(x)| + |\nabla \phi_\beta(x)| + |x||\nabla \phi_\beta(x)| \leq \overline{\phi}_\beta(|x|), \quad \int_0^\infty |\overline{\phi}_\beta'(\rho)|~d\rho \leq K $$ and for each $r \geq (10)^{-1/\alpha}$, $$ \int_r^\infty |\overline{\phi}_\beta'(\rho)| \rho^d~d\rho =\int_r^\infty(d+\beta) \frac{N}{\rho^{d+1+\beta}} \rho^d~d\rho =\frac{(d+\beta)N}{\beta} r^{-\beta}. $$ The lemma is proved. { $\Box$ } \section{Some estimates on $\cG f$} In this section we develop some estimates of $\cG f$ by adopting the approaches in \cite{Kr01}, where the case $\alpha=2$ is studied. Fix $f\in C_0^\infty(\bR^{d+1},H)$ and denote $u=\cG f$. First, we prove a version of Theorem \ref{main theorem} when $p=2$. \begin{lemma} \label{2-1} There exists a constant $N=N(\nu,\lambda,\alpha,K)>0$ so that for any $T\in (-\infty,\infty]$, \begin{eqnarray} \label{4023} \|u\|^2_{L_2(\bR^{d+1}\cap \{t\leq T\})} \leq N\|f\|^2_{L_2(\bR^{d+1}\cap\{t \leq T\})}. 
\end{eqnarray} \end{lemma} \noindent{\bf Proof.} By the continuity of $f$, the range of $f$ belongs to a separable subspace of $H$. Thus by using a countable orthonormal basis of this subspace and the Fourier transform one easily finds \begin{eqnarray} \notag \|u\|^2_{L_2(\bR^{d+1} \cap \{t \leq T\})} &=&\int_{\bR^d} \int_{-\infty}^T [\int_{-\infty}^t |\hat{\psi}(\xi (t-s)^{1/\alpha})|^2|\hat{f}(s,\xi)|^2_H \frac{ds}{t-s}]dt d\xi\\ \notag &=&\int_{\bR^d} \int_{-\infty}^T \int_{-\infty}^T I_{s \leq t} |\hat{\psi}(\xi (t-s)^{1/\alpha})|^2|\hat{f}(s,\xi)|^2_H \frac{dt}{t-s} ds d\xi\\ \label{411}&=&\int_{\bR^d} \int_{-\infty}^T \int_{0}^{T-s} |\hat{\psi}(\xi t^{1/\alpha})|^2 \frac{dt}{t} |\hat{f}(s,\xi)|^2_H ds d\xi. \end{eqnarray} By the assumption on $\psi$, for some $\nu, \lambda, K>0$, $$ |\hat{\psi}(\xi)| \leq K |\xi|^\nu,\quad |\xi|^\lambda|\hat{\psi}(\xi)|\leq K. $$ This and the change of the variables $|\xi|^\alpha t \to t$ easily lead to \begin{eqnarray} \int_0^\infty |\hat{\psi}(\xi t^{1/\alpha})|^2 \frac{dt}{t} =\int_0^\infty |\hat{\psi}(t^{1/\alpha}\frac{\xi}{|\xi|})|^2 \frac{dt}{t}\nonumber\\ \leq K^2\int^1_0 t^{-1+2\nu/\alpha}dt+K^2\int^{\infty}_1 t^{-1-2\lambda/\alpha}dt \leq N(\nu, \alpha, \lambda,K).\label{412} \end{eqnarray} Plugging (\ref{412}) into (\ref{411}), \begin{eqnarray*} \|u\|^2_{L_2(\bR^{d+1} \cap \{t \leq T\})} \leq N \int_{-\infty}^T \int_{\bR^d} |\hat{f}(s,\xi)|^2_H ~d\xi ds. \end{eqnarray*} The last expression is equal to the right-hand side of (\ref{4023}), and therefore the lemma is proved. { $\Box$ } For a real-valued function $h$ defined on $\bR^d$, define the maximal function $$ \bM_x h(x) := \sup_{r>0} \frac{1}{|B_r(x)|} \int_{B_r(x)} |h(y)| dy, $$ where $| B_r(x) |$ denotes Lebesgue measure of $B_r(x)$. Similarly, for measurable functions $h=h(t)$ on $\bR$ we introduce $\bM_th$ as the maximal function of $h$ relative to symmetric intervals: $$ \bM_t h(t) := \sup_{r>0} \frac{1}{2r} \int_{-r}^r |h(t+s)|\, ds. 
$$ For a function $h(t,x)$ of two variables, set $$ \bM_x h(t,x) := \bM_x(h(t,\cdot))(x), \quad \bM_th(t,x) = \bM_t(h(\cdot,x))(t). $$ Denote \begin{equation} \label{4024} Q_0 := [-2^\alpha,0] \times [-1,1]^d. \end{equation} \begin{corollary} \label{co1} Assume that the support of $f$ is within $[-10,10] \times B_{3d}$. Then for any $(t,x) \in Q_0$ \begin{eqnarray} \label{4025} \int_{Q_0} |u(s,y)|^2 \, dsdy \leq N \bM_t \bM_x |f|_H^2 (t,x), \end{eqnarray} where $N$ depends only on $d,\alpha,\nu,\lambda$ and $K$. \end{corollary} \noindent{\bf Proof.} By Lemma \ref{2-1}, $$ \int_{Q_0} |u(s,y)|^2 ~dsdy \leq \int_{-\infty}^0 \int_{\bR^d} |u(s,y)|^2 dyds \leq N \int_{-10}^0 \int_{B_{3d}} |f(s,y)|_H^2 dyds. $$ Since $|x-y| \leq |x|+|y| \leq 4d$ for any $(t,x) \in Q_0$ and $y \in B_{3d}$, \begin{eqnarray*} \int_{-10}^0 \int_{B_{3d}} |f(s,y)|_H^2 dyds \leq \int_{-10}^0 \int_{|x-y| \leq 4d} |f(s,y)|_H^2 dyds &\leq& N \int_{-10}^0 \bM_x|f(s,x)|_H^2 ds \\ &\leq& N \bM_t\bM_x |f|_H^2(t,x). \end{eqnarray*} The corollary is proved. { $\Box$ } We generalize Corollary \ref{co1} as follows. \begin{lemma} \label{2-3} Assume that $f(t,x)=0$ for $t \notin (-10,10)$. Then for any $(t,x) \in Q_0$, \begin{eqnarray*} \int_{Q_0} |u(s,y)|^2 ~dsdy \leq N \bM_t \bM_x |f|_H^2 (t,x), \end{eqnarray*} where $N=N(d,\alpha,\nu,\lambda,\delta,K)$. \end{lemma} \noindent{\bf Proof.} First, notice that if $0 \leq \varepsilon \leq R \leq \infty$, and $F$ and $G$ are smooth enough, then \begin{eqnarray} \notag \int_{R \geq |z| \geq \varepsilon} F(z) G(|z|)~dz = - \int_\varepsilon^R G'(\rho)(\int_{|z| \leq \rho} F(z) dz) d\rho \\ + G(R) \int_{|z| \leq R} F(z)dz - G(\varepsilon) \int_{|z| \leq \varepsilon} F(z)dz. 
\label{4026} \end{eqnarray} Indeed, (\ref{4026}) is obtained by applying integration by parts to $$ \int_\varepsilon^R G(\rho)\frac{d}{d\rho} \left(\int_{B_\rho(0)} F(z)\, dz \right)d\rho=\int_\varepsilon^R G(\rho) \left(\int_{\partial B_\rho(0)} F(s) \, dS_{\rho} \right) d\rho =\int_{R \geq |z| \geq \varepsilon} F(z) G(|z|)\,dz. $$ Now take $\zeta \in C_0^\infty (\bR^d)$ such that $\zeta =1$ in $B_{2d}$ and $\zeta=0$ outside of $B_{3d}$. Set $\cA = \zeta f$ and $\cB = (1-\zeta)f$. By Minkowski's inequality, $\cG f \leq \cG \cA+ \cG \cB$. Since $\cG \cA$ can be estimated by Corollary \ref{co1}, we may assume that $f(t,x)=0$ for $x \in B_{2d}$. Denote $\overline{f}=|f|_H$, take $0 > s > r > -10$, and see \begin{eqnarray*} |\Psi_{s-r}f(r, \cdot)(y)|_H &\leq& (s-r)^{-d/\alpha}\int_{\bR^d} |\psi(z/(s-r)^{1/\alpha})| |f(r,y-z)|_H ~dz\\ &\leq& (s-r)^{-d/\alpha}\int_{\bR^d} \overline{\psi}(|z|/(s-r)^{1/\alpha}) \overline{f}(r,y-z)~dz. \end{eqnarray*} Observe that if $(s,y) \in Q_0$ and $|z| \leq \rho$ with a $\rho > 1$, then \begin{eqnarray} \label{4027} |x-y| \leq 2d, \quad B_\rho(y) \subset B_{2d+\rho}(x) \subset B_{\mu \rho}(x), \quad \mu=2d+1, \end{eqnarray} whereas if $|z| \leq 1$, then $|y-z| \leq 2d$ and $f(r,y-z)=0$. 
Thus by (\ref{4026}), for $0 >s > r > -10$ and $(s,y) \in Q_0$ \begin{eqnarray*} |\Psi_{s-r}f(r, \cdot)(y)|_H &\leq& (s-r)^{-(d+1)/\alpha}\int_1^\infty |\overline{\psi}'(\rho/(s-r)^{1/\alpha})|(\int_{|z| \leq \rho} \overline{f}(r,y-z)~dz) ~d\rho\\ &=& (s-r)^{-(d+1)/\alpha}\int_1^\infty |\overline{\psi}'(\rho/(s-r)^{1/\alpha})|(\int_{B_{\rho}(y)} \overline{f}(r,z)~dz) ~d\rho\\ &\leq& (s-r)^{-(d+1)/\alpha}\int_1^\infty |\overline{\psi}'(\rho/(s-r)^{1/\alpha})|(\int_{B_{\mu \rho}(x)} \overline{f}(r,z)~dz) ~d\rho\\ &\leq& N \bM_x\overline{f}(r,x)(s-r)^{-(d+1)/\alpha}\int_1^\infty |\overline{\psi}'(\rho/(s-r)^{1/\alpha})|\rho^d~ d\rho\\ &=& N \bM_x\overline{f}(r,x)\int_{(s-r)^{-1/\alpha}}^\infty |\overline{\psi}'(\rho)|\rho^d~ d\rho\leq N \bM_x\overline{f}(r,x)(s-r)^{\delta/\alpha}, \end{eqnarray*} where the last inequality follows from (\ref{eqn 06.08}) and the inequality $(s-r)^{-1/\alpha}\geq 10^{-1/\alpha}$. By Jensen's inequality $(\bM_x\overline{f})^2 \leq \bM_x \overline{f}^2$, and therefore, for any $(s,y) \in Q_0$ (remember $\delta\geq \alpha/2$) \begin{eqnarray*} |u(s,y)|^2 = \int_{-\infty}^s |\Psi_{s-r}f(r,\cdot)(y)|_H^2 \frac{dr}{s-r} &\leq& N \int_{-10}^s \bM_x \overline{f}^2(r,x) (s-r)^{2\delta/\alpha -1}dr\\ &\leq& N \int_{-10}^0 \bM_x \overline{f}^2(r,x) dr \leq N \bM_t\bM_x \overline{f}^2(t,x). \end{eqnarray*} The lemma is proved.{ $\Box$ } \begin{lemma} \label{2-4} Assume that $f(t,x)=0$ for $t \geq -8$. Then for any $(t,x) \in Q_0$ \begin{eqnarray} \label{4028} \int_{Q_0} |u(s,y) -u(t,x)|^2~dsdy \leq N\bM_t \bM_x |f|_H^2 (t,x), \end{eqnarray} where $N=N(d,\alpha,\nu,\lambda,\delta,K)$. \end{lemma} \noindent{\bf Proof.} Obviously it is enough to show that \begin{equation} \label{eqn 6.08.8} \sup_{Q_0} [ |D_su|^2 + |\nabla u|^2] \leq N \bM_t \bM_x |f|_H^2 (t,x). \end{equation} By Minkowski's inequality the derivative of a norm is less than or equal to the norm of the derivative if both exist. 
Thus for fixed $(s,y) \in Q_0$ we have $$ |\nabla u (s,y)|^2 \leq \int_{-\infty}^{-8} |\nabla \Psi_{s-r}f(r,\cdot)(y)|^2_H \frac{dr}{s-r}=:\int^{-8}_{-\infty}I^2(r,s,y)\frac{dr}{s-r}, $$ where \begin{eqnarray*} I(r,s,y)&:=& |\nabla \Psi_{s-r}f(r,\cdot)(y)|_H\\ &=&(s-r)^{-(d+1)/\alpha}|\int_{\bR^d}(\nabla \psi)(z/(s-r)^{1/\alpha})f(r,y-z)~dz|_H\\ &\leq& (s-r)^{-(d+1)/\alpha}\int_{\bR^d} \overline{\psi}(|z|/(s-r)^{1/\alpha})\overline{f}(r,y-z)~dz=:\tilde{I}(r,s,y), \end{eqnarray*} and $\bar{f}:=|f|_{H}$. Using (\ref{4026}) and (\ref{4027}) again, we get for $s>r$, \begin{eqnarray*} \tilde{I}(r,s,y) &\leq&(s-r)^{-(d+2)/\alpha}\int_0^\infty \overline{\psi}'(\rho/(s-r)^{1/\alpha}) (\int_{B_\rho(y)}\overline{f}(r,z)~dz)~d\rho\\ &\leq&(s-r)^{-(d+2)/\alpha}\int_0^\infty \overline{\psi}'(\rho/(s-r)^{1/\alpha}) (\int_{B_{2d+\rho}(x)}\overline{f}(r,z)~dz)~d\rho\\ &\leq& N \bM_x\overline{f}(r,x) (s-r)^{-(d+2)/\alpha}\int_0^\infty \overline{\psi}'(\rho/(s-r)^{1/\alpha}) (2d + \rho)^d d\rho\\ &=& N \bM_x\overline{f}(r,x) (s-r)^{-1/\alpha}\int_0^\infty \overline{\psi}'(\rho)(2d/(s-r)^{1/\alpha} + \rho)^d d\rho. \end{eqnarray*} For $r \leq -8$, we have $s-r\geq 2^\alpha $ and \begin{eqnarray*} \int_0^\infty |\overline{\psi}'(\rho)| ( 2d / (s-r)^{1/\alpha} + \rho)^d~d\rho \leq \int_0^\infty |\overline{\psi}'(\rho)| ( d+ \rho)^d~d\rho \leq N, \end{eqnarray*} \begin{eqnarray*} \tilde{I}(r,s,y) \leq N \bM_x\overline{f}(r,x) (s-r)^{-1/\alpha} \end{eqnarray*} and \begin{eqnarray*} |\nabla u(s,y)|^2 \leq \int_{-\infty}^{-8} \tilde{I}^2(r,s,y) \frac{dr}{s-r} &\leq& N \int_{-\infty}^{-8} \bM_x\overline{f}^2(r,x) \frac{dr}{(s-r)^{2/\alpha+1}}\\ &\leq& N \int_{-\infty}^{-8} \bM_x\overline{f}^2(r,x) \frac{dr}{(-4-r)^{2/\alpha+1}}. 
\end{eqnarray*} By the integration by parts, \begin{eqnarray} |\nabla u(s,y)|^2 &\leq& N\int_{-\infty}^{-8} \tilde{I}^2(r,s,y) \frac{dr}{s-r} \nonumber\\ &\leq& N\int_{-\infty}^{-8} \frac{1}{(-4-r)^{2/\alpha+2}} (\int_r^0 \bM_x \overline{f}^2(p,x)~dp)~dr \nonumber\\ &\leq& N \bM_t\bM_x\overline{f}^2(t,x) \int_{-\infty}^{-8} \frac{|r|}{(-4-r)^{2/\alpha+2}} ~dr=N\bM_t\bM_x\overline{f}^2(t,x) \label{eqn 06.08.2}. \end{eqnarray} To estimate $D_su$, we proceed similarly. By Minkowski's inequality, \begin{eqnarray} |D_su(s,y)|^2 &\leq& N \int_{-\infty}^{-8} \left( |D_s\Psi_{s-r}f(r,y)|_H^2 \frac{1}{s-r}+ |\Psi_{s-r}f(r,y)|_H^2\frac{1}{(s-r)^3}\right)\,dr \nonumber \\ &=:& N \int_{-\infty}^{-8} J^2(r,s,y) \frac{1}{s-r}\,dr, \label{eqn 6.08.5} \end{eqnarray} where \begin{eqnarray*} J(r,s,y)&:=&(s-r)^{-d/\alpha} |\int_{\bR^d}D_s \psi(z/(s-r)^{1/\alpha}) f(r,y-z)~dz|_H \\ &&+(s-r)^{-d/\alpha-1} | \int_{\bR^d} \psi(z/(s-r)^{1/\alpha}) f(r,y-z)~dz|_H\\ &=&(s-r)^{-d/\alpha} |\int_{\bR^d} \nabla \psi(z/(s-r)^{1/\alpha}) \cdot (-\frac{1}{\alpha}(s-r)^{-1/\alpha-1}z ) f(r,y-z)~dz|_H\\ &&+(s-r)^{-d/\alpha-1} | \int_{\bR^d} \psi(z/(s-r)^{1/\alpha}) f(r,y-z)~dz|_H\\ &\leq& N (s-r)^{-d/\alpha-1 } \int_{\bR^d} \overline{\psi}(|z|/(s-r)^{1/\alpha}) \overline{f}(r,y-z)~dz=N\tilde{I}(r,s,y). \end{eqnarray*} This, (\ref{eqn 06.08.2}) and (\ref{4028}) lead to (\ref{eqn 6.08.8}). The lemma is proved. { $\Box$ } \mysection{Proof of Theorem \ref{main theorem}} \label{proof of theorem} Note that we may assume $a=-\infty$ and $b=\infty$. Indeed, for any $f\in C^{\infty}_0((a,b)\times \bR^d,H)$ we have $f\in C^{\infty}_0(\bR^{d+1},H)$, and inequality (\ref{4022}) with $a=-\infty$ and $b=\infty$ implies the inequality with any pair of $(a,b)$. Since in this case the theorem is already proved if $p=2$, we assume $p>2$. Let $\cF$ be the collections of all balls $Q\subset \bR^{d+1}$ of the type $$ Q_{c}(s,y):=\{ (s-c^\alpha,s) \times (y^1-c/2 , y^1 +c/2) \cdots (y^d-c/2, y^d +c/2) \}, \quad c>0. 
$$ For a measurable function $h(t,x)$ on $\bR^{d+1}$, define the sharp function $$ h^\#(t,x) := \sup_{Q} \frac{1}{|Q|} \int_Q |h(s,y)-h_Q|\, dyds, $$ where $$ h_Q = \Xint-_Q h ~dyds:=\frac{1}{|Q|} \int_Q h(s,y)\, dyds $$ and the supremum is taken over all balls $Q\in \cF$ containing $(t,x)$. \begin{thm}(Fefferman-Stein). For any $1<q<\infty$ and $h\in L_q(\bR^{d+1})$, \begin{equation} \label{eqn FS} \|h\|_{L_q}\leq N(q)\|h^\#\|_{L_q}. \end{equation} \end{thm} \noindent{\bf Proof.} Inequality (\ref{eqn FS}) is a consequence of Theorem IV.2.2 in \cite{Ste}, because the balls $Q_c(s,y)$ satisfy the conditions (i)-(iv) in section 1.1 of \cite{Ste} :\\ (i) $Q_{c}(t,x)\cap Q_{c}(s,y)\neq \emptyset$ implies $Q_{c}(s,y)\subset Q_{N_1c}(t,x)$ ;\\ (ii) $|Q_{N_1c}(t,x)|\leq N_2 |Q_{c}(t,x)|$ ;\\ (iii) $\cap_{c>0}\overline{Q}_{c}(t,x)=\{(t,x)\}$ and $\cup_{c}Q_{c}(t,x)=\bR^{d+1}$ ;\\ (iv) for each open set $U$ and $c>0$, the function $(t,x)\to |Q_{c}(t,x)\cap U|$ is continuous. { $\Box$ } Next we prove \begin{equation} \label{eqn 6.08.9} (\cG f)^{\#}(t,x)\leq N (\bM_t\bM_x|f|^2_{H})^{1/2}(t,x). \end{equation} By Jensen's inequality, to prove (\ref{eqn 6.08.9}) it suffices to prove that for each $Q=Q_{c}(s,y)\in \cF$ and $(t,x)\in Q$, \begin{eqnarray} \label{4033} \Xint-_Q |\cG f- (\cG f)_Q|^2~dyds \leq N(d,\alpha,\nu,\lambda,\delta,K) \bM_t\bM_x|f|_H^2(t,x). \end{eqnarray} It is easy to check that to prove (\ref{4033}) we may assume $(s,y)=(0,0)$. 
Note that for any $c > 0$, $\Psi_th(c~ \cdot)(x) = \Psi_{tc^\alpha}h(cx)$ and \begin{eqnarray} \notag \cG f(c^{\alpha}~ \cdot , c ~\cdot)(t,x) \notag &=&[\int_{-\infty}^t |\Psi_{(t-s)c^\alpha} f(c^{\alpha} s,\cdot)(cx)|_H^2 \frac{ds}{t-s}]^{1/2}\\ \notag &=&[\int_{-\infty}^{t c^{\alpha}} |\Psi_{(t-c^{-\alpha}s)c^\alpha} f(s,\cdot)(cx)|_H^2 \frac{ c^{-\alpha} ds}{t-c^{-\alpha}s}]^{1/2}\\ \notag &=&[\int_{-\infty}^{t c^{\alpha}} |\Psi_{(c^\alpha t-s)} f(s,\cdot)(cx)|_H^2 \frac{ds}{c^\alpha t-s}]^{1/2}\\ \label{5271}&=&\cG f(c^\alpha t, cx). \end{eqnarray} Since dilations don't affect averages, (\ref{5271}) shows that it suffices to prove (\ref{4033}) when $c=2$, that is $Q=Q_0$ from (\ref{4024}). Now we take a function $\zeta \in C_0^\infty(\bR)$ such that $\zeta=1$ on $[-8,8]$, $\zeta=0$ outside of $[-10,10]$, and $1 \geq \zeta \geq 0$. Define $$ \cA(s,y) := f(s,y) \zeta(s), \quad \cB(s,y):= f(s,y)-\cA(s,y)=f(s,y)(1-\zeta(s)). $$ Then $$ \Psi_{t-s}\cA(s,\cdot) = \zeta(s) \Psi_{t-s}f(s,\cdot), \ \quad \cG f \leq \cG \cA + \cG \cB, \quad \cG \cB \leq \cG f $$ and for any constant $c$, $|\cG f -c | \leq |\cG\cA| + |\cG \cB -c|$. Thus \begin{eqnarray*} \Xint-_{Q_0} |\cG f- (\cG f)_{Q_0}|^2~dyds \leq 4 \Xint-_{Q_0} |\cG f-c|^2~dyds \leq 8 \Xint-_{Q_0} |\cG \cA|^2~dyds + 8\Xint-_{Q_0} |\cG \cB - c |^2~dyds. \end{eqnarray*} Taking $c=\cG \cB(t,x)$, from Lemma \ref{2-3} we get \begin{eqnarray*} \Xint-_{Q_0} |\cG f- (\cG f)_{Q_0}|^2~dyds &\leq& 8 \Xint-_{Q_0} |\cG \cA|^2~dyds + 8\Xint-_{Q_0} |\cG \cB - \cG \cB (t,x) |^2~dyds\\ &\leq& N \bM_t \bM_x |f|_H^2 (t,x)+8\Xint-_{Q_0} |\cG \cB - \cG \cB (t,x) |^2~dyds. \end{eqnarray*} In addition, setting $f_1(s, y) := \cB(s,y)$ on $s \leq 0$ and $f_1(s,y):=0$ on $s >0$, from Lemma \ref{2-4} we see \begin{eqnarray*} \bM_t \bM_x |f|_H^2 (t,x)+8\Xint-_{Q_0} |\cG \cB - \cG \cB (t,x) |^2~dyds &\leq& \bM_t \bM_x |f|_H^2 (t,x)+N \bM_t \bM_x |f_1|_H^2 (t,x)\\ &\leq& N\bM_t \bM_x |f|_H^2 (t,x). \end{eqnarray*} This proves (\ref{eqn 6.08.9}). 
Finally, combining the Fefferman-Stein theorem and Hardy-Littlewood maximal theorem (see, for instance, \cite{Ste}), we conclude (recall $p/2 >1$) \begin{eqnarray*} \|u\|_{L_p(\bR^{d+1})}^p \leq N \|(\bM_t\bM_x|f|_H^2)^{1/2}\|_{L_p(\bR^{d+1})}^p &=&N \int_{\bR^d} \int_\bR (\bM_t \bM_x |f|_H^2)^{p/2}~dt~dx \\ &\leq& N \int_{\bR^d} \int_\bR (\bM_x |f|_H^2 )^{p/2}~dt~dx \\ & =& N \int_{\bR} \int_{\bR^d} (\bM_x |f|_H^2 )^{p/2}~dx~dt \\ &\leq& N \|f\|_{L_p(\bR^{d+1},H)}^p. \end{eqnarray*} The theorem is proved. { $\Box$ } Below we explain why Theorem \ref{main theorem} implies (\ref{for fun}). By (\ref{eqn 333}) and Remark \ref{remark 22}, for any solution $u$ of (\ref{eqn 0}), we have \begin{equation} \label{eqn 6.9.11} \bE \int^T_0\|\partial^{\alpha/2}_xu(t,\cdot)\|^p_{L_p}dt \leq N \bE \int_{\bR^d} \int_0^T |f|^p_{\ell_2}~dtdx. \end{equation} By (\ref{eqn 222}) and Burkholder-Davis-Gundy inequality, \begin{equation} \label{eqn 33333} \bE \int^T_0\|u(t,\cdot)\|^p_{L_p}dt \leq N \bE \int^T_0\int_{\bR^d}\left[\int^t_0|T_{\alpha,t-s}f(s,\cdot)(x)|^2_{\ell^2} ds\right]^{p/2}dxdt, \end{equation} and by Jensen's inequality \begin{equation} \label{eqn 6.9.10} |T_{t-s}f(x)|^2_{\ell_2}=\sum_k\left(\int_{\bR^d}p(t-s,y)f^k(x-y)dy\right)^2\leq N (p(t-s,\cdot)*|f(\cdot)|^2_{\ell_2})(x). \end{equation} Thus (\ref{eqn 6.9.11}), (\ref{eqn 33333}), (\ref{eqn 6.9.10}) and Remark \ref{remark 6.14} imply $$ \bE\int^T_0\|u\|^p_{H^{\alpha/2}_p}dt\leq N \bE\int^T_0(\|u\|^p_{L^p}+\|\partial^{\alpha/2}u\|^p_{L^p})dt\leq N(T,d,\alpha)\bE \int_{\bR^d} \int_0^T |f|^p_{\ell_2}~dtdx. $$ \end{document}
\begin{document} \title{Convergence analysis of a regularized inexact interior-point method for linear programming problems\thanks{Submitted to the editors \today. \funding{This work was funded in part by VLAIO Baekeland project HBC.2020.2867 and Motulus BV.}}} \begin{abstract} Interior-point methods for linear programming problems require the repeated solution of a linear system of equations. Solving these linear systems is non-trivial due to the severe ill-conditioning of the matrices towards convergence. This issue can be alleviated by incorporating suitable regularization terms in the linear programming problem. Regularization also allows us to efficiently handle rank deficient constraint matrices. We provide a convergence analysis of a regularized inexact interior-point method. The term `inexact' refers to the fact that we do not need to compute the true solution of the linear system of equations, only an approximation thereof. The formulation of the algorithm is sufficiently general such that specialized linear algebra routines developed in other work on inexact interior-point methods can also be incorporated in our regularized framework. In this work, we exploit the inexactness by using a mixed-precision solver for the linear system of equations. More specifically, we perform a Cholesky factorization in IEEE single precision and use it as a preconditioner for the Conjugate Gradient method. Numerical experiments illustrate the benefits of this approach applied to linear programming problems with a dense constraint matrix. 
\end{abstract} \begin{keywords} Inexact interior-point method, regularization, linear programming, mixed-precision \end{keywords} \begin{AMS} 90C51, 90C05, 65F22 \end{AMS} \section{Introduction} We consider the standard form linear programming problem \begin{equation} \label{eq:primal} \min_{x\in\mathbb{R}^n} c^T x \qquad \text{subject to} \qquad Ax = b,\, x\geq 0 \end{equation} with $x,c \in \mathbb{R}^n, b\in \mathbb{R}^m$ and $A\in\mathbb{R}^{m \times n}$ and its dual formulation \begin{equation}\label{eq:dual} \max_{y\in\mathbb{R}^m} b^T y \qquad \text{subject to} \qquad A^T y + z = c,\, z\geq 0 \end{equation} with $y\in\mathbb{R}^m$ and $z\in\mathbb{R}^n$. We refer to $x$ as the primal variables and $(y,z)$ as the dual variables. We do not need to assume that $A$ has full rank, which is one of the benefits of the approach proposed in the current paper. Inequalities should be interpreted component-wise. The Karush-Kuhn-Tucker (KKT) conditions of the primal-dual pair of linear programming problems \cref{eq:primal} and \cref{eq:dual} are given by the non-linear system of equations \begin{align} Ax - b &= 0, \label{eq:primal_kkt}\\ A^T y + z - c &= 0,\label{eq:dual_kkt}\\ x_i z_i &= 0, \qquad (i = 1,\ldots,n) \label{eq:complementarity}\\ (x,z) &\geq 0. \label{eq:nonneg} \end{align} We assume that there exists a strictly feasible point, i.e.\ a point $(x,z)>0$ that satisfies conditions \cref{eq:primal_kkt,eq:dual_kkt}. In this case it is well known that the KKT conditions are necessary and sufficient for $x$ to be a solution of the primal linear programming problem \cref{eq:primal} and for $(y,z)$ to solve the dual problem \cref{eq:dual}, see for instance \cite{boyd2004convex,nocedal2006numerical}. Primal-dual interior-point methods generally compute a perturbed Newton direction to the KKT system above, where the complementarity condition \cref{eq:complementarity} is relaxed by $x_i z_i = \tau$ for some positive scalar $\tau >0$. 
The solution $(x(\tau),y(\tau),z(\tau))$ obtained by solving equations \cref{eq:primal_kkt}, \cref{eq:dual_kkt}, \cref{eq:nonneg} and $x_i z_i=\tau$ for all $i$, guides us towards the true solution of the KKT equations for $\tau\rightarrow 0$. This set of points parametrized by $\tau$ is referred to in literature as the central path. Most primal-dual interior-point methods compute a sequence of iterates in some neighborhood of this central path and require that all iterates remain strictly feasible with respect to the constraint $(x,z)>0$. We refer the reader to \cite{wright1997primal} for an excellent general reference on primal-dual interior-point methods. In this work we shall consider $\tau = \beta_1 x^T z / n$ where $\beta_1 \in(0,1)$ is called the centering parameter. In a typical primal-dual infeasible interior-point method we have that in each iteration a Newton-like direction is computed as: \begin{equation} \label{eq:newton_eq} \begin{pmatrix} A & 0 & 0 \\ 0 & A^T & I \\ Z & 0 & X \end{pmatrix} \begin{pmatrix} \Delta x \\ \Delta y \\ \Delta z \end{pmatrix} = \begin{pmatrix} b - A x \\ c - A^T y - z \\ \tau e - X z \end{pmatrix}. \end{equation} Here, we use $X = \diag(x)$ and $Z = \diag(z)$ to denote diagonal matrices containing the elements of $x$ and $z$ respectively. In addition we denote $e = (1,1,\ldots,1)^T \in \mathbb{R}^n$ the vector of length $n$ containing all ones, and $0$ and $I$ the zero-matrix and identity matrix respectively, where the dimensions should be clear from the context. Note that the matrix in \cref{eq:newton_eq} is simply the Jacobian matrix of the non-linear system of equations defined by \cref{eq:primal_kkt}--\cref{eq:complementarity}. These equations are only `mildly' non-linear in the sense that the only non-linearity is due to the complementarity condition. 
So-called inexact interior-point methods \cite{korzak2000convergence,gondzio2013convergence,baryamureeba2000convergence,bellavia1998inexact,monteiro2003convergence,al2009convergence} do not require that \cref{eq:newton_eq} is solved exactly (i.e. to full machine precision accuracy). This type of interior-point method is for instance useful when we want to use some Krylov subspace method to solve a suitable reformulation of the linear system of equations. Two main approaches exist. The first consists of transforming \cref{eq:newton_eq} to a linear system of equations with a symmetric indefinite matrix, which is referred to as the augmented system approach. In this case we can use a symmetric indefinite Krylov subspace method, such as SYMMLQ or MINRES \cite{paige1975solution}. Alternatively, we can also reformulate \cref{eq:newton_eq} to a linear system with a symmetric positive definite matrix, in which case Conjugate Gradients (CG) \cite{hestenes1952methods} is preferred. This reformulation is referred to as the normal equations approach. We provide some more details on these two different approaches and how we can efficiently solve them in \cref{sec:reformulations}. For more general information on Krylov subspace methods we refer to \cite{liesen2013krylov,saad2003iterative}. When the matrix $A$ does not have full row rank then it holds that the matrix \cref{eq:newton_eq} is singular, which is also true for the reduced linear systems. Moreover, even when $A$ has full rank, solving a suitable reformulation of the linear system \cref{eq:newton_eq} might lead to numerical difficulties. The main reason for this is the ill-conditioning of the matrices towards convergence caused by complementarity of the variables $x$ and $z$, which is a well-known result in the literature. This is also the main difficulty discussed in a lot of the inexact interior-point methods based on Krylov subspace methods. 
These Krylov subspace methods need a very good preconditioner to make reasonable progress towards the solution, since the rate of convergence is related to the condition number of the matrix. To alleviate these issues, several papers have appeared in the literature that start from a regularized formulation of the linear programming problem, or introduce regularization terms directly in the linear systems \cite{friedlander2012primal,altman1999regularized,saunders1996solving,gondzio2012matrix}. These references either assume that the linear systems are solved exactly or provide no formal convergence analysis at all. As far as we know, no attempt has been made to provide a convergence analysis of a primal-dual infeasible interior-point method that solves a \textit{regularized} linear system \textit{inexactly}. The current manuscript fills this gap. We keep the description of the newly proposed algorithm sufficiently general such that specialized linear algebra routines developed in previous work on inexact interior-point methods can also be incorporated in our regularized framework. Krylov subspace methods with specialized preconditioners \cite{gondzio2013convergence,cui2019implementation,monteiro2003convergence,al2009convergence} are especially well-suited. Note that Krylov subspace methods are in general designed for large and sparse matrices. However, we will use them in a slightly different context for our numerical experiments. More specifically, we will consider test-problems with a dense constraint matrix $A$ which requires the approximate solution of a dense linear system of equations in each iteration of the interior-point method. We will solve these systems (approximately) by performing a Cholesky factorization in IEEE single precision and then performing -- if necessary -- a few iterations of the Conjugate Gradient method, preconditioned with the (dense) triangular Cholesky factors. 
In this way we can interpret the CG iterates as a form of iterative refinement, rather than a true iterative solution method. Alternatively this can be seen as some kind of hybrid between a direct method and an iterative method. Similar techniques have recently been proposed to accelerate the solution of linear systems of equations using lower precision arithmetic \cite{carson2017new,carson2018accelerating,higham2021exploiting,higham2019squeezing}. More details will be provided in \cref{sec:implementation}. The rest of the paper is organized as follows. In \cref{sec:description} we give a detailed description of the new algorithm and provide a convergence analysis. Next, we discuss some implementation details in \cref{sec:implementation}. In \cref{sec:num_ex} we report a number of experiments which illustrate the benefit of the proposed algorithm. Lastly, this work is concluded and an outlook is given in \cref{sec:conclusions}. \section{Description of the algorithm} \label{sec:description} To derive our algorithm we use the regularized linear programming problem defined in \cite{friedlander2012primal}: \begin{equation}\label{eq:regularized_lp} \min_{(x,w)} c^T x + \frac{\rho}{2}\| x - x^k \|^2 + \frac{\delta}{2}\| w + y^k \|^2 \qquad \text{subject to} \qquad Ax +\delta w = b,\, x\geq 0. \end{equation} Here, $\rho \geq 0$ and $\delta \geq 0$ are regularization parameters, $x^k$ and $y^k$ are the current estimates of the primal and dual solutions and $w\in\mathbb{R}^m$ is an auxiliary variable. Here and throughout an upper-index refers to a certain iteration number while a lower-index refers to a particular component of a vector. For example $x_i^k$ denotes the $i$-th component of the $k$-th iterate $x^k$. The norm $\|\cdot\|$ denotes the Euclidean norm. Note that we recover \cref{eq:primal} for the choice $\rho = \delta = 0$. Our algorithm will coincide with the one presented in \cite{korzak2000convergence} for these choices of regularization parameters. 
With this in mind, we purposefully keep the description of our algorithm as close as possible to \cite{korzak2000convergence}, such that the subtle difference between the two is apparent. The KKT conditions for \cref{eq:regularized_lp} are given by \begin{align*} Ax + \delta w - b &= 0, \\ A^T y + z - c - \rho (x - x^k) &= 0,\\ x_i z_i &= 0, \qquad (i = 1,\ldots,n)\\ \delta (w + y^k) - \delta y &= 0, \\ (x,z) &\geq 0. \end{align*} From the second-to-last equation we immediately get the relation $w = y - y^k$ when $\delta > 0$. Eliminating this variable from the KKT equations and relaxing the complementarity condition to $x_i z_i = \tau$, we get the following Newton-like update \begin{equation} \label{eq:newton_eq_reg} \begin{pmatrix} A & \delta I & 0 \\ -\rho I & A^T & I \\ Z & 0 & X \end{pmatrix} \begin{pmatrix} \Delta x \\ \Delta y \\ \Delta z \end{pmatrix} = \begin{pmatrix} \xi \\ \zeta \\ \eta \end{pmatrix}, \end{equation} with $\xi = b - A x - \delta(y - y^k),\zeta = c +\rho(x - x^k)- A^T y - z$ and $\eta=\tau e - X z$. Now if we choose $x = x^k, y = y^k$ and $z = z^k$, then the right-hand sides in \cref{eq:newton_eq,eq:newton_eq_reg} coincide and the Jacobian matrices only differ by the terms $\delta I$ and $\rho I$. We modify the algorithm in \cite{korzak2000convergence} such that instead of solving linear systems of the form \cref{eq:newton_eq} inexactly, we now solve linear systems of the form \cref{eq:newton_eq_reg} inexactly. Note that the analysis in \cite{friedlander2012primal} assumes that the linear systems are solved exactly and that the analysis in \cite{korzak2000convergence} does not include any form of regularization. Hence, the algorithm in the current manuscript can be seen as a combination of \cite{friedlander2012primal} and \cite{korzak2000convergence}. Suppose we have some iterate $(x^k,y^k,z^k)\in\mathbb{R}^{n + m + n}$. 
We refer to this point as an $(\epsilon,\epsilon_p,\epsilon_d)$-solution if the following conditions hold: \begin{equation}\label{eq:def_conv} (x^k,z^k)\geq 0,\, (x^k)^Tz^k \leq \epsilon,\, \|Ax^k - b\| \leq \epsilon_p,\,\|A^T y^k + z^k - c\| \leq \epsilon_d. \end{equation} The goal is to find an $(\epsilon,\epsilon_p,\epsilon_d)$-solution to the pair of linear programming problems \cref{eq:primal,eq:dual} or to determine that they are likely to be infeasible when $||(x^k,z^k)||_1>\omega$, for some sufficiently large number $\omega$. Some comments on the latter stopping-criterion are given later in this section, see \cref{thm:omega_stop}. We make sure all iterates of the interior-point method remain in the following central-path neighborhood, originally proposed by Kojima, Megiddo and Mizuno in \cite{kojima1993primal}: \begin{align} \label{eq:N} \mathcal{N} = \{(x,y,z) \in\mathbb{R}^{n + m + n}\,:\, &(x,z)>0, \\ &x_i z_i \geq \gamma x^T z/n \quad (i = 1,\ldots,n), \nonumber \\ & x^T z \geq \gamma_p \|Ax - b \| \text{ or } \|Ax - b \| \leq \epsilon_p, \nonumber \\ & x^T z \geq \gamma_d \|A^T y + z - c\| \text{ or } \|A^T y + z - c\| \leq \epsilon_d \} \nonumber \end{align} for some constants $\gamma_p,\gamma_d>0$ and $\gamma \in (0,1)$. The same neighborhood is also used in \cite{korzak2000convergence}. When $\gamma,\gamma_p$ and $\gamma_d$ are all very small, say all equal to $10^{-8}$, then these conditions are very loose. We now have all ingredients needed to formulate the regularized inexact interior-point method, see \cref{alg:KMM}. Suppose we have some iterate $(x^k,y^k,z^k)$ that is not yet an $(\epsilon,\epsilon_p,\epsilon_d)$-solution or does not satisfy $||(x^k,z^k)||_1>\omega$. Then we compute an approximation $(\Delta x^k,\Delta y^k,\Delta z^k)$ to the Newton direction \cref{eq:newton_eq_reg} for the choice $(x,y,z) = (x^k,y^k,z^k)$, see \cref{eq:update_eq}. 
The accuracy of the approximation is described using the norm of the \textit{residual components}, as shown in \cref{eq:acc_r,eq:acc_s}. Next, suitable step-lengths $\alpha^k_p$ and $\alpha^k_d$ are computed such that the new iterates $x^{k+1} = x^k + \alpha^k_p\Delta x^k$ and $(y^{k+1},z^{k+1}) = (y^{k},z^k) + \alpha^k_d(\Delta y^k, \Delta z^k)$ remain in the central path neighborhood $\mathcal{N}$ and satisfy an additional descent condition \cref{eq:descent}. Since $\beta_3>\beta_2$ we can always choose $\alpha_p^k = \alpha_d^k = \bar{\alpha}^k$. However, taking different step-lengths for the primal and dual variables allows us to take larger steps whenever possible and thus make more progress towards the solution. We now turn our attention to a convergence analysis of \cref{alg:KMM}. Our analysis closely resembles the analysis in \cite{korzak2000convergence}, which in its turn closely resembles the analysis of the exact counterpart of Kojima, Megiddo, Mizuno in \cite{kojima1993primal}. Let us denote the following functions for $i = 1,2,\ldots,n$: \begin{align} f_i^k (\alpha) &= (x_i^k + \alpha \Delta x^k_i)(z_i^k + \alpha \Delta z^k_i) - \gamma (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k)/n, \label{eq:def_f} \\ g_p^k(\alpha) &= (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k) - \gamma_p \|A(x^k + \alpha\Delta x^k) - b \|, \label{eq:def_g_primal} \\ g_d^k(\alpha) &= (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k) - \gamma_d \|A^T(y^k + \alpha\Delta y^k)+ z^k + \alpha\Delta z^k - c \|,\label{eq:def_g_dual} \\ h^k(\alpha) &= (1-\alpha(1 - \beta_2)) (x^{k})^T z^k -(x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k). 
\label{eq:def_h} \end{align} Furthermore, let us denote $\dot{\alpha}^k$ the largest value in $[0,1]$ such that for all $i=1,\ldots,n$: \begin{align} &f_i^k(\alpha) \geq 0,\, h^k(\alpha) \geq 0, \label{eq:ineq1} \\ &g^k_p(\alpha) \geq 0\text{ or } \|A(x^k + \alpha\Delta x^k) - b \| \leq \epsilon_p \label{eq:ineq2}\\ &g^k_d(\alpha) \geq 0\text{ or } \|A^T(y^k + \alpha\Delta y^k)+ z^k + \alpha\Delta z^k - c \| \leq \epsilon_d, \label{eq:ineq3} \end{align} for all $\alpha \in[0,\dot{\alpha}^k]$. \begin{algorithm} \caption{Regularized inexact interior-point method} \label{alg:KMM} \begin{algorithmic}[1] \STATE{Choose $\omega\gg 1$ large, $\epsilon,\epsilon_p,\epsilon_d, \gamma_p,\gamma_d>0\,,\gamma \in (0,1)$ and $0<\beta_1<\beta_2<\beta_3<1$.} \STATE{Choose regularization parameter $\delta$ and $\rho$ sufficiently small.} \STATE{Choose parameters $\tau_1,\tau_2 \in (0,1]$ such that $\beta_1 + \tau_1 - 1 > 0$ and $\beta_1 + \tau_2 - 1 > 0$.} \STATE{Compute initial point $(x^0,y^0,z^0)\in\mathcal{N}$, with $\mathcal{N}$ defined by \cref{eq:N}.} \FOR{$k = 0,1,2,\ldots$} \STATE{If $(x^k,y^k,z^k)$ is an $(\epsilon,\epsilon_p,\epsilon_d)$-solution or $||(x^k,z^k)||_1 > \omega$: \textbf{STOP}.} \STATE{Compute an \textit{inexact} search direction $(\Delta x^k,\Delta y^k,\Delta z^k)$ that satisfies: \begin{equation} \label{eq:update_eq} \begin{pmatrix} A & \delta I & 0 \\ -\rho I & A^T & I \\ Z^k & 0 & X^k \end{pmatrix} \begin{pmatrix} \Delta x^k \\ \Delta y^k \\ \Delta z^k \end{pmatrix} = \begin{pmatrix} b - A x^k \\ c - A^T y^k - z^k \\ \beta_1\mu_k e - X^k z^k \end{pmatrix} + \begin{pmatrix}r^k \\ s^k\\ 0\end{pmatrix} \end{equation} with $\mu_k = \frac{\left(x^k\right)^T \left(z^k\right)}{n}$ and where the \textit{residual components} satisfy \begin{align} &||r^k||\leq (1-\tau_1) ||Ax^k - b||, \label{eq:acc_r}\\ &||s^k|| \leq (1 - \tau_2)||A^T y^k + z^k - c||. 
\label{eq:acc_s} \end{align}} \STATE{Compute step-length $\alpha^{*,k} = \min\{\alpha^{*,k}_p,\alpha^{*,k}_d\}$ with \begin{align*} \alpha^{*,k}_p &= \max\{\alpha\in\mathbb{R}: x^k + \alpha \Delta x^k \geq 0\}, \\ \alpha^{*,k}_d &= \max\{\alpha\in\mathbb{R}: z^k + \alpha \Delta z^k \geq 0\}. \end{align*} \label{alpha_star}} \STATE{If $(x^k,y^k,z^k) + \alpha^{*,k}(\Delta x^k,\Delta y^k,\Delta z^k)$ is an $(\epsilon,\epsilon_p,\epsilon_d)$-solution: \textbf{STOP}. \label{terminated}} \STATE{Let $\bar{\alpha}^k$ be the largest value in $[0,1]$ such that for all $\alpha\in[0,\bar{\alpha}^k]:$ \begin{align*} &(x^k,y^k,z^k) + \alpha (\Delta x^k,\Delta y^k,\Delta z^k) \in\mathcal{N} \\ &(x^{k} + \alpha \Delta x^k)^T (z^{k} + \alpha \Delta z^k) \leq ( 1-\alpha(1-\beta_2)) \left(x^k\right)^T z^k \end{align*} \label{alpha_bar}} \STATE{Choose $\alpha_p^k,\alpha_d^k \in[0,1]$ such that $(x^{k+1},y^{k+1},z^{k+1}) \in \mathcal{N}$ and \begin{equation}\label{eq:descent} \left(x^{k+1}\right)^T z^{k+1} \leq (1 - \bar{\alpha}^k(1-\beta_3))\left(x^k\right)^T z^k \end{equation} with $(x^{k+1},y^{k+1},z^{k+1}) = (x^{k} + \alpha_p^k\Delta x^{k},y^{k} + \alpha_d^k\Delta y^k ,z^{k} + \alpha_d^k \Delta z^k)$. \label{enough}} \ENDFOR \end{algorithmic} \end{algorithm} Using the definitions above, we can show the following lemma: \begin{lemma}[Upper bound on $\bar{\alpha}^k$ \cite{korzak2000convergence}] \label{thm:upper_bound_alpha} If \cref{alg:KMM} does not stop on line \ref{terminated} of iteration $k$, then $\dot{\alpha}^{k}=\bar{\alpha}^k < \alpha^{*,k}$. \end{lemma} \begin{proof} The proof of this lemma is given in \cite{korzak2000convergence}. For the sake of completeness, we show the proof again using the notation of the current manuscript. Suppose that $\dot{\alpha}^k \geq \alpha^{*,k}$. Then by definition of $\dot{\alpha}^k$ we have that $\alpha^{*,k}$ satisfies the conditions \cref{eq:ineq1,eq:ineq2,eq:ineq3}. 
However, by definition of $\alpha^{*,k}$ (see line \ref{alpha_star} in \cref{alg:KMM}) we know that there exists at least one index $i$ such that $(x_i^k + \alpha^{*,k}\Delta x_i^k)(z_i^k + \alpha^{*,k}\Delta z_i^k) = 0$. Combining this result for this particular index $i$ with the fact that $f_i^k(\alpha^{*,k})\geq0$, we immediately get that $(x^k + \alpha^{*,k}\Delta x^k)^T (z^k + \alpha^{*,k}\Delta z^k) = 0$. Now suppose $g_p^k(\alpha^{*,k}) < 0$, then we have $\|A(x^k + \alpha^{*,k}\Delta x^k) - b \| \leq \epsilon_p$. Otherwise, if $g_p^k(\alpha^{*,k}) \geq 0$, we get $\|A(x^k + \alpha^{*,k}\Delta x^k) - b \| = 0 $ by definition of $g_p^k(\alpha)$ and the fact that $(x^k + \alpha^{*,k}\Delta x^k)^T (z^k + \alpha^{*,k}\Delta z^k) = 0$. Similarly, we can show the inequality $\|A^T(y^k + \alpha^{*,k}\Delta y^k)+ z^k + \alpha^{*,k}\Delta z^k - c \| \leq \epsilon_d$. This of course means that $(x^k,y^k,z^k) + \alpha^{*,k}(\Delta x^k,\Delta y^k,\Delta z^k)$ is an $(\epsilon,\epsilon_p,\epsilon_d)$-solution as defined by \cref{eq:def_conv}, which implies that the algorithm should have terminated on line \ref{terminated}. This contradicts the assumption made in the lemma and thus we have shown that $\dot{\alpha}^k < \alpha^{*,k}$. The proof now concludes by observing that the equality $\bar{\alpha}^k = \dot{\alpha}^k$ holds because $\dot{\alpha}^k$ satisfies the constraints $x^k + \dot{\alpha}^k\Delta x^k > 0$ and $z^k + \dot{\alpha}^k\Delta z^k > 0$, owing to the strict inequality $\dot{\alpha}^k < \alpha^{*,k}$. \end{proof} \begin{remark}\label{thm:compute_bar} From this lemma it follows that we do not always need to compute $\bar{\alpha}^{k}$, since this result implies \begin{equation}\label{eq:ineq_use} (1 - \alpha^{*,k}(1-\beta_3))(x^k)^Tz^k \leq (1 - \bar{\alpha}^k(1-\beta_3))(x^k)^Tz^k. 
\end{equation} Hence, we can compute trial step-lengths $\alpha_p^k = \beta_4 \alpha_p^{*,k}$ and $\alpha_d^k = \beta_4\alpha_d^{*,k}$ with parameter $\beta_4 \in (0,1)$ some constant close to one, e.g. $\beta_4 = 0.99995$, to make sure the iterates remain strictly positive. Then we check whether the new iterates $(x^{k+1},y^{k+1},z^{k+1})$ computed with these step-lengths remain in the neighborhood $\mathcal{N}$ and satisfy the descent condition $(x^{k + 1})^T z^{k + 1} \leq (1 - \alpha^{*,k}(1-\beta_3))(x^k)^Tz^k$, since this implies that \cref{eq:descent} is satisfied since we have the inequality \cref{eq:ineq_use}. \end{remark} \begin{theorem} \Cref{alg:KMM} terminates after a finite number of iterations for regularization parameters $\rho>0$ and $\delta>0$ sufficiently small. \end{theorem} \begin{proof} We follow the same structure as the proof in \cite{korzak2000convergence}, although the details are different since the regularization parameters $\rho$ and $\delta$ have to be carefully taken into account. We prove this claim by contradiction, so suppose that the algorithm does not terminate. Then it holds for all $k\geq 0$ that \begin{equation}\label{eq:lower_bound_mu} (x^k)^T z^k \geq \epsilon^* := \min\{\epsilon,\epsilon_p\gamma_p,\epsilon_d\gamma_d\}, \end{equation} and that $||(x^k,z^k)||_1 \leq \omega$. This implies that the iterates $x^k$ and $z^k$ remain bounded and that there exists some constant $c_1$ such that $(x^k)^Tz^k \leq c_1$. This observation together with the fact that the iterates all satisfy $(x^k,y^k,z^k)\in\mathcal{N}$ allows us to show that the right-hand side in \cref{eq:update_eq} is bounded. For the first component we have: \begin{align*} \|b - Ax^k + r^k\| \leq \|b - Ax^k\| + \|r^k\| &\leq (2-\tau_1)\|Ax^k - b\| \\ &\leq (2-\tau_1) \max\left\{\frac{(x^k)^T z^k}{\gamma_p}, \epsilon_p \right\} \\ &\leq (2-\tau_1)\max\{c_1/\gamma_p,\epsilon_p\}. 
\end{align*} Similarly, we have for the second component in \cref{eq:update_eq} that \begin{equation*} \| c - A^T y^k - z^k \| \leq (2-\tau_2) \max\{c_1/\gamma_d,\epsilon_d\}. \end{equation*} For the third component in \cref{eq:update_eq} we have: \begin{equation*} \| \beta_1 (x^k)^T z^k e / n - X^k z^k \| \leq \beta_1 (x^k)^T z^k \|e\|/n + \|X^k z^k \|\leq \beta_1 c_1 /\sqrt{n} + \|x^k\| \,\|z^k\|, \end{equation*} which is also bounded since the iterates $x^k$ and $z^k$ are bounded. From Theorem 1 in \cite{armand2013uniform} and the fact that $(x^k)^T z^k \geq \epsilon^*$ we get that the matrices in \cref{eq:update_eq} have a bounded inverse, where the bound is independent of $k$. Hence, together with the boundedness of the right-hand side in \cref{eq:update_eq} we can conclude that $(\Delta x^k,\Delta y^k,\Delta z^k)$ is bounded. This implies that we can always find some constant $c_2 > 0$ such that \begin{equation}\label{eq:bounded} | \Delta x_i^k \Delta z_i^k - \gamma(\Delta x^k)^T\Delta z^k/n| \leq c_2 \text{ and } |(\Delta x^k)^T \Delta z^k| \leq c_2. \end{equation} We will now show that we can use this result to prove that $\bar{\alpha}^k$ as defined on line \ref{alpha_bar} in \cref{alg:KMM} is bounded below by some value $\alpha^*>0$. For all $k\geq 0$ and $i = 1,\ldots,n$ we have from the third component in \cref{eq:update_eq} that \begin{equation}\label{eq:third_component} z_i^k \Delta x_i^k + x_i^k \Delta z_i^k = \beta_1 (x^k)^T z^k /n - x_i^k z_i^k. \end{equation} From this it follows that \begin{equation} \label{eq:xz_i} (x_i^k + \alpha \Delta x^k_i)(z_i^k + \alpha \Delta z^k_i) = (1- \alpha) x_i^k z_i^k + \alpha \beta_1 (x^k)^T z^k /n + \alpha^2 \Delta x_i^k \Delta z_i^k. 
\end{equation} By taking the sum for $i=1,\ldots,n$ of both sides of equality \cref{eq:third_component} we get \begin{equation*} (z^k)^T \Delta x^k + (x^k)^T \Delta z^k = (\beta_1 - 1) (x^k)^T z^k \end{equation*} from which we get \begin{equation}\label{eq:xtz} (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k) = (1 +\alpha(\beta_1 - 1)) (x^k)^T z^k + \alpha^2 (\Delta x^k)^T \Delta z^k. \end{equation} Substituting \cref{eq:xz_i,eq:xtz} in the definition of $f_i^k(\alpha)$ in \cref{eq:def_f} we get: \begin{align*} f_i^k(\alpha) &= (x_i^k + \alpha \Delta x^k_i)(z_i^k + \alpha \Delta z^k_i) - \gamma (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k)/n, \\ &= (1- \alpha) x_i^k z_i^k + \alpha \beta_1 (x^k)^T z^k /n +\alpha^2 \Delta x_i^k \Delta z_i^k \\ &\hspace{4cm} -\gamma(1 +\alpha(\beta_1 - 1)) (x^k)^T z^k/n - \gamma \alpha^2 (\Delta x^k)^T \Delta z^k / n \\ &= \underbrace{(1 -\alpha)\left(x_i^k z_i^k - \gamma (x^k)^T z^k/n \right)}_{\geq0} + \alpha^2 \underbrace{\left(\Delta x_i^k \Delta z_i^k - \gamma(\Delta x^k)^T\Delta z ^k/n\right)}_{\geq - c_2} \\ &\hspace{6.5cm}+ \alpha\beta_1(1-\gamma)\underbrace{(x^k)^T z^k}_{\geq \epsilon^*}/n \\ &\geq -c_2 \alpha^2 + \alpha\beta_1(1-\gamma)\epsilon^*/n = \alpha( \beta_1(1-\gamma)\epsilon^*/n - c_2 \alpha). \end{align*} Hence it follows that for all $\alpha\leq\frac{\beta_1(1-\gamma)\epsilon^*}{c_2 n}$ we have $f_{i}(\alpha)\geq0$. Let us now consider the function $g_p^k(\alpha)$ defined in \cref{eq:def_g_primal}. From the first component in \cref{eq:update_eq} we have \begin{align*} A(x^k + \alpha \Delta x^k) - b &= Ax^k - b + \alpha A\Delta x^k \\ &= Ax^k - b + \alpha(b - Ax^k + r^k - \delta \Delta y^k) \\ &= (1-\alpha)(Ax^k - b) + \alpha r^k - \alpha\delta\Delta y^k. \end{align*} By taking norms and using \cref{eq:acc_r} we get \begin{equation} \label{eq:bound_primal} \|A(x^k + \alpha \Delta x^k) - b\| \leq ( 1 -\alpha \tau_1) \|Ax^k - b\| + \alpha \delta \| \Delta y^k\|. 
\end{equation} Suppose first that $g_p^k(0) <0$, then we have that $\| Ax^k - b\| \leq \epsilon_p$ since $(x^k,y^k,z^k)\in\mathcal{N}$. Now because of the boundedness of $\Delta y^k$ there exists a constant $c_3>0$ such that $\|\Delta y^k\| \leq c_3$ for all $k$. Now if we choose $\delta>0$ small enough such that $\delta \leq \tau_1 \epsilon_p/c_3$ then we have \begin{align*} \|A(x^k + \alpha \Delta x^k) - b\| & \leq (1-\alpha\tau_1)\epsilon_p + \alpha\tau_1\epsilon_p\frac{\|\Delta y^k\|}{c_3} \leq(1-\alpha\tau_1)\epsilon_p + \alpha\tau_1\epsilon_p = \epsilon_p. \end{align*} Now let us consider the case $g_p^k(0)\geq0$. Using \cref{eq:bounded,eq:xtz,eq:bound_primal} and the definition \cref{eq:def_g_primal} we obtain \begin{align*} g_p^k(\alpha) &= (x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k) - \gamma_p \|A(x^k + \alpha\Delta x^k) - b \| \\ &= (1 +\alpha(\beta_1 - 1)) (x^k)^T z^k + \alpha^2 (\Delta x^k)^T \Delta z^k - \gamma_p \|A(x^k + \alpha\Delta x^k) - b\| \\ &= \alpha(\beta_1 + \tau_1 - 1) \underbrace{(x^k)^T z^k}_{\geq \epsilon^*} + (1 - \alpha \tau_1) (x^k)^T z^k + \alpha^2 \underbrace{(\Delta x^k)^T \Delta z^k}_{\geq -c_2} \\ &\hspace{8cm} - \gamma_p \|A(x^k + \alpha\Delta x^k) - b\| \\ &\geq \alpha(\beta_1 + \tau_1 - 1)\epsilon^* + (1 - \alpha \tau_1) (x^k)^T z^k - c_2\alpha^2 \\ &\hspace{5cm} - \gamma_p( 1 -\alpha \tau_1) \|Ax^k - b\| - \gamma_p \alpha \delta \| \Delta y^k\| \\ & = \alpha(\beta_1 + \tau_1 - 1)\epsilon^* + (1 - \alpha \tau_1) \underbrace{\left( (x^k)^T z^k - \gamma_p\|Ax^k - b\| \right)}_{\geq0} - c_2\alpha^2 -\gamma_p \alpha \delta \| \Delta y^k\| \\ &\geq \alpha\left( (\beta_1 + \tau_1 - 1)\epsilon^* - \gamma_p \delta \| \Delta y^k\| - c_2 \alpha \right). 
\end{align*} Now choose $\delta$ small enough such that $\delta \leq \frac{(\beta_1 + \tau_1 - 1)\epsilon^*}{2\gamma_p c_3}$, then we have \begin{align*} (\beta_1 + \tau_1 - 1)\epsilon^* - \gamma_p \delta \| \Delta y^k\| &\geq (\beta_1 + \tau_1 - 1)\epsilon^* - \frac{(\beta_1 + \tau_1 - 1)\epsilon^*\|\Delta y^k \|}{2c_3} \\ &\geq (\beta_1 + \tau_1 - 1)\epsilon^*/2. \end{align*} Now it easily follows that $g_p^k(\alpha)\geq 0$ for all $\alpha \leq \frac{(\beta_1 + \tau_1 - 1)\epsilon^*}{2c_2}$. We can treat $g_d^k(\alpha)$ in a similar fashion. We leave out the details, but for completeness we summarize the most important results. First of all, by the second component of \cref{eq:update_eq} we have \begin{equation*} A^T(y^k + \alpha\Delta y^k) + z^k + \alpha \Delta z^k - c = (1-\alpha)(A^Ty^k + z^k -c) + \alpha s^k + \alpha\rho \Delta x^k \end{equation*} from which we get by using \cref{eq:acc_s} \begin{equation}\label{eq:bound_dual} \|A^T(y^k + \alpha\Delta y^k) + z^k + \alpha \Delta z^k - c \| \leq (1-\alpha\tau_2) \| A^Ty^k + z^k -c\| + \alpha\rho \|\Delta x^k\|. \end{equation} Since $\Delta x^k$ is bounded we have some constant $c_4>0$ such that $\|\Delta x^k\| \leq c_4$ for all $k$. Suppose first that $g_d^k(0)<0$, which implies $\| A^Ty^k + z^k -c\|\leq\epsilon_d$, and take $\rho \leq \tau_2 \epsilon_d/c_4$, then we have \begin{equation*} \|A^T(y^k + \alpha\Delta y^k) + z^k + \alpha \Delta z^k - c \| \leq (1-\alpha\tau_2)\epsilon_d + \alpha\rho \|\Delta x^k\| \leq \epsilon_d. \end{equation*} For the case $g_d^k(0)\geq0$ we have, following the same steps as for $g_p^k(\alpha)$, using \cref{eq:bounded,eq:xtz,eq:bound_dual} and the definition \cref{eq:def_g_dual}: \begin{equation*} g_d^k(\alpha)\geq \alpha\left( (\beta_1 + \tau_2 - 1)\epsilon^* - \gamma_d \rho \| \Delta x^k\| - c_2 \alpha \right). 
\end{equation*} If we now have chosen $\rho$ small enough such that $\rho \leq \frac{(\beta_1 + \tau_2 - 1)\epsilon^*}{2\gamma_d c_4}$ then $g_d^k(\alpha)\geq0$ holds for all $\alpha \leq \frac{(\beta_1 + \tau_2 - 1)\epsilon^*}{2c_2}$. Now the only thing we still need to consider is the inequality $h^k(\alpha)\geq0$ defined by \cref{eq:def_h}. Again using \cref{eq:xtz} we have \begin{align*} h^k(\alpha) &= (1-\alpha(1 - \beta_2)) \left(x^{k}\right)^T z^k -(x^k + \alpha\Delta x^k)^T(z^k + \alpha\Delta z^k) \\ &= (1-\alpha(1 - \beta_2)) \left(x^{k}\right)^T z^k - (1 +\alpha(\beta_1 - 1)) (x^k)^T z^k - \alpha^2 (\Delta x^k)^T \Delta z^k \\ &= \alpha(\beta_2 - \beta_1) (x^k)^T z^k - \alpha^2 (\Delta x^k)^T \Delta z^k \\ &\geq \alpha((\beta_2 - \beta_1)\epsilon^* - \alpha c_2). \end{align*} Hence, it follows that $h^k(\alpha)\geq 0$ for all $\alpha\leq(\beta_2 - \beta_1)\epsilon^*/c_2.$ Now using the results above and \cref{thm:upper_bound_alpha}, we get \begin{equation*} \bar{\alpha}^k \geq \alpha^* := \min\left\{1, \frac{\beta_1(1-\gamma)\epsilon^*}{c_2 n}, \frac{(\beta_1 + \tau_1 - 1)\epsilon^*}{2c_2},\frac{(\beta_1 + \tau_2 - 1)\epsilon^*}{2c_2}, \frac{(\beta_2 - \beta_1)\epsilon^*}{c_2}\right\} \end{equation*} if $\rho$ and $\delta$ are chosen sufficiently small, more specifically if they satisfy \begin{equation*} \delta \leq \min \left\{ \frac{\tau_1\epsilon_p}{c_3}, \frac{(\beta_1 + \tau_1 - 1)\epsilon^*}{2\gamma_p c_3}\right\} \text{ and } \rho \leq \min \left\{ \frac{\tau_2\epsilon_d}{c_4}, \frac{(\beta_1 + \tau_2 - 1)\epsilon^*}{2\gamma_d c_4}\right\}. \end{equation*} This then leads to $(x^k)^T z^k \leq (1-\alpha^*(1-\beta_3))^k (x^0)^T z^0$, which implies that we have $\lim_{k\rightarrow \infty} (x^k)^T z^k = 0$. This contradicts \cref{eq:lower_bound_mu}, which concludes the proof. 
\end{proof} \begin{remark}\label{thm:omega_stop} Let us conclude this section by briefly explaining the case when \cref{alg:KMM} terminates with some iterate satisfying $\|(x^k,z^k)\|_1>\omega$. The authors in \cite{kojima1993primal} show that in the exact and unregularized case (i.e. $\tau_1 = \tau_2 = 1$ and $\rho = \delta = 0$) this implies there exists no feasible point of the primal-dual pair of linear programming problems \cref{eq:primal,eq:dual} in a large subspace of $\mathbb{R}^{n+m+n}$. The larger we take $\omega$ the larger this subspace of infeasible points becomes. This means that the primal-dual pair \cref{eq:primal,eq:dual} is likely to be infeasible, although no guarantee is given. As was pointed out by the authors in \cite{korzak2000convergence}, it does not seem possible to prove this result in the inexact case. Hence, do not attempt to prove this result for the inexact and regularized case. However, as they point out, this might be of little practical importance since the stopping criterion $\|(x^k,z^k)\|>\omega$ will in most cases not be satisfied in a reasonable amount of time. This downside of not being able to definitely determine infeasibility is thus inherited by \cref{alg:KMM}. \end{remark} \section{Implementation details} \label{sec:implementation} In this section we explain some details of our implementation used in the numerical experiments of \cref{sec:num_ex}. We start with a general discussion on the reformulation of the linear system of equations, which is an important aspect for all primal-dual interior-point methods. Next, we explain how we can leverage single precision floating point arithmetic when solving linear programming problems with a dense constraint matrix. 
\subsection{Reformulation of the linear system of equations} \label{sec:reformulations} Matrices of the form shown in \cref{eq:newton_eq} or \cref{eq:newton_eq_reg} are always transformed in computational practice to either a symmetric indefinite matrix or a positive definite matrix. Let us briefly explain how this can be achieved by considering the linear system \cref{eq:newton_eq_reg} for a general right-hand side $(\xi,\zeta,\eta) \in\mathbb{R}^{m + n + n}$. Writing out the equations we get: \begin{align} &A\Delta x + \delta \Delta y = \xi, \label{eq:eq1} \\ &A^T \Delta y + \Delta z - \rho\Delta x = \zeta, \label{eq:eq2}\\ &Z\Delta x + X \Delta z = \eta. \label{eq:eq3} \end{align} Isolating $\Delta z$ from \cref{eq:eq3} and combining this with \cref{eq:eq2} we get \begin{align} &A^T \Delta y + X^{-1}(\eta - Z\Delta x) - \rho \Delta x = \zeta \nonumber \\ \Leftrightarrow &A^T \Delta y - \left(X^{-1}Z + \rho I\right)\Delta x = \zeta - X^{-1} \eta. \label{eq:dx} \end{align} This leads to the so-called augmented system approach with a symmetric indefinite matrix \begin{equation}\label{eq:augmented} \begin{pmatrix} - D & A^T \\ A & \delta I \end{pmatrix}\begin{pmatrix}\Delta x\\ \Delta y\end{pmatrix} = \begin{pmatrix}\zeta - X^{-1} \eta \\ \xi \end{pmatrix}, \end{equation} where we introduce the notation $D = (X^{-1}Z + \rho I)$. After (approximately) solving for $\Delta x$ and $\Delta y$ we can recover $\Delta z$ from \cref{eq:eq3}. Even if \cref{eq:eq1,eq:eq2} are not satisfied exactly, for instance if we use a Krylov subspace method to obtain an approximate solution to \cref{eq:augmented}, we can always make sure that \cref{eq:eq3} does hold exactly (assuming exact arithmetic). Hence, we can assume a zero block in the final component of the residual in \cref{eq:update_eq}. \begin{remark} The matrix in \cref{eq:augmented} is symmetric quasidefinite for $\rho$ and $\delta$ strictly positive \cite{vanderbei1995symmetric}. 
This implies that it allows a Cholesky-type factorization of the form $L\Delta L^T$ where $L$ is a lower triangular matrix and $\Delta$ is a diagonal matrix containing both positive and negative entries, i.e. there is no need for $2\times 2$ pivoting.
Indeed, from \cref{eq:dx_anders} we get \begin{equation*} (X^{-1}Z+ \rho I) \Delta x = A^T \Delta y - \zeta + X^{-1} \eta \Leftrightarrow A^T \Delta y + \underbrace{X^{-1}(\eta - Z \Delta x)}_{= \Delta z \text{ using } \cref{eq:eq3}} - \rho \Delta x = \zeta. \end{equation*} Alternatively we can also compute $\Delta z$ directly from \cref{eq:eq2} and show that \cref{eq:eq3} also holds exactly in this case, even if \cref{eq:normal_eq} is not solved very accurately. Indeed, again starting from \cref{eq:dx_anders} we have \begin{equation} X^{-1}Z \Delta x + \underbrace{\rho \Delta x - A^T \Delta y + \zeta}_{= \Delta z \text{ using } \cref{eq:eq2}} = X^{-1}\eta. \end{equation} Now by multiplying both sides of this equation with $X$ we see that \cref{eq:eq3} holds exactly. This means that we can assume that in both these cases we have $\tau_2 = 1$ in \cref{eq:update_eq} if we use the normal equations approach. From computational experience we believe that this second option, i.e. computing $\Delta z$ using $\cref{eq:eq2}$, is slightly more numerically stable. Lastly we mention that we can monitor the norm of the residual term $r^k$ in \cref{eq:update_eq}, i.e. the inexactness in \cref{eq:eq1}, by computing the residual of the linear system \cref{eq:normal_eq}. This is because -- again using \cref{eq:dx_anders} -- we have \begin{align} \| A\Delta x + \delta \Delta y - \xi \| &= \| A D^{-1} \left(A^T \Delta y - \zeta + X^{-1} \eta \right) + \delta \Delta y - \xi \| \nonumber \\ &= \| \left(A D^{-1} A^T + \delta I\right) \Delta y - \xi - AD^{-1}(\zeta - X^{-1}\eta) \|. \label{eq:stopping} \end{align} This is for instance useful when we use a Krylov subspace method, such as the Conjugate Gradient method, to solve the linear system \cref{eq:normal_eq} up to a certain accuracy. We can simply state the stopping criterion of this iterative method to make sure that \cref{eq:acc_r} is satisfied. This is in fact the approach that we consider in our experiments. 
\subsection{Mixed-precision linear equation solver} As previously mentioned we can simply use any specialized linear algebra routine developed in other work on inexact interior-point methods and incorporate these techniques in \cref{alg:KMM}. The focus in literature has mostly been on linear programming problems with a sparse constraint matrix, where the linear systems are solved using some Krylov subspace method with a specialized preconditioner \cite{gondzio2013convergence,cui2019implementation,monteiro2003convergence,al2009convergence}. However, for the remainder of this work we consider linear programming problems with a dense constraint matrix. These types of problems arise for instance in Basis Persuit denoising \cite{chen2001atomic}. We aim to exploit the increased efficiency of IEEE single precision compared to IEEE double precision \cite{4610935,30711}. The double precision floating-point format is comprised of 64 bits, while single precision only uses 32 bits. In general we can execute single precision floating point operations (\textsc{flops}) approximately twice as fast as double precision \textsc{flops}. However, operations using single precision are also less accurate since the unit roundoff of single precision is $u_s = 2^{-24} \approx 6.0 \times 10^{-8}$, while double precision has a unit roundoff $u_d = 2^{-53} \approx 1.1 \times 10^{-16}$. IEEE half precision, which is a floating-point format using only 16 bits with unit roundoff $u_h = 2^{-11} \approx 4.9 \times 10^{-4}$, has also recently gained a lot of popularity due to a growing availability in hardware support (for instance on the NVIDIA P100 and V100 GPUs and the AMD Radeon Instinct MI25 GPU \cite{higham2019squeezing}). The unit roundoff of the precision is determined by the number of bits in the `significand', while the number of `exponent' bits determines the range of numeric values, see \cref{fig:bits} for an illustration. 
Switching to a lower precision thus not only decreases the accuracy, but also the range of values. This becomes especially important when considering half precision which has a limited range of $\pm 65,500$. \begin{figure} \caption{Illustration of the IEEE half, single and double precision floating point formats. \label{fig:bits} \label{fig:bits} \end{figure} Operations in half precision can generally be executed twice as fast as single precision operations, and even up to 8 times faster on specialized tensor cores (for instance on the NVIDIA V100). However, in this paper we focus our attention on exploiting single precision arithmetic, since for the moment we have no access to hardware that supports half-precision arithmetic. In contrast, single precision arithmetic is available on almost all off-the-shelf hardware. Half precision floating point operations can of course easily be simulated in software, but this will evidently not lead to any performance gains compared to single precision. Hence, we leave a detailed discussion of half-precision arithmetic as possible future work. Let us now turn our attention to how we can exploit single precision when solving a linear system of equations $M v = w$ for some general positive definite matrix $M\in\mathbb{R}^{m\times m}$. After performing a Cholesky factorization of $M$ in exact arithmetic we obtain an exact equality $M =L L^T$ with $L$ a lower triangular matrix. Solving a linear system $M v = w$ can then be done very efficiently by first solving the lower-triangular system $Ly = w$ for $y$ and subsequently solving the upper-triangular system $L^T v = y$ for $v$. Computing the Cholesky factorization requires approximately $m^3/3$ floating point operations, while solving a triangular linear system using forward or backward substitution requires $m^2$ \textsc{flops}. All these operations are of course subject to rounding errors in finite precision arithmetic. 
An important consequence is that the Cholesky factorization might break down when the matrix $M$ is not sufficiently positive definite with respect to the floating point precision used for the factorization.
The result is given by \cref{fig:cond_est} (left). The same procedure is repeated for half precision. In the latter case we should also be careful with the possibility of overflow and underflow due to the limited range of half-precision floating point numbers. Hence, we first perform a diagonal scaling $H = D_M^{-1} M D_M^{-1}$ with $D_M = \sqrt{\diag(M)}$ as suggested in \cite{higham2021exploiting,higham2019squeezing}, which causes all diagonal entries to be one and all non-diagonal entries to be smaller than one (in absolute value). A Cholesky factorization is then computed in half precision of the perturbed matrix $H +10 u_h I$. The condition numbers $\kappa(M)$, $\kappa(L^{-1}HL^{-T})$ and approximation $\kappa(L^{-1}HL^{-T}) \approx 1 + \kappa(H) u_s$ are shown in \cref{fig:cond_est} (right). \begin{figure} \caption{Condition numbers $\kappa(M)$ and $\kappa(L^{-1} \label{fig:cond_est} \end{figure} We can observe that if $\kappa(A)\leq 1/u$ then we obtain a good Cholesky factor, in the sence that $\kappa(L^{-1}A L^{-T})$ is close to one. However, even when the matrix is numerically singular (with respect to the precision of the factorization), we still have a significant reduction, proportional to the unit roundoff $u$, in the condition number. This implies that, even when the Cholesky factor can not be used to compute a very accurate solution of the linear system of equations, we can still use it as a preconditioner in the Conjugate Gradient method. Since convergence of the Conjugate Gradient method is determined by the condition number of the preconditioned matrix $L^{-1}ML^{-T}$, we can expect very rapid convergence as long as $M$ is not too ill-conditioned. Hence, to summarize we can solve a linear system of equations $M v = w$ using a mixed-precision approach, where we first compute a Cholesky factor $L$ of the shifted matrix $M + c_m u_s \diag(M)$ in single precision and apply the Conjugate Gradient method implemented in double precision. 
The preconditioner $L^{-T}L^{-1}$ is applied in single precision using backward and forward substitution, with the final result converted back to double precision. Note that in this case the iterations of the Conjugate Gradient method can be interpreted as some kind of iterative refinement. A closely related but slightly different approach was explored in \cite{higham2021exploiting}. \begin{algorithm} \caption{Mixed-precision solver for \cref{eq:normal_eq}. } \label{alg:solver} \begin{algorithmic}[1] \STATE{\textbf{Input:}$A,x,z,\xi,\zeta,\eta,c_m,\rho,\delta,\text{tol}$} \STATE{Compute diagonal matrix $D = X^{-1}Z + \rho I$ in \texttt{float64}} \STATE{Compute right-hand side $w = \xi + AD^{-1}\left(\zeta - X^{-1}\eta\right)$ in \texttt{float64}} \STATE{Compute diagonal matrix $\tilde{D} = \sqrt{D^{-1}}$ in \texttt{float64}} \STATE{Compute $\tilde{M}= A\tilde{D}$ in \texttt{float32}. \label{root}} \STATE{Compute $M = \tilde{M}\tilde{M}^T + \delta I$ in \texttt{float32}.} \STATE{Apply Cholesky to $M + c_m\diag(M)$ in \texttt{float32} and store factor $L$ in \texttt{float32}.} \STATE{Apply the (preconditioned) Conjugate Gradient method in \texttt{float64} to \begin{equation*} L^{-T}L^{-1} (AD^{-1}A + \delta I)\Delta y = L^{-T}L^{-1} w. \end{equation*} Matrix vector products with $(AD^{-1}A + \delta I)$ are computed in \texttt{float64} without explicitly forming the matrix and the preconditioner $L^{-T}L^{-1}$ is applied using backward and forward substitution in \texttt{float32} with the result converted to \texttt{float64}. Conjugate Gradients is terminated when $\|(AD^{-1}A + \delta I)\Delta y - w \| \leq \text{tol}$.} \STATE{\textbf{Output:} Approximate solution $\Delta y$ to the linear system \cref{eq:normal_eq}.} \end{algorithmic} \end{algorithm} This approach can obviously be applied to the linear system of equations \cref{eq:normal_eq}, since the matrix is positive definite for $\delta>0$, even when $A$ is rank deficient. 
However, an important point that should not be overlooked is the fact that a dense matrix-matrix multiplication is also quite expensive. Explicitly forming the normal equations $AD^{-1}A^T$ is in fact more expensive than the Cholesky factorization in terms of number of operations, since the former operation requires approximately $m^2n$ \textsc{flops} (assuming we exploit the symmetry), while the latter requires about $m^3/3$ \textsc{flops}. Hence, forming the normal equations should also be done in single precision. We summarize our approach to solve \cref{eq:normal_eq} in \cref{alg:solver}, where we denote double precision and single precision with \texttt{float64} and \texttt{float32} respectively. We can apply the mixed-precision solver \cref{alg:solver} in the context of the regularized inexact interior-point method \cref{alg:KMM}. To obtain an \textit{inexact} search direction $(\Delta x_k,\Delta y_k,\Delta z_k)$ satisfying \cref{eq:update_eq} together with \cref{eq:acc_r,eq:acc_s} we simply apply \cref{alg:solver} with input parameters $x = x_k,\, z = z_k,\, \xi = b - Ax^k,\, \zeta = c - A^T y^k - z^k,\, \eta = \beta_1 \mu_k e - X^k z^k$ and $\text{tol}=(1-\tau_1) \|Ax^k - b\|$. We then compute $\Delta x^k$ using \cref{eq:dx_anders} and $\Delta z^k$ using \cref{eq:eq2}. Assuming that the only source of inexactness is in solving the linear system \cref{eq:normal_eq} we have that \cref{eq:acc_r} is satisfied (since we have \cref{eq:stopping}) and \cref{eq:acc_s} is satisfied with $\tau_2 = 1$. \section{Numerical experiments}\label{sec:num_ex} All experiments are performed on a laptop with Intel(R) Core(TM) i7-7700HQ CPU @ 2.80GHz. We have implemented \cref{alg:KMM} with the details as explained in \cref{sec:implementation} in Python 3.8.3 using the Spyder IDE version 4.2.0. 
The implementation is based on NumPy\footnote{\url{https://numpy.org/}} which allows us to easily convert variables from double precision to single precision using $\texttt{numpy.single}(\cdot)$ and conversely from single to double using $\texttt{numpy.double}(\cdot)$. The Cholesky factorization is implemented using SciPy\footnote{\url{https://www.scipy.org/}} as $\texttt{L}=\texttt{scipy.linalg.cholesky(M,lower = True)}$. The preconditioner application $L^{-T}L^{-1} w$ can then be efficiently implemented as \begin{equation*} \texttt{numpy.double(scipy.linalg.cho}\_\texttt{solve((L,True),numpy.single(w)))}. \end{equation*} Unfortunately there do not seem to be many suitable benchmark linear programming problems of the form \cref{eq:primal} with a dense constraint matrix $A$. Since we only want to perform experiments that are reproducible we use (sparse) test-problems from the NETLIB LP Test problem set\footnote{\url{https://www.numerical.rl.ac.uk/cute/netlib.html}}, but treat these matrices as if they were dense. We consider a selection of $14$ linear programming problems in standard form, where the constraint matrix $A$ has dimensions $m\times n$ with $m\geq 1000$ and $n \leq 10,000$. This selection is based on the fact that the benefit of single precision is expected to be much less apparent when the computational work is limited. We now list our choices of parameters in \cref{alg:KMM} which apply for all experiments, unless explicitly states otherwise. For the stopping criterion of the interior-point method we use $\omega = 10^{40}$ and tolerances $\epsilon = 10^{-8}n$ for the complementary condition and $\epsilon_p= 10^{-8}\|b\|$ and $\epsilon_d = 10^{-8}\|c\|$ for the primal and dual feasibility respectively. Next, we use centering parameter $\beta_1 = 0.1$ and parameters $\beta_2=0.9$ and $\beta_3=0.95$ for the descent condition. For the conditions in \cref{eq:acc_r,eq:acc_s} we set $\tau_1 = 0.95$ and $\tau_2=1$ respectively. 
Regularization parameters $\rho =\delta=10^{-10}$ were sufficiently small for all test-problems considered. The constant $c_m = 30$ is chosen for the mixed-precision solver \cref{alg:solver}. Lastly we use neighborhood parameters $\gamma = \gamma_p = \gamma_d = 10^{-8}$ in \cref{eq:N}. The initial point $(x_0,y_0,z_0)$ is computed as explained in section 14.2 of \cite{nocedal2006numerical} with a small modification to handle rank deficient matrices $A$. Before calculating $\bar{\alpha}^k$ on line \ref{alpha_bar} in \cref{alg:KMM} we first compute trial step-lengths $\alpha^k_p = 0.99995\alpha_p^{*,k}$ and $\alpha^k_d = 0.99995\alpha_d^{*,k}$ and check whether the new iterates $x^{k+1} = x^{k} + \alpha_p^k \Delta x^k$ and $(y^{k+1},z^{k+1}) = (y^{k},z^{k}) + \alpha_d^k (\Delta y^k,\Delta z^k)$ remain in the neighborhood $\mathcal{N}$ and satisfy the descent property \begin{equation*} \left(x^{k+1}\right)^T z^{k+1} \leq (1 - \alpha^{*,k}(1-\beta_3))\left(x^k\right)^Tz^k \end{equation*} since by \cref{thm:compute_bar} this implies that the conditions on line \ref{enough} hold. If not, we compute $\bar{\alpha}^k$ and choose $\alpha_p^k = \alpha_d^k = \bar{\alpha}^k$. \begin{figure} \caption{Comparison of convergence of the interior-point method for two different choices of regularization parameters applied to NETLIB test-problem \texttt{ship12s} \label{fig:compare_conv} \end{figure} \begin{figure} \caption{Left: number of Conjugate Gradients iterations needed in each interior-point iteration for two different choices of regularization parameters: $\rho = \delta = 10^{-6} \label{fig:cg_its} \end{figure} In our first experiment we illustrate the benefit of the regularization parameters $\rho$ and $\delta$. We apply \cref{alg:KMM} to NETLIB test-problem \texttt{ship12s}. The constraint matrix $A$ has dimensions $1151 \times 2869$ and contains $109$ rows with all zeroes. Hence, the matrix is significantly rank-deficient and we can not apply the algorithm with $\delta = 0$. 
Normally, we would simply remove these rows from the matrix, but for the purpose of this experiment we leave them in. We apply the algorithm once with $\rho =\delta = 10^{-6}$ and once with $\rho = 0$ and $\delta = 10^{-6}$ and let the algorithm run for a fixed number of iterations. The result is given by \cref{fig:compare_conv} where we show the convergence metrics $\|Ax^k - b\|/\|b\|$ for primal feasibility, $\|A^Ty + z - c\|/\|c\|$ for dual feasibility and $(x^k)^Tz^k /n$ for the complementarity condition. First of all we can clearly see that the algorithm converges nicely towards a primal-dual solution of the linear programming problems \cref{eq:primal,eq:dual} even though the matrix $A$ is rank deficient. Next, we can also clearly observe the benefit of the regularization term $\rho$. In the left figure, when using $\rho = 10^{-6}$ we have a very accurate solution in terms of all the convergence metrics shown, while the primal residual shows some very unstable behavior when $\rho = 0$. This is due to the fact that the matrix $X^{-1}Z$ has components converging either to $0$ or $\infty$ due to the complementarity condition, which causes numerical difficulties. In \cref{fig:cg_its} we report the condition number of the matrix and the number of Conjugate Gradient iterations needed to converge in each iteration of the interior-point method when using $\rho = \delta = 10^{-6}$ and $\rho = \delta = 10^{-8}$. In the first number of iterations of the interior-point method we only need a very few CG iterations to satisfy the stopping criterion in \cref{alg:solver}. This is because of two reasons: first, the matrix $AD^{-1} A^T + \delta I$ is still relatively well-conditioned in the early iterations, and secondly, because the primal residual is still relatively large we have that the stopping criterion for the Conjugate Gradient method $\|(AD^{-1}A + \delta I)\Delta y - w \| \leq (1-\tau_1) \|Ax^k - b\|$ is quite loose. 
After a while, when the condition number starts to grow and the tolerance becomes more strict, we need to perform a lot more CG iterations. The effect is a bit more pronounced for the case $\rho=\delta=10^{-8}$ since the matrix is a little bit more ill-conditioned. It is also interesting to see that when the interior-point method has converged to double precision accuracy (around iteration 35 according to \cref{fig:compare_conv}) that we need to perform only a very few CG iterations, because in that case the solution $\Delta y$ is very close to zero, which is the initial iterate that we choose in CG. When the number of CG iterations becomes relatively large, the benefit of using a single precision factorization gets nullified by the additional computational cost of the CG iterations. In this case it would be more beneficial to switch from a single precision Cholesky factorization to simply using double precision for all computations. We can choose this `switch-point' based on a simple heuristic. In the ideal case we can expect that a Cholesky factorization and matrix-matrix multiplication as shown in \cref{alg:solver} is twice as fast in single precision than the same operations in double precision. Hence, we can expect a benefit of using the mixed-precision solver as long as the time it takes to perform the CG iterations does not exceed the time for constructing and factorizing the matrix in single precision. With this in mind modify our algorithm as follows. We monitor the CPU time of the construction of the matrix and the Cholesky factorization, as well as the run-time of the Conjugate Gradient method. As soon as the run-time of the Conjugate Gradient method exceeds $0.75\times$ the average time for the construction and factorization of the matrix, we switch to a full double-precision implementation. The factor $0.75$ is chosen here, because there is some additional overhead in practice when using the mixed-precision solver \cref{alg:solver}. 
We test this simple heuristic with the next experiment. \begin{table} \begin{center} \begin{tabular}{l||rr|rr|rrrr} &&& \multicolumn{2}{c|}{Double precision} & \multicolumn{4}{c}{Mixed-precision} \\ \textbf{Problem} & $m$ & $n$ & $\#$iter & \text{time} & $\#$iter & \text{time} & $\overline{\text{CG}}_\text{its}$ & $k_\text{switch}$ \\ \hline \hline \texttt{bnl2} & 2,324 & 4,486 & 58 & 22.93s & 58 & 17.28s & 3.18 & 48 \\ \texttt{d2q06c} & 2,171 & 5,831 & 52 & 19.87s & 52 & 16.34s & 3.69 & 31 \\ \texttt{degen3} & 1,503 & 2,604 & 30 & 3.79s & 30 & 3.15s & 4.77 & 10 \\ \texttt{maros$\_$r7} & 3,136 & 9,408 & 25 & 29.21s & 25 & 19.08s & 2.12 & $\infty$ \\ \texttt{qap12} & 3,192 & 8,856 & 30 & 33.61s & 31 & 30.68s & 3.69 & 15 \\ \texttt{sctap2} & 1,090 & 2,500 & 23 & 1.47s & 23 & 0.97s & 1.28 & 17 \\ \texttt{sctap3} & 1,480 & 3,340 & 23 & 2.91s & 23 & 2.03s & 1.75 & 19 \\ \texttt{ship12l} & 1,151 & 5,533 & 30 & 3.43s & 30 & 2.77s & 1.50 & 17 \\ \texttt{ship12s} & 1,151 & 2,869 & 30 & 2.54s & 30 & 2.06s & 1.88 & 17 \\ \texttt{stocfor2} & 2,157 & 3,045 & 47 & 12.83s & 51 & 12.28s & 0.95 & 18 \\ \texttt{truss} & 1,000 & 8,806 & 31 & 4.47s & 31 & 3.61s & 3.47 & 16 \\ \texttt{woodw} & 1,098 & 8,418 & 133 & 19.71s & 142 & 20.20s & 1.45 & 20 \\ \texttt{cre$\_$a} & 3,516 & 7,248 & 52 & 74.86s & 55 & 58.78s & 3.77 & 30 \\ \texttt{cre$\_$c} & 3,068 & 6,411 & 54 & 54.95s & 61 & 42.28s & 1.74 & 30 \\ \end{tabular} \end{center} \caption{Comparison of the mixed-precision interior-point method and a double precision implementation applied to a selection of NETLIB test-problems. We report the dimensions of the constraint matrix ($m,n$), the total number of interior-point iterations needed to converge ($\#$iter) and the total run-time (time) for both implementations. 
For the mixed-precision interior-point method we also report the average number of Conjugate Gradients iterations ($\overline{\text{CG}}_\text{its}$) and the switch-point ($k_\text{switch}$).} \label{table:table} \end{table} \begin{figure} \caption{The time per iteration for the mixed-precision interior-point method compared to a double precision implementation applied to a selection of NETLIB test-problems. The star indicates the switch-point $k_\text{switch} \label{fig:timings} \end{figure} For our final experiment we apply the regularized inexact interior-point method to the $14$ selected NETLIB test-problems, with the parameters as specified in the beginning of the section. We compare our implementation based on the mixed-precision solver \cref{alg:solver}, together with the switch-point explained in the previous paragraph and compare it with the `exact' counterpart where we use a double precision Choleksky factorization to solve the linear system of equations. We compare the number of (interior-point) iterations needed to converge, as well as the total run-time. For the mixed-precision solver we also report the average number of Conjugate Gradient iterations $\overline{\text{CG}}_\text{its}$ needed in each interior-point iteration and the switch-point $k_\text{switch}$ as determined by the heuristic. Note that $k_\text{switch}$ can vary slightly for different runs, since it is determined by actual timings during the execution. However, the overall performance and behavior does not vary much. We report the results in \cref{table:table}. The mixed-precision algorithm outperforms the double precision implementation in all test-problems except \texttt{woodw}. This is due to the fact that the switch-point happens very early in this example and a lot of interior-point iterations are needed for convergence. Interestingly, this is also the only test-problem where we need to compute $\bar{\alpha}^k$ in some of the iterations, see \cref{thm:compute_bar}. 
The benefit of the mixed-precision solver is rather limited for the test-problems where the switch-point occurs relatively early; see for example \texttt{stocfor2}. For test-problems where we can perform a lot of the Cholesky factorizations in single precision, it is clear that there is a significant reduction in the computational time. Note that for most of these problems, there is no difference in the total number of interior-point iterations needed to converge. In \cref{fig:timings} we show some more detailed timing results. In early iterations we can see that the mixed-precision solver requires slightly more than half the average time per iteration of the double precision implementation. Then, when the number of Conjugate Gradients iterations starts to increase, we can observe that the run-time of the interior-point method also increases. For most of these problems this increase is relatively gradual. At a certain point, the overhead of the Conjugate Gradients iterations is so large that the algorithm determines that we should switch to double precision. Note that this switch-point is chosen quite close to the average time per iteration of the double precision implementation, which is of course the ideal scenario. Hence, the heuristic seems to work quite well. \section{Conclusions and outlook}\label{sec:conclusions} This work presents a convergence analysis of a regularized inexact interior-point method for solving linear programming problems. We illustrate the main benefits of regularization, namely that it alleviates the numerical difficulties arising from ill-conditioning of the matrices induced by the complementarity condition and that it allows us to easily solve problems with a rank-deficient matrix. Another benefit of regularization that is not explored in detail in this work is the possibility of using a Cholesky-type factorization of the symmetric quasidefinite matrix arising from the augmented system approach.
This is definitely worth exploring in future research since the augmented form is more suitable for sparse problems, especially when the matrix $A$ has one or more relatively dense columns, since this results in a much denser matrix using the normal equations approach. The sparse Cholesky-factorization is also in general much more efficient than indefinite factorizations, since there is no need for pivoting to ensure numerical stability. Solving the linear system of equations \textit{inexactly} in each iteration of the interior-point method offers a great number of possibilities in trying to improve computational performance of the method. The most notable and well-studied approach is the use of Krylov subspace methods combined with suitable preconditioners. The interior-point method developed in the current paper offers the opportunity to use many of the existing specialized linear algebra routines developed in other work. The main focus of these techniques has mostly been on linear programming problems with a sparse constraint matrix. However, we focus our attention on dense matrices. We develop a mixed-precision solver for the normal equations based on the Conjugate Gradient method preconditioned with a Cholesky factorization computed in IEEE single precision. We show that this leads to a significant reduction of the computational time for the majority of test-problems considered in the numerical experiments. We also touch on the possibility of exploiting IEEE half precision, which is expected to provide an even more substantial improvement. This is left as possible future work. \end{document}
\begin{document} \title{A Return to the Optimal Detection of Quantum Information} \author{Eric Chitambar $^1$} \email{[email protected]} \author{Min-Hsiu Hsieh $^2$} \email{[email protected]} \affiliation{$^1$ Department of Physics and Astronomy, Southern Illinois University, Carbondale, Illinois 62901, USA\\ $^2$ Centre for Quantum Computation \& Intelligent Systems (QCIS), Faculty of Engineering and Information Technology (FEIT), University of Technology Sydney (UTS), NSW 2007, Australia} \date{\today} \begin{abstract} In 1991, Asher Peres and William Wootters wrote a seminal paper on the nonlocal processing of quantum information [\textit{Phys. Rev. Lett.} \textbf{66}, 1119 (1991)]. We return to their classic problem and solve it in various contexts. Specifically, for discriminating the ``double trine'' ensemble with minimum error, we prove that global operations are more powerful than local operations with classical communication (LOCC). Even stronger, there exists a finite gap between the optimal LOCC probability and that obtainable by separable operations (SEP). Additionally, we prove that a two-way, adaptive LOCC strategy can always beat a one-way protocol. Our results provide the first known instance of ``nonlocality without entanglement'' in two qubit pure states. \end{abstract} \maketitle One physical restriction that naturally emerges in quantum communication scenarios is nonlocality. Here, two or more parties share some multi-part quantum system, but their subsystems remain localized with no ``global'' quantum interactions occurring between them. Instead, the system is manipulated through local quantum operations and classical communication (LOCC) performed by the parties. Asher Peres and William Wootters were the first to introduce the LOCC paradigm and study it as a restricted class of operations in their seminal work \cite{Peres-1991a}.
To gain insight into how the LOCC restriction affects information processing, they considered a seemingly simple problem. Suppose that Alice and Bob each possess a qubit, and with equal probability, their joint system is prepared in one of the states belonging to the set $\{\ket{D_i}=\ket{s_i}\otimes\ket{s_i}\}_{i=0}^2$, where $\ket{s_i}=U^i\ket{0}$ and $U=\exp(-\tfrac{i\pi}{3}\sigma_y)$. This highly symmetric ensemble is known as the ``double trine,'' and we note that lying orthogonal to all three states is the singlet $\ket{\Psi^-}=\sqrt{1/2}(\ket{01}-\ket{10})$. Alice and Bob's goal is to identify which double trine element was prepared only by performing LOCC. Like any quantum operation used for state identification, Alice and Bob's collective action can be described by some \textit{positive-operator valued measure} (POVM). While the non-orthogonality of the states prohibits the duo from perfectly identifying their state, there are various ways to measure how well they can do. Peres and Wootters chose the notoriously difficult measure of accessible information \cite{Holevo-1973a, *Nielsen-2000a}, but their paper raises the following two general conjectures concerning the double trine ensemble, which can apply to any measure of distinguishability: \begin{enumerate} \item [C1:] LOCC is strictly sub-optimal compared to global operations, \item [C2:] The optimal LOCC protocol involves two-way communication and adaptive measurements. \end{enumerate} The set of global POVMs will be denoted by GLOBAL, and C1 can be symbolized by GLOBAL $>$ LOCC. A two-way LOCC protocol with adaptive measurement refers to \textit{at least} three rounds of measurement, Alice $\to$ Bob $\to$ Alice, where the choice of measurement in each round depends on the outcome of the other party's measurement in the previous round. We symbolize C2 as LOCC $>$ LOCC$_\rightarrow$. In Ref.
\cite{Peres-1991a} Peres and Wootters obtained numerical data to support both C1 and C2, but these conjectures have never been proven for the double trine. Before we present our contribution to the problem, we would like to briefly highlight the legacy of the Peres-Wootters paper. Perhaps most notable is that it subsequently led to the discovery of quantum teleportation \cite{Bennett-1993a}. Other celebrated phenomena can also directly trace their roots to Ref. \cite{Peres-1991a} such as so-called nonlocality without entanglement \cite{Bennett-1999a} and quantum data hiding \cite{Terhal-2001a, *DiVincenzo-2002a}. More generally, Ref. \cite{Peres-1991a} paved the way for future research into LOCC and its fundamental connection to quantum entanglement \cite{Horodecki-2009a}. We finally note that in a return to Ref. \cite{Peres-1991a} of his own, Wootters constructed a \textit{separable} POVM that obtains the same information as the best known global measurement \cite{Wootters-2005a}. A POVM $\{\Pi_i\}$ belongs to the class of separable operations (SEP) if each POVM element can be decomposed as a tensor product $\Pi_i=A_i\otimes B_i$ over the two systems. SEP is an important class of operations since every LOCC operation belongs to SEP \cite{Bennett-1999a}. In this paper, we prove that conjectures C1 and C2 are indeed true when distinguishability success is measured by the \textit{minimum error probability}, which is defined as follows. For an ensemble $\mathcal{E}=\{\ket{\psi_i},p_i\}_{i=1}^k$, the error probability associated with some identification POVM $\{\Pi_i\}_{i=1}^k$ is given by $1-\sum_{i=1}^kp_i\bra{\psi_i}\Pi_i\ket{\psi_i}$. Then the minimum error probability of distinguishing $\mathcal{E}$ with respect to a class of operations $\mathcal{S}$ (such as LOCC, SEP, GLOBAL, etc.) is given by the \textit{infimum} of error probabilities taken over all POVMs that can be generated by $\mathcal{S}$.
Note that we can replace ``infimum'' by ``minimum'' only if $\mathcal{S}$ is a compact set of operations. While GLOBAL, SEP and LOCC$_\rightarrow$ all have this property, LOCC does not \cite{Chitambar-2012a, Chitambar-2012c}. Hence, to properly discuss the LOCC minimum error, we must consider the class of so-called \textit{asymptotic LOCC}, which is LOCC plus all its limit operations \cite{Chitambar-2012c}. We will prove C1 with respect to this more general class of operations. \textit{A. Global and Separable Operations}: The double trine ensemble has a group-covariant structure which greatly simplifies the analysis. In fact, Ban \textit{et al.} have proven that the so-called ``Pretty Good Measurement'' (PGM) \footnote{Recall that the ``Pretty Good Measurement'' for an ensemble $\{\ket{\phi_i},p_i\}_{i=1}^k$ is the POVM with elements \unexpanded{$p_i\rho^{-1/2}\op{\phi_i}{\phi_i}\rho^{-1/2}$, where $\rho=\sum_{i=1}^kp_i\op{\phi_i}{\phi_i}$} \cite{Hausladen-1994a}.} is indeed an optimal global POVM for discriminating ensembles with such symmetries \cite{Ban-1997a}. For the double trine, the PGM consists of simply projecting onto the orthonormal basis $\{\ket{\Psi^-},U^i\otimes U^i\ket{F_i}\}_{i=0}^2$, where \begin{equation} \label{Eq:GlobalPOVM} \ket{F_i}\propto U^i\otimes U^i[(\sqrt{2}+1)\ket{00}-(\sqrt{2}-1)\ket{11}]. \end{equation} The corresponding error probability is \begin{equation} \label{Eq:GlobalProb} 1/2-\sqrt{2}/3\approx 2.86\times 10^{-2}. \end{equation} To show that SEP can also obtain this probability, we explicitly construct a separable POVM. The idea is to mix a sufficient amount of the singlet state with each of the PGM POVM elements so as to obtain separability (a similar strategy was employed in Ref. \cite{Wootters-2005a}). The resulting POVM is $\{\op{\tilde{F}_i}{\tilde{F}_i}\}_{i=0}^2$ with $\op{\tilde{F}_i}{\tilde{F}_i}=\op{F_i}{F_i}+1/3\op{\Psi^-}{\Psi^-}$.
It is fairly straightforward to compute that $\tilde{F}_0=1/2(\op{\varphi_+}{\varphi_+}+\op{\varphi_-}{\varphi_-})$, where $\ket{\varphi_{\pm}}=\ket{F_0}\pm\sqrt{1/3}\ket{\Psi^-}$ is a product state. This suffices to prove separability of the POVM. \textit{B. LOCC and Asymptotic LOCC}: Let us begin with a clear description of asymptotic LOCC discrimination. In general, a sequence of POVMs $\mathcal{P}^{(n)}:=\{\Pi^{(n)}_i\}_{i=1}^k$ \textit{asymptotically attains} an error probability $P$ on ensemble $\{\ket{\psi_i},p_i\}_{i=1}^k$ if for every $\epsilon>0$ we have $P+\epsilon>1-\sum_{i=1}^kp_i\bra{\psi_i}\Pi_i^{(n)}\ket{\psi_i}$ for sufficiently large $n$. If each POVM in the sequence $\mathcal{P}^{(n)}$ can be generated by LOCC, then $P$ is achievable by asymptotic LOCC. It is known that for an ensemble of linearly independent pure states, the global POVM attaining minimum error consists of orthonormal, rank one projectors \cite{Yuen-1975a} (see also \cite{Mochon-2006a}). We strengthen this result and extend it to the asymptotic setting. \begin{theorem} \label{Thm1} Let $\mathcal{E}=\{\ket{\psi_i},p_i\}_{i=1}^k$ be an ensemble of linearly independent states spanning some space $S$. Suppose that $P_{opt}$ is the global minimum error probability of $\mathcal{E}$. Then there exists a unique orthonormal basis $\{\ket{\phi_i}\}_{i=1}^k$ of $S$ such that: (a) A POVM attains an error probability $P_{opt}$ on $\mathcal{E}$ if and only if it can also distinguish the $\{\ket{\phi_i}\}_{i=1}^k$ with no error, and (b) A sequence of POVMs asymptotically attains an error probability $P_{opt}$ on $\mathcal{E}$ if and only if it contains a subsequence that can asymptotically distinguish the $\{\ket{\phi_i}\}_{i=1}^k$ with no error. \end{theorem} The proof is given in the Appendix. Theorem \ref{Thm1} essentially reduces optimal distinguishability of non-orthogonal linearly independent ensembles to perfect discrimination of orthogonal ensembles.
Applying part (a) to the double trine ensemble, if an LOCC POVM could attain the error probability of Eq. \eqref{Eq:GlobalProb}, then it can also perfectly distinguish the states $\ket{F_i}$ given by \eqref{Eq:GlobalPOVM}. However, these are three entangled states which, by a result of Walgate and Hardy, means they cannot be distinguished perfectly by LOCC \cite{Walgate-2002a}. Therefore, the global minimum error probability is unattainable by LOCC. But is the probability attainable by asymptotic LOCC? If it is, then part (b) of Theorem \ref{Thm1} likewise implies that the $\ket{F_i}$ must be perfectly distinguishable by asymptotic LOCC. While Ref. \cite{Walgate-2002a} provides simple criteria for deciding perfect LOCC distinguishability of two qubit ensembles, no analogous criterion exists for asymptotic LOCC. The only general result for asymptotic discrimination has been recently obtained by Kleinmann \textit{et al.} \cite{Kleinmann-2011a}. Here we cite their result in its strongest form, adapted specifically for the problem at hand. \begin{proposition}[\cite{Kleinmann-2011a}] \label{Prop1} If the states $\{\ket{F_i}\}_{i=0}^2$ can be perfectly distinguished by asymptotic LOCC, then for all $\chi\in[1/3,1]$ there is a product operator $E\geq 0$ such that (i) $\sum_{i=0}^2\bra{F_i}E\ket{F_i}=1$, (ii) $\bra{F_0}E\ket{F_0}=\chi$, and (iii) the normalized states $\ket{F'_i}:=\tfrac{1}{\sqrt{\bra{F_i}E\ket{F_i}}}E^{1/2}\ket{F_i}$ are perfectly distinguishable by separable operations. \end{proposition} \noindent In the appendix we prove that these three conditions cannot be simultaneously satisfied; therefore, GLOBAL $>$ LOCC for minimum error discrimination. Here, we provide a little intuition into why Proposition \ref{Prop1} must be true.
For every LOCC protocol that correctly identifies the given state with probability $1-\epsilon$, we can think of the success probability as smoothly evolving from complete randomness ($\chi=1/3$) to its final average value ($\chi=1-\epsilon$). Then for each $\chi\in(1/3,1-\epsilon)$, the protocol can be halted after some sequence of measurement outcomes (collectively represented by the product operator $E$) such that given these outcomes: (1) there is one state that can be identified with probability $\chi$ (which by symmetry we can assume is $\ket{F_0}$), and (2) the transformed ensemble can be discriminated by a separable POVM with success probability no less than $1-\epsilon$. By compactness of SEP, we let $\epsilon\to 0$ and replace (2) by the condition that a separable POVM perfectly distinguishes the post-halted ensemble. \textit{C. LOCC $>$ LOCC$_\rightarrow$:} We will now compute the minimum one-way error probability for the double trine, and then describe an explicit two-way protocol with a smaller error probability. In the one-way task, Alice makes a measurement and communicates her result to Bob. Without loss of generality, we fine-grain Alice's measurement so that each POVM element is rank one $\op{\eta}{\eta}$, with $\ket{\eta}=r\cos\theta\ket{0}+re^{i\phi}\sin\theta\ket{1}$. Given outcome $\eta$, Bob's task is to optimally discriminate the ensemble $\{\ket{s_i}\}_{i=0}^2$, but now with an updated distribution $\{p_{i}\}_{i=0}^2$ given by \begin{align} \label{Eq:probsBob} p_{k}&=\tfrac{|\ip{\eta}{s_k}|^2}{3P(\eta)}=\tfrac{2}{3}|\cos\tfrac{2\pi k}{3}\cos\theta+e^{i\phi}\sin\tfrac{2\pi k}{3}\sin\theta|^2. \end{align} Here, $P(\eta)=\tfrac{1}{3}\sum_{i=0}^2|\ip{\eta}{s_i}|^2$, and we've used the covariance $\frac{1}{3}\sum_{i=0}^2\op{s_i}{s_i}=\mathbb{I}/2$.
Additionally, we can assume that $p_{0}\geq p_{1},p_{2}$, since if $\ket{\eta}$ fails to generate a distribution with this property, by the symmetry we can always rotate $\ket{\eta}$ such that $p_{0}$ is indeed the maximum post-measurement probability. This means we can restrict attention to $-\pi/6\leq \theta\leq \pi/6$. Next, we observe that Bob's task of distinguishing the ensemble $\{\ket{s_i},p_{i}\}_{i=0}^2$ is no easier than distinguishing between the two weighted states $\rho=p_0\op{s_0}{s_0}$ and $\sigma=p_{1}\op{s_1}{s_1}+p_{2}\op{s_2}{s_2}$. Indeed, any protocol distinguishing the three $\ket{s_i}$ can always be converted into a protocol for distinguishing $\rho$ and $\sigma$ by simply coarse-graining over all outcomes corresponding to $\ket{s_1}$ and $\ket{s_2}$. The minimum error probability in distinguishing $\rho$ and $\sigma$ is readily found to be (see Appendix): \begin{equation} \label{Eq:mixedminerr} \tfrac{1}{2}-\tfrac{1}{2}\sqrt{1-3p_{1}p_{2}-p_{0} p_{1}-p_{0} p_{2}}, \end{equation} which simplifies to $\tfrac{1}{2}-\tfrac{1}{24}[75 + 32 \cos(2\theta) - 7 \cos (4\theta)+18\cos(2\phi)\sin^2(2\theta)]^{1/2}$. In the interval $-\pi/6\leq \theta< \pi/6$, a minimum is obtained at $\theta=-\pi/6$ and $\phi=0$. This corresponds to $p_0=p_1=1/2$ and $p_2=0$ with an error probability of $1/2-\sqrt{3}/4$. Now, this probability lower bounds the error probability along each branch of Alice's measurement, and therefore it places a lower bound on any one-way LOCC measurement scheme. In fact, this lower bound turns out to be tight. When Alice performs the POVM $\{\frac{2}{3}(\mathbb{I}-\op{s_i}{s_i})\}_{i=0}^2$, outcome $i$ will eliminate $\ket{s_i}\otimes\ket{s_i}$ but leave the other two states with an equal post-measurement probability. Thus, in each branch we obtain the error probability $1/2-\sqrt{3}/4\approx 6.70\times 10^{-2}$, and this provides the minimum one-way error probability.
If we allow feedback from Bob, there exists better measurement strategies. The following protocol generalizes the optimal one-way scheme just described. (Round I) Alice performs the measurement with Kraus operators given by $\{A_i\}_{i=0}^2$ with \[A_i=\sqrt{1/3(1-p)}\op{s_i}{s_i}+\sqrt{1/3(1+p)}\op{s_i^\perp}{s_i^\perp}.\] Here $\ket{s_i^\perp}$ is the state orthogonal to $\ket{s_i}$ (explicitly $\ket{s_i^\perp}=U^i\ket{1}$). Note that this is the square-root of the POVM given by Peres and Wootters \cite{Peres-1991a}. Without loss of generality, we suppose that Alice obtains outcome ``$0$'' and communicates the result to Bob. Her (normalized) post-measurement states are $\ket{s'_0}=\ket{0}$, $\ket{s_1'}=[2(2+p)]^{-1/2}(\sqrt{1-p}\ket{0}-\sqrt{3(1+p)}\ket{1})$, and $\ket{s_2'}=[2(2+p)]^{-1/2}(\sqrt{1-p}\ket{0}+\sqrt{3(1+p)}\ket{1})$. (Round II) From Bob's perspective, he is still dealing with the original states $\ket{s_i}$, but now their prior probabilities have changed to $P_{i|A_0}=P_{A_0|i}$. He now proceeds as if Alice had completely eliminated the state $\ket{s_{0}}$ (i.e. if she had chosen $p=1$ as the strength of her measurement). Specifically, he projects onto the eigenbasis of $\op{s_1}{s_1}-\op{s_2}{s_2}$ which are the states $\ket{\pm}=\sqrt{1/2}(\ket{0}\pm\ket{1})$. A ``$+$'' outcome is associated with $\ket{s_1}$ and a ``$-$'' outcome is associated with $\ket{s_2}$; this is the optimal measurement for distinguishing between two pure states \cite{Helstrom-1976a}. By the symmetry of the states, it is sufficient to only consider the ``$+$'' outcome, which he communicates to Alice. The conditional probabilities are $P_{A_0B_+|0}=(1-p)/6$, $P_{A_0B_+|1}=1/24 (2 + \sqrt{3}) (2 + p)$, and $P_{A_0B_+|2}=1/24 (2 - \sqrt{3}) (2 + p)$. These can be inverted to give $P_{i|A_0B_+}=2P_{A_0B_+|i}$. (Round III) At this point, Alice still has three distinct states $\ket{s_0'}$, $\ket{s_1'}$ and $\ket{s_2'}$. 
Here, $\ket{s_1'}$ will have the greatest probability while $\ket{s_0'}$ will have the smallest when $p$ is close to $1$. Alice then ignores $\ket{s_2'}$ and performs optimal discrimination between just $\ket{s_{0}'}$ and $\ket{s_{1}'}$. Letting $Q=P_{0|A_0B_+}+P_{1|A_0B_+}$, the minimum error probability is given by the well-known \textit{Helstrom bound} \cite{Helstrom-1976a} with normalized probabilities: \[P_{err}^{(A_0B_+)}=1-\frac{Q}{2}(1+\sqrt{1-4\frac{P_{0|A_0B_+}P_{1|A_0B_+}}{Q^2}|\ip{s_0'}{s_1'}|^2}).\] By symmetry, each sequence of outcomes $(A_i,B_\mu)$---with $i\in\{0,1,2\}$, $\mu\in\{+,-\}$---occurs with the same probability. Hence, the total error probability across all branches is given by $P_{err}=6P_{err}^{(A_0B_+)}$. The plot is given in Fig. \ref{Fig:LOCC2}. It obtains a minimum of approximately $6.47\times 10^{-2}$, which is smaller than the one-way optimal of $1/2-\sqrt{3}/4\approx 6.70\times 10^{-2}$. The one-way optimal probability is obtained at the point $p=1$. \begin{figure}[h] \includegraphics[scale=0.6]{LOCC2.png} \caption{\label{Fig:LOCC2} The error probability $P_{err}$ using the above protocol as a function of Alice's measurement strength $p$. The point $p=1$ is the one-way minimum error probability. } \end{figure} \textit{Discussion and Conclusions:} Our results for minimum error discrimination of the double trine ensemble can be summarized as: \[\text{GLOBAL} = \text{SEP} > \text{LOCC} > \text{LOCC}_\rightarrow.\] We thus put substantial closure to a problem first posed over 20 years ago. A primary motivation for studying this problem is to better understand the limitations of processing quantum information by LOCC. Our results complement a series of recent results in this direction \cite{Kleinmann-2011a, Childs-2012a, Chitambar-2012c}. In particular, Ref. \cite{Kleinmann-2011a} provides a necessary condition for perfect discrimination by asymptotic LOCC (Prop. \ref{Prop1} above).
Theorem \ref{Thm1} of our paper largely extends this result as we reduce asymptotic minimum error discrimination of linearly independent states to asymptotic perfect discrimination. Our proofs of C1 and C2 are the first of its kind for two qubit ensembles, and we contrast it with previous work on the subject. C1 was first shown by Massar and Popescu for two qubits randomly polarized in the same direction \cite{Massar-1995a}. However, a different distinguishability measure was used and the asymptotic case was not considered. Later, Koashi \textit{et al.} showed an asymptotic form of C1 for two qubit \textit{mixed} states with respect to the different task of ``unambiguous discrimination'' \cite{Koashi-2007a} (the same can also be shown for the double trine ensemble \cite{Chitambar-2013a}). Finally, C2 has been observed by Owari and Hayashi on mixed states and only for a special sort of distinguishability measure \cite{Owari-2008a}. Our work is distinct from all previous results in that it deals with pure states and minimum error probability, a highly natural measure of distinguishability. The fact that we consider pure ensembles with three states is significant since it is well-known that any \textit{two} pure states can be distinguished optimally via LOCC (i.e. LOCC $=$ GLOBAL) \cite{Walgate-2000a, Virmani-2001a}. Thus, with the double trine being a real, symmetric, and pure ensemble of two qubits, we have identified the simplest type of ensemble in which LOCC $\not=$ GLOBAL for state discrimination. Even more, since the double trine ensemble consists of product states (i.e. no entanglement), we have shown that ``nonlocality without entanglement'' can exist in even the simplest types of ensembles with more than two states. This distinction is further sharpened by considering that LOCC $\not=$ SEP for the optimal discrimination of the double trine. Separable operations are interesting since, like LOCC operations, they lack the ability to create entanglement. 
Nevertheless, SEP evidently possesses some nonlocal power as it can outperform LOCC in discriminating the double trine. Thus, entanglement and nonlocality can truly be regarded as two distinct resources, even when dealing with two qubit pure states. \begin{acknowledgments} We would like to thank Runyao Duan, Debbie Leung, and Laura Man\v{c}inska for helpful discussions on the topic of LOCC distinguishability. \end{acknowledgments} \appendix \section{Appendix} \subsection{Proof of Theorem \ref{Thm1}} (a) We first recall a few general facts about minimum error discrimination. A POVM $\{\Pi_i\}_{i=1}^k$ is optimal on $\mathcal{E}$ if and only if $\Lambda\geq p_j\op{\psi_j}{\psi_j}$ for all $\ket{\psi_j}$, in which the operator $\Lambda:=\sum_{i=1}^kp_i\Pi_i\op{\psi_i}{\psi_i}$ is Hermitian \cite{Holevo-1973a, *Yuen-1975a, *Barnett-2009a}. Since $\sum_{i=1}^k\Pi_i=\mathbb{I}$, we have \begin{equation*} 0=tr[\Lambda-\Lambda]=\sum_{j=1}^k tr[\Pi_j(\Lambda-p_j\rho_j)]. \end{equation*} Then as $\Lambda-p_j\rho_j\geq 0$ and $tr[\Pi_j(\Lambda-p_j\rho_j)]=tr[\Pi^{1/2}_j(\Lambda-p_j\rho_j)\Pi^{1/2}_j]\geq 0$, we must have that \begin{equation} \label{Eq:optimal-conds2} \Pi_j(\Lambda-p_j\op{\psi_j}{\psi_j})=(\Lambda-p_j\op{\psi_j}{\psi_j})\Pi_j=0. \end{equation} Our argument now proceeds analogously to the one given in Ref. \cite{Mochon-2006a}. Let $P_S$ be the projector onto $S$, and for some POVM $\{\Pi_i\}_{i=1}^k$ that obtains $P_{opt}$ on $\mathcal{E}$, define $\hat{\Pi}_i=P_S\Pi_i P_S$. As the $\ket{\psi_i}$ are linearly independent, there exists a set of dual states $\ket{\psi_i^\perp}$ such that $\ip{\psi_i^\perp}{\psi_j}=\delta_{ij}$. We first note that $\Lambda-p_i\op{\psi_i}{\psi_i}\geq 0$ implies $\hat{\Lambda}-p_i\op{\psi_i}{\psi_i}\geq 0$, where $\hat{\Lambda}=\hat{\Lambda}^\dagger=\sum_{j=1}^kp_j\hat{\Pi}_j\op{\psi_j}{\psi_j}$. Thus, the POVM $\{\mathbb{I}-P_S,\hat{\Pi}_i\}_{i=1}^k$ also obtains $P_{opt}$ on $\mathcal{E}$.
We next note that $\hat{\Pi}_j\ket{\psi_j}\not=0$ for all $j$. For if this were not true for some $\ket{\psi_j}$, then we could contract with $\ket{\psi^\perp_j}$ to obtain $0\leq \bra{\psi^\perp_j} \left(\hat{\Lambda}-p_j\op{\psi_j}{\psi_j}\right)\ket{\psi^\perp_j}= -p_j$. Next, since $\{\mathbb{I}-P_S,\hat{\Pi}_i\}_{i=1}^k$ is an optimal POVM, the corresponding equality of Eq. \eqref{Eq:optimal-conds2} is $0=\hat{\Pi}_j(\hat{\Lambda}-p_j\op{\psi_j}{\psi_j})$. Applying $\ket{\psi_i^\perp}$ to the RHS yields \begin{equation} \hat{\Pi}_j(p_i\hat{\Pi}_i\ket{\psi_i})=\delta_{ij}p_i\hat{\Pi}_i\ket{\psi_i}. \end{equation} Thus, $\ket{\phi_i}:=\tfrac{1}{\sqrt{\bra{\psi_i}\hat{\Pi}_i^2\ket{\psi_i}}}\hat{\Pi}_i\ket{\psi_i}$ (which is nonzero) lies in the kernel of $\hat{\Pi}_j$ for $i\not=j$, while $\ket{\phi_i}$ is an eigenvector of $\hat{\Pi}_j$ with eigenvalue +1 when $i=j$. Hence, $\hat{\Pi}_i=\op{\phi_i}{\phi_i}$ and $\ip{\phi_i}{\phi_j}=\delta_{ij}$, with $\sum_{i=1}^k\op{\phi_i}{\phi_i}=P_S$. We obviously have $\bra{\phi_i}\Pi_j\ket{\phi_i}=\delta_{ij}$, which means the original POVM can perfectly distinguish the $\ket{\phi_i}$. Conversely, any POVM $\{\Pi_i\}_{i=1}^k$ that perfectly distinguishes the $\ket{\phi_i}$ will satisfy $P_S\Pi_iP_S=\op{\phi_i}{\phi_i}$, and will therefore obtain $P_{opt}$ on $\mathcal{E}$. Finally, let $\boldsymbol{\Pi}_S$ be the compact, convex set of POVMs with $k=dim(S)$ elements, each having support on $S$. We have just shown that the continuous linear function $f:\boldsymbol{\Pi}_S\to\mathbb{R}$ given by $f(\{\Pi_i\}_{i=1}^k)=1-\sum_{i=1}^kp_i\bra{\psi_i}\Pi_i\ket{\psi_i}$ can be minimized only by an extreme point of $\boldsymbol{\Pi}_S$ (rank one projectors). Convexity of $\boldsymbol{\Pi}_S$ implies that this extreme point $\mathcal{P}_0:=\{\op{\phi_i}{\phi_i}\}_{i=1}^k$ must be unique.
(b) For the asymptotic statement, we will need to endow $\boldsymbol{\Pi}_S$ with a metric. For two POVMs $\mathcal{P}=\{\Pi_1,...,\Pi_k\}$ and $\mathcal{P}'=\{\Pi'_1,...,\Pi'_k\}$ in $\boldsymbol{\Pi}_S$, we can define a distance measure by $d(\mathcal{P},\mathcal{P}')=\frac{1}{2}\sum_{i=1}^k\|\Pi_i-\Pi_i'\|_1$, where $\|A\|_1={\rm Tr}|A|$ \footnote{A perhaps more natural distance measure between two POVMs is the difference in measurement probabilities, maximized over all trace one, non-negative inputs: $\max_{\rho}\frac{1}{2}\sum_{i=1}^k|tr(\rho[\Pi_i-\Pi_i'])|$. As we are only concerned with issues of convergence, it suffices to consider the equivalent metric $d$ introduced above.}. Note that when $\Pi_i'=\op{\phi_i'}{\phi_i'}$ is pure, we have $\frac{1}{2}\|\Pi_i-\op{\phi_i'}{\phi_i'}\|_1\geq 1-\bra{\phi_i'}\Pi_i\ket{\phi_i'}$ \cite{Nielsen-2000a}. For any $\mathcal{P}=\{\Pi_i\}_{i=1}^k$, define $\hat{\mathcal{P}}=\{P_S\Pi_iP_S\}_{i=1}^k$. Suppose there exists a sequence of POVMs $\mathcal{P}^{(n)}$ such that for any $\epsilon>0$, $f(\hat{\mathcal{P}}^{(n)})< P_{opt}+\epsilon$ for sufficiently large $n$. As $\hat{\mathcal{P}}^{(n)}$ is a sequence in the compact metric space $\boldsymbol{\Pi}_S$, by the Bolzano--Weierstrass Theorem from analysis, there will exist some convergent subsequence $\hat{\mathcal{P}}^{(n_j)}\to\overline{\mathcal{P}}$. Continuity of $f$ implies that $\lim_{n_j\to \infty}f(\hat{\mathcal{P}}^{(n_j)})=f(\mathcal{P}_0)=f(\overline{\mathcal{P}})$ (recall $P_{opt}=f(\mathcal{P}_0)$). However, by part (a), the POVM in $\boldsymbol{\Pi}_S$ obtaining $P_{opt}$ is unique and so $\overline{\mathcal{P}}=\mathcal{P}_0$.
Thus, $d(\hat{\mathcal{P}}^{(n_j)},\mathcal{P}_0)\to 0$, and so the error on $\mathcal{E}$ of each subsequence $\mathcal{P}^{(n_j)}$ satisfies \[1-\frac{1}{k}\sum_{i=1}^k\bra{\phi_i}\Pi_i^{(n_j)}\ket{\phi_i}\leq \frac{1}{k}d(\hat{\mathcal{P}}^{(n_j)},\mathcal{P}_0)\to 0.\] Conversely, if $1-\frac{1}{k}\sum_{i=1}^k\bra{\phi_i}\Pi_i^{(n_j)}\ket{\phi_i}\to 0$, then $1-\bra{\phi_i}\Pi_i^{(n_j)}\ket{\phi_i}\geq \tfrac{1}{4}\|\Pi_i^{(n_j)}-\op{\phi_i}{\phi_i}\|^2\to 0$ \cite{Nielsen-2000a}, which means $d(\hat{\mathcal{P}}^{(n_j)},\mathcal{P}_0)\to 0$. By continuity of $f$, we have $1-\sum_{i=1}^kp_i\bra{\psi_i}\Pi_i^{(n_j)}\ket{\psi_i}=f(\hat{\mathcal{P}}^{(n_j)})\to P_{opt}$. \subsection{All Conditions of Proposition \ref{Prop1} Cannot be Simultaneously Satisfied} Condition (iii) requires orthogonality $\bra{F_i}E\ket{F_j}=0$ for $i\not=j$, and so in the basis $\{\ket{\Psi^-},\ket{F_i}\}_{i=0}^2$, $E$ must take the form \begin{align} \label{Eq:E} s\op{\Psi^-}{\Psi^-}+\sum_{i=0}^2(a_i\op{F_i}{F_i}+[b_i\op{\Psi^-}{F_i}+C.C.]) \end{align} where $s,a_i\geq 0$, $\sum_{i=0}^2a_i=1$, and $C.C.$ denotes the complex conjugate. If $E$ is a product operator across Alice and Bob's system, then $\gamma_{01}={}_A\bra{0}E\ket{1}_{A}$ must commute with $\gamma_{10}={}_A\bra{1}E\ket{0}_{A}$. Here we are taking partial contractions on Alice's system so that $\gamma_{01}$ and $\gamma_{10}$ are operators acting on Bob's system. By directly computing the commutator using Eqs. \eqref{Eq:GlobalPOVM} and \eqref{Eq:E}, the condition $\bra{0}[\gamma_{01},\gamma_{10}]\ket{0}=0$ becomes \begin{equation} 0=6[\mathrm{Im}(b_2 - b_3)]^2+(s+ a_0 - \tfrac{2}{3}) (s-\tfrac{1}{3}). \end{equation} With $a_0=\chi\geq\tfrac{1}{3}$ (condition (ii)), it is clear that $s\leq \tfrac{1}{3}$. However, if $s<\tfrac{1}{3}$, then this equation cannot hold for any $a_0\in[\tfrac{1}{3},\tfrac{2}{3}-s)$.
Thus, the product form constraint on $E$ requires $a_0=\tfrac{1}{3}$. Next, we focus on the range $a_0\in[\tfrac{1}{3},\tfrac{1}{2})$, which because of (iii), guarantees that $E$ is full rank. It is known that the $\ket{F_i'}$ can be perfectly distinguished by separable operations if and only if $\sum_{i=0}^2C(F'_i)=C(\Psi')$, where $C(\cdot)$ is the concurrence of the state and $\ket{\Psi'}=\frac{E^{-1/2} \ket{\Psi^-}}{\sqrt{{\mathbf r}a{\Psi^-}E^{-1} \ket{\Psi^-}}}$ (see Thm. 2 of \cite{Duan-2007a}). We combine this with the fact that for a general two qubit state $\frac{M\otimes N\ket{\varphi}}{\sqrt{{\mathbf r}a{\varphi}M^\dagger M\otimes N^\dagger N\ket{\varphi}}}$, its concurrence is given by is $C(\varphi)\times\frac{|det (M)||det(N)|}{{\mathbf r}a{\varphi}M^\dagger M\otimes N^\dagger N\ket{\varphi}}$ \cite{Verstraete-2001a}. Therefore after noting that $C(F_i)=1/3$ and writing $E=A\otimes B$, condition (iii) of Proposition \ref{Prop1} can be satisfied if and only if {\mathbf e}gin{align} 1=\frac{1}{3}\sum_i\sqrt{det(A\otimes B)}\frac{{\mathbf r}a{\Psi^-}A^{-1} \otimes B^{-1}\ket{\Psi^-}}{{\mathbf r}a{F_i} A\otimes B\ket{F_i}}. \end{align} To compute ${\mathbf r}a{\Psi^-}A^{-1} \otimes B^{-1}\ket{\Psi^-}$, we use Cramer's Rule which says $(A\otimes B)^{-1}=\frac{1}{det(A\otimes B)}Adj(A\otimes B)$, where $Adj(\cdot)$ denotes the adjugate matrix. From \eqref{Eq:E}, we have that ${\mathbf r}a{\Psi^-}Adj(A\otimes B)\ket{\Psi^-}=\prod_{i=1}^3{\mathbf r}a{F_i} A\otimes B\ket{F_i}$. Substituting this into the above equation gives {\mathbf e}gin{align} \label{Eq:ConcurrenceConditionPostMeasure} 1=& \frac{1}{3}\sum_i\frac{1}{\sqrt{det(A\otimes B)}}\frac{\prod_{j=1}^3{\mathbf r}a{F_j} A\otimes B\ket{F_j}}{{\mathbf r}a{F_i} A\otimes B\ket{F_i}}\notag\\ \geq&\frac{1}{3}\frac{a_0a_1+a_0a_2+a_1a_2}{\sqrt{1/3a_0a_1a_2}}, \end{align} where we have used \eqref{Eq:E} and Hadamard's inequality: $det(A\otimes B)\leq sa_0a_1a_2=1/3a_0a_1a_2$. 
It is a straightforward optimization calculation to see that under the constraint $\sum_{i=0}^2a_i=1$, the RHS of \eqref{Eq:ConcurrenceConditionPostMeasure} obtains a minimum of 1 if and only if $a_0=a_1=a_2=\tfrac{1}{3}$. This proves that condition (iii) is impossible whenever $\chi>\tfrac{1}{3}$. \section{Eq. \eqref{Eq:mixedminerr} and the Minimum Error for one Mixed and one Pure State} We compute an analytic formula for the minimum error probability in distinguishing weighted qubit states $\rho=p_0\op{\psi_0}{\psi_0}$ and $\sigma=p_1\op{\psi_1}{\psi_1}+p_2\op{\psi_2}{\psi_2}$. The minimum error probability is given by $P_{err}=1/2-1/2\|\rho-\sigma\|_1$, where $\|\cdot\|_1$ is the trace norm. Since $\rho-\sigma$ is Hermitian with eigenvalues $\lambda_i$, we have $\|\rho-\sigma\|_1=\sum_{i}|\lambda_i|$. Thus, it is just a matter of computing the eigenvalues of $\Delta:=\rho-\sigma=p_0\op{\psi_0}{\psi_0}-p_1\op{\psi_1}{\psi_1}-p_2\op{\psi_2}{\psi_2}$. Taking $\ket{\psi_i}=c_{i0}\ket{0}+c_{i1}\ket{1}$, we write $\Delta$ in coordinates: \begin{align} \Delta=&p_0\begin{pmatrix}|c_{00}|^2& c_{00}c_{01}^*\\ c_{00}^*c_{01}&|c_{01}|^2\end{pmatrix}-\sum_{i=1}^2p_i\begin{pmatrix}|c_{i0}|^2& c_{i0}c_{i1}^*\\ c_{i0}^*c_{i1}&|c_{i1}|^2\end{pmatrix}.\notag \end{align} For a $2\times 2$ matrix, $M=\left(\begin{smallmatrix}a&b\\c&d\end{smallmatrix}\right)$, its eigenvalues are given by the expression $\lambda_{\pm}=1/2(a+d\pm\sqrt{(a+d)^2-4\det M})$. Thus, we have that \[|\lambda_+|+|\lambda_-|=\begin{cases}|a+d|\;\;\;\text{if}\;\; \det M\geq 0\\\sqrt{(a+d)^2-4\det M}\;\;\;\text{if}\;\; \det M\leq 0.\end{cases}\] Letting $M=\Delta$, we can compute that $a+d=p_0-p_1-p_2$ and \begin{align*} \det \Delta=&p_1p_2(1-|\ip{\psi_1}{\psi_2}|^2)\notag\\& -p_0p_1(1-|\ip{\psi_0}{\psi_1}|^2)-p_0p_2(1-|\ip{\psi_0}{\psi_2}|^2). 
\end{align*} Therefore, we arrive at the following {\mathbf e}gin{lemma} For the weighted states $\rho$ and $\sigma$, the minimum error probability is \begin{align} \label{Eq:prob} \tfrac{1}{2}-\tfrac{1}{2}\sqrt{1-4p_1p_2(1-|\ip{\psi_1}{\psi_2}|^2)-4p_0\sum_{i=1}^2p_i|\ip{\psi_0}{\psi_i}|^2}\notag \end{align} if $\det(\rho-\sigma)\leq 0$, and $\tfrac{1}{2}-\tfrac{1}{2}|p_0-p_1-p_2|$ if $\det(\rho-\sigma)\geq 0$. \end{lemma} Now, we use this result with Eq. \eqref{Eq:probsBob} to prove Eq. \eqref{Eq:mixedminerr}. Specifically, since $|\ip{s_i}{s_j}|^2=\tfrac{1}{4}$ for the trine states, we have that $\det\Delta$ is given by \begin{equation*} \frac{-1}{192}(3(3+\cos(2\phi))+32\cos(2\theta)-(13+3\cos(2\phi))\cos(4\theta)). \end{equation*} It is straightforward to verify that this is not positive for $\phi\in [0,2\pi)$ and $\theta\in[-\pi/6,\pi/6)$. Therefore, by Lemma 1, we obtain Eq. \eqref{Eq:mixedminerr}. \bibliography{QuantumBib} \end{document}
\begin{document} \title{\Large Communication Efficient Coresets for Maximum Matching} \author{Michael Kapralov\\EPFL\footnote{E-mail addresses: [email protected]} \and Gilbert Maystre\\EPFL \and Jakab Tardos\\EPFL} \date{\today} \maketitle \pagestyle{fancy} \fancyhf{} \fancyfoot[LE]{\hspace{1.75cm} \thepage } \fancyfoot[RO]{ \thepage \hspace{1.75cm}} \setlength{\parindent}{0pt} \begin{abstract} \small\baselineskip=9pt In this paper we revisit the problem of constructing \emph{randomized composable coresets} for bipartite matching. In this problem the input graph is randomly partitioned across $k$ players, each of which sends a single message to a coordinator, who then must output a good approximation to the maximum matching in the input graph. Assadi and Khanna \cite{DBLP:conf/spaa/AssadiK17} gave the first such coreset, achieving a $1/9$-approximation by having every player send a maximum matching, i.e. at most $n/2$ words per player. The approximation factor was improved to $1/3$ by Bernstein et al. \cite{DBLP:conf/soda/AssadiBBMS19}.\\ In this paper, we show that the matching skeleton construction of Goel, Kapralov and Khanna \cite{DBLP:conf/soda/GoelKK12}, which is a carefully chosen (fractional) matching, is a randomized composable coreset that achieves a $1/2-o(1)$ approximation using at most $n-1$ words of communication per player. We also show an upper bound of $2/3+o(1)$ on the approximation ratio achieved by this coreset.\end{abstract} \section{Introduction} The composable coresets framework is a generic technique for the analysis of large data that has been shown to be effective for a variety of problems. In the context of graphs, the idea is to partition the edges of the input graph into $k$ parts, extract some small yet informative summary of each part, and recombine these summaries into a single graph without losing too much in the quality of the solution (the formal definition is presented in Section~\ref{section:coresets}). 
The small summary of each part is called the composable coreset. This versatile technique translates simply to algorithms in both the MPC and the randomized streaming models (Section 1.1 in \cite{DBLP:conf/spaa/AssadiK17}).\\ The study of {\it randomized} composable coresets in the context of approximating maximum matching was initiated by~\cite{DBLP:conf/spaa/AssadiK17} as the usefulness of {\it deterministic} composable coresets, where the initial partition of the input is arbitrary, was shown to be limited (see e.g. \cite{DBLP:conf/soda/AssadiKLY16,DBLP:conf/esa/Konrad15}). They proved that a maximum matching coreset, which contains $n/2$ edges, achieves nearly $1/9$ approximation, which was improved to nearly $1/3$ by~\cite{DBLP:conf/soda/AssadiBBMS19}. This paper further showed that the approximation quality of the maximum matching coreset is at best $1/2$, and proposed an alternative: the EDCS coreset. EDCS's achieve a nearly $2/3$ approximation as randomized composable coresets for maximum matching; they are however significantly denser, with size $n\cdot\text{poly}(\epsilonpsilon^{-1})\cdot\text{poly}(\log n)$ to achieve a $2/3-\epsilonpsilon$ approximation. More recently, the work of ~\cite{DBLP:conf/icml/AssadiBM19} gave a coreset of linear size in $n$ that achieves an approximation ratio of $1/2-\epsilonpsilon$ for small $\epsilonpsilon>0$, but at the expense of duplicating every edge $\Omega(1/\epsilonpsilon)$ times, increasing the communication accordingly. This is again prohibitively expensive for small $\epsilonpsilon$.\\ As the main result of this paper, we propose a small composable coreset of size at most $n-1$, which nonetheless achieves a $1/2$ approximation ratio, without the need for the duplication of edges. 
\begin{figure}[ht] \vskip 0.2in \begin{center} \scalebox{0.8}{ \begin{tikzpicture} \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=3cm, minimum height=3cm, align=center] (data) at (-2, 0) {$G = (V,E)$}; \node[rectangle, draw, thick, fill=alg_color, minimum width=1cm, minimum height=1cm] (a1) at (3, 1.5) {$\mathcal{A}$}; \node[rectangle, draw, thick, fill=alg_color, minimum width=1cm, minimum height=1cm] (a2) at (3, 0) {$\mathcal{A}$}; \node[rectangle, draw, thick, fill=alg_color, minimum width=1cm, minimum height=1cm] (a3) at (3, -1.5) {$\mathcal{A}$}; \node[rectangle, draw, thick, fill=alg_color, minimum width=1cm, minimum height=1cm] (mm) at (7, 0) {$\mathsf{MM}$}; \path[draw, -latex, thick] (data) -- (a1) node [midway, above, sloped] {$G_{1}$}; \path[draw, -latex, thick] (data) -- (a2) node [midway, above, sloped] {$G_{2}$}; \path[draw, -latex, thick] (data) -- (a3) node [midway, above, sloped] {$G_{3}$}; \path[draw, -latex, thick] (a1) -- (mm); \path[draw, -latex, thick] (a2) -- (mm); \path[draw, -latex, thick] (a3) -- (mm); \path[draw, -latex, thick] (mm) -- (10, 0) node [midway, above, sloped, align=center] {$\alpha$-approx.\\ of $\mm{G}$}; \path[draw, dashed] (0, 2) -- (0, -2); \path[draw, dashed] (2, 2) -- (2, -2); \node[rectangle, align=center, minimum width=2, minimum height=1] at (1, 2) {random\\$k$-partition}; \path[draw, dashed] (4, 2) -- (4, -2); \path[draw, dashed] (6, 2) -- (6, -2); \node[rectangle, align=center, minimum width=2, minimum height=1] at (5, 2) {coresets}; \epsilonnd{tikzpicture}} \caption{A visual representation of randomized composable coresets. $G$ is first partitioned into $k$ parts randomly. Then each of those parts is reduced into coresets independently using an algorithm $\mathcal{A}$. 
A maximum matching is then computed amongst the recombination of all the coresets.} \label{figure:coresetconcept} \end{center} \vskip -0.2in \end{figure} \begin{theorem} There exists a $1/2-o(1)$-approximate randomized composable coreset with size $n-1$ for bipartite maximum matching as long as $k=\omega(1)$, $\mm{G}=\omega(k\log n)$. \end{theorem} \subsection*{Intuition behind our construction} Our coreset is inspired by the previous best known `small' randomized composable coreset, the maximum matching coreset. The main challenge in analyzing the performance of picking any maximum matching as a coreset lies in the fact that graphs that do not admit a perfect matching generally admit many different maximum matchings. To circumvent this, we propose to use \emph{any matching skeleton} (a structure introduced by~\cite{DBLP:conf/soda/GoelKK12}, and later rediscovered by~\cite{DBLP:conf/soda/BernsteinHR18}) as a coreset. This is essentially a carefully chosen `canonical' fractional matching that matches vertices as uniformly as possible (see Section~\ref{section:matchingskeletoncoreset}). Such a fractional matching can always be selected to be supported on a forest by a simple argument similar to the one that establishes the integrality of the bipartite matching polytope, meaning that the support size is never larger than $n-1$. The fact that the coreset is essentially an `optimally spread out' maximum matching leads to a rather simple proof of the approximation ratio of $1/2-o(1)$, which we present in Section~\ref{section:main}. In Section~\ref{section:limitations}, we show that any matching skeleton does not provide a better than $2/3$ approximation to maximum matching, leaving some amount of room for improvement of our approximation ratio bound. 
\subsection*{Previous results} Coresets have been studied in a variety of contexts \cite{DBLP:conf/spaa/AssadiK17,DBLP:conf/kdd/BadanidiyuruMKK14,DBLP:conf/nips/BalcanEL13,DBLP:conf/nips/BateniBLM14,DBLP:conf/pods/IndykMMM14,DBLP:conf/stoc/MirrokniZ15,DBLP:conf/nips/MirzasoleimanKSK13} (also see e.g. \cite{DBLP:conf/soda/AhnGM12,DBLP:conf/pods/AhnGM12,DBLP:conf/soda/AssadiKLY16,DBLP:conf/stoc/BhattacharyaHNT15,DBLP:journals/algorithmica/BulteauFKP16,DBLP:conf/soda/ChitnisCEHMMV16,DBLP:conf/focs/KapralovLMMS14,DBLP:conf/podc/KapralovW14,DBLP:conf/mfcs/McGregorTVV15, DBLP:journals/corr/abs-2007-14204} for the related work in the linear sketching context). Related to our problem, maximum matching approximation has been widely studied in low space regimes such as MPC \cite{DBLP:conf/soda/AssadiBBMS19,DBLP:conf/spaa/HarveyLL18,DBLP:conf/stoc/CzumajLMMOS18,DBLP:conf/spaa/LattanziMSV11,DBLP:conf/icml/AssadiBM19} and streaming \cite{DBLP:conf/approx/McGregor05,DBLP:conf/soda/GoelKK12,DBLP:conf/approx/KonradMM12,DBLP:journals/talg/PazS19,DBLP:conf/spaa/AssadiK17,DBLP:conf/soda/AssadiB19}. In particular \cite{DBLP:conf/soda/AssadiBBMS19} achieves nearly $2/3$ approximate maximum matching in two MPC rounds and $\widetilde O(\sqrt{mn}+n)$ space per machine, using randomized composable coresets of size $O(n\log n)$. \section{Randomized Composable Coresets} \label{section:coresets} \begin{definition} Let $G = (V, E)$ be a graph and $k \in \mathbb{N}$ and integer. A {\bf random $k$-partition} of $G$ is a set of $k$ random subgraphs $\{G_i=(V,\, E_i)\}_{i \in [k]}$ of $G$, where each edge $e \in E$ is sent uniformly at random to exactly one of the $E_i$. \epsilonnd{definition} \begin{definition}\cite{DBLP:conf/stoc/MirrokniZ15} \label{definition:coreset} Let $\mathcal{A}$ be an algorithm that takes as input a graph $H$ and returns a subgraph $\mathcal{A}(H) \subseteq H$. 
We say that $\mathcal{A}$ outputs an \textbf{$\alpha$-approximate randomized composable coreset} for the maximum matching problem if given any graph $G = (V, E)$, any $k \in \mathbb{N}$ and a random $k$-partition of $G$ we have \[\alpha \cdot \mm{G} \leq \mathbb{E}\left[\mm{\mathcal{A}\left(G_1 \right) \cup \dots \cup \mathcal{A}\left(G_k \right)}\right]\] where the expectation is taken over the randomness of the partition. The \textbf{size} of the coreset is the number of edges returned by $\mathcal{A}$. \end{definition} \begin{remark} Throughout this paper we will assume some natural bounds on the parameter $k$. Firstly, similarly to \cite{DBLP:conf/spaa/AssadiK17, DBLP:conf/soda/AssadiBBMS19}, we suppose that the maximum matching size of the input graph $\mm{G}=\omega(k\log n)$. This allows us to argue concentration at various places in the analysis, and is a natural assumption: The regime where $\mm{G}$ is smaller is handled in \cite{DBLP:journals/corr/ChitnisCEHMMV15}. We will further make the natural assumption that $k=\omega(1)$, that is we parallelize over a superconstant number of machines. \end{remark} \section{Preliminaries and Notation} Throughout the paper we consider bipartite graphs, denoted by $G = (P,\, Q,\, E)$, where the vertex-sets $P$ and $Q$ are the two sides of the bipartition, and $E$ is the edge-set. We let $n=|P\cup Q|$ denote the number of vertices in $G$ and $m=|E|$ denote the number of edges. For a vertex $v\in P\cup Q$ of $G$ we write $\Gamma_G(v)$ to denote the set of neighbors of $v$ in $G$, or $\Gamma(v)$ if $G$ is clear from context. Similarly, for a set $S\subseteq P\cup Q$ we write $\Gamma_G(S)$ or $\Gamma(S)$ to denote the neighborhood of the set in $G$. \begin{definition} A \textbf{matching} in a graph is a set of edges such that no two of them share an endpoint. 
The \textbf{maximum matching size} of a graph is the maximum possible size of a matching in it; we usually denote it $\mm{G}$. \end{definition} \begin{definition} Given a graph $G = (P, Q, E)$, a \textbf{fractional matching} is a set of non-negative edge weights $\vec{x}: E \to [0,1]$ such that no vertex has more than unit weight adjacent on it: \[\forall v\in P\cup Q:\sum_{w\in\Gamma(v)}x_{vw}\le1\] The \textbf{size} of a fractional matching is the sum of all edge-weights. \end{definition} Note that an integral fractional matching corresponds to the classical matching definition. We will also use the extended notion of $\alpha$-matching of \cite{DBLP:conf/soda/GoelKK12}, which is a classical fractional matching with a changed constraint for one side of the bipartition. \begin{definition} Given a graph $G = (P, Q, E)$, an \textbf{$\alpha$-matching with respect to $P$} is a set of non-negative edge weights $\vec{x}: E \to [0, 1]$ that saturates each vertex of $P$ fractionally exactly $\alpha$ times and each vertex of $Q$ at most once. \end{definition} \begin{definition} A vertex cover is a set of vertices $\Phi\subseteq P\cup Q$ such that all edges have at least one endpoint in $\Phi$. \end{definition} The following theorem is a fundamental fact about bipartite graphs, on which we will be relying throughout the paper. \begin{theorem}\label{thm:vc=mm} For any bipartite graph, the size of the maximum matching, the size of the maximum fractional matching, and the size of the minimum vertex cover are equal. \end{theorem} \begin{corollary} \label{corollary:m-vc} If a matching and a vertex cover have the same size, both are optimal. \end{corollary} Furthermore, we will rely on the following concentration inequality. \begin{theorem}[Chernoff bound, see \cite{DBLP:books/daglib/0021015}] Let $Y = \sum_{i = 1}^n X_i$ be the sum of $n$ independent binary random variables, each with $\Pr[X_i = 1] = p_i$. 
Let $\mu_Y = \mathbb{E}[Y] = \sum_{i = 1}^n p_i$. Then, for any $\epsilon \in (0,1)$, we have: \[\Pr[Y \notin (1 \pm \epsilon) \mu_Y] \leq 2e^{-\frac{\epsilon^2 \mu_Y}{3}}\] \label{theorem:chernoff} \end{theorem} \section{Our coreset: the matching skeleton} \label{section:matchingskeletoncoreset} In this section, we recall the notion of matching skeleton, introduced by \cite{DBLP:conf/soda/GoelKK12} and later rediscovered by \cite{DBLP:conf/soda/BernsteinHR18}. We simplify slightly the original definitions and results to suit our needs. We also introduce a new related object, the canonical vertex cover, which is central to our proof.\\ We define a partition of the vertex set of $G$ into subgraphs of varying vertex expansion as follows. For each $i = 1,\, \dots$ we define a tuple $(P_i,\, Q_i, \, \alpha_i)$ iteratively as follows, starting with $G_0 = G$, $P_0=P$: \begin{enumerate} \item Let $\alpha_i = \min_{\emptyset\neq S\subseteq P_{i-1}} \frac{|\Gamma_{G_{i-1}}(S)|}{|S|}$ \item Let $P_i=$ largest $S\subseteq P_{i-1}$ such that $\frac{|\Gamma_{G_{i-1}}(S)|}{|S|}=\alpha_i$ \item Let $Q_i= \Gamma_{G_{i-1}}(P_i)$ \item $G_i = G_{i-1} \setminus (P_i \cup Q_i)$ \end{enumerate} This process continues until $G_i$ is empty. \begin{definition} We call each $(P_i,\, Q_i, \, \alpha_i)$ a \textbf{block} and $\alpha_i$ its \textbf{expansion level}, which is carried over to the vertices of the block using the notation $\alpha(v) := \alpha_i$ for $v \in P_i \cup Q_i$. We call the collection $\{(P_i,\, Q_i, \, \alpha_i)\}_{i\in[k]}$ the \textbf{block decomposition} of $G$. \end{definition} \begin{remark} A practical way to find $\alpha_1$ is to solve several max-flow instances. For some $\alpha \in \mathbb{R}_+$, let $G_\alpha$ be a copy of $G$ where edges are directed from $P$ to $Q$ with an infinite weight. 
Also part of $G_\alpha$ is a source vertex $s$ which has edges directed toward each $p \in P$ with weight $\alpha$ and a sink vertex $t$ with unit-weight edges incoming from each $q \in Q$. Observe that $\alpha_1 = \inf\{\alpha \in \mathbb{R}_+: |\mathsf{MC}(\alpha)| > 1\}$ where $\mathsf{MC}(\alpha)$ is the min-cut containing $s$ in $G_\alpha$. Finding $\alpha_1$ thus reduces to solving max-flow instances with increasing $\alpha$ until a non-trivial min-cut is found. This cut actually consists of $P_1$ and $Q_1$ together with $s$. The remaining of the partition is obtained by repeating this argument. \epsilonnd{remark} We now recall the main properties of the block decomposition of $G$, most of which comes from Section 3 of \cite{DBLP:conf/soda/GoelKK12}. \begin{lemma}[\cite{DBLP:conf/soda/GoelKK12}] \label{lemma:blockproperties} Let $\{(P_i,\, Q_i, \, \alpha_i)\}_{i \in [k]}$ be the block partition of $G$. The sequence $(\alpha_i)_{i \in [k]}$ is strictly increasing and such that $\alpha_i = |Q_i|/|P_i|$. Also, for any $i \in [k]$: $${\mathcal{G}}amma(P_i) \subseteq \bigcup_{j \leq i} Q_j$$ \epsilonnd{lemma} Intuitively, each block $P_i\cup Q_i$ is associated with a certain expansion of the $P_i$ side, namely $\alpha_i$. The expansion of the block cannot be greater than $\alpha_i$, as $|Q_i|=\alpha_i|P_i|$. However, it is also no less than $\alpha_i$, as the entire block admits of an $\alpha_i$-matching with respect to $P_i$. \begin{lemma}[\cite{DBLP:conf/soda/GoelKK12},\cite{DBLP:conf/soda/BernsteinHR18}] Let $G = (P, \, Q, \, E)$ be a graph together with its block decomposition $\{(P_i,\, Q_i, \, \alpha_i)\}_{i\in[k]}$. For each $i\in[k]$ there is an $\alpha_i$-matching of $P_i\cup Q_i$ with respect to $P_i$. \epsilonnd{lemma} \begin{remark}[\cite{DBLP:conf/soda/GoelKK12}] The above $\alpha$-matchings can easily be made to have cycle-free supports, by eliminating cycles through standard techniques. 
\epsilonnd{remark} Now that the block decompositon of a graph is introduced, we can define matching skeletons which are simply the union of the above introduced cycle-free $\alpha$-matching for each block. \begin{definition}[Matching skeleton \cite{DBLP:conf/soda/GoelKK12}] Let $G = (P, \, Q, \, E)$ be a graph together with its block decomposition $\{(P_i,\, Q_i, \, \alpha_i)\}_{i \in [k]}$. For each $i \in [k]$, let $\vec{x}_i: (P_i \times Q_i) \cap E \to [0, 1]$ be a cycle-free $\alpha_i$-matching. We call $$H = \bigcup_{i \in [k]} \support{\vec{x}_i}$$ a matching skeleton of $G$. See Figure \ref{figure:coreset} for a visual example. \epsilonnd{definition} \begin{figure}[ht] \vskip 0.2in \begin{center} \scalebox{0.9}{ \begin{tikzpicture} \coordinate (bca) at (6, 0); \coordinate (bcb) at (6.75, 0); \coordinate (bcc) at (7.5, 0); \coordinate (bsa) at (6.375, 2); \coordinate (bsb) at (7.125, 2); \coordinate (cca) at (9.375, 0); \coordinate (ccb) at (10.125, 0); \coordinate (ccc) at (10.875, 0); \coordinate (csa) at (9, 2); \coordinate (csb) at (9.75, 2); \coordinate (csc) at (10.5, 2); \coordinate (csd) at (11.25, 2); \draw[fill=red!10, rounded corners] (5.4, -0.3) -- (8.1, -0.3) -- (7.425, 2.3) -- (6.075, 2.3)-- cycle; \draw[fill=red!10, rounded corners] (9.075, -0.3) -- (11.175, -0.3) -- (11.85, 2.3) -- (8.4, 2.3)-- cycle; \path[draw=black, thick] (bca) -- (bsa); \path[draw=black, thick] (bca) -- (bsb); \path[draw=black, thick] (bcb) -- (bsa); \path[draw=black, thick] (bcc) -- (bsb); \path[draw=black, thick] (cca) -- (csa); \path[draw=black, thick] (cca) -- (csb); \path[draw=black, thick] (ccb) -- (csb); \path[draw=black, thick] (ccb) -- (csc); \path[draw=black, thick] (ccc) -- (csc); \path[draw=black, thick] (ccc) -- (csd); \path[draw=black, dotted, thick] (ccb) -- (csa); \path[draw=black, dotted, thick] (ccb) -- (csd); \path[draw=black, thick, dotted] (ccb) -- (bsa); \path[draw=black, thick, dotted] (ccc) -- (bsb); \node[circle, thick, draw, fill=gray, minimum 
size=6, inner sep=0pt, outer sep=0pt] at (bca) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bcb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bcc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bsa) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bsb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (cca) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (ccb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (ccc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csa) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csd) {}; \node (t2) at (6.75, 2.6) {$\alpha_2 = 1.5$}; \node (t3) at (10.125, 2.6) {$\alpha_1 = 0.75$}; \node at (5, 0) {$Q$}; \node at (5, 2) {$P$}; \epsilonnd{tikzpicture} } \caption{A graph $G = (P, \, Q, \, E)$ and its block partition. A cycle-free matching skeleton is shown with solid edges. Due to the construction of the block partition, edges between $P_2$ and $Q_1$ cannot be part of any matching skeleton. Also, from Lemma \ref{lemma:blockproperties}, no edge can exist between $Q_2$ and $P_1$.} \label{figure:coreset} \epsilonnd{center} \vskip -0.2in \epsilonnd{figure} \begin{remark} The matching skeleton coreset has size at most $|P| + |Q| -1$, as it is always a forest. \epsilonnd{remark} We now describe a special kind of vertex cover, related with the block partition of a graph, which will be a crucial tool in analyzing the quality of our coreset. 
\begin{definition}[Canonical vertex cover] \label{definition:canonicalvc} Given a graph $G = (P, \, Q, \, E)$ together with its block decomposition, we call \[\Phi=\{q\in Q \,|\, \alpha(q) < 1\} \cup \{p\in P \,|\, \alpha(p) \geq 1\}\] the \textbf{canonical vertex cover} of $G$. That is, in each block we take the smaller side of the bipartition. \end{definition} \begin{lemma} \label{lemma:canonicalvc} The canonical vertex cover is a minimum vertex cover. \end{lemma} \begin{proof} First we show that $\Phi$ is indeed a vertex cover. Suppose there exists an edge $\{p,\, q\}$ not adjacent on $\Phi$ and let $p \in P_i$ and $q \in Q_j$. By definition of the canonical vertex cover, this means that $\alpha_j \geq 1 > \alpha_i$ and hence $i < j$ using monotonicity of the expansion levels (Lemma \ref{lemma:blockproperties}). In turn, this implies that $\Gamma(p) \not\subseteq \bigcup_{\ell \leq i} Q_\ell$: a contradiction with Lemma \ref{lemma:blockproperties}.\\ We now proceed to show that $\Phi$ is minimum, by showing that there exists a fractional matching of size $|\Phi|$ (see Corollary~\ref{corollary:m-vc}). We define this fractional matching block-by-block: Consider the block $(P_i,\, Q_i, \, \alpha_i)$. \begin{itemize} \item If $\alpha_i < 1$, then $\Phi\cap (P_i \cup Q_i)$ is exactly $Q_i$. In this case, we can take the $\alpha_i$-matching with respect to $P_i$ as our fractional matching. This will have size $\alpha_i|P_i|=|Q_i|$, exactly as desired. \item On the other hand, if $\alpha_i \geq 1$, then $\Phi\cap (P_i \cup Q_i)$ is exactly $P_i$. Thus, an $\alpha_i$-matching with respect to $P_i$ \emph{scaled down by a factor of $\alpha_i$} is a valid fractional matching of the block, and has size $|P_i|$. \end{itemize} \end{proof} The above deduction also shows that any matching skeleton contains a maximum matching. 
Therefore, the matching skeleton coreset performs at least as well as the maximum matching coreset of \cite{DBLP:conf/spaa/AssadiK17}. In particular, this directly yields a lower bound of $1/3$ on the approximation ratio of our coreset. However, a matching skeleton retains more information from the input graph, as the entire block partition can be recovered from it. This allows for a better approximation ratio as Section \ref{section:main} demonstrates. \begin{remark} Let us draw the parallels between the server flows of \cite{DBLP:conf/soda/BernsteinHR18} and the notion of matching skeleton of \cite{DBLP:conf/soda/GoelKK12}. In the context of \cite{DBLP:conf/soda/BernsteinHR18}, the support of a realization of the balanced server flow is simply a matching skeleton. The balancedness condition corresponds to the neighboring property of Lemma \ref{lemma:blockproperties}. Finally, the server flow values of \cite{DBLP:conf/soda/BernsteinHR18} are exactly the expansion levels. \epsilonnd{remark} Finally, we prove a structural result about the robustness of the block decomposition under changes to the edge-set of $G$. This will be crucial to our proofs in both Sections~\ref{section:main} and~\ref{section:limitations}. \begin{lemma}\label{lemma:robust} Let $G = (P, \, Q, \, E)$ be a graph together with its block decomposition $\{(P_i,\, Q_i, \, \alpha_i)\}_{i\in[k]}$ and $H$ a matching skeleton of $G$. Let $E^+, E^- \subseteq P \times Q$ be two sets of edges such that $E^- \cap H = \epsilonmptyset$ and for any $\{p,\, q\} \in E^+$, $\alpha(p) \geq \alpha(q)$.\\ Denote by $G' = (P, Q, E')$ the modification of $G$ with edge set $E' = (E \cup E^+) \setminus E^-$. The block decomposition of $G'$ is still $\{(P_i,\, Q_i, \, \alpha_i)\}_{i\in[k]}$, and therefore $H$ remains a valid matching skeleton of $G'$. 
\epsilonnd{lemma} \begin{proof} We will use ${\mathcal{G}}amma(S)$, ${\mathcal{G}}amma'(S)$ and ${\mathcal{G}}amma_H(S)$ to denote the neighborhood of some set $S$ in the graphs $G$, $G'$ and $H$ respectively. Consider now the first step of the block decomposition of $G'$. We first prove that no set $S\subseteq P$ has lower expansion than $\alpha_1$ in $G'$. Consider any set $S\subseteq P$. We can lower bound the size of ${\mathcal{G}}amma'(S)$ by ${\mathcal{G}}amma_H(S)$ since $H\subseteq G'$. Moreover, we note that $H$ contains the support of an $\alpha_i$-matching with respect to $P_i$, in block $(P_i\cup Q_i)$, for each $i$. Therefore, the expansion of any subset in $P_i$ is at least $\alpha_i$ and $$|{\mathcal{G}}amma_H(S\cap P_i) \cap Q_i| = |{\mathcal{G}}amma_H(S\cap P_i)|\ge\alpha_i|S\cap P_i|.$$ The equality comes from the fact that a matching skeleton contains no edge crossing two blocks. Using this, we have: $$|{\mathcal{G}}amma'(S)| \ge|{\mathcal{G}}amma_H(S)| \ge\sum_{i=1}^k|{\mathcal{G}}amma_H(S\cap P_i)\cap Q_i| =\sum_{i=1}^k|{\mathcal{G}}amma_H(S\cap P_i)| \ge\sum_{i=1}^k\alpha_i|S\cap P_i| \ge\alpha_1|S|$$ Note that the statement is true with strict inequality when $S\not\subseteq P_1$. On the other hand, the expansion of $P_1$ in $G'$ is exactly $\alpha_1$, as ${\mathcal{G}}amma'(P_1)=Q_1$. This is because $E^+$ cannot have any edge between $P_1$ and $Q\setminus Q_1$.\\ We thus have proven that the first block in the decomposition of $G'$ is $(P_1, \, Q_1, \, \alpha_1)$. One can then proceed by induction on $i$ to prove that the same is true for the $i^\text{th}$ block. The argument is identical to the base case by observing that since $E^+$ cannot have edges between $P_i$ and $\bigcup_{j=i+1}^kQ_j$, it does not increase the expansion of $P_i$. \epsilonnd{proof} \section{Main Result} \label{section:main} Having defined the matching skeleton coreset, we now prove a lower bound of nearly $1/2$ on its effectiveness. 
This improves upon any known lower bound for a randomized composable coreset of size $O(n)$ for the maximum matching problem. \begin{theorem} $\mathbf MatchingSkeleton(G)$ constitutes a $\mathopen{}\mathclose\bgroup\originalleft(1/2-o(1)\aftergroup\egroup\originalright)$-approximate randomized composable coreset for maximum matching in any bipartite graph $G = (P, \, Q, \, E)$ where $k=\omega(1)$ and the maximum matching size $\mm{G}=\omega(k\log n)$. \label{theorem:halfapprox} \epsilonnd{theorem} \begin{proof} Our analysis is inspired by the maximum matching coreset analysis of ~\cite{DBLP:conf/soda/AssadiBBMS19}, however, we achieve a better approximation ratio using more subtle techniques. Let $\mu$ denote $\mm{G}$. Recall that by the definition of randomized composable coresets (Definition~\ref{definition:coreset}) we must randomly edge-partition $G = (P, \, Q, \, E)$ into $k$ subgraphs $G_1,\ldots,G_k$, and show that the union of each coresets, \begin{equation} \widetilde G=\bigcup_{i=1}^k\mathbf MatchingSkeleton(G_i),\label{equation:main} \epsilonnd{equation} has an expected maximum matching size of $\mu\cdot(1/2-o(1))$, over the randomness of the $k$-partition. We begin by choosing an arbitrary maximum matching $M^*$ of $G$. We separate $G$ in two parts: $M^*$ and $G^-:=G\backslash M^*$ for the purposes of analysis, and for every $i=1,\ldots, k,$ let $G^-_i:=G_i\cap G^-$.\\ We will show the stronger statement that even under {\bf adversarial partitioning} of $G^-$, Equation~\ref{equation:main} holds, as long as $M^*$ is partitioned randomly. From now on we will assume that the partition into $G^-_1,\ldots,G^-_k$ is fixed arbitrarily; we will show that either at least one of $G^-_i$ contains a large matching or $M^*\cap\widetilde G$ is large.\\ Consider an arbitrary $k$-partitioning of $G^-$ into $G^-_1,\ldots,G^-_k$ and let the maximum matching size of $G^-_i$ be $\mu^-_i$. If even one of $\mu^-_i$ is at least $\mu/2$, we are done. 
Indeed, following Lemma~\ref{lemma:canonicalvc}, any matching skeleton of $G_i$ will contain a maximum matching, that is a matching of size $\mm{G_i}\ge\mu^-_i\ge\mu/2$, and hence so will $\widetilde G$. Therefore, we can focus on the case where $\max_{i=1}^k\mu^-_i\le\mu/2$ and use the following lemma, which is our main technical contribution:
\begin{lemma}[Main lemma]
\label{lemma:main}
Consider an arbitrary partitioning of $G^-$ where $\max_{i=1}^k\mu^-_i<\mu/2$. Let $e$ be a uniformly random element of $M^*$. Then
\[ \Pr[e\in\widetilde G]\ge1/2-o(1), \]
where the probability is taken over the randomness of the partitioning of $M^*$ as well as the randomness of the choice of $e$.
\end{lemma}
The above lemma relies on a subtle probabilistic argument, and is formulated in terms of a uniformly random edge of $M^*$ for technical reasons. However, an immediate consequence of it is that at least nearly half of the edges of $M^*$ will be taken in $\widetilde G$. This follows by linearity of expectation:
\[ \mathbb{E}\left[\left\vert M^*\cap\widetilde G\right\vert\right] =\mathbb{E}\Bigl[\sum_{e\in M^*}\mathbbm{1}(e\in\widetilde G)\Bigr] =\sum_{e\in M^*} \Pr[e\in\widetilde G] \ge\mu\cdot(1/2-o(1)), \]
where the last inequality follows by Lemma~\ref{lemma:main}. We have proven that Equation~\ref{equation:main} holds under adversarial partitioning of $G^-$ both when $\max_{i=1}^k\mu^-_i\ge\mu/2$ and when $\max_{i=1}^k\mu^-_i<\mu/2$, which implies the statement of the theorem.
\end{proof}
We conclude the analysis of the $\mathbf{MatchingSkeleton}$ coreset by proving Lemma~\ref{lemma:main}.
\begin{proofof}{Lemma~\ref{lemma:main}}
Without loss of generality we may assume that $e\in G_1$. We know that the maximum matching size of $G^-_1$ is at most $\mu/2$. Consider now adding to $G^-_1$ all edges of $M^*\cap G_1$ \emph{except for} $e$.
Since the size of $M^*\cap G_1\backslash\{e\}$ is at most $2\mu/k$ with high probability by Theorem~\ref{theorem:chernoff}, the maximum matching size does not increase by more than $2\mu/k$.

We base our analysis on fixing the outcome of the random graph $G_1\backslash\{e\}$ to be some fixed $H$. We refer to this event that $G_1\backslash\{e\}=H$ as $\mathcal E(H)$. Suppose that indeed the maximum matching size of $H$ is at most $\mu\cdot(1/2+2/k)$, and hence that $H$ has a canonical vertex cover $\Phi$ of this size. Recall from Definition~\ref{definition:canonicalvc} that the canonical vertex cover contains exactly the vertices of $Q$ with $\alpha$-value \emph{strictly less} than one and the vertices of $P$ with $\alpha$-values \emph{at least} one. Therefore, any new edge added to $H$ that is \emph{not} adjacent on $\Phi$ \emph{must be} included in any matching skeleton, as we show in the following paragraph.

Indeed, consider $e=\{p,\, q\}$ to be such an edge, and suppose that there exists some matching skeleton $H$ of $G_1$ where $e$ is not included. This means, by Lemma~\ref{lemma:robust} with $E^-=\{e\}$ and $E^+=\emptyset$, that the block decompositions of $G_1$ and $H$ are identical. However, by definition of the canonical vertex cover $\Phi$ for $H$ and because $p,q \notin \Phi$, we have $\alpha_H(p) < 1 \leq \alpha_H(q)$. This implies that in the block partition of $G_1$, $p \in P_i$ and $q \in Q_j$ with $i < j$, which is a contradiction of Lemma~\ref{lemma:blockproperties}.
Therefore, $e$ is uniformly distributed among at least $\mu\cdot(1-2/k)$ edges, among which at most $\mu\cdot(1/2+2/k)$ are adjacent on $\Phi$: conditioned on $\mathcal E(H)$, where $\mm{H}\le\mu\cdot(1/2+2/k)$, the probability that $e$ is not adjacent on $\Phi$, and therefore $e\in\mathbf{MatchingSkeleton}(G_1)$, is at least $1/2-o(1)$.

The above deduction was made with the assumption that $\mm{H}\le\mu\cdot(1/2+2/k)$. However, recall that this happens with high probability by Theorem~\ref{theorem:chernoff}, therefore we can extend the result to full generality. Consider the possible outcomes of $G_1\backslash\{e\}$ to form the family $\mathcal H$. We can split $\mathcal H$ into the disjoint union of $\mathcal H_0$ and $\mathcal H^*$, where $\mathcal H^*$ comprises the anomalous outcomes where the maximum matching size of $H$ is greater than $\mu\cdot(1/2+2/k)$. Then,
\begin{align*}
\Pr[e\in \mathbf{MatchingSkeleton}(G_1)] &= \sum_{H\in\mathcal H} \Pr[\mathcal E(H)]\cdot\Pr[e\in\mathbf{MatchingSkeleton}(G_1)\mid\mathcal E(H)]\\
&\ge \sum_{H\in\mathcal H_0} \Pr[\mathcal E(H)]\cdot\Pr[e\in\mathbf{MatchingSkeleton}(G_1)\mid\mathcal E(H)]\\
&\ge \sum_{H\in\mathcal H_0} \Pr[\mathcal E(H)]\cdot(1/2-o(1))\\
&=\Pr[G_1\backslash\{e\}\not\in\mathcal H^*]\cdot(1/2-o(1))\\
&\ge 1/2-o(1),
\end{align*}
as desired.
\epsilonnd{proofof} \begin{figure}[ht] \vskip 0.2in \begin{center} \scalebox{0.9}{ \begin{tikzpicture} \coordinate (aca) at (0, 0); \coordinate (acb) at (0.75, 0); \coordinate (acc) at (1.5, 0); \coordinate (acd) at (2.25, 0); \coordinate (ace) at (3, 0); \coordinate (acf) at (3.75, 0); \coordinate (acg) at (4.5, 0); \coordinate (asa) at (1.125, 2); \coordinate (asb) at (1.875, 2); \coordinate (asc) at (2.625, 2); \coordinate (asd) at (3.375, 2); \coordinate (bca) at (6, 0); \coordinate (bcb) at (6.75, 0); \coordinate (bcc) at (7.5, 0); \coordinate (bsa) at (6.375, 2); \coordinate (bsb) at (7.125, 2); \coordinate (cca) at (9.375, 0); \coordinate (ccb) at (10.125, 0); \coordinate (ccc) at (10.875, 0); \coordinate (csa) at (9, 2); \coordinate (csb) at (9.75, 2); \coordinate (csc) at (10.5, 2); \coordinate (csd) at (11.25, 2); \draw[fill=red!10, rounded corners] (-0.6, -0.3) -- (5.1, -0.3) -- (3.675, 2.3) -- (0.825, 2.3)-- cycle; \draw[fill=red!10, rounded corners] (5.4, -0.3) -- (8.1, -0.3) -- (7.425, 2.3) -- (6.075, 2.3)-- cycle; \draw[fill=red!10, rounded corners] (9.075, -0.3) -- (11.175, -0.3) -- (11.85, 2.3) -- (8.4, 2.3)-- cycle; \path[draw=black, thick] (acc) -- (csa); \path[draw=black, thick] (ace) -- (csb); \path[draw=black, thick] (bcb) -- (csc); \path[draw=black, thick] (bcc) -- (csd); \path[draw=black, thick, dotted] (aca) -- (asa); \path[draw=black, thick, dotted] (bca) -- (asc); \path[draw=black, thick, dotted] (cca) -- (asd); \path[draw=black, thick, dotted] (ccb) -- (bsa); \path[draw=black, thick, dotted] (ccc) -- (bsb); \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (aca) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (acb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (acc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (acd) {}; \node[circle, thick, draw, fill=gray, minimum 
size=6, inner sep=0pt, outer sep=0pt] at (ace) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (acf) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (acg) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (asa) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (asb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (asc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (asd) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bca) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bcb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bcc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bsa) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (bsb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (cca) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (ccb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (ccc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csa) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csb) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csc) {}; \node[circle, thick, draw, fill=gray, minimum size=6, inner sep=0pt, outer sep=0pt] at (csd) {}; \draw[decoration={brace}, decorate, thick] (0.825, 2.4) -- node [above, pos=0.5] {$\mathbf Phi$} (3.675, 2.4); \draw[decoration={brace}, decorate, thick] (6.075, 2.4) -- node [above, pos=0.5] {$\mathbf 
Phi$} (7.425, 2.4); \draw[decoration={brace, mirror}, decorate, thick] (9.075, -0.4) -- node [below, pos=0.5] {$\mathbf Phi$} (11.175, -0.4); \node (t1) at (2.25, -0.6) {$\alpha = 1.75$}; \node (t2) at (6.75, -0.6) {$\alpha = 1.5$}; \node (t3) at (10.125, 2.6) {$\alpha = 0.75$}; \node at (-1, 0) {$Q$}; \node at (-1, 2) {$P$}; \epsilonnd{tikzpicture}} \caption{A visual representation of the block decomposition of $H$ as trapezoids together with edges of $M^\star \setminus H$. If $e$ turns out to be one of the solid edges it {\it must} be taken into $\mathbf MatchingSkeleton(G_1)$; however, if it is one of the dotted edges, it might not be.} \label{figure:mainproof} \epsilonnd{center} \vskip -0.2in \epsilonnd{figure} \section{Limitations of the matching skeleton coreset}\label{section:limitations} In this section we show the limits of any matching skeleton as a randomized composable coreset for maximum matching by constructing a pathological bipartite graph on which it only preserves the maximum matching size to a factor of $2/3$. \begin{theorem} \label{theorem:main-upper} For large enough $n$ and $k$ such that $k=O(n/\log n)$, $k=\omega(1)$, there exists a bipartite graph $G$ on $n$ vertices with maximum matching size $\mu$, for which the maximum matching size of $$\widetilde G=\bigcup_{i=1}^k\mathbf MatchingSkeleton(G_i)$$ is at most $\mu\cdot(2/3+o(1))$ with high probability. \epsilonnd{theorem} \begin{remark} Note that here the high probability is over the randomness of the partition. The choice of the matching skeleton is considered to be adversarial in each subgraph, among the multiple possible valid choices. \epsilonnd{remark} We begin by defining the graph $G = (P, \, Q, \, E)$. The construction follows the ideas used in\cite{DBLP:conf/soda/AssadiBBMS19} to prove an upper bound on the performance of the maximum matching coreset. 
Let the vertex-set of $G$ consist of six parts: $P_1$, $P_2$, and $P_3$ make up $P$ on one side of the bipartition and $Q_1$, $Q_2$, and $Q_3$ make up $Q$ on the other side. Let the sizes of $P_1$, $P_2$, $Q_2$, and $Q_3$ be $r$, and let the sizes of $Q_1$ and $P_3$ be $r+2r/k$, where $r$ is some parameter such that $6r+4r/k=n$. The edge-set $E$ is comprised of the following: \begin{itemize} \item A perfect matching between all of $P_1$ and a subset of $Q_1$, \item a complete bipartite graph between $Q_1$ and $P_2$, \item a perfect matching between $P_2$ and $Q_2$, \item a complete bipartite graph between $Q_2$ and $P_3$, \item and a perfect matching between a subset of $P_3$ and all of $Q_3$. \epsilonnd{itemize} The graph is pictured in Figure \ref{figure:zzgraph}. The analysis of the behavior of $\mathbf MatchingSkeleton$ on this graph relies on the observation that in a typical subsampled version, $P_1\cup Q_1\cup P_2$ forms a region of $\alpha$-value at least $1$ while $Q_2\cup P_3\cup Q_3$ forms a region of $\alpha$-value at most $1$. This means that the edges sampled between $P_2$ and $Q_2$ need not be taken into the matching skeleton, which further implies that $\widetilde G$ can be missing the entire $(P_2,Q_2)$ matching.\\ In order to prove this we will need the following basic property of expansion levels. One side of the lemma has been previously shown in \cite{DBLP:conf/soda/BernsteinHR18}. \begin{lemma} \label{lemma:upper-lemma} Consider a bipartite graph $G = (P, \, Q, \, E)$. \begin{itemize} \item If $P$ can be perfectly matched to $Q$, then $\min\alpha\ge1$. \item Conversely, if $Q$ can be perfectly matched to $P$, then $\max\alpha\le1$. 
\end{itemize}
\end{lemma}
\begin{proof}
By optimality of the canonical vertex cover (Lemma~\ref{lemma:canonicalvc}), and by Theorem~\ref{thm:vc=mm}, we have that the size of the maximum matching is
\[ \sum_{i=1}^k\begin{cases}|P_i| & \text{if $\alpha_i\ge1$,}\\ |Q_i| & \text{if $\alpha_i < 1$.}\end{cases} \]
In the first case of the lemma, $\mm{G}=|P|=\sum_{i=1}^k|P_i|$, therefore $\alpha_i$ must always be at least $1$. In the second case, $\mm{G}=|Q|=\sum_{i=1}^k|Q_i|$, therefore $\alpha_i$ must always be at most $1$.
\end{proof}
Finally, we state a result on perfect matchings in random bipartite graphs. This is a simplification, and a direct consequence, of Corollary 7.13 from \cite{PERFECT_MATCHING_IN_RANDOM}.
\begin{theorem}
\label{theorem:BB}
Let $H$ be a random bipartite graph on $n+n$ vertices, where each of the $n^2$ possible edges appears independently with probability $p=\Omega(\log n/n)$. Then $H$ contains a perfect matching with high probability.
\end{theorem}
We are ready to prove Theorem~\ref{theorem:main-upper}.
\begin{proofof}{Theorem~\ref{theorem:main-upper}}
Consider $G_i=(P,\, Q,\, E_i)$, the graph $G$ sub-sampled at rate $1/k$. We claim that with high probability the non-isolated vertices of $P_1\cup P_2$ can be perfectly matched to $Q_1$. Indeed, out of $r$ edges of $P_1\times Q_1$, we expect $r/k$ of them to appear in $G_i$, and with high probability no more than $2r/k$ do (see Theorem~\ref{theorem:chernoff}). In this case, at least $r$ unmatched vertices of $Q_1$ remain, which we will call $Q_1'$. Note that the graph between $Q_1'$ and $P_2$ follows the same distribution as the random graph described in Theorem~\ref{theorem:BB}, with $p=1/k=\Omega(\log n/n)$. Therefore, $(Q_1'\times P_2)\cap E_i$ contains a perfect matching with high probability.

By Lemma~\ref{lemma:upper-lemma}, this means that the subgraph induced by $P_1\cup Q_1\cup P_2$ in $G_i$ has block decomposition with all $\alpha\ge1$.
By similar reasoning we can show that the non-isolated vertices of $Q_2\cup Q_3$ can be perfectly matched to $P_3$. Hence, by Lemma~\ref{lemma:upper-lemma}, the induced subgraph of $Q_2\cup P_3\cup Q_3$ in $G_i$ has a block decomposition with all $\alpha\le1$.

Simply taking the disjoint union of these two induced subgraphs does not change the expansion levels. Hence the graph $G_i^-$, consisting of all edges of $G_i$ \emph{except those between $P_2$ and $Q_2$}, has block decomposition with the $\alpha$ values of $P_1$, $Q_1$, and $P_2$ being at least $1$, and the $\alpha$ values of $Q_2$, $P_3$ and $Q_3$ being at most $1$. Let $H$ be a matching skeleton of $G_i^-$. By applying Lemma~\ref{lemma:robust} with $E^-=\emptyset$ and $E^+=E_i\cap (P_2\times Q_2)$, we get that $H$ is still a matching skeleton of $G_i$. Therefore, there exists a matching skeleton of $G_i$ which contains no edges from $P_2\times Q_2$.

In conclusion, it is possible that each coreset selects a matching skeleton of its sub-graph containing no edges from $Q_2 \times P_2$. In such a case, the maximum matching of $\widetilde{G}$ has size at most $2r + 4r/k$, whereas that of $G$ was $3r$.
\end{proofof}
\begin{remark}
With a simple alteration to the proof, it can be shown that this upper bound holds even when the individual matching skeletons are selected arbitrarily.
\epsilonnd{remark} \begin{figure}[ht] \vskip 0.2in \begin{center} \scalebox{0.9}{ \begin{tikzpicture} \path[draw, thick] (-0.30, 0) -- (-0.30, -4); \path[draw, thick] (-0.15, 0) -- (-0.15, -4); \path[draw, thick] (0, 0) -- (0, -4); \path[draw, thick] (0.15, 0) -- (0.15, -4); \path[draw, thick] (0.30, 0) -- (0.30, -4); \path[draw, thick] (4.70, 0) -- (4.7, -4); \path[draw, thick] (4.85, 0) -- (4.85, -4); \path[draw, thick] (5, 0) -- (5, -4); \path[draw, thick] (5.15, 0) -- (5.15, -4); \path[draw, thick] (5.30, 0) -- (5.30, -4); \path[draw, thick] (9.70, 0) -- (9.7, -4); \path[draw, thick] (9.85, 0) -- (9.85, -4); \path[draw, thick] (10, 0) -- (10, -4); \path[draw, thick] (10.15, 0) -- (10.15, -4); \path[draw, thick] (10.30, 0) -- (10.30, -4); \draw[fill=gray, thick, rotate around={-40:(2.5, -2)}] (-1,-1.6) rectangle node[rotate=-40] {complete bipartite} (6,-2.4); \draw[fill=gray, thick, rotate around={-40:(7.5, -2)}] (4, -1.6) rectangle node[rotate=-40] {complete bipartite} (11,-2.4); \path[draw, dashed] (-2, -1.4) -- (0.5, -1.4); \path[draw, dashed] (-2, -2.6) -- (0.5, -2.6); \node[rectangle, align=center, minimum width=2, minimum height=1] at (-1.3, -2) {matching\\ of size $n$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=2cm, minimum height=2cm, align=center] (t1) at (0, 0) {$P_3$ \\ $r+ \frac{2r}{k}$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=2cm, minimum height=2cm, align=center] (t2) at (5, 0) {$P_2$ \\ $r$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=2cm, minimum height=2cm, align=center] (t3) at (10, 0) {$P_1$ \\ $r$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=2cm, minimum height=2cm, align=center] (b1) at (0, -4) {$Q_3$ \\ $r$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum width=2cm, minimum height=2cm, align=center] (b2) at (5, -4) {$Q_2$ \\ $r$}; \node[cloud, cloud puffs=13, draw, thick, fill=data_color, minimum 
width=2cm, minimum height=2cm, align=center] (b3) at (10, -4) {$Q_1$ \\ $r+ \frac{2r}{k}$}; \epsilonnd{tikzpicture} } \caption{The pathological graph. A typical sub-sampling of the graph has a matching skeleton that does not contain any edges of $Q_2 \times P_2$.} \label{figure:zzgraph} \epsilonnd{center} \vskip -0.2in \epsilonnd{figure} \begin{thebibliography}{99} \bibitem{DBLP:conf/soda/AssadiB19} Sepehr Assadi and Aaron Bernstein. \newblock Towards a unified theory of sparsification for matching problems. \newblock In Jeremy~T. Fineman and Michael Mitzenmacher, editors, {\epsilonm 2nd Symposium on Simplicity in Algorithms, SOSA@SODA 2019, January 8-9, 2019 - San Diego, CA, {USA}}, volume~69 of {\epsilonm {OASICS}}, pages 11:1--11:20. Schloss Dagstuhl - Leibniz-Zentrum f{\"{u}}r Informatik, 2019. \bibitem{DBLP:conf/soda/AssadiBBMS19} Sepehr Assadi, MohammadHossein Bateni, Aaron Bernstein, Vahab~S. Mirrokni, and Cliff Stein. \newblock Coresets meet {EDCS:} algorithms for matching and vertex cover on massive graphs. \newblock In Timothy~M. Chan, editor, {\epsilonm Proceedings of the Thirtieth Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2019, San Diego, California, USA, January 6-9, 2019}, pages 1616--1635. {SIAM}, 2019. \bibitem{DBLP:conf/icml/AssadiBM19} Sepehr Assadi, MohammadHossein Bateni, and Vahab~S. Mirrokni. \newblock Distributed weighted matching via randomized composable coresets. \newblock In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, {\epsilonm Proceedings of the 36th International Conference on Machine Learning, {ICML} 2019, 9-15 June 2019, Long Beach, California, {USA}}, volume~97 of {\epsilonm Proceedings of Machine Learning Research}, pages 333--343. {PMLR}, 2019. \bibitem{DBLP:conf/soda/AhnGM12} Kook~Jin Ahn, Sudipto Guha, and Andrew McGregor. \newblock Analyzing graph structure via linear measurements. 
\newblock In Yuval Rabani, editor, {\epsilonm Proceedings of the Twenty-Third Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2012, Kyoto, Japan, January 17-19, 2012}, pages 459--467. {SIAM}, 2012. \bibitem{DBLP:conf/pods/AhnGM12} Kook~Jin Ahn, Sudipto Guha, and Andrew McGregor. \newblock Graph sketches: sparsification, spanners, and subgraphs. \newblock In Michael Benedikt, Markus Kr{\"{o}}tzsch, and Maurizio Lenzerini, editors, {\epsilonm Proceedings of the 31st {ACM} {SIGMOD-SIGACT-SIGART} Symposium on Principles of Database Systems, {PODS} 2012, Scottsdale, AZ, USA, May 20-24, 2012}, pages 5--14. {ACM}, 2012. \bibitem{DBLP:conf/spaa/AssadiK17} Sepehr Assadi and Sanjeev Khanna. \newblock Randomized composable coresets for matching and vertex cover. \newblock In Christian Scheideler and Mohammad~Taghi Hajiaghayi, editors, {\epsilonm Proceedings of the 29th {ACM} Symposium on Parallelism in Algorithms and Architectures, {SPAA} 2017, Washington DC, USA, July 24-26, 2017}, pages 3--12. {ACM}, 2017. \bibitem{DBLP:conf/soda/AssadiKLY16} Sepehr Assadi, Sanjeev Khanna, Yang Li, and Grigory Yaroslavtsev. \newblock Maximum matchings in dynamic graph streams and the simultaneous communication model. \newblock In Robert Krauthgamer, editor, {\epsilonm Proceedings of the Twenty-Seventh Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2016, Arlington, VA, USA, January 10-12, 2016}, pages 1345--1364. {SIAM}, 2016. \bibitem{DBLP:books/daglib/0021015} Noga Alon and Joel~H. Spencer. \newblock {\epsilonm The Probabilistic Method, Third Edition}. \newblock Wiley-Interscience series in discrete mathematics and optimization. Wiley, 2008. \bibitem{DBLP:conf/nips/BateniBLM14} MohammadHossein Bateni, Aditya Bhaskara, Silvio Lattanzi, and Vahab~S. Mirrokni. \newblock Distributed balanced clustering via mapping coresets. \newblock In Zoubin Ghahramani, Max Welling, Corinna Cortes, Neil~D. Lawrence, and Kilian~Q. 
Weinberger, editors, {\epsilonm Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems 2014, December 8-13 2014, Montreal, Quebec, Canada}, pages 2591--2599, 2014. \bibitem{DBLP:conf/nips/BalcanEL13} Maria{-}Florina Balcan, Steven Ehrlich, and Yingyu Liang. \newblock Distributed k-means and k-median clustering on general communication topologies. \newblock In Christopher J.~C. Burges, L{\'{e}}on Bottou, Zoubin Ghahramani, and Kilian~Q. Weinberger, editors, {\epsilonm Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States}, pages 1995--2003, 2013. \bibitem{PERFECT_MATCHING_IN_RANDOM} B\'ela Bollob\'as and Alan~M. Frieze. \newblock On matchings and hamiltonian cycles in random graphs. \newblock In Michael, Karo\'nski and Andrzej Ruci\'nski, editors, {\epsilonm Random Graphs '83}, volume 118 of {\epsilonm North-Holland Mathematics Studies}, pages 23 -- 46. North-Holland, 1985. \bibitem{DBLP:journals/algorithmica/BulteauFKP16} Laurent Bulteau, Vincent Froese, Konstantin Kutzkov, and Rasmus Pagh. \newblock Triangle counting in dynamic graph streams. \newblock {\epsilonm Algorithmica}, 76(1):259--278, 2016. \bibitem{DBLP:conf/stoc/BhattacharyaHNT15} Sayan Bhattacharya, Monika Henzinger, Danupon Nanongkai, and Charalampos~E. Tsourakakis. \newblock Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams. \newblock In Rocco~A. Servedio and Ronitt Rubinfeld, editors, {\epsilonm Proceedings of the Forty-Seventh Annual {ACM} on Symposium on Theory of Computing, {STOC} 2015, Portland, OR, USA, June 14-17, 2015}, pages 173--182. {ACM}, 2015. \bibitem{DBLP:conf/soda/BernsteinHR18} Aaron Bernstein, Jacob Holm, and Eva Rotenberg. \newblock Online bipartite matching with amortized replacements. 
\newblock In Artur Czumaj, editor, {\epsilonm Proceedings of the Twenty-Ninth Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2018, New Orleans, LA, USA, January 7-10, 2018}, pages 947--959. {SIAM}, 2018. \bibitem{DBLP:conf/kdd/BadanidiyuruMKK14} Ashwinkumar Badanidiyuru, Baharan Mirzasoleiman, Amin Karbasi, and Andreas Krause. \newblock Streaming submodular maximization: massive data summarization on the fly. \newblock In Sofus~A. Macskassy, Claudia Perlich, Jure Leskovec, Wei Wang, and Rayid Ghani, editors, {\epsilonm The 20th {ACM} {SIGKDD} International Conference on Knowledge Discovery and Data Mining, {KDD} '14, New York, NY, {USA} - August 24 - 27, 2014}, pages 671--680. {ACM}, 2014. \bibitem{DBLP:journals/corr/ChitnisCEHMMV15} Rajesh~Hemant Chitnis, Graham Cormode, Hossein Esfandiari, MohammadTaghi Hajiaghayi, Andrew McGregor, Morteza Monemizadeh, and Sofya Vorotnikova. \newblock Kernelization via sampling with applications to dynamic graph streams. \newblock {\epsilonm CoRR}, abs/1505.01731, 2015. \bibitem{DBLP:conf/soda/ChitnisCEHMMV16} Rajesh Chitnis, Graham Cormode, Hossein Esfandiari, MohammadTaghi Hajiaghayi, Andrew McGregor, Morteza Monemizadeh, and Sofya Vorotnikova. \newblock Kernelization via sampling with applications to finding matchings and related problems in dynamic graph streams. \newblock In Robert Krauthgamer, editor, {\epsilonm Proceedings of the Twenty-Seventh Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2016, Arlington, VA, USA, January 10-12, 2016}, pages 1326--1344. {SIAM}, 2016. \bibitem{DBLP:conf/stoc/CzumajLMMOS18} Artur Czumaj, Jakub Lacki, Aleksander Madry, Slobodan Mitrovic, Krzysztof Onak, and Piotr Sankowski. \newblock Round compression for parallel matching algorithms. 
\newblock In Ilias Diakonikolas, David Kempe, and Monika Henzinger, editors, {\epsilonm Proceedings of the 50th Annual {ACM} {SIGACT} Symposium on Theory of Computing, {STOC} 2018, Los Angeles, CA, USA, June 25-29, 2018}, pages 471--484. {ACM}, 2018. \bibitem{DBLP:journals/corr/abs-2007-14204} Arnold Filtser, Michael Kapralov, and Navid Nouri. \newblock Graph spanners by sketching in dynamic streams and the simultaneous communication model. \newblock {\epsilonm CoRR}, abs/2007.14204, 2020. \bibitem{DBLP:conf/soda/GoelKK12} Ashish Goel, Michael Kapralov, and Sanjeev Khanna. \newblock On the communication and streaming complexity of maximum bipartite matching. \newblock In Yuval Rabani, editor, {\epsilonm Proceedings of the Twenty-Third Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2012, Kyoto, Japan, January 17-19, 2012}, pages 468--485. {SIAM}, 2012. \bibitem{DBLP:conf/spaa/HarveyLL18} Nicholas J.~A. Harvey, Christopher Liaw, and Paul Liu. \newblock Greedy and local ratio algorithms in the mapreduce model. \newblock In Christian Scheideler and Jeremy~T. Fineman, editors, {\epsilonm Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures, {SPAA} 2018, Vienna, Austria, July 16-18, 2018}, pages 43--52. {ACM}, 2018. \bibitem{DBLP:conf/pods/IndykMMM14} Piotr Indyk, Sepideh Mahabadi, Mohammad Mahdian, and Vahab~S. Mirrokni. \newblock Composable core-sets for diversity and coverage maximization. \newblock In Richard Hull and Martin Grohe, editors, {\epsilonm Proceedings of the 33rd {ACM} {SIGMOD-SIGACT-SIGART} Symposium on Principles of Database Systems, PODS'14, Snowbird, UT, USA, June 22-27, 2014}, pages 100--108. {ACM}, 2014. \bibitem{DBLP:conf/focs/KapralovLMMS14} Michael Kapralov, Yin~Tat Lee, Cameron Musco, Christopher Musco, and Aaron Sidford. \newblock Single pass spectral sparsification in dynamic streams. 
\newblock In {\epsilonm 55th {IEEE} Annual Symposium on Foundations of Computer Science, {FOCS} 2014, Philadelphia, PA, USA, October 18-21, 2014}, pages 561--570. {IEEE} Computer Society, 2014. \bibitem{DBLP:conf/approx/KonradMM12} Christian Konrad, Fr{\'{e}}d{\'{e}}ric Magniez, and Claire Mathieu. \newblock Maximum matching in semi-streaming with few passes. \newblock In Anupam Gupta, Klaus Jansen, Jos{\'{e}} D.~P. Rolim, and Rocco~A. Servedio, editors, {\epsilonm Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques - 15th International Workshop, {APPROX} 2012, and 16th International Workshop, {RANDOM} 2012, Cambridge, MA, USA, August 15-17, 2012. Proceedings}, volume 7408 of {\epsilonm Lecture Notes in Computer Science}, pages 231--242. Springer, 2012. \bibitem{DBLP:conf/esa/Konrad15} Christian Konrad. \newblock Maximum matching in turnstile streams. \newblock In Nikhil Bansal and Irene Finocchi, editors, {\epsilonm Algorithms - {ESA} 2015 - 23rd Annual European Symposium, Patras, Greece, September 14-16, 2015, Proceedings}, volume 9294 of {\epsilonm Lecture Notes in Computer Science}, pages 840--852. Springer, 2015. \bibitem{DBLP:conf/podc/KapralovW14} Michael Kapralov and David~P. Woodruff. \newblock Spanners and sparsifiers in dynamic streams. \newblock In Magn{\'{u}}s~M. Halld{\'{o}}rsson and Shlomi Dolev, editors, {\epsilonm {ACM} Symposium on Principles of Distributed Computing, {PODC} '14, Paris, France, July 15-18, 2014}, pages 272--281. {ACM}, 2014. \bibitem{DBLP:conf/spaa/LattanziMSV11} Silvio Lattanzi, Benjamin Moseley, Siddharth Suri, and Sergei Vassilvitskii. \newblock Filtering: a method for solving graph problems in mapreduce. \newblock In Rajmohan Rajaraman and Friedhelm~Meyer auf~der Heide, editors, {\epsilonm {SPAA} 2011: Proceedings of the 23rd Annual {ACM} Symposium on Parallelism in Algorithms and Architectures, San Jose, CA, USA, June 4-6, 2011 (Co-located with {FCRC} 2011)}, pages 85--94. {ACM}, 2011. 
\bibitem{DBLP:conf/approx/McGregor05} Andrew McGregor. \newblock Finding graph matchings in data streams. \newblock In Chandra Chekuri, Klaus Jansen, Jos{\'{e}} D.~P. Rolim, and Luca Trevisan, editors, {\epsilonm Approximation, Randomization and Combinatorial Optimization, Algorithms and Techniques, 8th International Workshop on Approximation Algorithms for Combinatorial Optimization Problems, {APPROX} 2005 and 9th InternationalWorkshop on Randomization and Computation, {RANDOM} 2005, Berkeley, CA, USA, August 22-24, 2005, Proceedings}, volume 3624 of {\epsilonm Lecture Notes in Computer Science}, pages 170--181. Springer, 2005. \bibitem{DBLP:conf/nips/MirzasoleimanKSK13} Baharan Mirzasoleiman, Amin Karbasi, Rik Sarkar, and Andreas Krause. \newblock Distributed submodular maximization: Identifying representative elements in massive data. \newblock In Christopher J.~C. Burges, L{\'{e}}on Bottou, Zoubin Ghahramani, and Kilian~Q. Weinberger, editors, {\epsilonm Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States}, pages 2049--2057, 2013. \bibitem{DBLP:conf/mfcs/McGregorTVV15} Andrew McGregor, David Tench, Sofya Vorotnikova, and Hoa~T. Vu. \newblock Densest subgraph in dynamic graph streams. \newblock In Giuseppe~F. Italiano, Giovanni Pighizzini, and Donald Sannella, editors, {\epsilonm Mathematical Foundations of Computer Science 2015 - 40th International Symposium, {MFCS} 2015, Milan, Italy, August 24-28, 2015, Proceedings, Part {II}}, volume 9235 of {\epsilonm Lecture Notes in Computer Science}, pages 472--482. Springer, 2015. \bibitem{DBLP:conf/stoc/MirrokniZ15} Vahab~S. Mirrokni and Morteza Zadimoghaddam. \newblock Randomized composable core-sets for distributed submodular maximization. \newblock In Rocco~A. 
Servedio and Ronitt Rubinfeld, editors, {\epsilonm Proceedings of the Forty-Seventh Annual {ACM} on Symposium on Theory of Computing, {STOC} 2015, Portland, OR, USA, June 14-17, 2015}, pages 153--162. {ACM}, 2015. \bibitem{DBLP:journals/talg/PazS19} Ami Paz and Gregory Schwartzman. \newblock A (2+$\epsilonpsilon$)-approximation for maximum weight matching in the semi-streaming model. \newblock {\epsilonm {ACM} Trans. Algorithms}, 15(2):18:1--18:15, 2019. \epsilonnd{thebibliography} \epsilonnd{document}
\begin{document}

\title[Provability logic: models within models in Peano Arithmetic]{Provability logic: models within models in Peano Arithmetic}

\author{Alessandro Berarducci and Marcello Mamino}
\thanks{Partially supported by the Italian research project PRIN 2017, ``Mathematical logic: models, sets, computability'', Prot. 2017NWTM8RPRIN}
\address{Dipartimento di Matematica, Università di Pisa, Largo Bruno Pontecorvo 5, 56127 Pisa, Italy}
\email{[email protected]}
\address{Dipartimento di Matematica, Università di Pisa, Largo Bruno Pontecorvo 5, 56127 Pisa, Italy}
\email{[email protected]}
\keywords{Provability logic, Peano Arithmetic, incompleteness theorems, modal logic}

\begin{abstract}
In 1994 Jech gave a model-theoretic proof of G\"odel's second incompleteness theorem for Zermelo-Fraenkel set theory in the following form: $\mathrm{ZF}$ does not prove that $\mathrm{ZF}$ has a model. Kotlarski showed that Jech's proof can be adapted to Peano Arithmetic with the role of models being taken by complete consistent extensions. In this note we take another step in the direction of replacing proof-theoretic by model-theoretic arguments. We show, without the need of formalizing the proof of the completeness theorem within $\PA$, that the existence of a model of $\PA$ of complexity $\Sigma^0_2$ is independent of $\PA$, where a model is identified with the set of formulas with parameters which hold in the model. Our approach is based on a new interpretation of the provability logic of Peano Arithmetic where $\Box \phi$ is defined as the formalization of ``$\phi$ is true in every $\Sigma^0_2$-model''.
\end{abstract}

\maketitle

\begin{minipage}{\textwidth}
\tableofcontents
\end{minipage}
\vskip2\baselineskip

\section{Introduction}

The precise statement of G\"odel's second incompleteness theorem, informally that $\PA$ cannot prove its own consistency, depends upon the choice of an arithmetization of the sentence ``$\PA$ is consistent''.
G\"odel, sketching the proof in his seminal 1931 paper~\cite{Godel31}, elected to formalize consistency as syntactic consistency. This is by no means the only reasonable choice, as demonstrated by Thomas Jech's remarkably short proof~\cite{Jech1994} of a model-theoretic version of the theorem for~$\mathrm{ZF}$. Namely that $\mathrm{ZF}$ cannot prove that~$\mathrm{ZF}$ has a model. For arithmetic, Jech shows how to transfer his $\mathrm{ZF}$ argument to~$\PA$ by means of a conservativity result~\cite[Remark 2]{Jech1994}. Then, work by Kotlarski adapts Jech's technique~\cite[\S~3.7]{Kotlarski2019} to obtain a direct proof: the idea is to replace models with complete theories and use the Hilbert-Bernays arithmetized completeness theorem. In this note, we take another step in the direction of replacing proof-theoretic by model-theoretic arguments: we will intend consistency to mean that $\PA$ has models of arithmetic complexity $\Sigma^0_2$. Taking advantage of the fact that $\PA$ has partial truth predicates for formulas of bounded complexity, we show that the existence of a model of $\PA$ of complexity $\Sigma^0_2$ is independent of $\PA$ (Theorem \ref{thm:main}), where a model is identified with the set of formulas with parameters which are true in the model. The presence of parameters is what makes it possible to express Tarski's truth conditions and do away with the arithmetized completeness theorem, as well as any formalized notion of syntactic consistency. For the reader who might be interested in comparing our approach to other proofs of G\"odel's incompleteness theorems, we may suggest~\cite{Kotlarski2004,Kotlarski2019,Kaye1991}. In our approach, we first define a $\Pi^0_3$~predicate $\fmod(x)$ expressing the fact that $x$ is a code for a $\Sigma^0_2$-model of $\PA$.
We then consider an arithmetical interpretation of modal logic where $\Box \phi$ formalizes the fact that the formula $\phi$ holds in every $\Sigma^0_2$-model of $\PA$. The formula $\Box \phi$ is in fact provably equivalent to the $\Sigma^0_1$ formalization of the provability predicate ``$\PA\vdash \phi$'', but since in our formalization we want to avoid the syntactic notion of provability, we are not going to use this fact. Thus, on the face of it, $\Box \phi$ has complexity $\Pi^0_4$. Under our interpretation of the modal operator, $\lnot \Box \perp$ says that there is a $\Sigma^0_2$-model of $\PA$, and we will prove that this statement is independent of $\PA$ reasoning as follows. The crucial step is to verify L\"ob's derivability conditions \cite{Lob1995} for our interpretation of the modal operator $\Box$, i.e.\ we need to prove: \begin{enumerate} \item $\PA \vdash \phi \implies \PA \vdash \Box \phi$ \item $\PA \vdash \Box \phi \to \Box \Box \phi$ \item $\PA \vdash \Box (\phi \to \psi) \to (\Box \phi \to \Box \psi)$ \end{enumerate} Here and throughout the paper we write $\PA\vdash \theta$ to mean that $\theta$ is true in any model of $\PA$ (and we write $M\models T$ to mean that $M$ is a model of $T$). The modal counterparts of 1.--3. form the basis of the so-called ``provability logic'' \cite{Solovay1976,Boolos1994}. From 1.--3. and the fixed point theorem one can derive $\PA\vdash \Box(\Box \phi\to \phi) \to \Box \phi$, whose modal counterpart is also an axiom of provability logic, see for instance \cite{Verbrugge2017}. Under our interpretation, the proof of 3. is straightforward. To prove 1. suppose there is a model $X$ of $\PA$ where $\Box \phi$ fails. We need to find a model $Z\models \PA$ where $\phi$ fails. We can assume that $X$ is countable and has domain $\mathbb N$.
By definition there is $y\in X$ such that $X \models \fmod(y)$ and $X\models \text{``}y\models \lnot \phi\text{''}$, namely $X$ thinks that $y$ is a code of a $\Sigma^0_2$-model where $\phi$ fails. Given $X$ and $y$ we are able to construct a model $Z\models \PA$ (with domain $\mathbb N$) which satisfies exactly those formulas with parameters $\varphi[s]$ such that $X \models \text{``}y \models \varphi[s]\text{''}$. In particular $Z\models \lnot \phi$, thus concluding the proof of 1. Point 2. is the arithmetization of 1., namely we show that there is a function $x,y\mapsto {}^xy$ (of complexity $\Pi^0_3$) which maps, provably in $\PA$, a code $x$ of a $\Sigma^0_2$-model $X$ and a $y$ such that $X\models \fmod(y)$, into a code of a $\Sigma^0_2$-model $Z$ as above (the most delicate part is the mechanism to handle non-standard formulas with a non-standard number of parameters). Granted the derivability conditions, we obtain the unprovability of $\lnot \Box \perp$ by standard methods: we define $G$ such that $\PA\vdash G \leftrightarrow \lnot \Box G$ and we show that $G$ is unprovable and equivalent to $\lnot \Box \perp$. Finally, we show \begin{enumerate} \item[4.] $\mathbb N \models \Box \phi \implies \PA \vdash \phi$ \end{enumerate} (the opposite direction follows from 1.) and we deduce that the negation of $G$ is also unprovable, hence $\lnot \Box \perp$ is independent of $\PA$. This means that the existence of a model of complexity $\Sigma^0_2$ is independent of $\PA$. For the proof of 4. suppose that $\PA\not\vdash \phi$. Then there is a $\Sigma^0_2$-model $M$ of $\PA$ where $\phi$ fails (for a model-theoretic proof of this fact see Fact \ref{fact:kleene}). A code $m\in \mathbb N$ of $M$ witnesses the fact that $\mathbb N\not\models \Box \phi$.
\section{Primitive recursive functions} \label{sect:natural} The language of $\PA$ has function symbols $0,S,+,\cdot$ for zero, successor, addition, and multiplication. The axioms of $\PA$ are those of Robinson's arithmetic $\theory Q$ plus the first-order induction scheme. The standard model of $\PA$ is the set $\mathbb N$ of natural numbers with the usual interpretation of the symbols. If $t$ is a closed term of $\PA$ and $M$ is a model of $\PA$, let $t^M\in M$ be the value of $t$ in $M$. If $n\in \mathbb N$, let $\ov n = S^n(0)$ be the numeral for $n$. In the standard model $\mathbb N$ the value of $\ov n$ is $n$. If $f:\mathbb N\to \mathbb N$ is a primitive recursive function then (using G\"odel's $\beta$-function) $f$ can be represented by a $\Sigma^0_1$-formula $\psi(x,y)$ of $\PA$ in such a way that, for all $m,n\in \mathbb N$, we have: \begin{enumerate} \item $f(m) = n \implies \PA \vdash \psi(\ov m, \ov n)$ \item $f(m) \neq n \implies \PA \vdash \lnot \psi(\ov m, \ov n)$ \item $\PA \vdash \forall x \exists ! y \psi(x,y)$ \end{enumerate} and similarly for $n$-ary functions. In the above situation we shall often write $f(x) = y$ as shorthand for the formula $\psi(x,y)$. Given a model $M$ of $\PA$, with our notational conventions, we have $$f(m) = n \iff M \models f(\ov m) = \ov n.$$ We recall that an element of $M$ is standard if it is the value of some numeral, i.e.\ it is of the form ${\ov n}^M$ for some $n\in \mathbb N$. If we identify $n\in \mathbb N$ with ${\ov n}^M\in M$, then 1.--3. say that $\psi$ defines an extension of $f:\mathbb N\to \mathbb N$ to a function $f:M\to M$. In general $\psi$ can be chosen to satisfy additional properties which depend on the way $f$ is presented as a primitive recursive function. Consider for instance the function $f(n) = 2^n$ presented via the functional equations $2^0 = 1$ and $2^{n+1} = 2^n \cdot 2$.
Then $\psi$ can be chosen in such a way that $\PA \vdash \forall x ( 2^{x+1} = 2^x \cdot 2)$, where $2^x$ is defined within $\PA$ as the unique $y$ such that $\psi(x,y)$. With this choice of $\psi$, in any model $M$ of $\PA$, the functional equation $2^{x+1} = 2^x \cdot 2$ continues to hold for non-standard values of $x$, thus $\psi(x,y)$ determines (by the induction scheme of $\PA$) a unique definable extension of the function $n\in \mathbb N\mapsto 2^n\in \mathbb N$ to the non-standard elements. In general, two different presentations of the same primitive recursive function determine different extensions to the non-standard elements, unless $\PA$ is able to show that the two representations are equivalent. A representation is natural if $\PA$ proves the validity of the same functional equations that are used in the presentation of the function in the metatheory. We shall always assume that the primitive recursive functions we consider are represented in $\PA$ in a natural way. Given a formula $\phi(x)$ of $\PA$ and a primitive recursive function $f$, we will feel free to write $\phi(f(x))$ as a shorthand for the formula $\exists y (f(x) = y \land \phi(y))$, where ``$f(x) = y$'' stands for the formula $\psi(x,y)$ that we have chosen to represent $f$ inside $\PA$. So, for instance, it makes sense to write $\phi(2^x)$ although the language of $\PA$ does not have a symbol for the exponential function. Using similar conventions, we may act as if the language of $\PA$ had been enriched with a symbol for each primitive recursive function, or, more precisely, for each primitive recursive presentation of a function. We fix an effective G\"odel numbering of terms and formulas of $\PA$ and we write $\gn{\phi}\in \mathbb N$ for the G\"odel number of $\phi$. In the next section we will introduce various primitive recursive functions involved in the formalization of syntactic notions.
We use $x_0, x_1, x_2, \ldots$ as formal variables of $\PA$, but we also use other letters (such as $x,y,z,t$) as metavariables standing for formal variables. \section{Arithmetization} The content of this section is entirely standard, but we include it to fix the notation. \begin{prop} There are primitive recursive functions $\fS$, $\f+$, $\fx$, $\fvar$, which are increasing in both arguments, such that: \begin{itemize} \item $\fS(\gn{t}) = \gn{S(t)}$ \item $\f+ (\gn{t_1},\gn{t_2}) = \gn{t_1+t_2}$ \item $\fx (\gn{t_1},\gn{t_2}) = \gn{t_1 \cdot t_2}$ \item $\fvar(i) = \gn{x_i}$ \end{itemize} where $t,t_1,t_2$ are terms and $i\in \mathbb N$. \end{prop} The above functions can be naturally represented in $\PA$ by $\Sigma^0_1$-formulas, so they have a natural extension (denoted by the same names) to non-standard models of $\PA$. By formalizing the recursive definition of the class of terms inside $\PA$ we obtain: \begin{prop} There is a formula $\tm(x)\in \Sigma^0_1$ such that $\PA$ proves that, for all $x$, $\tm(x)$ holds if and only if one and only one of the following alternatives holds: \begin{itemize} \item $\exists i \; \; x = \fvar(i)$ \item $x = \gnn{0}$ \item $\exists a \; \tm (a) \land x = \fS(a)$ \item $\exists a,b \; \tm (a) \land \tm(b) \land x = \f+(a,b)$ \item $\exists a,b \; \tm (a) \land \tm(b) \land x = \fx(a,b)$ \end{itemize} Since the class of (codes of) terms is primitive recursive, under the natural formalization both $\tm(x)$ and its negation are equivalent, in $\PA$, to $\Sigma^0_1$-formulas. \end{prop} \begin{cor} For every term $t$ of $\PA$, $\PA \vdash \tm(\gnn{t})$. \end{cor} We have analogous propositions for the codes of formulas.
\begin{prop} There are primitive recursive functions $\fnot$, $\fand$, $\fexists$, $\feq$, which are increasing in both arguments, such that: \begin{itemize} \item $\fnot (\gn{\phi}) = \gn{\lnot \phi}$ \item $\fand(\gn{\phi},\gn{\psi}) = \gn{\phi \land \psi}$ \item $\fexists(i,\gn{\phi}) = \gn{\exists x_i \phi}$ \item $\feq(\gn{t_1},\gn{t_2}) = \gn{t_1=t_2}$ \end{itemize} where $\phi,\psi$ are formulas, $t_1,t_2$ are terms, and $i\in \mathbb N$. \end{prop} The above functions can be naturally represented in $\PA$ by $\Sigma^0_1$-formulas, so they have a natural extension (denoted by the same names) to non-standard models of $\PA$. \begin{prop} There is a formula $\fm(x)\in \Sigma^0_1$ such that $\PA$ proves that, for all $x$, $\fm(x)$ holds if and only if one and only one of the following alternatives holds: \begin{itemize} \item $\exists a,b \; \tm(a) \land \tm(b) \land x = \feq(a,b)$ \item $\exists \phi \; \fm(\phi) \land x = \fnot(\phi)$ \item $\exists \phi, \psi \; \fm(\phi) \land \fm(\psi) \land x = \fand(\phi,\psi)$ \item $\exists i, \phi \; \fm (\phi) \land x = \fexists(i,\phi)$ \end{itemize} Since the class of (codes of) formulas is primitive recursive, under the natural formalization both $\fm(x)$ and its negation are equivalent, in $\PA$, to $\Sigma^0_1$-formulas. \end{prop} \begin{cor} For every formula $\phi$, $\PA\vdash \fm(\gnn{\phi})$. \end{cor} \begin{defn}\label{defn:fm} If $M$ is a model of $\PA$ and $\phi\in M$ is such that $M \models \fm(\phi)$, we will say that $\phi$ is an arithmetized formula in the model $M$. Similarly, an arithmetized term of $M$ is an element $a\in M$ such that $M \models \tm(a)$. \end{defn} If $\psi$ is a formula of $\PA$ in the metatheory, then $\gnn{\psi}^M$ is an arithmetized formula of $M$, but if $M$ is non-standard there are arithmetized formulas which are not of this form.
Similarly, if $t$ is a term of $\PA$, then $\gnn{t}^M$ is an arithmetized term of $M$, and if $M$ is non-standard it will also contain non-standard arithmetized terms. \section{$\Sigma^0_2$-models} In this section we define a $\Sigma^0_2$-model as a model $M$ with domain $\mathbb N$ such that the set of formulas with parameters which are true in the model is $\Sigma^0_2$-definable (so the standard model $(\mathbb N,0,S,+,\cdot)$ is not $\Sigma^0_2$-definable). We proceed below with the formal definitions. An infinite sequence of natural numbers $(a_n)_n$ is finitely supported if there is $k\in \mathbb N$ such that $a_n = 0$ for all $n\geq k$. There is a bijection between natural numbers and finitely supported sequences of natural numbers: it suffices to map $s\in \mathbb N$ to the sequence of the exponents appearing in the prime factorization $\prod_k p_k^{a_k}$ of $s+1$ (where $p_0 = 2, p_1 = 3, p_2 = 5$ and in general $p_k$ is the $(k+1)$-th prime). \begin{defn}[PA]\label{defn:el} Given $s,k$, let $\el(s,k)$ be the least $a$ such that $p_k^{a+1}$ does not divide $s+1$. According to the definition, $$s+1 = \prod_k p_k^{\el(s,k)}$$ where $\prod_k p_k^{\el(s,k)}$ can be regarded as a finite product since all but finitely many factors are equal to $1$. Note that $\el(s,k)$ is a primitive recursive function of $s,k$. \end{defn} \begin{rem}[PA]\label{rem:el} The coding of finitely supported sequences defined above is injective: \[s_1 = s_2 \;\;\leftrightarrow\;\; \forall k\; \el(s_1,k) = \el(s_2,k)\] \end{rem} \begin{prop}[PA]\label{prop:subs} Given $s,a,k$, there is a unique $t$, denoted $s[a/k]$, such that $\el(t,i) = \el(s,i)$ for all $i\neq k$ and $\el(t,k) = a$. Note that $s[a/k]$ is a primitive recursive function of $s,a,k$. \end{prop} We will consider countable models $M$ of $\PA$.
We can assume that all such models have domain $\mathbb N$, but the interpretation of the function symbols $0,S,+,\cdot$ will in general differ from the standard one. \begin{defn}\label{defn:environment} Let $M = (\mathbb N; \;0_M,S_M,+_M,\cdot_M)$ be a model of $\PA$ with domain $\mathbb N$. If $\phi$ is a formula in the language of $\PA$ and $s\in \mathbb N$ we write $$M\models \phi \en{s}$$ to express the fact that $\phi$ holds in $M$ in the environment coded by $s$, i.e.\ the environment which, for each $i$, assigns the value $\el(s,i)$ to the variable $x_i$. For simplicity we take as a basis of logical connectives $\lnot,\land, \exists$ (negation, conjunction, existential quantification). The universal quantifier~$\forall$ and the logical connectives~$\vee$ and~$\to$ are defined in terms of $\lnot,\land, \exists$ in the usual way. Tarski's truth conditions then take the following form: \begin{itemize} \item $M\models (\exists x_i \phi) \en{s} \iff \; \text{ there is} \; x\in \mathbb N \; \text{such that}\; M \models \phi \en{s[x/i]}$ \item $M\models (\phi \land \psi) \en{s} \iff M\models \phi \en{s} \; \text{and} \; M \models \psi \en{s}$ \item $M\models (\lnot \phi) \en{s} \iff M\not\models \phi \en{s}$ \item $M \models (t_1= t_2) \en{s} \iff \val(t_1,M,s) = \val(t_2,M,s)$ \end{itemize} where $\val(t,M,s)$ is the value of the term $t$ in the model $M$ when variables are evaluated according to $s$, namely $\val(x_i,M,s) = \el(s,i)$. \end{defn} If $\phi$ is closed (it has no free variables), then the validity of a formula $\phi$ in $M$ does not depend on the environment: $M\models \phi \en{s} \iff M\models \phi\en{0}$. In this case we may write $M\models \phi$ for $M\models \phi\en{0}$. Occasionally we make use of the connective $\perp$ standing for ``false''. Thus for every $M$ we have $M\not\models \perp$. \begin{defn}\label{defn:complexity} Let $M$ be a model of $\PA$ with domain $\mathbb N$.
We say that $M$ is a $\Sigma^0_2$-model if the set of pairs $(\gn{\phi},s)\in \mathbb N\times \mathbb N$ such that $M\models \phi\en{s}$ is an arithmetical set of complexity~$\Sigma^0_2$. For a technical reason, which will be clarified in the comments before Lemma \ref{lem:Env}, we assume that the constant~$0$ is interpreted in~$M$ by the element~$0\in\mathbb N$, namely $0_M=0$. \end{defn} We recall that a set of natural numbers is $\Delta^0_2$ if both the set and its complement can be defined by a $\Sigma^0_2$-formula. Notice that a $\Sigma^0_2$-model is in fact automatically $\Delta^0_2$. We will need the following fact. \begin{fact} \label{fact:kleene} Let $T$ be a recursively axiomatized theory without finite models. If $T$ has a model, then $T$ has a model whose elementary diagram has arithmetic complexity~$\Delta^0_2$. \end{fact} Fact \ref{fact:kleene} can be easily derived from the usual proof of the completeness theorem based on K\"onig's lemma, together with the observation that a recursive binary tree with an infinite path has a $\Delta^0_2$ infinite path (see \cite{Kleene1952,Shoenfield1960}). We thank the anonymous referee for suggesting that it can also be derived model-theoretically from Skolem's proof of the existence of countable models as limits of finite models in \cite{Skolem1922} (see also pp.~20--21 of \cite{Wang1970} and related developments in \cite{Quinsey2019,Shelah1984}). We include a model-theoretic proof below. We stress that Fact \ref{fact:kleene} will only be used in the metatheory, namely we do not need to formalize its proof within $\PA$. Moreover, Fact \ref{fact:kleene} will only be used in the proof of $\PA\not\vdash \Box\perp$, but not in the proof of $\PA\not\vdash \lnot \Box \perp$. \begin{proof}[Proof of Fact \ref{fact:kleene}.]
We can assume that $T$ has a $\vec{\forall} \vec{\exists}$-axiomatization, namely it is axiomatized by formulas of the form $\forall \bar x \exists \bar y \theta(\bar x, \bar y)$ where $\theta$ is quantifier free and $\bar x, \bar y$ are tuples of variables. We can reduce to this situation by expanding the language $L$ of $T$ with the introduction of a new predicate symbol $R_{\varphi}(\bar x)$ for each $L$-formula $\varphi(\bar x)$ together with the following axioms: \begin{itemize} \item $R_\varphi(\bar x) \leftrightarrow \varphi(\bar x)$ for each atomic $\varphi$ \item $R_{\lnot \varphi}(\bar x) \leftrightarrow \lnot R_{\varphi}(\bar x)$ \item $R_{\alpha\land \beta}(\bar x) \leftrightarrow R_{\alpha}(\bar x) \land R_{\beta}(\bar x)$ \item $R_{\exists y \varphi}(\bar x) \leftrightarrow \exists y R_\varphi(\bar x,y)$ \item $R_{\forall y \varphi}(\bar x) \leftrightarrow \forall y R_\varphi(\bar x,y)$ \end{itemize} (with implicit universal quantifiers over $\bar x$). After such a modification, we can assume that $T$ has effective elimination of quantifiers, a $\vec \forall \vec \exists$-axiomatization, and is formulated in a relational language $L$ (possibly with equality). We need to find a model of $T$ whose atomic diagram is $\Delta^0_2$ (the elementary diagram will then also be $\Delta^0_2$ because $T$ has effective elimination of quantifiers). We will construct a $\Delta^0_2$-model of $T$ as a limit of finite models following the ideas of \cite{Skolem1922,Shelah1984} with suitable modifications to handle theories rather than single formulas. We need some definitions. Let $S\subseteq L$ be a finite fragment of the language $L$. An $(S,m)$-structure is a finite sequence of $S$-structures $\bar M = (M_0,M_1,\ldots, M_m)$ such that $M_\ell$ is a substructure of $M_{\ell + 1}$ for all $\ell < m$. Given another $(S,m)$-structure $\bar N$, we say that $\bar N$ is an $m$-substructure of $\bar M$ if $N_\ell$ is a substructure of $M_\ell$ for all $\ell \leq m$. 
Let $\varphi := \forall \bar x \exists \bar y \theta$ be a closed formula, with $\theta$ quantifier free. We say that $\varphi$ is a $(p,q)$-formula if the number of $\forall$-quantifiers in $\varphi$ is $p$ and the number of $\exists$-quantifiers is $q$. If $\bar M$ is an $(S,m)$-structure and $\varphi$ is a closed $(p,q)$-formula in the language $S$, we say that $\bar M$ is an $(S,m)$-model of $\varphi$, if for all $\ell < m$ and for every $a_1,\ldots,a_p \in \dom(M_\ell)$ there are $b_1, \ldots, b_q \in \dom(M_{\ell + 1})$ such that $M_{\ell+1}\models \theta(\bar a, \bar b)$. Note that an $(S,0)$-structure satisfies every closed formula. We say that $\bar M$ is $(p,q)$-bounded if $|M_0| = 1$ and for all $\ell<m$, $|M_{\ell+1}| \leq |M_\ell| + q |M_{\ell}|^{p}$. Note that if $\bar M$ is $(p,q)$-bounded, then it is $(a,b)$-bounded for all $a\geq p, b\geq q$. The following facts follow easily from the definitions. The idea of the proof is as in \cite[Claim 1.3]{Shelah1984} with minor adaptations. \begin{enumerate} \item If $\varphi$ has a model, then for every $n\in \mathbb N$, $\varphi$ has an $(S,n)$-model $\bar M$. \item If $\varphi = \forall \bar x \exists \bar y \theta$ is a $(p,q)$-formula with an $(S,n)$-model $\bar M$, then $\varphi$ has a $(p,q)$-bounded $n$-submodel $\bar N$. (Proof: Define $N_\ell$ by induction on $\ell$. Pick an arbitrary element $a\in M_0$ and put $N_0=\{a\}$. Given $\ell<n$, there are $|N_\ell|^p$ possible $p$-tuples $\bar x$ from $N_\ell$. For each of them choose a $q$-tuple $\bar y$ from $M_{\ell+1}$ witnessing $\theta(\bar x, \bar y)$ and put its elements in $N_{\ell+1}$.) \end{enumerate} An $(S,n)$-structure $\bar N = (N_0,\ldots, N_n)$ is called initial if $N_n$ is a finite initial segment of $\mathbb N$ (we do not require that $N_\ell$ is initial for $\ell<n$).
We observe that, for fixed $S,n,p,q$, there are only finitely many $(p,q)$-bounded initial $(S,n)$-structures and that any $(p,q)$-bounded $(S,n)$-structure is isomorphic to an initial one. Let $(\varphi_n)_{n\in \mathbb N}$ be a recursive enumeration of the axioms of $T$ and let $L_n$ be the language of $\varphi_0\land \ldots \land \varphi_n$ (a finite fragment of $L$). Let $a_n,b_n \in \mathbb N$ be such that $\varphi_n$ is a closed $(a_n,b_n)$-formula. Let $P:= (p_n)_{n\in \mathbb N}$ and $Q := (q_n)_{n\in \mathbb N}$ where $p_n := \max_{k\leq n} a_k$ and $q_n := \sum_{k\leq n} b_k$. Since $\varphi_0\land \ldots \land \varphi_n$ is equivalent to a $(p_n,q_n)$-formula in the language $L_n$, there is an initial $(p_n,q_n)$-bounded $(L_n,n)$-model $\bar N$ of $\varphi_0,\ldots, \varphi_n$. We call such a structure a $T_{|n}$-model. We say that a $T_{|n+1}$-model $\bar M = (M_0,\ldots, M_{n+1})$ extends a $T_{|n}$-model $\bar N=(N_0,\ldots, N_n)$, if for each $\ell \leq n$, $N_\ell$ is the $L_n$-reduct of a substructure of $M_\ell$ (which is an $L_{n+1}$-structure). We define a finitely branching forest $M_T(P,Q)$ as follows. The roots of $M_T(P,Q)$ are the $T_{|0}$-models. For $n>0$, the nodes of $M_T(P,Q)$ at level $n$ are the $T_{|n}$-models which extend some node of $M_T(P,Q)$ at level $n-1$. The extension relation turns $M_T(P,Q)$ into a finitely branching forest (we may make it into a finitely branching tree by adding a fictitious new root). By induction on $n$ one can show that every $T_{|n}$-model is isomorphic to a node of $M_T(P,Q)$ at level $n$. Assuming that $T$ has a model, it follows that $M_T(P,Q)$ is infinite. Since moreover $M_T(P,Q)$ is recursive and finitely branching, $M_T(P,Q)$ has an infinite path of complexity $\Delta^0_2$ (just take the left-most path with respect to some natural ordering).
Let $M$ be the union of the structures $M_m$ such that there is an $m$-model of the form $\bar M = (M_0,M_1,\ldots, M_m)$ in the path (the domain of $M$ is the union of the domains, and the interpretation of each relation symbol $R\in L$ is the union of its interpretations in those $M_m$ in which it is defined). Then $M$ is a model of $T$ whose atomic diagram has complexity $\Delta^0_2$. \end{proof} \section{Codes of models} In this section we define the notion of $\Sigma^0_2$-model and show that the set of codes of $\Sigma^0_2$-models is $\Pi^0_3$-definable (Proposition \ref{prop:code-model}). This is related to the observation in \cite{Kotlarski2004} that the set of codes of consistent complete extensions of a recursively axiomatized theory is $\Pi^0_3$-definable. The difference is that our formulation does not involve the syntactic notion of consistency, which would require fixing a proof-system. We need the fact that in $\PA$ there are $\Sigma^0_n$-truth predicates for $\Sigma^0_n$-formulas (see \cite{Hajek1993}). In particular we have: \begin{fact}\label{fact:sat}There is a formula $\sat_2(x_0,x_1)\in \Sigma^0_2$ such that for every $\psi (x_1) \in \Sigma^0_2$, $$\PA \vdash \forall x_1 \; \sat_2 (\gnn{\psi},x_1) \leftrightarrow \psi(x_1).$$ \end{fact} For our purposes we need a variation of $\sat_2$ which works for formulas in two variables and additional parameters as in the following corollary. \begin{cor}\label{cor:sat} There is a formula $\sat(x_0,x_1,x_2) \in \Sigma^0_2$ such that for every $n\in \mathbb N$ and every formula $\psi(z_1, \ldots, z_n,x,y) \in \Sigma^0_2$, $$\PA\vdash \forall a_1, \ldots, a_n \;\exists c\; \forall x, y \; \sat(c,x,y) \leftrightarrow \psi(a_1, \ldots, a_n,x,y).$$ \end{cor} The idea is that $c$ codes the predicate $\{(x,y) \mid \psi(a_1,\ldots, a_n, x,y)\}$.
\begin{pf} We make use of the predicate $\sat_2$ of Fact \ref{fact:sat} and of the coding of sequences in Definition \ref{defn:el}. For simplicity we write $(s)_i$ for $\el(s,i)$. Let $\sat(c,x,y)$ be the formula $\sat_2((c)_0, f(c,x,y))$ where $f(c,x,y)$ is the least $t$ such that: \begin{itemize} \item $(t)_0 = x$ \item $(t)_1 = y$ \item $\forall i>0 \; (t)_{i+1} = (c)_i$ \end{itemize} Now, given $\psi$, there is a $\Sigma^0_2$-formula $\theta_\psi(t)$ such that, in $\PA$, $$\theta_\psi(t) \leftrightarrow \psi((t)_2, \ldots, (t)_{n+1}, (t)_0,(t)_1)$$ Reasoning in $\PA$, given $a_1, \ldots, a_n$, let $c$ be minimal such that $(c)_0= \gn{\theta_\psi}$, $(c)_1 = a_1, \ldots, (c)_n = a_n$. Then \begin{align*} \sat(c, x,y) &\leftrightarrow \sat_2(\gnn{\theta_\psi}, f(c,x,y))\\ & \leftrightarrow \theta_\psi(f(c,x,y)) \\ & \leftrightarrow \psi(a_1, \ldots, a_n, x,y) \end{align*} \end{pf} \begin{defn} \label{defn:code} Let $M$ be a $\Sigma^0_2$-model of $\PA$ (Definition \ref{defn:complexity}). Then by definition there is a $\Sigma^0_2$-formula $\psi_M(x_0,x_1)$ such that for all formulas $\phi$ of $\PA$ and all $s\in \mathbb N$, $$M\models \phi \en{s} \iff \mathbb N\models \psi_M(\gn{\phi},s)$$ Letting $m = \gn{\psi_M}$, this is equivalent to $$M\models \phi \en{s} \iff \mathbb N\models \sat (m ,\gn{\phi},s)$$ where $\mathbb N$ is the standard model of $\PA$. If the above equivalence holds for all $(\phi,s)$ we say that $m$ is a code for the model $M$. \end{defn} Our next goal is to show that the set of codes of $\Sigma^0_2$-models is $\Pi^0_3$-definable. We want to do so avoiding any recourse to a proof-system. \begin{defn} We write $\iota y$ for ``the unique $y$ such that''. When we write an expression like $f(x) = \iota y. P(x,y)$ we mean that $f$ is the partial function defined as follows: if there is one and only one $y$ such that $P(x,y)$, then $f(x)$ is such a $y$; in the opposite case $f(x)$ is undefined.
\end{defn} \begin{defn}[PA] \label{defn:total} Given $m$, we define partial functions $0_m, S_m,+_m,\cdot_m$ (of arity $0,1,2,2$ respectively) as follows. Fix an arbitrary $s$ (for instance $s=0$). \begin{itemize} \item $0_m = \iota y . \; \sat(m,\; \gnn{0=x_0},\;s[y/0] )$ \item $S_m(a) = \iota y .\; \sat (m,\; \gnn{S(x_0)= x_1}, \;s[a/0,y/1] )$ \item $a+_m b = \iota y . \; \sat (m,\; \gnn{x_0+x_1 = x_2}, \;s[a/0,b/1,y/2] )$ \item $a \cdot_m b = \iota y . \; \sat (m,\; \gnn{x_0\cdot x_1 = x_2}, \;s[a/0,b/1,y/2] )$ \end{itemize} We say that $m$ is total if these functions are total, i.e.\ the various $y$ always exist and are unique. Since $\sat$ is $\Sigma^0_2$, ``$m$ is total'' is a $\Pi^0_3$-definable predicate in $m$. If $m$ is total we define a function $\fval$ whose first argument satisfies the predicate $\tm(x)$ as follows: \begin{itemize} \item $\fval(\fvar(i),m,s) = \el(s,i)$ \item $\fval(\gnn{0},m,s) = 0_m$ \item $\fval(\fS(a),m,s) = S_m(\fval(a,m,s))$ \item $\fval(\f+(a,b),m,s) = \fval(a,m,s) +_m \fval(b,m,s)$ \item $\fval(\fx(a, b),m,s) = \fval(a,m,s) \cdot_m\fval(b,m,s)$ \end{itemize} Note that $\fval$ is $\Pi^0_3$-definable. \end{defn} \begin{defn}[PA] \label{defn:tarski} We write $\fmod(m)$ if $m$ is total (Definition \ref{defn:total}) and the conjunction of the universal closure of the following clauses holds, where the variables $\phi, \psi$ are relativized to the predicate $\fm$, the variables $a,b$ are relativized to the predicate $\tm$, and the variables $i,s$ are unrestricted.
\begin{itemize} \item $0_m = 0$ (see Definition \ref{defn:complexity}) \item $\sat(m, \fexists (i, \phi), s) \; \leftrightarrow \; \exists x \; \sat(m, \phi, s[x/i])$ \item $\sat(m, \fand(\phi,\psi), s)\; \leftrightarrow \; \sat(m, \phi, s) \land \sat(m, \psi, s)$ \item $\sat(m, \fnot (\phi), s) \;\leftrightarrow\; \lnot \sat(m, \phi, s)$ \item $\sat(m, \feq (a,b), s)\; \leftrightarrow\; \fval(a,m,s) = \fval(b,m,s)$ \item $\text{Ax}_{\PA}(\phi)\; \to\; \sat(m, \phi, s)$ \end{itemize} where $\text{Ax}_{\PA}(x)$ is the natural formalization of ``$x$ is an axiom of $\PA$''. \end{defn} \begin{prop}\mbox{} \label{prop:code-model} \begin{enumerate} \item $\fmod(m)$ is a $\Pi^0_3$-formula in the free variable $m$. \item If $M$ is a $\Sigma^0_2$-model of $\PA$ and $m$ is a code for $M$ (Definition \ref{defn:code}), then $\mathbb N \models \fmod (m)$. \item If $m\in \mathbb N$ and $\mathbb N \models \fmod (m)$, then there is a $\Sigma^0_2$-model $M$ such that $$M\models \phi \en{s} \iff \mathbb N\models \sat(m,\gn{\phi},s)$$ for all $\phi,s$. \end{enumerate} \end{prop} If 3. holds, $M$ is the (unique) model coded by $m$. So every $\Sigma^0_2$-model has a code, but different codes may code the same model. \begin{pf} Point 1. is by inspection of the definition of $\fmod(x)$. Indeed we have already observed that the totality condition in Definition \ref{defn:total} is $\Pi^0_3$. It is also clear that the negative occurrence of the subformula $\exists x \; \sat(m, \phi, s[x/i])$ in Definition \ref{defn:tarski} is $\Pi^0_3$ and the other parts in the definition of $\fmod(x)$ have lower complexity. To prove 2. we recall that, by its very definition, $\fmod(m)$ expresses the fact that the set $\{(\phi,s) \mid \sat(m, \phi, s)\}$ satisfies Tarski's truth conditions for arithmetized formulas (standard or non-standard).
When interpreted in the standard model $\mathbb N$, we only need to consider standard arithmetized formulas and 2. follows from the assumption that $M$ is a model. To prove 3., let $m\in \mathbb N$ be such that $\mathbb N \models \fmod(m)$. Define $M$ as the structure with domain $\mathbb N$ which interprets $0,S,+,\cdot$ as $0_m, S_m, +_m, \cdot_m$ respectively. By induction on the complexity of the formula $\phi$ we have $M\models \phi\en{s} \iff \mathbb N\models \sat(m,\gnn{\phi},s)$. \end{pf} \section{An anti-quote notation} \begin{defn} If $\phi$ is a formula without free variables, we write $\true(x,\gnn{\phi})$ for $\sat(x,\gnn{\phi},0)$ and observe that $$\PA\vdash \fmod(m) \to \forall s (\true(m,\gnn{\phi}) \leftrightarrow \sat(m,\gnn{\phi},s)),$$ i.e.\ $\PA$ proves that the truth of a closed formula in a model does not depend on the environment. \end{defn} \begin{defn} If $\psi(x_0, \ldots, x_n)$ is a formula of $\PA$, we write \[\true(m, \gnn{\psi(\dt{a}_0, \ldots, \dt{a}_n)})\] for $\exists s \; \el(s,\ov 0) = a_0 \land \dotsb \land \el(s,\ov n) = a_n \land \sat(m, \gnn{\psi(x_0, \ldots, x_n)}, s)$. \end{defn} \noindent If $\fmod(m)$ holds, $\true(m, \gnn{\psi(\dt{a}_0,\ldots, \dt{a}_n)})$ formalizes the fact that $\psi$ holds in the model coded by $m$ in the environment which assigns the value $a_i$ to the variable $x_{i}$. Intuitively $\gnn{~}$ is a quote notation and the dot is an anti-quote. If an expression appears within the scope of $\gnn{~}$ it is only its name that matters, not its value, but if we put a dot on it, it is its value that matters and not its name. The following remark will further clarify the issue. \begin{rem} Assume $\fmod(m)$. If $f$ is a primitive recursive function, there is a difference between $\true(m, \gnn{\psi(\dt{f}(x))})$ and $\true(m, \gnn{\psi(f(\dt{x}))})$. In the first case we evaluate $f(x)$ outside of $m$ and we interpret $\psi(x_0)$ in $m$ in the environment $x_0\mapsto f(x)$.
In the second case we interpret the formula $\psi(f(x_0))$ in $m$ in the environment $x_0 \mapsto x$. More precisely, $\PA$ proves that if $\fmod(m)$ holds, then: \begin{itemize} \item $\true(m, \gnn{\psi(\dt{f}(x))})\leftrightarrow \exists s (\el(s,0) = f(x) \land \sat(m, \gnn{\psi(x_0)},s))$ \item $\true(m, \gnn{\psi(f(\dt{x}))}) \leftrightarrow \exists t (\el(t,0) = x \land \sat(m, \gnn{\psi(f(x_0))},t))$ \end{itemize} \end{rem} \noindent For example, $\true(m, \gnn{s(\dt x)=\dt{s}(x)})$ might not hold when~$x=0$. \section{Coding environments} Given a finitely supported sequence $a_0, a_1, \ldots, a_n, \ldots \in \mathbb N$, there is some $s\in \mathbb N$ which codes the given sequence in the sense that $\el(s,k) = a_k$ for all $k\in \mathbb N$. Now let $M$ be a model of $\PA$ with domain $\mathbb N$. The aim of this section is to construct a function $\Env$ which, given $M$ and~$s$, produces an element $\Env(s,M)\in M$ such that for all $k\in \mathbb N$ $$\el^M(\Env(s,M),\ov k^M) = \el(s,k)=a_k$$ In fact we will produce a $\Pi^0_3$-definable function $\env$ which, given $s$ and a code $m$ for a $\Sigma^0_2$-model $M$, yields $\env (s,m)=\Env(s,M)$. To construct $\Env(s,M)$ we encounter a technical difficulty as we need $\el^M(\Env(s,M),\ov k^M) = 0$ for all large enough $k\in \mathbb N$. When $M$ is isomorphic to $\mathbb N$ this implies $0^M= 0$, which is the technical condition required in Definition~\ref{defn:complexity}. A different approach would have been to code environments by finite sequences instead of finitely supported sequences. With this encoding the assumption $0^M=0$ becomes unnecessary at the expense of complicating the definition of Tarski's semantics. \begin{lem}\label{lem:Env} Let $M$ be a model of $\PA$ with domain $\mathbb N$.
Given $s\in \mathbb N$, there is a unique $t$, denoted $\Env(s,M)$, such that: \begin{enumerate} \item $\forall k<s \; \forall a$, $\mathbb N \models \el(s,k) = a \implies M \models \el(t, \ov{k}) = a$ \item $M \models \forall k \geq \ov{s}\; \el(t, k) = 0$ \end{enumerate} Note that for $k\geq s$, we have $\mathbb N \models \el(s,k) = 0$. It follows that for all $s,k\in \mathbb N$ we have $M\models \el(\Env(s,M),\ov k) = x_0$ in the environment $x_0 \mapsto \el(s,k)$, or in other words $$\el^M(\Env(s,M),\ov k^M) = \el(s,k)$$ where the superscript indicates the model where $\el$ and $\ov k$ are evaluated. \end{lem} \begin{pf} We will prove the following more general result: for all $n$ there is a unique $t$ such that: \begin{enumerate} \item $\forall k<n \; \forall a$, $\mathbb N \models \el(s,k) = a \implies M \models \el(t, \ov{k}) = a$ \item $M \models \forall k \geq \ov{n}\; \el(t, k) = 0$ \end{enumerate} Granted this, the lemma follows by taking $n=s$. To prove our claim we proceed by induction on $n$. For $n= 0$, we take $t=0$. The inductive step follows from Proposition \ref{prop:subs}, which allows us to modify a given coded sequence by changing any of its values. \end{pf} Recalling the substitution function $s[z/k]$ from Proposition \ref{prop:subs}, the crucial property of $\Env$ is that it commutes with substitutions in the sense of the following proposition. \begin{prop}\label{prop:substitutions-easy} Let $M$ be a model of $\PA$ with domain~$\mathbb N$, then for all~\hbox{$z,s,k\in\mathbb N$} $$M \models e_1[z/\ov k]= e_2$$ where $e_1 = \Env(s,M)$ and $e_2 = \Env(s[z/k],M)$. \end{prop} We may write the proposition more perspicuously as $$M\models \Env(s,M)[z/\ov k] = \Env(s[z/k],M),$$ but note that $\Env(s,M)$ and $\Env(s[z/k],M)$ are defined outside of $M$, while $e_1[z/\ov k]$ depends on the interpretation of a $\Sigma^0_1$-formula inside $M$ (the formula which defines the primitive recursive substitution function in Proposition \ref{prop:subs}).
\begin{pf} It suffices to show that for all $i\in M$, $$M\models \el(\Env(s,M)[z/\ov k],i) = \el(\Env(s[z/k],M),i).$$ We distinguish three cases: \begin{itemize} \item $i={\ov k}^M$ \item $i = \ov x^M$ for some $x\in \mathbb N$ different from $k$ \item $i$ is a non-standard element of $M$ \end{itemize} In the first case both sides of the equality to be proved are equal to $z$. In the second case they are both equal to $\el(s,x)$. In the third case they are both equal to $0$. \end{pf} In the rest of the section we formalize Lemma \ref{lem:Env} and Proposition \ref{prop:substitutions-easy} inside $\PA$. We need some definitions. \begin{defn} Let $\num: \mathbb N\to \mathbb N$ be the primitive recursive function $n\mapsto \gn{S^n(0)}$. \end{defn} We can represent $\num$ inside $\PA$, so it will make sense to apply it to non-standard elements of a model of $\PA$. \begin{defn}[PA] \label{defn:numv} Assuming $\fmod(m)$, let $\vnum(n,m) = \fval(\num(n), m, 0)$ (the third argument of $\fval$ codes the environment, which is irrelevant in this case). \end{defn} If $n$ is standard, then $\vnum(n,m)$ is the value of the numeral $\ov n$ in the model coded by $m$. We can now define a function $\env$ such that, if $M$ is a $\Sigma^0_2$-model with code $m$, then $\env(s,m) = \Env(s,M)$. \begin{lem}[PA] \label{lem:env} Let $m$ be such that $\fmod(m)$. Given $s$, there is a unique $t$, denoted $\env(s,m)$, such that: \begin{enumerate} \item $\forall k <s \; \true(m, \gnn{ \el(\dt{t}, \dt{\vnum}(k,m)) = \dt{\el}(s,k) } )$ \item $\true(m, \gnn{\forall k \geq \dt{\vnum}(s,m) \; \el(\dt{t},k) = 0}).$ \end{enumerate} Similarly to Lemma \ref{lem:Env} for all $s,k$ we have $$\true(m,\gnn{\el(\dt{\env}(s,m),\dt{\vnum}(k,m)) = \dt{\el}(s,k)}).$$ \end{lem} \begin{pf} By formalizing the proof of Lemma \ref{lem:Env} in $\PA$. \end{pf} We can now give a formalized version of Proposition \ref{prop:substitutions-easy}.
\begin{prop}[PA]\label{prop:substitutions} $\forall m, z, k, s$, if $\fmod(m)$, then $$\true(m, \gnn{\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)]= \dt{\env}(s[z/k],m)})$$ \end{prop} \begin{proof}[Proof of Proposition~\ref{prop:substitutions}] Work in $\PA$ and assume $\fmod(m)$. Given $z,k,s$ we need to show $$\true(m, \gnn{\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)]= \dt{\env}(s[z/k],m)}).$$ By Remark \ref{rem:el} and the definition of~$\fmod(m)$, this is equivalent to \[\forall i \;\true(m, \gnn{ \el(\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)],\dt i)= \el(\dt{\env}(s[z/k],m),\dt i)})\] We distinguish three cases: \begin{itemize} \item $i = \vnum(k,m)$ \item $i = \vnum(x,m)$ for some $x\neq k$ \item none of the above, namely $i$ is a non-standard element of the model coded by $m$ \end{itemize} \noindent In the first, case we have \begin{align*} & \true(m,\gnn{\el(\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)],\dt i) = \dt{z}}) && \text{by Proposition \ref{prop:subs}}\\ & \true(m,\gnn{\dt{z} = \dt{\el}(s[z/k],k)}) && \text{by Lemma \ref{lem:env}}\\ & \true(m,\gnn{\dt{\el}(s[z/k],k) = \el(\dt{\env}(s[z/k],m),\dt i)}) && \text{by Proposition \ref{prop:subs}} \end{align*} and we conclude by transitivity of the equality inside the model coded by $m$. \noindent In the second case, we have \begin{align*} & \true(m,\gnn{ \el(\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)],\dt i)=\el(\dt{\env}(s,m),\dt i)}) & & \text{by Proposition \ref{prop:subs}}\\ & \true(m,\gnn{ \el(\dt{\env}(s,m),\dt i)=\dt{\el}(s,x) }) & & \text{by Lemma \ref{lem:env}} \\ & \true(m,\gnn{ \dt{\el}(s,x)=\dt{\el}(s[z/k],x)}) & & \text{by Proposition \ref{prop:subs}}\\ & \true(m,\gnn{ \dt{\el}(s[z/k],x)=\el(\dt{\env}(s[z/k],m),\dt i) }) & & \text{by Lemma \ref{lem:env}} \end{align*} and we conclude again by transitivity of the equality. 
\noindent In the third case, \begin{align*} & \true(m,\gnn{\el(\dt{\env}(s,m)[\dt{z}/\dt{\vnum}(k,m)],\dt i) = \el(\dt{\env}(s,m),\dt i)}) && \text{by Proposition \ref{prop:subs}}\\ & \true(m,\gnn{\el(\dt{\env}(s,m),\dt i) = 0}) & & \text{by Lemma \ref{lem:env}}\\ & \true(m,\gnn{0=\el(\dt{\env}(s[z/k],m),\dt i) }) & & \text{by Lemma \ref{lem:env}} \end{align*} and we conclude as above. \end{proof} Recalling that $s+1 = \Pi_i p_i^{\el(s,i)}$ we can illustrate the definition of $\env$ by the following example. \begin{exa} Let $s +1 = 2^7 3^5$ and let $M$ be a $\Sigma^0_2$-model coded by $m$. Then $\env(s,m)$ is the unique element $t$ such that $M \models x_2+1 = 2^{x_0} 3^{x_1}$ in the environment $x_0\mapsto 7, x_1 \mapsto 5,x_2 \mapsto t$. Note that $7$ and $5$ are not necessarily equal to ${\ov 7}^M$ and ${\ov 5}^M$, so in general $M \not\models x_2+1 = 2^73^5$ in the environment $x_2\mapsto t$. \end{exa} This completes the proofs of Propositions~\ref{prop:substitutions-easy} and~\ref{prop:substitutions}. \section{A model within a model} \begin{prop} \label{prop:inmodel}Let $X$ be a model of $\PA$ with domain $\mathbb N$. Given $y\in X$ such that $X\models \fmod(y)$, there is a model $Z\models \PA$ with domain $\mathbb N$ such that $$Z \models \phi\en{s} \iff X \models \sat(y, \gnn{\phi},t)$$ where $t= \Env(s,X)$. \end{prop} \begin{proof}Let $X,y$ be as in the hypothesis. Let $\mathcal Z$ be the set of pairs $(\phi,s)$ such that $X\models \sat(y,\gnn{\phi},\Env(s,X))$. We need to prove that there is a model $Z$ of $\PA$ with domain $\mathbb N$ such that $Z \models \phi[s]\iff (\phi,s)\in \mathcal Z$. To this aim we need to check Tarski's truth conditions and verify that $\mathcal Z$ contains the axioms of $\PA$. The latter condition follows easily from the assumption $X\models \fmod(y)$.
Let us check the truth condition for negation: \begin{align*} (\lnot \phi,s)\in \mathcal Z & \leftrightarrow X \models \sat(y, \gnn{\lnot \phi}, \Env(s,X))\\ & \leftrightarrow X \models \lnot \sat(y, \gnn{\phi},\Env(s,X))\\ & \leftrightarrow X \not\models \sat(y,\gnn{\phi},\Env(s,X)) \\ & \leftrightarrow (\phi,s)\notin \mathcal Z \end{align*} where in the second equivalence we used the fact that $X \models \fmod(y)$. Similarly, we can verify Tarski's truth condition for the quantifier $\exists$: \begin{align*} (\exists x_k \phi, s) \in \mathcal Z & \leftrightarrow X \models \sat(y,\gnn{\exists x_k\phi},\Env(s,X))\\ & \leftrightarrow X \models \exists x_0 \sat(y, \gnn{\phi},\Env(s,X)[x_0/\ov{k}])\\ & \leftrightarrow\exists z\in \mathbb N \; X \models \sat(y, \gnn{\phi},\Env(s,X)[z/\ov{k}])\\ & \leftrightarrow\exists z \in \mathbb N\; X \models \sat(y, \gnn{\phi},\Env(s[z/k],X))\\ & \leftrightarrow \exists z\in \mathbb N \; (\phi, s[z/k])\in \mathcal Z \end{align*} where in the fourth equivalence we used Proposition \ref{prop:substitutions-easy}. We leave the other verifications to the reader. \end{proof} In the above proposition, if $X$ is a $\Sigma^0_2$-model, then $Z$ is also $\Sigma^0_2$. In the rest of the section we prove that there is a definable function which computes a code $ {}^xy$ of $Z$ given $y$ and a code $x$ for $X$. \begin{prop}[PA] \label{prop:mod-in-mod} Given $x,y$, there is $z$ such that for all $\phi,s$, \begin{align*} \sat(z, \phi, s) \quad \iff\quad \true(x, \; \gnn{\sat(\dt{y},\dt{\vnum}(\phi,x),\dt{\env}(s,x))})\end{align*} We define ${}^xy$ as the minimal such $z$ and observe that the function $x,y\mapsto {}^xy$ is $\Pi^0_3$-definable.
\end{prop} \begin{pf} Given $x,y$, the set $$\{(\phi,s) \mid \true(x, \; \gnn{\sat(\dt{y},\dt{\vnum}(\phi,x),\dt{\env}(s,x))})\}$$ is $\Sigma^0_2$-definable with parameters $x,y$, so by Corollary \ref{cor:sat} there is some $z$ which codes this set, and we take ${}^xy$ to be the minimal such $z$. It can be readily verified that $x,y\mapsto {}^xy$ is $\Pi^0_3$-definable. \end{pf} \begin{thm}[PA]\label{thm:model-in-model} If $\fmod(x)$ and $\true(x, \gnn{\fmod (\dt y)})$, then $\fmod ({}^x y)$. \end{thm} \begin{pf} We need to show, inside $\PA$, that the class of all pairs $(\phi,s)$ such that $\sat({}^xy,\phi,s)$ satisfies Tarski's truth conditions and contains the arithmetized axioms of $\PA$. The latter property is easy, so we limit ourselves to verifying the clauses for $\lnot$ and $\exists$ in Tarski's truth conditions. \begin{align*} \sat({}^xy, \fnot(\phi), s) & \leftrightarrow \true(x, \; \gnn{\sat(\dt{y},\dt{\vnum}(\fnot(\phi),x),\dt{\env}(s,x))})\\ & \leftrightarrow \true(x, \gnn{\lnot \sat(\dt{y}, \dt{\vnum}(\phi,x), \dt{\env}(s,x))}) \\ & \leftrightarrow \lnot \true(x, \gnn{\sat(\dt{y}, \dt{\vnum}(\phi,x), \dt{\env}(s,x))}) \\ & \leftrightarrow \lnot \sat({}^xy, \phi, s) \end{align*} where in the second equivalence we used the fact that $\true(x,\gnn{\fmod (\dt{y})})$ and in the third we used the hypothesis $\fmod(x)$.
Similarly we have: \begin{align*} \sat({}^xy, \fexists (k, \phi), s) & \leftrightarrow \true(x, \; \gnn{\sat(\dt{y},\dt{\vnum}(\fexists(k,\phi),x),\dt{\env}(s,x))})\\ & \leftrightarrow \true(x, \gnn{\exists x_0 \sat(\dt{y}, \dt{\vnum}(\phi,x), \dt{\env}(s,x)[x_0/\dt{\vnum}(k,x)])}) \\ & \leftrightarrow \exists z \true(x, \gnn{\sat(\dt{y}, \dt{\vnum}(\phi,x), \dt{\env}(s,x)[\dt{z}/\dt{\vnum}(k,x)])}) \\ & \leftrightarrow \exists z \true(x, \gnn{\sat(\dt{y}, \dt{\vnum}(\phi,x), \dt{\env}(s[z/k],x))}) \\ & \leftrightarrow \exists z \sat({}^xy, \phi, s[z/k]) \end{align*} where the fourth equivalence makes use of the properties of $\env$ (Proposition \ref{prop:substitutions}). \end{pf} \section{L\"ob's derivability conditions} \begin{defn} Given a closed formula of $\PA$, we let $\Box \phi$ be the formula $\forall x (\fmod (x) \to \true(x, \gnn{\phi}))$. Note that $\Box \phi$ has complexity $\Pi^0_4$. \end{defn} The first three points of the following result correspond to L\"ob's derivability conditions in \cite{Lob1995}. \begin{thm} \label{thm:derivability} Let $\phi, \psi$ be closed formulas of $\PA$. We have: \begin{enumerate} \item If $\PA\vdash \phi$, then $\PA \vdash \Box \phi$ \item $\PA \vdash \Box \phi \to \Box \Box \phi$ \item $\PA \vdash \Box(\phi\to \psi) \to (\Box \phi \to \Box \psi)$ \item $\mathbb N\models \Box \phi \implies \PA \vdash \phi$ \end{enumerate} \end{thm} \begin{pf} (1) Suppose $\PA\not\vdash \Box \phi$. Then there is a model $X\models \PA$ such that $X \models \lnot \Box \phi$. By definition this means that there is $y\in X$ such that $X\models \fmod(y)$ and $X \models \true(y, \gnn{\lnot \phi})$. By Proposition \ref{prop:inmodel} there is a model $Z\models \PA$ such that $Z\models \lnot \phi$, so $\PA\not\vdash \phi$. (2) We write $\Diamond \phi$ for $\lnot \Box \lnot \phi$ and observe that $\Diamond \phi$ is provably equivalent to $\exists x (\fmod (x) \land \true(x,\gnn{\phi}))$.
The statement to be proved is equivalent to $\PA\vdash \Diamond \Diamond \phi \to \Diamond \phi$. Now $\Diamond \Diamond \phi$ says that there exist $x,y$ such that $\fmod(x)$, $\true(x, \gnn{\fmod (\dt{y})})$ and $\true(x, \gnn{\true (\dt{y},\gnn{\phi})})$. On the other hand $\Diamond \phi$ says that there is $z$ such that $\fmod(z)$ and $\true(z, \gnn{\phi})$. To prove the implication one can take $z = {}^xy$ as defined in Proposition \ref{prop:mod-in-mod}. (3) Clear from the definitions and the rules of predicate calculus, recalling that $\Box \theta$ stands for $\forall x \; (\fmod (x) \to \true(x, \gnn{\theta}))$. (4) Suppose $\PA\not\vdash \phi$. By Fact \ref{fact:kleene} there is a $\Sigma^0_2$-model $M$ satisfying $\lnot \phi$. Let $m\in \mathbb N$ be a code for such a model. Then $\mathbb N \models \fmod(m)$ and $\mathbb N \models \true(m, \gnn{\lnot \phi})$. This is equivalent to $\mathbb N \models \lnot \Box \phi$. \end{pf} \section{An undecidable formula} By the diagonal lemma, given a formula $\alpha(x)$ in one free variable, there is a closed formula $\beta$ such that $\PA \vdash \beta \leftrightarrow \alpha(\gnn{\beta})$. Using the diagonal lemma we can define a formula $G$ which says ``I have no $\Sigma^0_2$-model'', as in the definition below. \begin{defn} Let $G$ be such that $\PA \vdash G \leftrightarrow \lnot \Box G$. \end{defn} Using Theorem \ref{thm:derivability} we deduce that $G$ is undecidable and equivalent to $\lnot \Box \perp$ by the standard arguments, see for instance \cite{Boolos1994}. We give the details below. \begin{lem} $\PA \not\vdash G$. \end{lem} \begin{pf} Suppose $\PA\vdash G$. Then $\PA \vdash \Box G$ (Theorem \ref{thm:derivability}). On the other hand by definition of $G$, $\PA \vdash \lnot \Box G$, contradicting the consistency of $\PA$. \end{pf} \begin{lem} $\PA \vdash G \leftrightarrow \lnot \Box \perp$. \end{lem} \begin{pf} We use 1.--3. in Theorem \ref{thm:derivability}. Reason in $\PA$.
If $G$ holds, we get $\lnot \Box G$ by definition of $G$. Since $\perp \to G$ is a tautology we obtain $\Box \perp \to \Box G$, hence $\lnot \Box \perp$. Now assume $\lnot G$. By definition of $G$ we get $\Box G$ and by point 2. in Theorem \ref{thm:derivability} $\Box \Box G$ follows. Moreover we have $\Box (\Box G \leftrightarrow \lnot G)$ (apply the definition of $G$ inside the $\Box$), so we get $\Box \lnot G$. Since we also have $\Box G$, we obtain $\Box \perp$. \end{pf} \begin{lem} $\PA \not\vdash \lnot G$. \end{lem} \begin{pf} Suppose $\PA \vdash \lnot G$. Then by definition of $G$, $\PA \vdash \Box G$, so $\mathbb N \models \Box G$ and by Theorem \ref{thm:derivability}(4) $\PA \vdash G$, contradicting the consistency of $\PA$. \end{pf} We have thus obtained: \begin{thm}\label{thm:main} $\lnot \Box \perp$ is independent of $\PA$, namely $\PA$ does not prove that $\PA$ has a $\Sigma^0_2$-model. \end{thm} \end{document}
\begin{document} \title[\tiny{Integral closures in real algebraic geometry}] {Integral closures in real algebraic geometry} \author{Goulwen Fichou, Jean-Philippe Monnier and Ronan Quarez} \address{Goulwen Fichou\\ Univ Rennes\\ IRMAR (UMR 6625), Campus de Beaulieu, 35042 Rennes Cedex, France} \email{[email protected]} \address{Jean-Philippe Monnier\\ LUNAM Universit\'e, LAREMA, Universit\'e d'Angers} \email{[email protected]} \address{Ronan Quarez\\ Univ Rennes\\ Campus de Beaulieu, 35042 Rennes Cedex, France} \email{[email protected]} \date\today \subjclass[2010]{14P99,13B22,26C15} \keywords{real algebraic geometry, normalization, regular functions, continuous rational functions} \begin{abstract} We study the algebraic and geometric properties of the integral closure of different rings of functions on a real algebraic variety : the regular functions and the continuous rational functions. \end{abstract} \maketitle The normalization of an algebraic variety is obtained by an algebraic process, corresponding in affine charts to taking the integral closure of the coordinate rings of the affine components in their rings of fractions. The normalization of a given algebraic variety $X$ satisfies two universal properties : it is the biggest algebraic variety with a finite birational morphism onto $X$ and it is also the smallest normal algebraic variety with a morphism onto $X$. The normalization can be though of as a kind of weak polynomial desingularization of an algebraic variety, but much closer to the original variety due to the finiteness property. When dealing with real algebraic varieties however, the normalization procedure may create surprising phenomena, like the lack of surjectivity or the appearance of isolated singular points. 
The present paper is dedicated to the study of the normalization of real algebraic varieties, together with the integral closure of natural rings of functions on real algebraic varieties, namely the regular functions and the continuous rational functions. As usual in real algebraic geometry, it is sufficient to understand the affine case and even to work with real algebraic sets (which are the real closed points of affine real algebraic varieties) as explained in \cite{BCR}. A particular difference with affine algebraic geometry over $\C$ is the fact that polynomial and regular functions no longer coincide, therefore it is particularly interesting to investigate the relation between the integral closure of polynomial and regular functions. The normalization $X'$ of a real algebraic set $X$ is obtained classically by the following procedure : the ring of polynomial functions $\Pol(X')$ on $X'$ is the integral closure of the ring of polynomial functions $\Pol(X)$ on $X$ in its total ring of fractions $\K(X)$. We provide in the first section some examples to illustrate some pathological behavior with respect to the topology of the varieties, in relation of the notion of central locus of a real algebraic set. The regular functions on a real algebraic set $Y$ are the rational functions with no real poles (e.g. $1/(1+x^2)$ on $\R$), they form the ring $\SO(Y)$ of regular functions on $Y$ that contains $\Pol(Y)$. These functions are the natural functions when dealing with (the real points of) real varieties, since they forget about the poles lying in the complex part. A significant part of the paper is dedicated to the study of the behavior of regular functions during the process of normalization. In particular we compare the integral closure of $\SO(X)$ in $\K(X)$ with $\SO(X')$. We discuss also when the integral closure of the ring of regular functions may be the ring of regular function of a real algebraic set. 
The material developed there leads us to consider real algebraic sets with a totally real normalization, which correspond to those real algebraic sets for which the rings $\SO(X)$ and $\SO(X')$ coincide. As shown in the third section, many good properties of the normalization for complex algebraic varieties are shared by real algebraic sets with a totally real normalization. Since the normalization of a given algebraic variety $X$ is the biggest algebraic variety with a finite birational morphism onto $X$, one may wonder whether there exists a biggest real algebraic set finitely biregular to a given real algebraic set. To answer positively this question, we investigate in the fourth section the integral closure of $\Pol(X)$ in $\SO(X)$. It is the ring of polynomial functions of a real algebraic set $X^b$ called the biregular normalization of $X$. We prove, in particular, that $X^b$ is the biggest real algebraic set with a finite polynomial map onto $X$ which is a biregular isomorphism (i.e.\ one that induces an isomorphism between the rings of regular functions). Even more flexible than regular functions, we consider also the class of continuous rational functions on a real algebraic set $X$. A continuous rational function on $X$ is a continuous function which is regular on a dense Zariski open subset of $X$ (e.g.\ $x^3/(x^2+y^2)$ on $\R^2$). The concept of continuous rational maps or functions was used for the first time by W. Kucharz \cite{Ku} in order to approximate continuous maps into spheres. These functions have recently become the object of a lot of research \cite{FHMM, FMQ, FMQ2, KuKu1, KuKu2, KN, Mo}. Koll\'ar \& Nowak \cite{KN} initiated the proper study of continuous rational functions, proving notably that the restriction of such a function to an algebraic subset of $X$ does not remain rational in general. It is however the case as soon as $X$ is nonsingular \cite[Prop. 8]{KN}.
As a consequence, one may consider the ring $\SR(X)$ of continuous rational functions, and its subring $\SRR(X)$ consisting of those continuous rational functions which remain rational under restriction. This class, called hereditarily rational in \cite{KN} and regulous in \cite{FHMM}, has been systematically studied in \cite{FHMM}. It is notably shown in \cite{FHMM} that the use of the sheaf of regulous functions instead of the sheaf of regular functions corrects some defects of the classical real algebraic geometry like the absence of an usual Nullstellensatz and Cartan theorems A and B. In general, we do not have injections of $\SR(X)$ and $\SRR(X)$ in $\K(X)$ even when $X$ is irreducible. For that reason, we only consider rational continuous functions on the central locus $\Cent X$ of an irreducible real algebraic set $X$. The central locus of an irreducible real algebraic set is the closure for the Euclidean topology of the set of smooth points or equivalently it is the locus of points where the semi-algebraic dimension is maximal. The fifth section is dedicated to the study of the integral closure of $\SR(\Cent X)$ (the ring of continuous rational functions on the central locus) in $\K(X)$. In case $X$ is non-singular, we prove in particular that $\SR(X)$ is integrally closed. In case $X$ is a curve, we show that the integral closure of $\SR(\Cent X)$ in $\K(X)$ is $\SO(X')$. \section{Preliminaries} In this section we review the basic definition of a real algebraic variety together with the properties of its normalization. We recall the concept of continuous rational functions and hereditarily rational functions. \subsection{Real algebraic sets and varieties} We are interested in this text in the geometry of the real closed points of real algebraic varieties. In this context, it is natural to consider only varieties which are affine since almost all real algebraic varieties are affine \cite[Rem. 3.2.12]{BCR}. 
We also consider real algebraic sets which are the real closed points of affine real algebraic varieties. We refer to \cite{Man} for basics on real algebraic varieties and $\R$-schemes. More precisely, to a real algebraic variety given by the ideal $I$ in $\R[x_1,\ldots,x_n]$ one associates the real algebraic set $X=\Z(I)$ of all points in $\R^n$ which cancel any polynomial in $I$. Conversely, to any real algebraic set $X\subset \R^n$ one may associate the real algebraic variety given by the ideal $\I(X)\subset \R [x_1,\ldots,x_n]$ of all polynomials which vanish at all points of $X$. Unless specified, all algebraic sets we consider are real. In complex affine algebraic geometry, polynomial and regular functions coincide and thus we have a unique and natural definition of morphism between complex algebraic sets. In the real setting no such natural definition exists. Indeed, the ring of regular functions $\SO(X)$ is the ring of rational functions with no poles on $X$ (see \cite[Sect. 3.2]{BCR} for details) which is strictly bigger (if $\dim X>0$) than the ring of polynomial functions $\Pol(X)=\R [x_1,\ldots,x_n]/I$ where $I=\I(X)$ on a real algebraic set $X$. Let $X\subset\R^n$ be a real algebraic set. The complexification of $X$, denoted by $X_{\C}$, is the complex algebraic set $X_{\C}\subset\C^n$, whose ring of polynomial functions is $\Pol(X_{\C})=\Pol(X)\otimes_{\R}\C$. As already mentionned, we have $\Pol(X_{\C})=\SO(X_{\C})$. Remark that if $X$ is irreducible, then $X_{\C}$ is automatically irreducible (because $X$ is an algebraic set). Let $X\subset\R^n$ and $Y\subset\R^m$ be real algebraic sets. A polynomial map from $X$ to $Y$ is a map whose coordinate functions are polynomial. A polynomial map $\varphi:X\rightarrow Y$ induces an $\R$-algebra homomorphism $\phi:\Pol(Y)\rightarrow \Pol(X)$ defined by $\phi(f)=f\circ\varphi$. 
The map $\varphi\mapsto \phi$ gives a bijection between the set of polynomial maps from $X$ to $Y$ and the $\R$-algebra homomorphisms from $\Pol(Y)$ to $\Pol(X)$. We say that a polynomial map $\varphi:X\rightarrow Y$ is an isomorphism if $\varphi$ is bijective with a polynomial inverse, or in another words if $\phi:\Pol(Y)\rightarrow \Pol(X)$ is an isomorphism. We define the analog notions with regular functions in place of polynomials ones. In that situation, an isomorphism will be called a biregular isomorphism. Remark that a polynomial map $\varphi:X\rightarrow Y$ extends to a polynomial or regular map $\varphi_{\C}:X_{\C}\rightarrow Y_{\C}$ but in general a regular map cannot be extended to a regular map on the complexifications. Unless specified, all the maps are polynomial. \subsection{Normalization and integral closure} Let us start with the standard abstract algebraic setting. Let $A\to B$ be an extension of rings. An element $b\in B$ is integral over $A$ if $b$ is the root of a monic polynomial with coefficients in $A$. By \cite[Prop. 5.1]{AM}, $b$ is integral over $A$ if and only if $A[b]$ is a finite $A$-module. This equivalence allows to prove that $A_B'=\{b\in B|\,b\, {\rm is\, integral\, over}\,A\}$ is a ring called the integral closure of $A$ in $B$. The extension $A\to B$ is said to be integral if $A_B'=B$. Since we will deal with the normalization of (non-necessarily irreducible) algebraic varieties, one has to deal with rings $A$ which are not necessarily domains but only reduced rings. Hence, in the following $A$ will be a reduced ring admitting a finite number of minimal prime ideals and $B$ be the total ring of fractions $K$ of $A$ (see below), the ring $A_K'$ is denoted by $A'$ and is simply called the integral closure of $A$. The ring $A$ is called integrally closed (in $B$) if $A=A'$ ($A=A_B'$). 
Recall that if $A$ is a reduced ring with minimal prime ideals $\p_1,\ldots,\p_r$, then $(0)=\p_1\cap\ldots \cap \p_r$ and one has the canonical injections $A\rightarrow A_1\times\ldots\times A_r\rightarrow K_1\times \ldots\times K_r=K$ where $A_i=A/\p_i$ and $K_i$ is the fraction field of $A_i$ for any $i$. The product of fields $K$ is called the total ring of fractions of $A$ and the $A_i$'s are called the irreducible components of $A$. \begin{prop}\label{ReducedIntegralClosure} Let $A$ be a reduced ring with minimal prime ideals $\p_1,\ldots,\p_r$. Then, $A'=A'_1\times\ldots\times A'_r$ where $A'$ is the integral closure of $A$ in the total ring of fractions $K$, and, for any $i$, $A'_i$ is the integral closure of $A_i$ (in $K_i$). \end{prop} \begin{proof} Let $f\in K$ be such that $P(f)=0$ where $P$ is a monic polynomial with coefficients in $A$. Let us write $f=(f_1,\ldots,f_r)$ where, for each $i$, $f_i\in K_i$. We may write $P=P_1\times\ldots \times P_r$ with $P_i$ a monic polynomial with coefficients in $A_i$. Then, $P_i(f_i)=0$ in $K_i$ which means that $f_i\in A'_i$. Conversely, let $f=(f_1,\ldots,f_r)\in K$ be such that each $f_i$ is integral over $A_i$. Since the idempotents of $K_1\times \ldots\times K_r$ are also integral over $A$, one gets that $f\in A'$ and concludes the proof. \end{proof} Let us emphasize now what happens in the geometric setting, namely when one takes for $A$ the ring of polynomial functions on an algebraic set $X$ over a field $k$ ($k=\R$ or $\C$) and when $K$ is the total ring of fractions of $X$ denoted in the following by $\K(X)$. Then, $A'$ is a finite $A$-module (a theorem of Emmy Noether \cite[Thm. 4.14]{Ei}) and thus $A'$ is a finitely generated $k$-algebra and so $A'$ is the ring of polynomial functions of an algebraic set, denoted by $X'$, called the normalization of $X$.
By Proposition \ref{ReducedIntegralClosure}, we have $X'=\bigsqcup_{i=1}^r X_{i}'$ where $X_1,\ldots,X_r$ are the irreducible components of $X$. We recall that a map $X\rightarrow Y$ between two algebraic sets over a field $k$ is said to be finite (resp. birational) if the ring morphism $\Pol(Y)\rightarrow \Pol(X)$ makes $\Pol(X)$ a finitely generated $\Pol(Y)$-module (resp. if it induces a biregular isomorphism between two dense Zariski open subsets of $X$ and $Y$ or equivalently if the ring morphism $\K(Y)\to \K(X)$ is an isomorphism). For instance, the projection of the nodal curve given by $y^2-x^2-x^3=0$ on the $x$-axis is finite but not birational. The inclusion $A\subset A'$ induces a finite birational map which we denote by $\pi':X'\rightarrow X$, called the normalization map. We say that an algebraic variety $X$ over a field $k$ is normal if its ring of polynomial functions is integrally closed. For a real algebraic set $X\subset \R^n$, we say that $X$ is geometrically normal if the associated complex algebraic set $X_{\C}$ is normal. It is well known that $X$ is normal if and only if $X$ is geometrically normal. Note that the normalization of an algebraic set $X$ is the biggest algebraic set finitely birational to $X$. More precisely, for any finite birational map $\varphi:Y\rightarrow X$, there exists $\psi:X'\to Y$ such that $\pi'=\varphi \circ \psi$. Note that finite birational maps behave nicely with respect to the Euclidean topology. \begin{prop}\label{lem-closed} Let $\pi:Y\to X$ be a finite birational map between irreducible algebraic sets. Then $\pi$ is proper and closed for the Euclidean topology. If moreover $\pi$ is injective, then $\pi$ is closed for the constructible topology. \end{prop} By constructible topology, we mean the topology whose closed sets are given by Zariski constructible sets closed for the Euclidean topology.
\begin{proof}[Proof of Proposition \ref{lem-closed}] Note that the ring morphism $\Pol(X)\rightarrow \Pol(Y)$ is injective since $\pi$ is birational. We are going to prove that the map $\pi$ is closed and proper with respect to the real spectrum topology (see the section ``Abstract ring of regular functions'' for basics on the real spectrum), then with respect to the semi-algebraic topology and finally with respect to the Euclidean topology. By \cite[Ch. 2, Prop 4.2-4.3]{ABR}, the induced map $\Sp_r\Pol(Y)\rightarrow \Sp_r\Pol(X)$ is closed for the real spectrum topology. According to \cite[Theorem 7.2.3]{BCR}, there is a bijective correspondence between closed semi-algebraic subsets of $X$ (resp. $Y$) and closed constructible subsets of the real spectrum $\Sp_r \Pol(X)$ (resp. the real spectrum $\Sp_r \Pol(Y)$). It follows that the image by $\pi$ of every closed semi-algebraic subset of $Y$ is a closed semi-algebraic subset of $X$. Now it is classical (\cite{vdd} for instance) to conclude that $\pi$ is closed and proper for the Euclidean topology. If $\pi$ is moreover injective, the image by $\pi$ of a Zariski constructible closed subset of $Y$ is a Zariski constructible closed subset of $X$ by \cite[Cor. 4.9]{KP}. \end{proof} \subsection{Surjectivity issues and central locus} Note that for real algebraic varieties, the normalization map $\pi':X'\to X$ may be non-surjective (consider for instance the cubic $X=\Z(y^2-x^2(x-1))$ with an isolated point at the origin) while the normalization of the complexification $\pi_{\C}':X_{\C}'\to X_{\C}$ is always surjective. A similar phenomenon appears in the process of resolving the singularities of a real algebraic varieties. We say that a regular map $Y\to X$ is a resolution of singularities (or a desingularization) if it is a proper birational regular map such that $Y$ is non-singular. 
The normalization can be thought of as a kind of weak polynomial desingularization of an algebraic variety, but much closer to the original variety due to the finiteness property. For curves, the normalization gives a polynomial resolution of singularities. To keep a notion of surjectivity for a resolution of singularities over the real points, we use the concept of central locus of a real algebraic set. \begin{defn} \label{centrale} Let $X$ be an irreducible algebraic set, and denote by $X_{reg}$ the set of non-singular points of $X$. The central locus $\Cent X$ of $X$ is defined to be the Euclidean closure of $X_{reg}$ in $X$. We say that $X$ is central if $X=\Cent X$. \end{defn} \begin{rem} \label{centraledim} Let $X$ be an irreducible algebraic set. By \cite[Prop. 7.6.2]{BCR}, $\Cent X$ is the locus of points of $X$ where the local semi-algebraic dimension is maximal. \end{rem} The central locus behaves well during the process of desingularization. \begin{prop}\cite[Prop. 2.33]{Mo} and \cite[Thm. 2.6, Cor. 2.7]{KK}.\\ \label{centralsurj2} Let $\pi:Y\to X$ be a resolution of singularities. Then $\pi:\Cent Y=Y\to\Cent X$ is well defined, surjective and closed for the Euclidean topology. \end{prop} Notice that the central locus also behaves well under finite birational maps. \begin{prop} \label{centralsurj} Let $\pi:Y\to X$ be a finite birational map between irreducible algebraic sets. Then $\pi_{|\Cent Y}:\Cent Y\to\Cent X$ is well-defined, surjective and closed for the Euclidean topology. \end{prop} \begin{proof} Let $y\in \Cent Y$, and choose a closed connected semi-algebraic neighborhood $N$ of $y$ in $Y$. Then $\pi(N)$ is closed by Proposition \ref{lem-closed}, connected by continuity of $\pi$ and its semialgebraic dimension is equal to $\dim X$ by birationality of $\pi$. Therefore $\pi(y)$ belongs to $\Cent X$. 
Then $\pi(\Cent Y)$ is a closed semialgebraic subset of $\Cent X$, and there exist semialgebraic subsets $A\subset \Cent X$ and $B\subset \Cent Y$ of dimension strictly smaller such that $Y\setminus B$ is in bijection with $X\setminus A$, by birationality of $\pi$. Therefore $\pi(\Cent Y)=\Cent X$. \end{proof} These results will be useful in Section \ref{sec-cont-rat} when dealing with rational continuous functions. Note however that the property of being central is not always preserved by finite and birational maps. This pathology is illustrated by the following example where the normalization of a central surface creates an isolated point. \begin{ex}\label{grospoint2} Consider the surface $S=\Z((y^2+z^2)^2-x(x^2+y^2+z^2))$ in $\R^3$. Then $S$ is central with a unique singular point at the origin. The singular set of its complexification consists of two complex conjugate curves crossing at the origin. The rational function $f=(y^2+z^2)/x$ satisfies the integral equation $f^2-f-x=0$. Let $Y$ be the surface in $\R^4$ admitting as ring of polynomial functions $\Pol(Y)=\Pol(S)[(y^2+z^2)/x]$. We have $$\Pol(Y)\simeq \dfrac{\R[x,y,z,t]}{((y^2+z^2)^2-x(x^2+y^2+z^2),t^2-t-x,xt-(y^2+z^2),(y^2+z^2)t-(x^2+y^2+z^2))}$$ and since $(y^2+z^2)/x$ is integral over $\Pol(S)$ we get a finite birational map $\pi:Y\to S$. Note that $Y$ may be embedded in $\R^3$ via the projection forgetting the $x$ variable, giving rise to the surface defined by the equation $y^2+z^2=t^2(t-1)$ in $\R^3$. This surface is no longer central, with an isolated singular point at the origin. The preimage in $Y$ of the origin of $S$ consists of two points: the isolated point of $Y$ plus a smooth point in the two-dimensional sheet of $Y$. Note that $Y$ is normal since its complexification is a (hyper)surface with an isolated singular point. So $Y$ is the normalization of $S$. 
\end{ex} \section{Integral closure of the ring of regular functions} We have recalled how the normalization of a variety is obtained via the integral closure of the coordinate ring into the total ring of fractions. The aim of this section is to study what happens when one replaces the coordinate ring with the ring of regular functions. Let $X$ be a real algebraic set. The ring of regular functions on $X$ is the localization $\SO(X)=\S(X)^{-1}\Pol(X)$ of the ring $\Pol(X)$ of polynomial functions on $X$ with respect to the multiplicative subset $\S(X)=\{f\in\Pol(X)|\;\Z(f)=\emptyset\}$ of nowhere vanishing functions \cite[Defn. 3.2.1, Prop. 3.2.3]{BCR}. Note that non-isomorphic algebraic sets may share isomorphic rings of regular functions, cf. \cite[Ex. 3.2.8]{BCR}. Clearly, $\S(X)$ does not contain any zero divisor of $\Pol(X)$ (if $pq=0$ and $\Z(p)=\emptyset$ then $\Z(q)=X$ i.e.\ $q=0$) and thus we get a natural injective morphism $$\Pol(X)\hookrightarrow \SO(X),\;f\mapsto\dfrac{f}{1}.$$ \subsection{Real prime ideals} For a commutative ring $A$ containing $\QQ$ we denote by $\Sp A$ the Zariski spectrum of $A$, the set of all prime ideals of $A$. We denote by $\MSp A$ the set of maximal ideals of $A$. In this work, we also consider the real Zariski spectrum $\ReSp A$ which consists of all the real prime ideals of $A$. The set of maximal and real ideals of $A$ will be denoted by $\ReMax A$. Recall that an ideal $I$ of $A$ is called real if, for every sequence $a_1,\ldots,a_k$ of elements of $A$, the relation $a_1^2+\cdots+a_k^2\in I$ implies $a_i\in I$ for $i=1,\ldots,k$. In the following $A$ will mainly stand for the ring $\Pol(X)$ or $\SO(X)$ and, whatever the considered ring, we denote by $\m_x$ the maximal ideal of functions that vanish at $x\in X$. 
It appears that in $\SO(X)$ any maximal ideal is real: \begin{prop} \label{maxreg1} We have $\MSp \SO(X)= \ReSp \SO(X).$ \end{prop} \begin{proof} Assume $f_1^2+\cdots+f_k^2\in\m$ for some $f_i\in\SO(X)$ and suppose moreover that $f_1\not\in \m$. If $k=1$ then we get a contradiction since a maximal ideal is radical. So assume $k>1$. Since $\m$ is maximal, there exist $g\in \SO(X)$ and $h\in \m$ such that $gf_1=1+h$. We get $g^2f_1^2+\cdots+g^2f_k^2\in\m$ and $g^2f_1^2=(1+h)^2=1+h'$ with $h'\in\m$. Hence the invertible function $1+\sum_{i=2}^k g^2f_i^2$ belongs to $\m$, leading to a contradiction. \end{proof} Using then the real Nullstellensatz \cite[Thm. 4.1.4]{BCR}, one can prove that the sets $\MSp \SO(X)$ and $\ReMax \Pol(X)$ are in bijection with $X$. More precisely: \begin{prop} \label{maxreg2} Any maximal ideal of $\SO(X)$ or $\ReSp \Pol(X)$ has the form $\m_x$ for some $x\in X$. Moreover, for $x\in X$ we have $$\Pol(X)_{\m_x}= \SO(X)_{\m_x}=\SO_{X,x}.$$ \end{prop} \begin{proof} The equality $\Pol(X)_{\m_x}= \SO(X)_{\m_x}$ follows from \cite[Cor. 4, Sect. 4]{Ma}. By \cite[Sect. 3.2]{BCR}, we get $\Pol(X)_{\m_x}=\SO_{X,x}$. \end{proof} Let us set now the algebraic setting which generalizes the geometric viewpoint. \subsection{Abstract ring of regular functions} One may associate to a ring $A$ a topological subspace $\Sp_r A$ which takes into account only prime ideals $\p$ whose residual field admits an ordering. Let us detail this construction a bit. Recall that $\Sp_r A$ is empty if and only if $-1$ is a sum of squares in $A$. For any subset $I\subset A$ we define its zero set $\Z_A(I)\subset\Sp_rA$ by $\Z_A(I)=\{\p\in\Sp A|\,f\in \p\,\,\forall f\in I\}$. Denote by $\T(A)$ the multiplicative part of $A$ which consists in all elements of $A$ that can be written as $1$ plus a sum of squares of elements in $A$, a set also denoted by $1+\sum A^2$. 
The ring of regular fractions of elements in $A$ on $\Sp_rA$, denoted by $\SO(A)$, is defined by $$\SO(A)=\T(A)^{-1}A.$$ Note that, to be more closely related to the geometric case, one may also consider $\S(A)=\{f\in A|\,\Z_A(f)=\emptyset\}$. One always has $\T(A)\subset \S(A)$ but the equality does not hold in general. Nevertheless, by the Positivstellensatz (\cite[4.4.2]{BCR}) for any element $s\in \S(A)$, there exist a sum of squares $u\in\sum A^2$ and an element $v\in \T(A)$ such that $us^2=v$, which shows that $\S(A)^{-1}A=\T(A)^{-1}A=\SO(A)$. Now, to recover the geometric setting, let $A=\Pol(X)$ with $X$ a real algebraic set. We have $\SO(\Pol(X))=\SO(X)$ since $\S(\Pol(X))=\S(X)$. Indeed, for $f\in\Pol(X)$, $\Z_{\Pol(X)}(f)=\emptyset$ if and only if $\Z(f)=\emptyset$ (Artin-Lang Theorem \cite[Thm. 4.1.2]{BCR}). \begin{prop} \label{keyreg}\label{keyregprime} If $\p\in\RSp A$ is a real prime ideal, then $\S(A)\cap\p=\emptyset$. Let $\m\in\MSp A$. Then $\m$ is real if and only if $\S(A)\cap\m=\emptyset$. \end{prop} \begin{proof} Assume $\p$ is real. By the real Nullstellensatz \cite[Thm. 2.8]{ABR}, we have $\I(\Z_A(\p))=\p$. If $\S(A)\cap \p\not=\emptyset$ then $\Z_A(\p)=\emptyset$ and we get a contradiction. Assume $\m$ is not real. Arguing as in the proof of Proposition \ref{maxreg1}, there exists $a\in \m$ of the form $1+s$ with $s\in\sum A^2$. Clearly, $a\in\S (A)$ and thus $\S(A)\cap \m\not=\emptyset$. \end{proof} By \cite[Prop. 3.11]{AM} and the above proposition, we get an abstract version of Proposition \ref{maxreg1}. \begin{cor} \label{maxreg1bis} We have $\MSp \SO(A)\subset \RSp \SO(A)$. More precisely, any maximal ideal of $\SO(A)$ is of the form $\S(A)^{-1}\m$ with $\m\in\ReMax A$. Moreover, any real prime ideal of $\SO(A)$ is of the form $\S(A)^{-1}\p$ with $\p\in\ReSp A$. \end{cor} By \cite[Cor. 4, Ch. 2, Sect. 4]{Ma} and the above proposition, we get: \begin{cor} \label{maxreg1ter} Let $\p\in\RSp A$, then $A_{\p}=\SO(A)_{\p\SO(A)}$. \end{cor} We recall from \cite[Thm. 4.7, Ch. 
2, Sect. 4]{Ma} that, for any integral domain $A$, one has $$A=\bigcap_{\p\in\Sp A}A_{\p}=\bigcap_{\m\in\MSp A}A_{\m},$$ where the intersection has sense in the fraction field of $A$. Since all the maximal ideals of a ring of regular functions are real, by Corollaries \ref{maxreg1bis} and \ref{maxreg1ter}, one has: \begin{prop} \label{equalreg} Let $A$ be an integral domain. We have $$\SO (A)=\bigcap_{\m\in\ReMax A}A_{\m}.$$ \end{prop} A result which one can state in the geometric setting as: \begin{prop} \label{PolyReglocalisation} Let $X$ be an irreducible algebraic set. We have $$\SO(X)=\bigcap_{\p\in\Sp \SO(X)}\SO(X)_{\p}=\bigcap_{x\in X}\SO(X)_{\m_x}=\bigcap_{x\in X}\Pol(X)_{\m_x}=\bigcap_{x\in X}\SO_{X,x}.$$ \end{prop} \subsection{Regular functions and normalization} Let $X$ be a real algebraic set with normalization map $\pi':X'\to X$. We recall that $\K(X)$ denotes the total ring of fractions of $X$. Let $X_1,\ldots,X_t$ be the irreducible components of $X$. They are the zero sets of the minimal prime ideals $\p_1,\ldots,\p_t$ of $\Pol(X)$. Since these ideals are real, it follows from the Nullstellensatz \cite[Thm. 4.1.4]{BCR} that they do not meet $\S(X)$ and thus $\p_1\SO(X),\ldots,\p_t\SO(X)$ are the minimal prime ideals of $\SO(X)$. For $i=1,\ldots,t$, we have $\Pol(X_i)=\Pol(X)/\p_i$ and $\SO(X_i)=\SO(X_i)/\p_i\SO(X_i)$ since localization and quotient commute. It follows that $\K(X)=\prod_{i=1}^t\K(X_i)$ is also the total ring of fractions of $\SO(X)$ and we get the following commutative diagram (all the maps are injective): \[ \begin{array}{ccccccc} \Pol(X) & \longrightarrow & \prod_{i=1}^t\Pol(X_i)& \longrightarrow & \prod_{i=1}^t\K(X_i)\\ \downarrow &&\downarrow & \nearrow& \\ \SO(X) &\longrightarrow &\prod_{i=1}^t\SO(X_i) & & \\ \end{array} \]\\ The goal of this subsection is to compare the integral closure $\SO(X)'$ of $\SO(X)$ and the ring $\SO(X')$ of regular functions on the normalization of $X$. 
By Proposition \ref{normalisationred} below, it will be sufficient to understand the irreducible case. Note that the process of localization and taking integral closure commute \cite[Prop. 5.12]{AM}, so that $\SO(X)'$ is equal to $\S(X)^{-1}\Pol(X')$. It is easy to check that if $f\in \S(X)$ then $f\circ\pi'\in \S(X')$ and thus $$\SO(X)'\hookrightarrow \SO(X').$$ To generalize to an abstract algebraic setting, one may assume that our ring $A$ satisfies the condition: \vskip0.4cm \hyp\; The ring $A$ is reduced with a finite number of minimal primes $\p_1,\ldots,\p_t$ that are all real ideals. \vskip0.4cm This hypothesis fits our geometric setting where we consider real algebraic varieties whose irreducible components are all real. For instance, we will not consider rings like $A=\R[x,y]/(x^2+y^2+1)$ nor $A=\R[x,y]/(x^2+y^2)$. Under this assumption, it follows from Proposition \ref{keyreg} that $\S(A)$ does not contain any zero divisor of $A$ (the set of zero divisors of $A$ is $\cup_{i=1}^t \p_i$) and we get a sequence of injective ring morphisms $$A\hookrightarrow \SO(A)\hookrightarrow K=\prod_{i=1}^t k(\p_i),$$ where $K$ is the total ring of fractions of $A$ and $k(\p_i)$ is the residue field at $\p_i$. It is worth mentioning that hypothesis \hyp\ is preserved by integral extensions contained in the integral closure: \begin{lem}\label{IntegralExtensionHypothesis} Let $A\rightarrow B\rightarrow A'$ be a sequence of ring extensions where $A$ is reduced and $A'$ is the integral closure of $A$. If $A$ satisfies \hyp, then $B$ also satisfies \hyp\, and moreover the minimal prime ideals of $B$ and $A$ are in bijection given by contraction. \end{lem} \begin{proof} By Proposition \ref{ReducedIntegralClosure}, $A'$ also satisfies \hyp\, and moreover the minimal prime ideals of $A'$ and $A$ are in bijection given by contraction. By $(ii)$ of \cite[Th 9.3, Ch.2, Sect. 
9]{Ma}, if $C\to D$ is an integral extension of rings then a prime ideal of $D$ lying over a minimal prime ideal of $C$ is a minimal prime ideal of $D$. It follows now from the lying over property (\cite[Th 9.3 (i), Ch.2, Sect. 9]{Ma} or Proposition \ref{LOP}) that $B$ has a finite number of minimal prime ideals which are in bijection with those of $A$ and $A'$. Since these ideals are contractions of real ideals of $A'$, they are real ideals. \end{proof} Let us now state how the integral closure and the ring of regular functions are related: \begin{prop} \label{abstractnormalisationred} Let $A$ be a ring satisfying \hyp. Denote by $\p_1,\ldots,\p_t$ its minimal primes and $A_i=A/\p_i$. Then, one has $\SO(A)'\simeq \prod_{i=1}^t\SO(A_i)'$ and $\SO(A')\simeq \prod_{i=1}^t\SO(A_i')$ and a canonical injective map $\SO(A)'\rightarrow \SO(A')$. \end{prop} \begin{proof} Let us recall first that \hyp\; implies that the maps $A\rightarrow \SO(A)$ and $A'\rightarrow \SO(A')$ are all injective. Moreover, by Proposition \ref{ReducedIntegralClosure}, one gets the isomorphisms $\SO(A)'\simeq \prod_{i=1}^t\SO(A_i)'$ and $A'\simeq \prod_{i=1}^t A_i'$ which implies that $\SO(A')\simeq \prod_{i=1}^t\SO(A_i')$. Let us summarize our morphisms into the following commutative diagram: \begin{center} \begin{tikzcd}[row sep=scriptsize, column sep=scriptsize] A\arrow[dddd]\arrow[dr] \arrow[rr]&&\prod_{i=1}^t A_i\arrow[dl]\arrow[dddd]\\ &A'\arrow[d]\simeq\prod_{i=1}^t A_i'\arrow[d]&\\ &\SO(A')\simeq\prod_{i=1}^t \SO(A_i')&\\ &\SO(A)'\arrow[u,dashed]\simeq\prod_{i=1}^t \SO(A_i)'\arrow[u,dashed]&\\ \SO(A)\arrow[ur]\arrow[rr]&&\prod_{i=1}^t \SO(A_i)\arrow[ul]\\ \end{tikzcd} \end{center} Let us end with the dashed arrow. Namely, if $u\in\SO(A)'$, then $u$ is in the total ring of fractions of $A$, and integral over $\SO(A)$. Then, there exists $v\in 1+\sum A^2$ such that $uv$ is integral over $A$. Since $v$ is invertible in $\SO(A')$, one gets $u\in \SO(A')$. 
\end{proof} One has an immediate geometric version: \begin{prop} \label{normalisationred} Let $X$ be a real algebraic set with normalization map $\pi':X'\to X$. Let $X_1,\ldots,X_t$ be the irreducible components of $X$. We have $$\Pol(X)'=\Pol(X')=\prod_{i=1}^t\Pol(X_i)'=\prod_{i=1}^t\Pol(X_i'),$$ $$\SO(X)'=\prod_{i=1}^t\SO(X_i)'$$ and a natural injective map $$\SO(X)'=\prod_{i=1}^t\SO(X_i)'\hookrightarrow\SO(X')=\prod_{i=1}^t\SO(X_i')\hookrightarrow \prod_{i=1}^t\K(X_i).$$ \end{prop} Note that, if $X$ is normal, then $\SO (X)$ is also integrally closed, and hence the rings $\SO(X)'$ and $\SO(X')$ coincide. The last subsection will give another sufficient condition where this remains true. Before, we have to give some property of the real counterpart of the well known lying over property for integral extensions. \subsection{Real lying over} We will use several times in the paper the lying over property for integral extension of rings. \begin{prop}\cite[Thm. 9.3 (i) and Lem. 2, Ch. 2, Sect. 9]{Ma}.\\ \label{LOP} Let $A\to B$ be an integral extension of rings. \begin{enumerate} \item The associated map $\Sp B\to\Sp A$ is surjective. \item By the map $\Sp B\to\Sp A$, the inverse image of $\Max A$ is $\Max B$. \end{enumerate} \end{prop} As one useful consequence for the sequel, when $A\rightarrow B$ is an integral ring extension and $B$ a domain, one has $$B=\bigcap_{\m\in\MSp A} B_{\m}.$$ Looking at the normalization of a general irreducible real algebraic curve, we see that integral extensions do not necessarily satisfy the real lying over property i.e the map $\ReSp B\rightarrow \ReSp A$ is not necessarily surjective when $A\to B$ is an integral extension of rings. Indeed, it is enough to consider a cubic with an isolated point at the origin given for instance by the equation $y^2-x^2(x-1)=0$. Let $X$ be a real algebraic set. 
We know by Proposition \ref{maxreg1} that the maximal ideals of the ring of regular functions $\SO(X')$ of the normalization of $X$ are all real. We will show in the next proposition that the set of maximal ideals of the integral closure $\SO(X)'$ of the ring of regular functions on $X$ corresponds to the maximal ideals of $\Pol(X')$ lying over the maximal and real ideals of $\Pol(X)$. Therefore, a maximal ideal of $\SO(X)'$ is not necessarily real and consequently the rings $\SO(X)'$ and $\SO(X')$ can be distinct. It shows that the normalization of a real algebraic set cannot be obtained by taking the integral closure of the ring of regular functions but only by considering the integral closure of the ring of polynomial functions. \begin{prop} \label{closint1} Let $X$ be a real algebraic set. \begin{enumerate} \item[1)] The set $\Max \SO(X)'$ is in bijection with the set of maximal ideals of $\Pol(X')$ lying over the real maximal ideals of $\Pol(X)$. \item[2)] If $X$ is irreducible, then $$\SO(X)'=\bigcap_{x\in X}(\SO_{X,x})'=\bigcap_{\m'\in\Sp\Pol(X'),\;\m'\cap\Pol(X)\in\ReMax\Pol(X)}\Pol(X')_{\m'}$$ whereas $$\SO(X')=\bigcap_{\m'\in \ReMax\Pol(X')}\Pol(X')_{\m'}.$$ \end{enumerate} \end{prop} \begin{proof} Consider the integral extension $\SO(X)=\S(X)^{-1}\Pol(X)\to \SO(X)'=\S(X)^{-1}\Pol(X')$. It follows from Propositions \ref{maxreg1}, \ref{maxreg2} and \ref{LOP} that $\Max\SO(X)'$ is in bijection with the set of maximal ideals of $\Pol(X')$ that do not meet $\S(X)$, which is the set of maximal ideals of $\Pol(X')$ that intersect $\Pol(X)$ as maximal ideals associated to points of $X$. The last equality in the second statement is a consequence of Proposition \ref{PolyReglocalisation}. Notice that if $\m'\in\Max\SO(X')$ then $\SO(X')_{\m'}=\Pol(X')_{\m'\cap\Pol(X')}$ (Proposition \ref{maxreg2}). 
By the previous remark and Proposition \ref{maxreg2} we get $$\SO(X)'=(\bigcap_{x\in X}\SO_{X,x})'=\bigcap_{x\in X}(\SO_{X,x})'=\bigcap_{\m'\in\Sp\Pol(X'),\;\m'\cap\Pol(X)\in\ReMax\Pol(X)}\Pol(X')_{\m'},$$ that proves the second statement. \end{proof} \begin{ex}\label{CubicIsolatedPoint} Consider the cubic $X=\Z(y^2-x^2(x-1))$ with an isolated point at the origin. Then $\Pol(X')=\Pol(X)[y/x]\simeq \R[x,z]/(z^2-(x-1))$, setting $z=y/x$. The function $f=1/(1+z^2)=x^2/(x^2+y^2)$ is regular on $X'$. However $f\not\in \Pol(X')_{\m'}$ for the non-real maximal ideal $\m'=(1+z^2)$ of $\Pol(X')$ lying over the (real) maximal ideal $\m=(x,y)$ of the origin in $\Pol(X)$. Indeed we have $1/f \in\m'$. In particular $f\in \SO(X')\setminus \SO(X)'$. \end{ex} \begin{ex} \label{ExKollar} Consider the Koll\'ar surface $X=\Z(y^3-x^3(1+z^2))$ \cite{KN}. Then $\Pol(X')=\Pol(X)[y/x]\simeq \R[x,t,z]/(t^3-(1+z^2))$, setting $t=y/x$. The function $f=1/(t^2+t+1+z^2)=x^2/(y^2+xy+x^2+x^2z^2)$ is regular on $X'$. Let $\m=(x,y,z)$ be the maximal and real ideal of polynomial functions on $X$ that vanish at the origin. Over $\m$ we have two maximal ideals of $\Pol(X')$, namely $\m'=(x,t-1,z),\,\m''=(x,t^2+t+1,z)$, and only one of these two ideals is real. We have $f\not\in\Pol(X')_{\m''}$ since $1/f \in \m''$. It follows that $f\in \SO(X')\setminus \SO(X)'$. \end{ex} We denote by $\Norm(X_{\C})$ the normal locus of $X_{\C}$ i.e the set of $y\in X_{\C}$ such that $\SO_{X_{\C},y}$ is integrally closed. From Propositions \ref{maxreg2}, \ref{normalisationred} and \ref{closint1}: \begin{prop} \label{intclosreel2} We have: \begin{enumerate} \item[1)] $\Pol(X)$ is integrally closed if and only if $\Norm(X_{\C})=X_{\C}$. \item[2)] $\SO(X)$ is integrally closed if and only if $X\subset \Norm(X_{\C})$. 
\end{enumerate} \end{prop} We give in Remark \ref{rem-intclos} an example of a real algebraic set with an integrally closed ring of regular functions, while its ring of polynomial functions is not integrally closed. \section{Totally real normalization} As we have seen, the study of the integral closure of the ring of regular functions of a real algebraic set $X$ is highly related to the behaviour of the complexification $\pi_{\C}':X'_{\C}\rightarrow X_{\C}$ of its normalization map $\pi':X'\to X$. In the sequel, we focus on those real algebraic sets for which the normalization is {\it totally real}. \begin{defn} \label{trn} We say a real algebraic set $X$ has a totally real normalization if $\pi_{\C}'^{-1}(X)=X'$. \end{defn} We will see later that real algebraic varieties with a totally real normalization satisfy very natural properties with respect to finite birational maps onto them. As an appetizer, we begin with a criterion for the equality between $\SO(X)'$ and $\SO(X')$. \begin{prop} \label{egalite} The rings $\SO(X)'$ and $\SO(X')$ are isomorphic if and only if $X$ has a totally real normalization. \end{prop} \begin{proof} By Proposition \ref{normalisationred}, it is sufficient to deal with the case $X$ is irreducible. Assume there exists $x\in X$ such that $\pi_{\C}'^{-1}(x)$ is not totally real. It forces the existence of a non-real maximal ideal $\m'$ of $\Pol(X')$ such that $\m'\cap \Pol(X)=\m_x$. By Propositions \ref{closint1} (first statement) and \ref{maxreg2} then $\SO(X)'$ and $\SO(X')$ cannot be isomorphic. Assume the fibers of $\pi'_{\C}:X'_{\C}\rightarrow X_{\C}$ over the points of $X$ are totally real. It follows that $\ReMax\Pol(X')$ is the set of ideals of $\Pol(X')$ lying over the ideals of $\ReMax\Pol(X)$ and we conclude the proof using the second statement of Proposition \ref{closint1}. 
\end{proof} Taking the integral closure $\Pol(X)'$ of $\Pol(X)$ in $\K(X)$ is an algebraic process but it has a geometric counterpart, indeed $\Pol(X)'$ is the ring of polynomial functions of an algebraic set $X'$. We may wonder whether taking the integral closure of $\SO(X)$ into $\K(X)$ admits also a geometric counterpart, namely whether $\SO(X)'$ is the ring of regular functions of an algebraic set. \begin{prop} \label{clotregular} If $\SO(X)'$ is the ring of regular functions of an algebraic set, then $\SO(X)'$ is isomorphic to $\SO(X')$. \end{prop} \begin{proof} By assumption $\SO(X)'$ is the ring of regular functions of an intermediate algebraic set $Y$ between $X$ and $X'$ i.e $\Pol(X)\subset\Pol(Y)\subset\Pol(X')$. Since $\SO(X)'=\SO(Y)$ then, by Proposition \ref{maxreg1}, all the maximal ideals of $\SO(X)'$ are necessarily real. It follows from Proposition \ref{closint1} that the fibers of $\pi_{\C}':X'_{\C}\rightarrow X_{\C}$ over the points of $X$ are totally real. By Proposition \ref{egalite}, we get the proof. \end{proof} \begin{rem}\label{rem-intclos} The ring of regular functions of a real algebraic set can be integrally closed while its ring of polynomial functions is not integrally closed. Consider for instance an irreducible algebraic set $X$ of dimension one such that $X_{\C}$ is singular and has only non-real singularities (e.g $X=\Z(y^2-(x^2+1)^2x)$). Let $X'$ be the normalization of $X$. Since for all $x\in X$ the local ring $\SO_{X,x}$ is regular then it follows from Proposition \ref{closint1} that $\SO(X)=\SO(X)'$. By Proposition \ref{egalite}, we see that $\SO(X)$ and $\SO(X')$ are isomorphic. Since $X_{\C}$ is not normal then $X$ is not normal and thus $\Pol(X)$ and $\Pol(X')$ are not isomorphic rings. \end{rem} Having a totally real normalization is a stable property under finite birational map. Even more, it allows to keep surjectivity similarly to the complex setting. 
More precisely : \begin{prop} \label{prestotrealn} Let $\pi:Y\to X$ be a finite birational map between real algebraic sets, and assume that $X$ admits a totally real normalization. Then $\pi$ is surjective, the fiber $\pi_{\C}^{-1}(x)$ over a real point $x\in X$ is totally real, and $Y$ has also a totally real normalization. \end{prop} \begin{proof} It is clear that $X$ and $Y$ have the same normalization $X'$. The results follow from the fact that for $y\in Y$, a point of $X'_{\C}$ lying over $y$ necessarily belongs to the fiber $(\pi')_{\C}^{-1}(\pi(y))$, which is real by assumption. \end{proof} Concerning more algebraic aspects, algebraic sets with totally real normalization give rise to a going-up property for real prime ideals. \begin{prop}\label{prestotrealn2} Let $\pi:Y\to X$ be a finite birational map between real algebraic sets, and assume that $X$ admits a totally real normalization. Then \begin{itemize} \item[1)] The map $\Sp\Pol(Y)\to\Sp\Pol(X)$ has totally real fibers over the real ideals. Namely, if an ideal $\q\in\Sp\Pol(Y)$ lies over $\p\in\ReSp\Pol(X)$ then $\q$ is a real ideal. \item[2)] The integral extension $\Pol(X)\to \Pol(Y)$ satisfies the going-up property for the real prime ideals. Namely, given two real prime ideals $\p\subset\p'$ of $\Pol(X)$ and $\q\in\ReSp\Pol(Y)$ lying over $\p$, there exists $\q'\in\ReSp \Pol(Y)$ lying over $\p'$ such that $\q\subset\q'$. \end{itemize} \end{prop} \begin{proof} Let us prove 1). We see $\Pol(X)$ (resp. $\Pol(Y)$) as the subring of $\Pol(X_{\C})$ (resp. $\Pol(Y_{\C})$) of elements fixed by the complex conjugation. We also see $X$ (resp. $Y$) as a subset of points of $X_{\C}$ (resp. $Y_{\C}$) in the same way. In both cases, we denote the complex conjugation with a bar. By the lying over property (Proposition \ref{LOP}) there is $\q\in\Sp \Pol(Y)$ lying over $\p$. Assume $\q$ is not a real ideal. 
It follows that the ideal $\q_{\C}=\q\otimes_{\R}\C$ is no longer a prime ideal in $\Pol(Y_{\C})$ and there exists distinct conjugated ideals $\ir,\overline{\ir}\in\Sp\Pol(Y_{\C})$ such that $\q_{\C}=\ir\cap\overline{\ir}$. On the contrary, we have $\p_{\C}=\p\otimes_{\R}\C\in \Sp\Pol(X_{\C})$ since $\p$ is a real ideal. We have integral extensions $$\dfrac{\Pol(X_{\C})}{\p_{\C}}\to \dfrac{\Pol(Y_{\C})}{\ir} ~~~\textrm{~~~and~~~}~~~ \dfrac{\Pol(X_{\C})}{\p_{\C}}\to \dfrac{\Pol(Y_{\C})}{\overline{\ir}}$$ that induce finite polynomial maps between irreducible algebraic sets $W\to V_{\C}$, $\overline{W}\to V_{\C}$ where $V=\Z(\p)$, $W=\Z_{\C}(\ir)$ and $\overline{W}=\Z_{\C}(\overline{\ir})$. Remark that $V_{\C}=\Z_{\C}(\p_{\C})$ since $\p$ is a real ideal. Let $V'=\Z(\q)$. Notice that the integral extension $$\dfrac{\Pol(X)}{\p}\to\dfrac{\Pol(Y)}{\q}$$ does not induce a finite map $V'\to V$ since $\q$ is different from $\I(V')$ and moreover $V'_{\C}\not=\Z_{\C}(\q_{\C})=W\cup\overline{W}$. Since we have integral ring extensions, the real algebras $$\dfrac{\Pol(X)}{\p} ~~~,~~~ \dfrac{\Pol(Y)}{\q}$$ and the complex algebras $$\dfrac{\Pol(X_{\C})}{\p_{\C}}~~~,~~~\dfrac{\Pol(Y_{\C})}{\ir}~~~,~~~\dfrac{\Pol(Y_{\C})}{\overline{\ir}}$$ have respectively the same Krull dimension. Since $\p$ is a real ideal, we have $\dim V=\dim_{\C}V_{\C}=\dim_{\C}W=\dim_{\C}\overline{W}$. We have $V'=W\cap\overline{W}$ and thus $\dim V'=\dim_{\C}V'_{\C}<\dim V=\dim_{\C}V_{\C}=\dim_{\C}W=\dim_{\C}\overline{W}$. It follows that the surjective finite map $W\to V_{\C}$ (which is the restriction of $\pi_{\C}$ to $W$) has at least a totally non-real fiber over a point of $V$. We get a contradiction using Proposition \ref{prestotrealn} and the proof of 1) is done. The extension $\Pol(X)\to \Pol(Y)$ is integral and thus satisfies the going-up property for prime ideals. 
By 1) any prime ideal of $\Pol(Y)$ lying over a real prime ideal of $\Pol(X)$ is real, hence $\Pol(X)\to \Pol(Y)$ satisfies the going-up property for the real prime ideals and it gives 2). \end{proof} We denote by $\overline{A}^Z$ the closure of a set $A$ with respect to the Zariski topology. \begin{cor}\label{cor-clo} Let $\pi:Y\to X$ be a finite birational map between real algebraic sets. If $X$ admits a totally real normalization, then $\pi$ is closed for the Zariski topology. \end{cor} \begin{proof} Let $W$ be an algebraic subset of $Y$. We aim to show $\pi(W)$ is an algebraic subset of $X$. It suffices to consider the case $W$ irreducible. So there exists $\q\in\ReSp \Pol(Y)$ such that $W=\Z(\q)$. Let $\p=\q\cap\Pol(X)\in\ReSp\Pol(X)$ and $V=\Z(\p)$. We have $V=\overline{\pi(W)}^Z$. Let $x\in V$ and $\m_x$ be the corresponding maximal ideal of $\Pol(X)$. By 2) of Proposition \ref{prestotrealn2} there exists a real prime ideal $\q'$ lying over $\m_x$ such that $\q\subset\q'$. Since $\Pol(X)\to \Pol(Y)$ is integral, the ideal $\q'$ is maximal and thus corresponds to a point $y\in W$ such that $\pi(y)=x$. Thus $\pi(W)=V$. \end{proof} To illustrate how useful such a result can be, we refer to Proposition \ref{zerointratcont} in the section ``Zero sets of integral continuous rational functions'' as an application in relation with continuous rational functions. We end this section with a particular focus on finite birational maps which are moreover bijective. When the target space admits a totally real normalization, it will lead to a particularly rigid class of bijective maps which are not necessarily isomorphisms. In order to state the result, we introduce the notion of hereditarily birational maps, inspired by \cite{KN}. Let $\pi:Y\rightarrow X$ be a finite birational map between algebraic sets. Let $W$ be an irreducible algebraic subset of $Y$ and let $V$ denote the Zariski closure of $\pi(W)$. 
Then the restriction $\pi_{|W}:W\rightarrow V$ is still finite, but not birational in general. \begin{defn} \label{defrestriction} Let $\pi:Y\rightarrow X$ be a finite birational map between algebraic sets. We say that $\pi$ is hereditarily birational if for every irreducible algebraic subset $W\subset Y$, the restriction $\pi_{|W}:W\rightarrow V$ is birational where $V$ denotes the Zariski closure of $\pi(W)$. \end{defn} Note that this condition is far from being automatic, as illustrated by the Koll\'ar surface of Example \ref{ExKollar}. The normalization $\pi':X'\to X$ is even bijective in this example, however its restriction to $W=X'\cap \{x=0\}$ is the projection map from the curve $\{t^3=1+z^2\}\subset \R^2$ onto the $z$-axis whose inverse is not rational. \begin{thm} \label{bijtotrealn} Let $X$ be a real algebraic set with a totally real normalization. Let $\pi:Y\to X$ be a bijective finite birational map from a real algebraic set $Y$. Then \begin{itemize} \item $\pi$ is a homeomorphism for the Euclidean, constructible and Zariski topologies. \item the map $\ReSp\Pol(Y)\to \ReSp\Pol(X)$, $\q\mapsto\q\cap\Pol(X)$, is bijective. \item $\pi$ is hereditarily birational. \end{itemize} \end{thm} \begin{proof} We reduce the proof to the irreducible case as follows. Note first that by definition and Proposition \ref{ReducedIntegralClosure}, each irreducible component of $X$ admits a totally real normalization. Moreover, $X$ and $Y$ have the same number of irreducible components by birationality of $\pi$, with a correspondence between the components induced by $\pi$. Moreover $\pi$ restricted to such a component $Y_1$ of $Y$ is bijective onto the corresponding component $X_1$ of $X$. We prove the surjectivity as follows. The finite birational map $\pi_{|Y_1}:Y_1\to X_1$ induces a map $X_1' \to Y_1$ whose composition $X_1' \to Y_1 \to X_1$ coincides with the normalization map $X_1'\to X_1$.
Then, if $x$ is a point in $X_1$, there exists a real point in the normalization of $X_1$ lying over $x$, and its image in $Y_1$ is then a preimage for $x$ by $\pi_{|Y_1}$. The map $\pi$ is a homeomorphism for the Euclidean and constructible topologies using Lemma \ref{lem-closed}. By Corollary \ref{cor-clo}, the map $\pi$ is closed for the Zariski topology and thus it is also a homeomorphism for the Zariski topology. Since the map $\Sp\Pol(Y)\to\Sp\Pol(X)$ has totally real fibers over the real ideals by Proposition \ref{prestotrealn2}, and since $\pi$ is bijective, it follows from the real Nullstellensatz that the map $$\ReSp\Pol(Y)\to \ReSp\Pol(X)~~~,~~~\q\mapsto\q\cap\Pol(X)$$ is bijective. It remains to prove the last statement of the theorem. Let $W$ be an irreducible algebraic subset of $Y$ and $V=\overline{\pi(W)}^{Z}$. We have to show that the algebraic field extension $\K(V)\to \K(W)$ is an isomorphism. We have $W=\Z(\q)$ for a $\q\in\RSp \Pol(Y)$ and $V=\Z(\p)$ with $\p=\q\cap \Pol(X)\in \RSp \Pol(X)$. Since $\p$ and $\q$ are real prime ideals, $W_{\C}$ and $V_{\C}$ are irreducible and $W$ (resp. $V$) is Zariski dense in $W_{\C}$ (resp. $V_{\C}$). By the above results, $\q$ is the unique prime ideal of $\Pol(Y)$ lying over $\p$ and $(\pi_{|W})_{\C}:W_{\C}\to V_{\C}$ is a finite map such that $(\pi_{|W})_{\C}^{-1}(V)=W$ and $\pi_{|W}:W\to V$ is a finite bijective map. Assume the algebraic field extension $\K(V)\to \K(W)$ has degree $d>1$. There is a non-empty Zariski open subset $U$ of $V_{\C}$ such that $(\pi_{|W})_{\C}^{-1}(x)$ consists of precisely $d$ points whenever $x\in U$. Since $U$ meets $V$, we get a contradiction and the proof is done. \end{proof} Note that without the assumption that $X$ admits a totally real normalization in Theorem \ref{bijtotrealn}, the restriction of $\pi$ to an irreducible component is no longer bijective in general.
Consider for instance for $X$ the union of the cubic with an isolated point from Example \ref{CubicIsolatedPoint} with a vertical line passing through that point. The normalization $Y$ of $X$ consists of a line in disjoint union with the normalization of the cubic, and this is bijective to $X$. However, its restriction to the normalization of the cubic is no longer bijective. \section{Biregular normalization of a real algebraic variety} \subsection{Geometric biregular normalization} Let $X$ be a real algebraic set. In this section, we study the integral closure $\Pol(X)_{\SO(X)}'$ of $\Pol(X)$ in $\SO(X)$. From the sequence of inclusions $$\Pol(X)\subset \SO(X)\subset \K(X),$$ we see that $\Pol(X)_{\SO(X)}'$ is a finite $\Pol(X)$-module (as a submodule of the Noetherian $\Pol(X)$-module $\Pol(X')$) and thus is a finitely generated $\R$-algebra. So, $\Pol(X)_{\SO(X)}'$ is the ring of polynomial functions of a real algebraic set, denoted $X^{b}$ in the sequel. Remark that $X^{b}$ is an intermediate algebraic set between $X$ and $X'$, i.e.\ $\Pol(X)\subset\Pol(X^{b})\subset \Pol(X')$. It follows that we have finite birational polynomial maps $\pi^{b}:X^{b}\to X$ and $\phi : X'\to X^{b}$. We see $\Pol(X^{b})$ as a subring of $\Pol(X')$. \begin{prop} \label{defequivgrn} Considering $\Pol(X^{b})$ as a subset of $\Pol(X')$, the following equality holds: $$\Pol(X^{b})=\{g\in\Pol(X')|\,\exists f\in\SO(X)\,{\rm such\, that}\,f\circ\pi'=g\,\,{\rm in}\,\, \SO(X')\}.$$ \end{prop} \begin{proof} An element of the integral closure of $\Pol(X)$ in $\SO(X)$ is a rational function on $X$ integral over $\Pol(X)$ that can be extended as a regular function $f$ on $X$. This integral rational function gives rise to a polynomial function $g\in\Pol(X')$ and we get $f\circ \pi'=g$ on $X'$ since $\pi'$ is a regular map and since $f\circ \pi'$ and $g$ coincide as rational functions on $X'$.
\end{proof} \begin{rem}\label{rem-bir} In other words, for any polynomial function $g$ on $X^b$, there exists a regular function $f$ on $X$ such that $f\circ \pi'=g \circ \phi$. As a consequence $g$ and $f\circ \pi^b$ coincide on the image of $\phi$ in $X^b$. That image being Zariski dense by birationality of $\phi$, the (polynomial and) regular functions $g$ and $f\circ \pi^b$ actually coincide on the whole of $X^b$. \end{rem} The interest in $X^b$ comes from the following result, which states that $X^{b}$ is the biggest intermediate algebraic set between $X$ and $X'$ with the same regular functions as $X$. \begin{thm} \label{propunivbiregweak} Let $X$ be an algebraic set, $X'$ be its normalization and $X^b$ be the algebraic set such that $\Pol(X^b)=\Pol(X)_{\SO(X)}'$. Then $X^{b}$ is the biggest algebraic set among the intermediate algebraic sets $Y$ between $X$ and $X'$ such that the induced map $\pi:Y\to X$ is a biregular isomorphism. More precisely, $\pi^{b}$ is a biregular isomorphism and moreover, given an algebraic set $Y$ and a finite birational polynomial map $\pi:Y\to X$, then $\pi$ is a biregular isomorphism if and only if there exists a finite birational polynomial map $\theta:X^{b}\to Y$ such that $\pi\circ\theta=\pi^{b}$ (as polynomial maps). \end{thm} As a consequence, we call $X^b$ the biregular normalization of $X$. \begin{proof}[Proof of Theorem \ref{propunivbiregweak}] We prove first that the polynomial map $\pi^{b}:X^{b}\to X$ is a biregular isomorphism. Let $y_1,y_2\in X^{b}$ be such that $\pi^{b}(y_1)=\pi^{b}(y_2)$. Pick any $g\in\Pol(X^{b})$, and let $f\in\SO(X)$ be as in Remark \ref{rem-bir}. Then $$g(y_1)=f\circ \pi^b(y_1)=f\circ \pi^b(y_2)=g(y_2),$$ so that $\pi^{b}$ is injective. To prove the surjectivity of $\pi^b$, take $x\in X$. If $x$ belongs to the image $\pi'(X')$, then the result is immediate.
So assume that there exist conjugated points $y,\overline{y}\in X'_{\C}$ such that $\pi'_{\C}(y)=\pi'_{\C}(\overline{y})=x$, the complex normalization map being surjective. Pick any $g\in\Pol(X^{b})$, and let $f\in\SO(X)$ be such that $g\circ \phi= f \circ \pi'$ on $X'$ as in Remark \ref{rem-bir}. By regularity of $f$, this equality holds actually on a Zariski dense open subset $U$ of $X'_{\C}$ that contains $\pi'^{-1}(X)$. In particular $g(\phi(y))=g(\phi(\overline{y}))$, so that $\phi(y)=\phi(\overline{y})$ is a real preimage of $x$ in $X^b$. This proves the surjectivity of $\pi^b$, and so its bijectivity. Suppose $X^{b}\subset \R^n$ and consider a coordinate function $y_i$ on $X^{b}$ for $i \in \{1,\ldots,n\}$. We want to prove that the function $f_i=y_i \circ (\pi^{b})^{-1}$ is regular on $X$. However $f_i\circ \pi^{b}$ is polynomial on $X^{b}$, so that, by Proposition \ref{defequivgrn}, $f_i$ belongs to $\SO(X)$ as required. It follows that the inverse $(\pi^{b})^{-1}$ is a regular map as expected. \vskip 2mm Let $Y$ be an intermediate algebraic set between $X$ and $X'$ such that the finite birational polynomial map $\pi:Y\to X$ is a biregular isomorphism. Denote by $\phi$ the map $X'\to Y$. It follows that the composition by $\pi^{-1}$ gives an injective mapping $\Pol(Y)\subset \SO(X)$. Hence, for any $g\in\Pol(Y)$ there exists $f=g\circ\pi^{-1}\in\SO(X)$ such that $f\circ\pi=g$ on $Y$ and thus $f\circ\pi'=g\circ\phi$ on $X'$. By Proposition \ref{defequivgrn}, we get $\Pol(Y)\subset\Pol(X^{b})$ and thus we get an induced polynomial map $\theta:X^{b}\to Y$. Assume we have finite birational polynomial maps $\theta:X^{b}\to Y$ and $\pi:Y\to X$ such that $\pi^{b}=\pi\circ\theta$ (i.e $Y$ is an intermediate algebraic set between $X$ and $X^{b}$). We get the following ring extensions $\SO(X)\to \SO(Y)\to \SO(X^{b})$. Since $\SO(X)\to\SO(X^{b})$ is an isomorphism it follows that $\pi$ and $\theta$ are biregular isomorphisms. 
\end{proof} \begin{cor} \label{intbireg} Let $X$ be an algebraic set. Let $Y$ be an intermediate algebraic set between $X$ and $X^{b}$. Then $Y$ is biregularly isomorphic with $X$ and $X^{b}$. \end{cor} \subsection{Algebraic biregular normalization} The aim of this part is to give a general algebraic setting for the notion of biregular normalization, a setting which will be particularly adapted to study the local point of view. Let us recall our natural assumption: \hyp\; The ring $A$ is reduced with a finite number of minimal primes $\p_1,\ldots,\p_t$ that are all real ideals. Under this assumption, we recall that we get an inclusion $A\subset\SO(A)$. \begin{defn} \label{defalgrn} Let $A$ be a ring satisfying \hyp. The biregular integral closure of $A$, denoted by $A^{b}$, is $A_{\SO(A)}'$, i.e.\ the integral closure of $A$ in $\SO(A)$. \end{defn} In the geometric setting, i.e.\ $A=\Pol(X)$ with $X$ a real algebraic set, we get $\Pol(X^{b})=\Pol(X)^{b}$. The following proposition says that the biregular integral closure is obtained, locally, by taking the integral closure at any maximal ideal which is not real, and by doing nothing otherwise: \begin{prop} \label{equivdefalgrn} Let $A$ be a ring satisfying \hyp. We have \begin{enumerate} \item[1)] Let $\m\in\MSp A$ but $\m\not\in\RSp A$. Then $$(A^{b})_{\m}=(A_{\m})'=(A')_{\m}.$$ \item[2)] Let $\m\in\ReMax A$. Then $$(A^{b})_{\m}=A_{\m}=\SO(A)_{\m}.$$ \end{enumerate} \end{prop} \begin{proof} The proof uses the sequence of inclusions $A\subset\SO(A)\subset K$ already described where $K$ denotes the total ring of fractions of $A$. Let $\m\in\Max A$. Let $\overline{A_{\m}}$ be the integral closure of $A_{\m}$ in $\SO(A)_{\m}$. Since integral closure commutes with localization, we get $\overline{A_{\m}}=(A^{b})_{\m}$. Assume $\m\in\MSp A$ but $\m\not\in\RSp A$. By Proposition \ref{keyreg}, $\S(A)\cap\m\not=\emptyset$ and thus $\S(A)^{-1}\m=\SO(A)$ (\cite[Prop. 4.8]{AM}).
It follows that $\SO(A)_{\m}=K_{\m}$ and thus $\overline{A_{\m}}=(A^{b})_{\m}=(A_{\m})'=(A')_{\m}$. Assume $\m\in\ReMax A$. It follows from Corollary \ref{maxreg1ter} that $\SO(A)_{\m}=A_{\m}$ (thus $(A^{b})_{\m}=A_{\m}$) and the proof of 1) and 2) is done. \end{proof} It follows from Proposition \ref{equivdefalgrn} that biregular normalization and localization by a maximal ideal commute. \begin{cor} Let $A$ be a ring satisfying \textbf{\textrm{\textsc{(mp)}}}\; and let $\m\in\MSp A$. Then $$(A^b)_{\m}=(A_{\m})^b.$$ \end{cor} If one assumes that $A$ is a domain, one deduces from $A^b=\bigcap_{\m\in\MSp A} A^b_{\m}$ that: \begin{prop} Let $A$ be a real ring which is a domain. We have $$A^{b}=(\bigcap_{\m\in\MSp A\setminus\ReMax A}(A')_{\m})\cap(\bigcap_{\m\in\ReMax A}A_{\m})$$ where the intersection makes sense in the fraction field of $A$. \end{prop} Let $X$ be a real algebraic set. From the previous proposition, we see that $X^{b}$ is a normalization of the non-real locus of $X_{\C}$. \begin{ex} Consider the irreducible algebraic curve $X=\Z(y^2-(x^2+1)^2x)$. Let $X'$ be the normalization of $X$. Since the singularities of $X$ are non-real we get $X^{b}=X'$. Remark that $\Pol(X')=\Pol(X)[y/(1+x^2)]$ and that $y/(1+x^2)$ is a regular function on $X$ integral over $\Pol(X)$. \end{ex} While usual normalization always separates the irreducible components (Proposition \ref{ReducedIntegralClosure}), it is not the case for the biregular normalization since $\pi^b:X^b\to X$ is a bijection (Theorem \ref{propunivbiregweak}). \begin{ex} Consider the algebraic curve $X=\Z(xy)$. Then $X$ is its own biregular normalization, while $X'$ is the union of two lines. \end{ex} \begin{ex} Consider the Koll\'ar surface $X=\Z(y^3-x^3(1+z^2))$ (Example \ref{ExKollar}). Then $\Pol(X')=\Pol(X)[y/x]$. Since the rational fraction $y/x$ has a pole along the $z$-axis, it follows that $X^b\not=X'$ even if $\pi':X'\to X$ is a bijection.
By Proposition \ref{equivdefalgrn}, we see that $X^b=X$. \end{ex} The reader is referred to \cite{FMQ2} to find comparisons of the biregular normalization with other kinds of normalizations. \subsection{Biregular extensions of rings} Let $A\to B$ be an integral extension with $A$ satisfying \hyp\, and $B$ a subring of $A'$, the integral closure of $A$. According to Lemma \ref{IntegralExtensionHypothesis}, the ring $B$ also satisfies \hyp. Since the image of $\T(A)$ by the extension is contained in $\T(B)$ and since $\T(A)$ (resp. $\T(B)$) doesn't contain any zero divisor of $A$ (resp. $B$), we get an induced ring extension $\SO(A)\to\SO(B)$ that is not necessarily integral. In case $\SO(A)\to\SO(B)$ is an isomorphism we say that $A\to B$ is biregular. \begin{rem} Another possible framework would be to consider integral extensions $A\to B$ such that $A$ and $B$ both satisfy \hyp. As we just said, this condition is fulfilled when $A$ satisfies \hyp\; and $B$ injects into $A'$. The converse is false, as one can check with the ring extension $\RR[x]\rightarrow \RR[x,\sqrt{x}]$. Nevertheless, the assumption that $B$ injects into $A'$ seemed more natural to us. Moreover, the results we are interested in (Proposition \ref{CharactBiregExt} and Theorem \ref{propunivbiregweakabstract}) remain exactly the same if we replace one framework with the other. \end{rem} Let us give a characterization of biregular extensions. \begin{prop}\label{CharactBiregExt} \label{equibireg} Let $A$ be a ring satisfying \textbf{\textrm{\textsc{(mp)}}}\; and $A\to B$ an integral extension of rings contained in $A'$. The following properties are equivalent: \begin{enumerate} \item[1)] The extension $A\to B$ is biregular. \item[2)] Given any ideal $\m\in\ReMax A$, there exists a unique maximal ideal $\m'\in\Max B$ lying over $\m$ and we have $\m'\in\ReMax B$ and the map $A_{\m}\to B_{\m'}$ is an isomorphism.
\item[3)] Given any real prime ideal $\p\in\RSp A$, there exists a unique prime $\q\in\Sp B$ lying over $\p$ and we have $\q\in\RSp B$ and the map $A_{\p}\to B_{\q}$ is an isomorphism. \end{enumerate} \end{prop} \begin{proof} Obviously 3) implies 2). Let us show that 2) implies 1). Given any ideal $\m\in\ReMax A$, there exists a unique maximal ideal $\m'\in\Max B$ lying over $\m$ and we have $\m'\in\RSp B$ and the map $A_{\m}\to B_{\m'}$ is an isomorphism. By \cite[Ex. 3, Ch. 3, Sect. 9]{Ma}, we get $B_{\m}=B_{\m'}$. Hence we have $A_{\m}=B_{\m}$, and by Corollary \ref{maxreg1ter} we obtain $A_{\m}=\SO(A)_{\m}=B_{\m}=\SO(B)_{\m}=B_{\m'}=\SO(B)_{\m'}$. One has $\SO(A)_{\m}=\SO(B)_{\m}$ for any $\m\in\ReMax A$, which shows that $\SO(A)_{\m}=\SO(B)_{\m}$ for any $\m\in\Max \SO(A)$. By \cite[Prop. 3.9]{AM}, we get that $\SO(A)\to\SO(B)$ is bijective and thus a ring isomorphism. Let us show now that 1) implies 3). Assume $A\to B$ is biregular. Let $\p\in\RSp A$ and let $\q\in\Sp B$ lying over $\p$. By Corollary \ref{maxreg1ter}, we have $A_{\p}\simeq \SO(A)_{\p\SO(A)}$. Since $\SO(A)\simeq \SO(B)$, it follows that $\q$ does not meet $\T(B)$. Indeed, assume to the contrary that $b\in\q\cap \T(B)$. In $\SO(B)=\SO(A)$, one has $b= \frac{a}{1+\sum_ia_i^2}$ where $a$ and the $a_i$'s lie in $A$. One gets $a\in \q\cap A=\p$. Moreover, since $b\in\T(B)$, it is invertible in $\SO(A)$. Since $\sum_ia_i^2\in \SO(A)$, one gets also that $a$ is invertible in $\SO(A)$, a contradiction. Now, one may look at $\q$ as a prime ideal of $\SO(B)$ lying over the prime ideal $\p$ viewed in $\SO(A)$. The isomorphism $\SO(B)=\SO(A)$ shows that $\q$ is unique and also real. Moreover, one has $$A_{\p}\simeq \SO(A)_{\p\SO(A)}\simeq \SO(B)_{\q\SO(B)}\simeq B_{\q}.$$ \end{proof} Note that the previous proposition shows that a biregular ring extension is a centrally subintegral ring extension as defined in \cite{FMQ2}.
We prove now an abstract version of Theorem \ref{propunivbiregweak}, which says that the biregular integral closure $A^{b}$ of $A$ is the biggest intermediate ring between $A$ and $A'$ which is biregular with $A$. Namely: \begin{thm} \label{propunivbiregweakabstract} Let $A$ be a ring satisfying \hyp. Let $A'$ denote the integral closure of $A$. Then, \begin{enumerate} \item $A\to A^{b}$ is biregular, \item Let $A\to B\to A'$ be a sequence of ring extensions. Then, $A\to B$ is biregular if and only if $B\subset A^{b}$. \end{enumerate} \end{thm} \begin{proof} Note that, by Proposition \ref{ReducedIntegralClosure}, $A'$ satisfies \hyp. We begin by proving that $A\to A^{b}$ is biregular. Let $\m\in\ReMax A$. By Proposition \ref{equivdefalgrn} we have shown that $(A^{b})_{\m}=A_{\m}$. It follows that $(A^{b})_{\m}$ is local and thus there exists a unique ideal $\m'$ (which is a real ideal) of $A^{b}$ lying over $\m$. By \cite[Ex. 3, Ch. 3, Sect. 9]{Ma}, we get $(A^{b})_{\m}=(A^{b})_{\m'}$ and hence $A_{\m}=(A^{b})_{\m'}$. By Proposition \ref{equibireg}, $A\to A^{b}$ is biregular. Assume first that $A\to B$ is a biregular extension where $B$ is a subring of $A'$, and let us show that $B\subset A^{b}$. Since $B$ satisfies \hyp\, (Lemma \ref{IntegralExtensionHypothesis}), $B$ injects into $\SO(B)$. Since $\SO(A)\simeq\SO(B)$, we get an injective map $B\to \SO(A)$. Since $A\to B$ is integral and since $A^{b}$ is the integral closure of $A$ in $\SO(A)$ we get the desired inclusion $B\subset A^{b}$. Assume now that $A\subset B\subset A^{b}$. It induces $\SO(A)\subset \SO(B)\subset \SO(A^{b})$. Since $\SO(A)=\SO(A^{b})$, one has $\SO(A)=\SO(B)$ and the proof is done. \end{proof} \section{Integral closure of the ring of continuous rational functions}\label{sec-cont-rat} The intriguing behaviour of rational functions on a real algebraic set admitting a continuous extension to the whole algebraic set has been investigated in \cite{KN}.
Among them, the class of hereditarily rational functions is of special interest. We investigate in this section the integral closure of the ring of continuous rational functions on the central locus, and end the section with discussing fully the case of curves. We start with recalling the main definitions for continuous rational functions and hereditarily rational functions, and focus later on the restriction of these functions to the central locus on the real algebraic set considered. \subsection{Continuous rational functions and hereditarily rational functions} For $X\subset \R^n$ an algebraic set, the total ring of fractions $\K(X)$ is also the ring of (classes of) rational functions on $X$ (which is a field when $X$ is irreducible). A rational function $f\in \K(X)$ is regular on a Zariski-dense open subset $U\subset X$ if there exist polynomial functions $p$ and $q$ on $\R^n$ such that $\Z(q) \cap U=\emptyset$ and $f=p/q$ on $U$. The couple $(U,f_{|U})$ is called a regular presentation of $f$. \begin{defn} \label{defratcont} Let $f:X\to \R$ be a continuous function. We say that $f$ is a continuous rational function on $X$ if there exists a Zariski-dense open subset $U\subset X$ such that $f_{|U}$ is regular. We denote by $\SR(X)$ the ring of continuous rational functions on $X$. A map $Y\to X$ between real algebraic sets $X\subset \R^n$ and $Y\subset \R^m$ is called continuous rational if its components are continuous rational functions on $Y$. \end{defn} A typical example is provided by the function defined by $(x,y)\mapsto x^3/(x^2+y^2)$ on $\R^2$ minus the origin, with value zero at the origin. Note also that on a curve with isolated points, a function regular on the one-dimensional branches can be extended continuously by any real value at the isolated points. 
In particular, the natural ring morphism $\SR(X)\rightarrow \K(X)$ which sends $f\in \SR(X)$ to the class $(U,f_{|U})$ in $\K(X)$, where $(U,f_{|U})$ is a regular presentation of $f$, is not injective in general. This phenomenon is related to the notion of central part of a real algebraic set (see \cite[Prop. 2.15]{Mo}). Another striking phenomenon is illustrated by Koll\'ar's example (Example \ref{ExKollar}). Consider the surface $S=\Z(y^3-(1+z^2)x^3)$ in $\R^3$. The continuous function defined by $(x,y,z)\mapsto \sqrt[3]{1+z^2}$ is regular on $S$ minus the $z$-axis, however its restriction to the $z$-axis is no longer rational. This pathology leads to the notion of hereditarily rational function in the sense of \cite[Def. 1.4]{KuKu2}. \begin{defn} Let $X$ be an algebraic set. A continuous rational function $f\in \SR(X)$ is hereditarily rational on $X$ if for every irreducible algebraic subset $V\subset X$ the restriction $f_{|V}$ is rational on $V$. We denote by $\SRR(X)$ the ring of hereditarily rational functions on $X$. A map $Y\to X$ between real algebraic sets $X\subset \R^n$ and $Y\subset \R^m$ is called hereditarily rational if its components are hereditarily rational functions on $Y$. \end{defn} In particular, in the case of curves, the rings $\SR(X)$ and $\SRR(X)$ coincide. It is known that for a real algebraic set $X$ with at most isolated singularities, any continuous rational function is also hereditarily rational \cite{KN,Mo}. Note also that the regulous functions introduced in \cite{FHMM} on a general real algebraic set $X\subset \R^n$ as the quotient of $\SRR(\R^n)$ by the ideal of continuous rational functions vanishing on $X$, coincide with the hereditarily rational functions on $X$. By \cite[Thm. 6.4]{FHMM}, the topology generated by zero sets of hereditarily rational functions on $X$ is the algebraically constructible topology on $X$ (denoted by $\Co$-topology). \begin{defn} Let $X$ be an irreducible algebraic set.
The subring of all continuous functions on $\Cent X$ that are rational on $X$ is denoted by $\SR(\Cent X)$. A continuous rational function $f\in \SR(\Cent X)$ is hereditarily rational on $\Cent X$ if for every irreducible algebraic subset $V\subset X$ satisfying $V={\overline{V\cap \Cent X}}^Z$, the restriction $f_{|V\cap \Cent X}$ is rational on $V$. We denote by $\SRR(\Cent X)$ the ring of hereditarily rational functions on $\Cent X$. \end{defn} The main interest of restricting continuous rational functions to the central locus is that the canonical maps $\SR(\Cent X)\rightarrow \K(X)$ and $\SRR(\Cent X)\rightarrow \K(X)$ are now injective. \subsection{Zero sets of integral continuous rational functions} The zero set $\Z(f)$ of an hereditarily rational function $f\in\SRR(X)$ is a Zariski constructible set \cite{FHMM}. However in general, the zero set $\Z(f)$ of an arbitrary continuous rational function $f\in\SR(X)$ has only the structure of a closed semi-algebraic set, and it happens indeed that $\Z(f)$ is not Zariski constructible \cite[Ex. 3.6]{Mo}. We provide below sufficient conditions on $X$ and $f$ to ensure that such a pathological behavior cannot happen. It consists of an application of the notion of totally real normalization introduced in this paper. More precisely, consider a central irreducible real algebraic set $X$, so that it comes with an extension of domains $\Pol(X)\subset\SR(X)$. Then the zero set of functions belonging to the integral closure $\Pol(X)_{\SR(X)}'$ of $\Pol(X)$ in $\SR(X)$ is Zariski constructible, provided that $X$ admits a central and totally real normalization. \begin{prop} \label{zerointratcont} Let $X$ be an irreducible central real algebraic set with a central normalization which is totally real. The zero set of a continuous rational function on $X$, which is integral over the polynomial functions, is Zariski closed.
If $f$ is assumed moreover to be hereditarily rational, then the sign of $f$ on $X$ coincides with the sign of a polynomial function on $X$. \end{prop} \begin{proof} Let $\pi':X'\to X$ be the normalization map. By Proposition \ref{centralsurj} the map $\pi'$ is surjective. Let $f\in\Pol(X)_{\SR(X)}'$. Since $X$ is central, $\SR(X)\subset\K(X)$. Let $(U,f_{|U})$ be a regular presentation of $f$ that is integral over $\Pol(X)$. It gives rise to a function $g\in\Pol(X')$ such that $g=f_{|U}\circ\pi'$ on $\pi'^{-1}(U)$. Since $X'$ is central, the continuous functions $g$ and $f\circ\pi'$ coincide on $X'$. It follows that $\Z(f\circ\pi')$ is a Zariski closed subset of $X'$. It follows from Corollary \ref{cor-clo} that $\Z(f)=\pi'(\Z(f\circ\pi'))$ is Zariski closed. The second assertion is a direct consequence of \cite[Thm. B]{Mo}. \end{proof} \subsection{Integral closure of the ring of continuous rational functions on the central locus} Let $X$ be an irreducible real algebraic set. Recall that the ring $\SR(X)$ is not always a domain, contrary to $\SR(\Cent X)$ whose fraction field is isomorphic to $\K(X)$. Following the arguments used in the proof of \cite[Prop. 2.33]{Mo}, we can show that the continuous rational functions on the central locus of $X$ are exactly the functions on $\Cent X$ that become regular after a well chosen resolution of singularities of $X$ (they are called blow-regular functions in \cite{Mo}). \begin{prop} \label{blowreg} Let $f:\Cent X\to \R$ be a real function. The following properties are equivalent: \begin{enumerate} \item $f\in\SR(\Cent X)$. \item There exists a resolution of singularities $\pi:Y\to X$ such that $f\circ\pi\in\SO(Y)$. \end{enumerate} \end{prop} \begin{rem} Let $\pi:Y\to X$ be a resolution of singularities. \begin{enumerate} \item We have $\pi(Y)=\Cent X$ by Proposition \ref{centralsurj2}.
\item The equality $f\circ\pi\in\SO(Y)$ in the statement of the previous proposition means that there exists $g\in\SO(Y)$ such that $f\circ\pi=g$ on $Y$. In particular, since $\pi$ is a regular birational map, the functions $f$, $f\circ\pi$ and $g$ represent the same class in $\K(X)$. \end{enumerate} \end{rem} As a first result, we prove that the ring of continuous rational functions on a smooth central locus of an algebraic set (or the ring of hereditarily rational functions, it is the same in that case) is integrally closed. \begin{thm} \label{intcloslisse1} Let $X$ be an irreducible algebraic set such that $\Cent(X)=X_{reg}$ (it is equivalent to say that $X_{reg}$ is closed for the Euclidean topology). Then $\SR(\Cent X)$ is integrally closed in $\K (X)$. \end{thm} \begin{proof} Let $f\in \K(X)^{*}$ be a non-zero rational function on $X$, and assume that $f$ satisfies an integral equation with continuous rational coefficients on $\Cent X$, namely there exist $d\in\N^{*}$ and $a_i\in\SR(\Cent X)$, $i=0,\ldots, d-1$ such that $$f^d+a_{d-1}f^{d-1}+\cdots+a_0=0$$ in $\K (X)$. Then there exists a non-empty Zariski open subset $U$ of $\Cent X$ such that $$\forall x\in U,~~f^d(x)+a_{d-1}(x)f^{d-1}(x)+\cdots+a_0(x)=0.$$ By Proposition \ref{blowreg}, there exists a resolution of singularities $\pi:Y\rightarrow X$ such that $\tilde{a_i}=a_i\circ \pi$ is regular on $Y$ for $i=0,\ldots, d-1$. In particular $f\circ \pi$ is a rational function on $Y$ which is integral over $\SO (Y)$. Since $Y$ is non-singular then $\SO (Y)$ is integrally closed in $\K(Y)$ by Proposition \ref{intclosreel2}, and thus the rational function $f\circ \pi$ can be extended to a regular function $\tilde{f}$ on $Y$. Obviously, we have $$\forall y\in Y,~~\,\tilde{f}^d(y)+\tilde{a}_{d-1}(y)\tilde{f}^{d-1}(y)+\cdots+\tilde{a}_0(y)=0.$$ Let $x\in \Cent X=X_{reg}$. 
Since each regular function $\tilde{a}_i$ is constant on $\pi^{-1}(x)$, for all $y\in\pi^{-1}(x)$ the real number $\tilde{f}(y)$ is a root of the one variable polynomial $$p(t)=t^d+\tilde{a}_{d-1}(x)t^{d-1}+\cdots+\tilde{a}_0(x)\in\R[t].$$ Since $\pi^{-1}(x)$ is connected in the Euclidean topology and moreover $\tilde{f}$ is continuous on $Y$, we obtain that $\tilde{f}$ must be constant on $\pi^{-1}(x)$. Hence $\tilde{f}$ induces a real continuous function $f^c$ on $\Cent X$ such that $\tilde{f}=f^c\circ \pi$ ($\pi:Y\to \Cent X$ is a quotient map for the Euclidean topology \cite[Prop. 2.33]{Mo}) and therefore $f^c$ is a continuous extension to $\Cent X$ of $f$. \end{proof} \begin{cor} \label{intcloslisse} Let $X$ be a non-singular irreducible algebraic set. Then $\SR(X)$ is integrally closed in $\K (X)$. \end{cor} A rational function which does not admit a continuous extension on a given algebraic set may admit different behaviors at an indeterminacy point. It can be unbounded like $1/x$ at the origin in $\R$, bounded with infinitely many limit points like $x^2/(x^2+y^2)$ at the origin in $\R^2$, or bounded with finitely many limit points like in the case of a rational function satisfying an integral equation with continuous rational coefficients on the central locus. \begin{lem} \label{indetfinite} Let $X$ be an irreducible algebraic set. Assume $f\in \K(X)$ satisfies an integral equation with coefficients in $\SR(\Cent X)$. Then $f$ admits finitely many different limits at its indeterminacy points that are central. \end{lem} \begin{proof} The rational function $f$ satisfies an integral equation of the form $$f^d+a_{d-1}f^{d-1}+\cdots+a_0=0$$ with $d\in\N^{*}$ and $a_i\in\SR(\Cent X)$, for $i=0,\ldots, d-1$. Let $\pi:Y\to X$ be a resolution of the singularities of $X$. The functions $a_i\circ \pi$ are continuous rational functions on a non-singular variety, therefore they are hereditarily rational on $Y$.
Then $f\circ \pi$ is a rational function on $Y$ satisfying an integral equation with hereditarily rational coefficients. By Corollary \ref{intcloslisse}, $f\circ\pi$ can be extended to $Y$ as an hereditarily rational function $\tilde{f}$ on $Y$. Let $x\in \Cent X$. By the arguments used in the proof of Theorem \ref{intcloslisse1}, $\tilde{f}$ is constant on the connected components of $\pi^{-1}(x)$. Since $\tilde{f}$ is hereditarily rational on $Y$, it follows that $\tilde{f}|_{\pi^{-1}(x)}$ is hereditarily rational on $\pi^{-1}(x)$. In particular $\tilde{f}$ is constant on each $\Co$-irreducible component of $\pi^{-1}(x)$. Since there exists a finite number of such components, $\tilde{f}$ takes a finite number of values on $\pi^{-1}(x)$, and therefore $f$ admits finitely many limit points at $x$. \end{proof} We extend the notion of connectedness to the constructible topology. \begin{defn} Let $X$ be an algebraic set. Let $Y_1,\ldots,Y_k$ be the $\Co$-irreducible components of $X$. We say that $X$ is $\Co$-connected if $k=1$ or else if $\forall i\not= j$ in $\{1,\ldots,k\}$ there exists a sequence $(i_1,\ldots,i_l)$, $l\geq 2$, of two by two distinct numbers in $\{1,\ldots,k\}$ such that $i_1=i$, $i_l=j$, and $Y_{i_t}\cap Y_{i_{t+1}}\not=\emptyset$ for $t=1,\ldots, l-1$. \end{defn} For example, an algebraic set $X$ is $\Co$-connected when $X$ is connected. This notion enables us to extend Theorem \ref{intcloslisse1} to certain singular cases. \begin{prop} \label{intclossing} Let $X$ be an irreducible algebraic set such that there exists a resolution of singularities $\pi:\tilde{X}\rightarrow X$ such that for all $x\in \Cent X$ the fiber $\pi^{-1} (x)$ is $\Co$-connected. Then $\SR(\Cent X)$ is integrally closed in $\K (X)$. \end{prop} \begin{proof} Assume $f\in \K(X)^{*}$ is such that there exist $d\in\N^{*}$ and $a_i\in\SR(\Cent X)$, $i=0,\ldots, d-1$ such that $$f^d+a_{d-1}f^{d-1}+\cdots+a_0=0$$ in $\K (X)$.
Let $\pi:\tilde{X}\rightarrow X$ be a resolution of singularities such that $\forall x\in \Cent X$ the fiber $\pi^{-1} (x)$ is $\Co$-connected. As we have already explained in the proof of Lemma \ref{indetfinite}, the rational function $f\circ \pi$ can be extended as a hereditarily rational function to $\tilde{X}$. Let $\tilde{f}\in\SRR(\tilde{X})$ denote the extension. Let $x\in \Cent X$. We know that $\tilde{f}$ is constant on the connected components of $\pi^{-1}(x)$. Let $Y_1,\ldots,Y_k$ be the $\Co$-irreducible components of $\pi^{-1}(x)$. Since for $i=1,\ldots,k$ we have $\tilde{f}|_{Y_i}\in\SRR(Y_i)$ (see \cite[Cor. 5.38]{FHMM}), $\tilde{f}$ is constant on $Y_i$ (see \cite[Cor. 6.6]{FHMM}). Since $\pi^{-1}(x)$ is $\Co$-connected, $\tilde{f}$ is constant on $\pi^{-1}(x)$. We conclude the proof in the same way we did in the proof of Theorem \ref{intcloslisse1}. \end{proof} \begin{ex}\begin{enumerate} \item Let $X$ be the cuspidal plane curve given by $y^2-x^3=0$. By Proposition \ref{intclossing} we know that $\SR(X)$, which is also equal to $\SRR(X)$ since $X$ is a curve, is integrally closed. \item Let $X$ be the nodal plane curve given by $y^2-(x+1)x^2=0$. The rational function $f=y/x$ is integral over $\Pol (X)$ since $f^2-(x+1)=0$ on $X\setminus\{ (0,0)\}$. It is easy to see that $f$ cannot be extended continuously to the whole of $X$. Hence $\SR(X)$ is not integrally closed. Of course the fiber over the node cannot be connected when we resolve it. \item Let $X$ be the central algebraic surface in $\R^3$ defined by $y^2=(z^2-x^2)(x^2-2z^2)$. It can be viewed as the cone over the non-singular curve defined in the plane $z=1$ by the irreducible curve with two connected components $y^2=(1-x^2)(x^2-2)$. The origin is the only singular point of $X_{\C}$ and thus $X$ is normal. Moreover the blowing-up of the origin gives a resolution of the singularities of $X$, with exceptional divisor a smooth irreducible curve with two connected components. 
It is in particular $\Co$-irreducible, therefore it follows again from Proposition \ref{intclossing} that $\SR(X)$, which is also equal to $\SRR(X)$ since $X$ has only an isolated singularity, is integrally closed. \end{enumerate} \end{ex} \begin{rem} Note that even for normal central surfaces, the ring $\SRR(X)=\SR(X)$ is not necessarily integrally closed. Consider for example the surface $X$ given by $z^2=(x^2+y^2)^2+x^6$ in $\R^3$. The origin is the only singular point of $X_{\C}$ and thus $X$ is normal. The rational function $f=z/(x^2+y^2)$ satisfies the integral equation $f^2=1+x^6/(x^2+y^2)^2$ with coefficients in $\SRR(X)$. As a consequence $f^2$ converges to 1 at the origin, but $f$ has different signs depending on the sign of $z$. Therefore $f$ cannot be continuous at the origin. \end{rem} \subsection{The case of curves} Let $X$ be an irreducible algebraic curve. Recall that in that situation, hereditarily and continuous rational functions (on the central locus) coincide. Let $\pi':X'\rightarrow X$ denote the normalization map. Note that a normal curve is non-singular and thus automatically central. In this final section, we aim to determine the integral closure $\SR(\Cent X)'$ of $\SR(\Cent X)$ in $\K(X)$. By \cite[Prop. 2.4]{FMQ}, we know that $\SR(X')$ coincides with $\SO(X')$, so that $\SR(X')$ is an integrally closed ring by Corollary \ref{intcloslisse} (or Proposition \ref{intclosreel2}). Moreover, the composition with $\pi'$ induces an inclusion $\SR(\Cent X)\subset\SR(X')$, so that we obtain a sequence of inclusions $$\SO(X)\subset \SR(\Cent X)\subset \SO(X').$$ Taking integral closures, we obtain another sequence of inclusions \begin{equation} \label{equ1} \SO(X)'\subset \SR(\Cent X)'\subset \SO(X'), \end{equation} namely $\SR(\Cent X)'$ is an intermediate ring between $\SO(X)'$ and $\SO(X')$. 
Note that, in the particular case when $X$ has a totally real normalization, this sequence of inclusions becomes a sequence of equalities $\SO(X)'= \SR(\Cent X)'= \SO(X')$ by Proposition \ref{egalite}. \begin{ex} \label{grospoint} {\rm Consider the central curve $X=\Z(y^2-x(x^2+y^2))$. Let $X'$ be its normalization. The curve $X$ has a unique singular point obtained by putting together two complex conjugate points of $X'_{\C}$ and a point of $X'$. Since the normalization map $\pi':X'\rightarrow X$ is a bijection, the composition with $\pi'$ gives an isomorphism between $\SR(X)$ and $\SR(X')=\SO(X')$ and thus $\SR(X)=\SR(X)'=\SO(X')$. Indeed a regular function on $X'$ is constant on the fibers of $\pi'$ and thus induces a (rational) continuous function on $X$. Since the fiber of $\pi_{\C}:X'_{\C}\rightarrow X_{\C}$ over the singular point of $X$ is not totally real, we get $\SR(X)'=\SO(X')\not= \SO(X)'$ (Proposition \ref{egalite}). In this example, the first inclusion in (\ref{equ1}) is strict and the second one is an equality.} \end{ex} We finally relate the integral closure of $\SR(\Cent X)$ with the ring of regular functions on the normalization of $X$. \begin{thm} \label{intcloscurve4} Let $\pi':X'\rightarrow X$ be the normalization map of an irreducible curve $X$. Then the integral closure $\SR(\Cent X)'$ of $\SR(\Cent X)$ in $\K(X)$ coincides with the ring $\SO(X')$ of regular functions on the normalization $X'$ of $X$. \end{thm} \begin{proof} Note first that we only have to consider the local case. It is not difficult to see that the maximal ideals of $\SR(\Cent X)$ are of the form $\m_x=\{f\in\SR(\Cent X)|\;f(x)=0\}$ for $x\in\Cent X$. Indeed, if $X\subset \R^n$, then since $X$ is a curve we can extend a continuous rational function on $\Cent X$ as a continuous rational function on $\R^n$ and apply \cite[Prop. 5.11]{FMQ}. Let $x\in \Cent X$. 
We consider the fiber $(\pi')_{\C}^{-1}(x)=\{y_1,\ldots,y_r,z_1,\overline{z_1},\ldots, z_t,\overline{z_t}\}$ where $r,t$ are integers, the complex involution is denoted by a bar, $y_1,\ldots,y_r$ correspond to points of $X'$ and $z_1,\overline{z_1},\ldots, z_t,\overline{z_t}$ is a set of pairwise distinct points of $X'_{\C}$. By Proposition \ref{centralsurj2}, we know that $r\geq 1$. By Proposition \ref{centralsurj2}, it follows that $\pi':X'\to\Cent X$ is a quotient map for the Euclidean topology and thus the continuous rational functions on $\Cent X$ correspond to the regular functions on $X'$ which are constant on the fibers of $\pi':X'\to\Cent X$. It follows that $$\SR(\Cent X)_{\m_x}\supset\R+\m_{y_1}\cap\cdots\cap \m_{y_r}$$ with $\m_{y_i}=\{f\in\SO(X')_{\m_x}|\;f(y_i)=0\}$. We want to prove that $$(\SR(\Cent X)_{\m_x})'=(\SR(\Cent X)')_{\m_x}=\SO(X')_{\m_x}.$$ Let $f\in \SO(X')_{\m_x}$. For $i=1,\ldots,r$, there exists $\alpha_i\in\R$ such that $f-\alpha_i\in \m_{y_i}$. Consequently the product $\prod_{i=1}^{r}(f-\alpha_i)$ belongs to $ \SR(\Cent X)_{\m_x}$, therefore $f$ satisfies an integral equation with coefficients in $\SR(\Cent X)_{\m_x}$ as required. \end{proof} \end{document}
\begin{document} \begin{frontmatter} \title{Analytical and numerical solution of a transport equation for resonantly interacting waves in MHD for a van der Waals gas} \author{Harsh V. Mahara and V. D. Sharma} \address{Department of Mathematics, Indian Institute of Technology Bombay, Powai, Mumbai-400076} \begin{abstract} In this paper, we present an analytical and numerical study of the resonant interaction between waves in MHD. A system of evolution equations is derived; we focus on the study of the interaction between a selected triad. The resulting evolution equation contains a dispersive term in addition to the nonlinear term and convolution term. Effects of the van der Waals parameter and the magnetic field on the formation and structure of solitons are studied. \end{abstract} \begin{keyword} Hyperbolic system\sep Resonant interaction \sep magnetohydrodynamics \sep \end{keyword} \end{frontmatter} \section{Introduction} The study of resonant interaction in weakly nonlinear waves has received increasing attention from mathematicians in the recent past \cite{MR338576, MR867874, MR1087089, MR1881084, MR1297667}. There has been widespread interest in the nonlinear phenomena mainly due to the so-called evolution equations, derived from a system of PDEs, representing an essential aspect of the original system \cite{ MR1166185, MR0426722, MR2259844, MR3623396, MR1013433}. To study the wave interactions in one dimension, Majda and Rosales \cite{MR760229} have derived a system of integro-differential equations and have shown their physical applications to gas dynamics; the analytical and numerical solutions of this equation were studied by Majda, Rosales, and Schonbek \cite{MR975485}. The theory of resonant interaction was applied to elasticity and dispersive plasma in \cite{MR1160152} and \cite{MR0029616}, respectively. 
Ali and Hunter \cite{MR1616005} applied the Majda-Rosales theory to the MHD system, including the viscous and dispersive effects in one dimension with ideal gas background. The MHD wave interaction shows dispersive behavior that is different from acoustic and elastic waves; this leads to the KdV-type term in the interaction equation. Zabusky and Kruskal \cite{MR716190} studied the solitary wave solutions of the KdV equation with weak dispersion. In this paper, we study the resonant interaction of weakly nonlinear waves in the MHD system with a van der Waals equation of state and derive an evolution equation corresponding to the fast magnetosonic entropy wave triad. The far-field behaviour of the underlying equation is studied analytically and numerically taking into account the effects of the magnetic field and the van der Waals gas. The evolution equation has a Burgers equation type nonlinear term, a weak dispersive term, and a (weakly dispersive \cite{MR1719749}) convolution term corresponding to the interaction between waves. We notice the presence of solitons and study the effect of the magnetic field and the real gas parameter $ b $ on the evolution, shape and behavior of solitary wave profiles. The work is organized as follows: The basic equations and formulation of the problem are given in Section \ref{basic equation1}. The detailed derivation of the system of transport equations for the wave amplitudes, exhibiting nonlinearity, dissipation, and dispersion is given in Section \ref{evo1}. The evolution equations related to the fast magnetosonic and entropy wave triad with certain assumptions are developed in Section \ref{evo}. The numerical results, exhibiting the magnetic and real gas effects, are displayed in Section \ref{num1}. Finally, we conclude this paper with a discussion of our results in Section \ref{con1}. 
\section{Basic equations}\label{basic equation1} For one dimensional motion, the equations of MHD can be written as a system in the following form \cite{cab70}: \begin{equation}\label{b.2.1} \setlength{\jot}{5pt} \begin{aligned} &\rho_{t}+u\rho_{x}+\rho u_{x}=0,\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\\ &\rho(u_{t}+uu_{x})+ (P+\frac{1}{2}B^{2})_{x}=\frac{4}{3}\mu u_{xx},\\ &\rho(v_{t}+uv_{x})- B_{1}B_{2x}=\mu v_{xx},\\ &\rho(w_{t}+uw_{x})- B_{1}B_{3x}=\mu w_{xx},\\ &\rho(s_{t}+us_{x})= \kappa T_{xx} +\frac{4}{3}\mu u_x^2 + \mu (v_x^2 +w_x^2) + \eta (B_{2x}^2 + B_{3x}^2),\\ &B_{2t}+u B_{2x} + B_{2}u_{x}-B_{1}v_{x}=\eta B_{2xx} + \chi B_{1}B_{3xx},\\ &B_{3t}+u B_{3x} + B_{3}u_{x}-B_{1}w_{x}=\eta B_{3xx} - \chi B_{1}B_{2xx},\\ &B_{1t}=B_{1x}=0, \end{aligned} \end{equation} where $\, P $ is the pressure, $ \rho $ is the fluid density, $ s $ is the entropy, $ (u,v,w) $ is the fluid velocity vector, $ \mu $ is the viscosity, $ \kappa $ the thermal conductivity, $ T $ the temperature, $ \eta $ the magnetic diffusion, $ \chi $ the Hall parameter, $ (B_{1},B_{2},B_{3}) $ the magnetic field vector, and $ B^2 =B_{1}^2+B_{2}^2+B_{3}^2.$ The last equation of \eqref{b.2.1} implies that $ B_{1}(x,t)= \text{constant}, $ which reduces the number of the equations to seven, and is due to the fact that the magnetic field is divergence free. We have considered the direction of propagation along the $ x $ axis, and the subscripts $ x $ and $ t $ denote the partial differentiation with respect to the respective variable. 
The system of equations \eqref{b.2.1} is supplemented by an equation of state which in our case is the van der Waals equation of state of the form \cite{MR1419777} \begin{equation}\label{b.2.2} P = K_{0}\delta\frac{\rho^{1+\delta}\exp(\delta s/R)}{(1-b\rho)^{1+\delta}}, \qquad T = K_{0}\delta\frac{\rho^{\delta}\exp(\delta s/R)}{R(1- b\rho)^{\delta}}, \end{equation} where $ K_{0} $ is a constant, $ \delta $ is a dimensionless material dependent quantity defined as $ \delta = R/c_{v} $ with $ c_{v} $ the specific heat at constant volume and $ R $ the specific gas constant; $ \delta $ lies in the interval $ 0 < \delta \leq 2/3 $ with $ \delta= 2/3 $ for a monoatomic fluid, and the parameter $ b $ represents the van der Waals excluded volume. The system \eqref{b.2.1} can be written in the vector matrix notation as \begin{equation}\label{b.2.3} \mathbf{U}_{t}+\mathbf{A}(\mathbf{U})\mathbf{U}_{x}= \mathbf{M}(\mathbf{U})\mathbf{U}_{xx}+ \mathbf{N}(\mathbf{U})[\mathbf{Q} (\mathbf{U})]_{x}\mathbf{U}_{x}, \end{equation} where $ \mathbf{U}=(\rho,u,v,w,s,B_{2},B_{3})^{tr} $ is the state column vector; $ \mathbf{A},\,\mathbf{M},\,\mathbf{N},\;\text{and}\;\mathbf{Q} $ are square matrices of order 7 having components $ A_{ij},\,M_{ij},\,N_{ij},\;\text{and} \;Q_{ij} $, respectively; the non-zero components are as follows: \begin{equation*}\label{b.2.4} \setlength{\jot}{5pt} \begin{aligned} & \quad\qquad A_{ii}=u,~ i=1,\ldots,7, \quad~ A_{12}=\rho,\quad~ A_{21}=\frac{P_{\rho}}{\rho},\quad~ A_{25}=\frac{P_{s}}{\rho},\quad~ A_{26}=\frac{B_{2}}{\rho}, \\ & \quad\qquad A_{27}=\frac{B_{3}}{\rho},\quad~ A_{36}=\frac{-B_{1}}{\rho},\quad~ A_{47}=\frac{-B_{1}}{\rho},\quad~ A_{62}=B_{2},\quad~ A_{63}=-B_{1},\quad~ \\ & \quad\qquad A_{72}=B_{3},\quad~ A_{74}=-B_{1},\quad~ M_{22}=\frac{\mu'}{\rho},\quad~ M_{33}=\frac{\mu}{\rho},\quad~ M_{44}=\frac{\mu}{\rho},\quad~ \\ & \quad\qquad M_{51}=\frac{\kappa T_{\rho}}{\rho T},\quad~ M_{55}=\frac{\kappa T_{s}}{\rho T},\quad~ M_{66}=\eta,\quad~ 
M_{67}=-\chi B_{1},\quad~ M_{77}=\eta,\quad~ \\ & \quad\qquad N_{51}=\frac{\kappa}{\rho T},\quad~ N_{52}=\frac{\mu'}{\rho T},\quad~ N_{53}=\frac{\mu}{\rho T},\quad~ N_{56}=\frac{\mu}{\rho T},\quad~ Q_{11}=T_{\rho},\quad~ \\ & \quad\qquad Q_{15}=T_{s},\quad~ Q_{22}=u,\quad~ Q_{33}=u,\quad~ Q_{34}=w,\quad~ Q_{66}=B_{2},\quad~ Q_{67}=B_{3}.\quad~ \\ \end{aligned} \end{equation*} In the system \eqref{b.2.3}, matrices $ \mathbf{M} ,$ $ \mathbf{N} ~\text{and}~ \mathbf{Q} $ correspond to the dispersive and diffusive parts; we can split $\mathbf{M}$ as $ \mathbf{M} = \mathbf{M}_{d} + \mathbf{M}_{v}, $ where the dispersive part $ \mathbf{M}_{d} $ is proportional to $ \chi $ and the diffusive part $ \mathbf{M}_{v} $ is proportional to $\mu,\,\kappa,~\text{or}~\eta .$ \section{Derivation of evolution equations}\label{evo1} It is to be noted that the system \eqref{b.2.1} is parabolic in nature with the second-order spatial derivatives. With the removal of viscosity, thermal conduction, magnetic diffusion, and Hall effect, the system \eqref{b.2.1} reduces to a hyperbolic system. To study the interaction between the MHD waves with real gas effects, we derive a system of evolution equations in the present section. 
We analyse the interaction of waves, which propagate through a constant background state $ \mathbf{U}_{0} = (\,\rho_{0},\, 0,\, 0,\, 0,\, s_{0},\, B_{02},\, 0\, )^{tr} .$ Since the left-hand side of the system \eqref{b.2.3} is hyperbolic, it admits seven families of characteristic velocities at $ \mathbf{U}=\mathbf{U}_{0} $ given by \begin{equation}\label{b.2.5} \setlength{\jot}{5pt} \begin{aligned} &\lambda_{1}=0,\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\\ &\lambda_{2,3}=\mp \frac{B_{01}}{\sqrt{\rho_{0}}},\\ &\lambda_{4,5}=\mp \sqrt{\frac{1}{2}\left(c_{0}^2 + \frac{1}{\rho_{0}}(B_{01}^2+B_{02}^2) -\sqrt{\left(c_{0}^2+\frac{1}{\rho_{0}}(B_{01}^2+B_{02}^2)\right)^{2}-\frac{4c_{0}^2B_{01}^2}{\rho_{0}}}\right)},\\ &\lambda_{6,7}=\mp \sqrt{\frac{1}{2}\left(c_{0}^2 + \frac{1}{\rho_{0}}(B_{01}^2+B_{02}^2) +\sqrt{\left(c_{0}^2+\frac{1}{\rho_{0}}(B_{01}^2+B_{02}^2)\right)^{2}-\frac{4c_{0}^2B_{01}^2}{\rho_{0}}}\right)},\\ \end{aligned} \end{equation} where $ c_{0}=( P_{\rho_{0}})^{1/2} $ is the sound speed. The wave with the speed $ \lambda_{1} $ corresponds to the convection of the entropy with the particle velocity and is called the entropy wave, while waves with speeds $ \lambda_{2,3},\, \lambda_{4,5},\, \lambda_{6,7}\, $ correspond to the left and right moving Alfvén waves, slow magnetoacoustic and fast magnetoacoustic waves and are denoted by $ c_{a},c_{s},~\text{and}~c_{f} $, respectively. In certain degenerate cases some wave speeds coincide; for instance, when $ B_{01}=0,\;B_{02}\neq 0, $ we have $ c_s=c_a=0 $ as an eigenvalue of multiplicity five; when $ B_{01}\neq0,\; B_{02}= 0,\;\text{and}\; c_{a}=c_{s} $ we have that $ c_{s}=c_{a}=c_{f} $ is an eigenvalue of multiplicity three; and if $ B_{01}\neq0,\; B_{02}= 0,\;\text{and}\; c_{a}\neq c_{s} $ then $c_{a}$ is an eigenvalue of multiplicity two. However, if $ B_{01}\neq 0\;\text{and}\;B_{02}\neq 0 $ the wave speeds are distinct and the left side of \eqref{b.2.3} is strictly hyperbolic; here we focus on this case only. 
The right eigenvectors of $ \mathbf{A}(\mathbf{U}_0) $ associated with the eigenvalues $\lambda_{i} $, denoted by $ \mathbf{R}_{i} $, are given by \begin{equation}\label{b.2.6} \setlength{\jot}{5pt} \begin{aligned} &\mathbf{R}_{1}=(~\rho_{0},~0,~0,~0, ~\left(\frac{\rho P_{\rho}}{P_{s}}\right)_0,~0,~0~)^{tr},\qquad\qquad\qquad\\ &\mathbf{R}_{2,3}=(~0,~0,~0,~\mp c_{a}, ~0,~0,~ -B_{01} ~)^{tr},\qquad\qquad\qquad\\ &\mathbf{R}_{4,5}=(~\rho_{0},~\mp c_{s},~\pm \frac{c_{s} B_{01}B_{02}}{\rho_{0}(c_{s}^2-c_{a}^2)},~0, ~0,\frac{B_{02}c_{s}^2}{(c_{s}^2-c_{a}^2)},~ 0 ~)^{tr},\qquad\qquad\qquad\\ &\mathbf{R}_{6,7}=(~\rho_{0},~\mp c_{f},~\pm \frac{c_{f} B_{01}B_{02}}{\rho_{0}(c_{f}^2-c_{a}^2)},~0, ~0,\frac{B_{02}c_{f}^2}{(c_{f}^2-c_{a}^2)},~ 0 ~)^{tr}.\qquad\qquad\qquad\\ \end{aligned} \end{equation} The associated left eigenvectors $ \mathbf{L}_{i},\; i = 1, 2,\ldots,7 ,$ can be obtained using the normalization condition $ \mathbf{L}_{i}\cdot \mathbf{R}_{j} = \delta_{ij}$ where $ \delta_{ij} $ is the Kronecker delta. We look for a small amplitude high frequency asymptotic solution of the system \eqref{b.2.3} of the form \begin{equation}\label{b.2.9} \mathbf{U}= \mathbf{U}_{0}+ \epsilon^{a_{1}} \mathbf{U}_{1}(x,t,\xi,\tau)+ \epsilon^{a_{2}} \mathbf{U}_{2}(x,t,\xi,\tau)+ \epsilon^{a_{3}} \mathbf{U}_{3}(x,t,\xi,\tau)+ ... \end{equation} where $\, \displaystyle {\xi=\frac{x}{\epsilon^{b}},~ \tau=\frac{t}{\epsilon^{e}}}\, $ are fast variables, $ a_{1}<a_{2}<a_{3} $ and $ b,e $ are positive real numbers to be specified later, $ \epsilon $ is a small parameter ($ 0<\epsilon \ll 1$) brought into the problem through an initial or boundary condition and is regarded as the strength of the perturbed disturbance. It is the ratio of a typical wavelength relative to the wave modulation length scale and also the ratio of dimensioned wave amplitude relative to a parameter with the same dimension appearing in the problem. 
Each dissipative mechanism existing in the flow defines a local characteristic length (or time) scale. The short wave or high frequency wave assumption is based on the fact that the wavelength of the wave is much smaller than any other characteristic length scale in the problem. In order to incorporate both dispersive and dissipative effects, we assume that the dispersion coefficient $ \chi $ is larger in magnitude than the diffusion coefficients, such that \begin{equation}\label{b.2.7} \quad\chi = \epsilon^{g}\hat{\chi},\quad \kappa=\epsilon^h \hat{\kappa},\quad \eta = \epsilon^h \hat{\eta},\quad \mu= \epsilon^h\hat{\mu},\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad \end{equation} where $g<h$ are positive real numbers to be specified later; the hats designate order one parameters. Our scaling implies that \begin{equation}\label{b.2.8} \mathbf{M}_{d}=\epsilon^{g}\,\widehat{\mathbf{M}}_{d},\qquad\mathbf{M}_{v}=\epsilon^{h}\,\widehat{\mathbf{M}}_{v}.\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad \end{equation} \noindent Furthermore, the coefficient matrices $ \mathbf{A},\,\mathbf{M},\,\mathbf{N}\;\text{and}\;\mathbf{Q}\,$ can be expanded in a Taylor series about the constant state $ \mathbf{U}_{0} $ as \begin{equation}\label{b.2.10} \mathbf{Y}(\mathbf{U})= \mathbf{Y}(\mathbf{U}_{0})+ \nabla\mathbf{Y}(\mathbf{U}_{0}). 
(\epsilon^{a_{1}} \mathbf{U}_{1}+\epsilon^{a_{2}} \mathbf{U}_{2}+\ldots)+ \frac{1}{2}\nabla^2\mathbf{Y}(\mathbf{U}_{0}).(\epsilon^{a_{1}} \mathbf{U}_{1}+\ldots)(\epsilon^{a_{1}} \mathbf{U}_{1}+\ldots)^{tr}+\ldots \end{equation} where $\mathbf{Y} $ may represent any of the matrices $ \mathbf{A},\,\mathbf{M},\,\mathbf{N}\;\text{and}\;\mathbf{Q}\,.$ Now using the derivative transformation $ \displaystyle{\frac{\partial}{\partial t} = \frac{\partial}{\partial t} + \epsilon^{-e}\frac{\partial}{\partial \tau},\quad \frac{\partial}{\partial x} = \frac{\partial}{\partial x} + \epsilon^{-b}\frac{\partial}{\partial \xi} }, $ \eqref{b.2.8}, \eqref{b.2.10} and perturbation expansion \eqref{b.2.9} in \eqref{b.2.3} yield the equation \begin{equation}\label{b.2.90} \begin{aligned} &\biggl[\,\frac{\partial}{\partial t} + \epsilon^{-e}\frac{\partial}{\partial \tau}+ [\,\mathbf{A}_{0}+(\epsilon^{a_{1}} \mathbf{U}_{1}+\epsilon^{a_{2}} \mathbf{U}_{2}+\ldots\,). \nabla\mathbf{A}_{0}+\ldots\,] \,.\left(\frac{\partial}{\partial x} + \epsilon^{-b}\frac{\partial}{\partial \xi }\right) \\ & -[\,\epsilon^{g}\,(\widehat{\mathbf{M}}_{0d}+\ldots) + \epsilon^{h}\,(\widehat{\mathbf{M}}_{0v}+\ldots)~]\,. \left(\frac{\partial^2}{\partial x^2} + 2 \epsilon^{-b}\frac{\partial^2}{\partial \xi \partial x } + \epsilon^{-2 b}\frac{\partial^2}{\partial \xi^2 } \right) -[\,\mathbf{N}_{0}+ \ldots\,]\,. \\ & \left(\frac{\partial}{\partial x} + \epsilon^{-b}\frac{\partial}{\partial \xi }\right) [\,\mathbf{Q}_{0}+ \ldots\,] \,.\left(\frac{\partial}{\partial x} + \epsilon^{-b}\frac{\partial}{\partial \xi }\right) \biggr] (\mathbf{U}_{0}+\epsilon^{a_{1}} \mathbf{U}_{1}+\epsilon^{a_{2}} \mathbf{U}_{2}+\epsilon^{a_{3}} \mathbf{U}_{3}+\ldots)=0. \end{aligned} \end{equation} To get both dissipation and dispersion effects into picture, we take $ b=e=a_{1}=1,~ g=a_{2}=3/2,~ \text{and }~ h=a_{3}=2$. 
Equating the coefficients of $ \epsilon^{n/2} $ to zero leads to the following system of PDEs satisfied by $ \mathbf{U}_1,~ \mathbf{U}_2 $, and $ \mathbf{U}_3 $ \begin{equation}\label{b.2.11} \setlength{\jot}{5pt} \begin{aligned} &\quad\mathbf{U}_{1\tau} + \mathbf{A}_{0}\mathbf{U}_{1\xi}=0,\qquad\\ &\quad\mathbf{U}_{2\tau} + \mathbf{A}_{0}\mathbf{U}_{2\xi}= \widehat{\mathbf{M}}_{0d}\mathbf{U}_{1\xi\xi},\qquad\\ &\quad\mathbf{U}_{3\tau} + \mathbf{A}_{0}\mathbf{U}_{3\xi} + \mathbf{U}_{1t} + \mathbf{A}_{0}\mathbf{U}_{1x} + \nabla\mathbf{A}_{0}.\mathbf{U}_{1}\mathbf{U}_{1\xi} = \widehat{\mathbf{M}}_{0v}\mathbf{U}_{1\xi\xi}+ \widehat{\mathbf{M}}_{0d}\mathbf{U}_{1\xi\xi\xi}. \qquad \end{aligned} \end{equation} Here $\; \mathbf{A}_{0}=\mathbf{A}(\mathbf{U}_{0}),\; \nabla\mathbf{A}_{0}=\nabla\mathbf{A}(\mathbf{U}_{0}),\; \widehat{\mathbf{M}}_{0v}=\widehat{\mathbf{M}}_{v}(\mathbf{U}_{0}), ~\text{and}~ \widehat{\mathbf{M}}_{0d}=\widehat{\mathbf{M}}_{d}(\mathbf{U}_{0}).\;$ The solution of \eqref{b.2.11}(i) is given by \begin{equation}\label{b.2.12} \mathbf{U}_{1}=\sum_{j=1}^{7}\sigma_{j}(x,t,\theta_{j})\,\mathbf{R}_{j}, \end{equation} where $ \sigma_{j} = (\mathbf{L}_{j}.\mathbf{U}_{1}) $ is an arbitrary scalar valued function called the wave amplitude; it depends on the $ j $-th phase variable $ \theta_{j} $ given by $ \theta_{j} = k_{j}\xi -\omega_{j}\tau,$ where the wavenumber $ k_{j} $ and frequency $\omega_{j}$ satisfy $\; \omega_{j}=\lambda_{j}k_{j},\; j=1,\ldots,7; $ indeed, the dependence of $ \sigma_{j} $ on $ \theta_{j} $ describes the waveform. 
We also assume that $ \sigma_{j}(x,t,\theta) $ has zero mean with respect to the phase variable $ \theta_{j} ,$ i.e., \begin{equation*} \lim_{T\to\infty}\frac{1}{T}\int_{0}^{T}\sigma_{j}(x,t,\theta)~d\theta=0; \end{equation*} and frequency-wave number pairs are related by \begin{equation}\label{b.2.13} \omega_{j}=\mu_{jnm}\omega_{m}+ \mu_{jmn} \omega_{n},\qquad k_{j}=\mu_{jnm}k_{m}+\mu_{jmn}k_{n},\qquad\qquad\qquad \end{equation} where $$\displaystyle{ \mu_{jmn} = \frac{k_{j}(\lambda_{j}-\lambda_{m})}{k_{n}(\lambda_{n}-\lambda_{m})}}.\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ Using \eqref{b.2.12} in \eqref{b.2.11}(ii) and solving the resulting equation for $\mathbf{U}_{2}$ by the method of characteristics, we get \begin{equation}\label{b.2.14} \mathbf{U}_{2}=\sum_{j=1}^{7}\sigma_{j\theta}(x,t,\theta_{j})\,\mathbf{P}_{j}, \end{equation} where the vector $\mathbf{P}_{j} $ satisfies \begin{equation}\label{b.2.15} (k_{j}\mathbf{A}_{0}-\omega_{j}\mathbf{I})\,\mathbf{P}_{j}=k_{j}^2\,\widehat{\mathbf{M}}_{0d}\,\mathbf{R}_{j}, \end{equation} where the dispersion matrix $ \widehat{\mathbf{M}}_{0d} $ satisfies the solvability condition $ \mathbf{L}_{j}.\widehat{\mathbf{M}}_{0d}\,\mathbf{R}_{j}=0.$ Now we use \eqref{b.2.14}, \eqref{b.2.12} in \eqref{b.2.11}(iii) and solve for $ \mathbf{U}_{3}; $ on using the secularity condition \begin{equation}\label{b.2.16} \lim_{\tau\to\infty}\frac{1}{\tau}\mathbf{U}_{3}(\xi,\tau)=0, \end{equation} we arrive at the following set of integro-differential equations for the wave amplitudes $ \sigma_{1}(x,t,\theta),\ldots, \sigma_{7}(x,t,\theta) $ \begin{equation}\label{b.2.17} \begin{aligned} &\sigma_{jt}(x,t,\theta) +\lambda_{j}\,\sigma_{jx}(x,t,\theta) + E_{j}\,\sigma_{j}(x,t,\theta)\, \sigma_{j\theta}(x,t,\theta)\\[2pt] &+\sum_{m < n}^{(j)}\mu_{nmj}\Gamma_{jmn}\lim_{T\to\infty}\frac{1}{T} \int_{0}^{T}\sigma_{m}(x,t,\mu_{mnj}\theta+\mu_{mjn}\zeta) \sigma_{n\zeta}(x,t,\zeta)\,d\zeta= 
\Omega_{j}\sigma_{j\theta\theta}+\Lambda_{j}\sigma_{j\theta\theta\theta}, \end{aligned} \end{equation} where the coefficients are \begin{equation}\label{b.2.18} \setlength{\jot}{5pt} \begin{aligned} &E_{j}=k_{j}\,\mathbf{L}_{j}\,.\,\nabla\mathbf{A}_{0}\,.\,\mathbf{R}_{j}\,\mathbf{R}_{j},\\ &\Gamma_{jmn}=\mu_{jmn}\,k_{n}\,\mathbf{L}_{j}\,.\,\nabla\mathbf{A}_{0}\,.\,\mathbf{R}_{m}\,\mathbf{R}_{n}+ \mu_{jnm}\,k_{m}\,\mathbf{L}_{j}\,.\,\nabla\mathbf{A}_{0}\,.\,\mathbf{R}_{n}\,\mathbf{R}_{m},\\ &\Omega_{j} = k_{j}^2\,\mathbf{L}_{j}\,.\,\widehat{\mathbf{M}}_{0v}\,\mathbf{R}_{j},\qquad \Lambda_{j} = k_{j}^2\,\mathbf{L}_{j}\,.\,\widehat{\mathbf{M}}_{0d}\,\mathbf{P}_{j}. \end{aligned} \end{equation} Here, we have seven integro-differential equations with dissipative and dispersive terms corresponding to the MHD waves in the real gas background. In each case, except for the entropy wave, we have a nonzero (for the real gas considered here) self-interaction coefficient $ E_{j} $ called the nonlinearity parameter. For the entropy wave, it is zero, which can be attributed to the linearly degenerate behavior of the entropy wave \cite{MR760229}; also the entropy wave equation has no integral term, since it is not influenced by the interaction of the other waves due to the fact that the entropy wave is a Riemann invariant. The linear integral term in other cases results from the three-wave interactions between different waves and its coefficient corresponds to the amount of wave produced through the interaction between the other two waves. \section{Resonant interaction between fast magnetoacoustic entropy triad}\label{evo} In this section, the general evolution equation \eqref{b.2.17} derived in the last section is restricted to the study of three-wave interaction at a time. The resulting triads can be classified as $ (a) $ magnetoacoustic and entropy waves, $ (b) $ Alfvén and entropy waves, $ (c) $ Alfvén and magnetoacoustic waves, and $ (d) $ slow and fast magnetoacoustic waves. 
We focus our study on the two fast magnetoacoustic and entropy wave interactions and obtain the precise formulation of the transport equation in this case from the general equation \eqref{b.2.17}. For this purpose, we make certain assumptions: $ (i) $ the wave amplitudes are $ 2\pi$-periodic function of the phase $ \theta ,$ $ (ii) $ all other waves except fast magnetoacoustic and entropy wave triad are in non-resonance (hence the integral term corresponding to them become zero), and $ (iii) $ the fundamental harmonics of all waves in our triad satisfy the resonance conditions \begin{equation}\label{b.2.19} \omega_{1}+\omega_{6}+\omega_{7}=0,\qquad\qquad\qquad k_{1}+k_{6}+k_{7}=0; \end{equation} which are satisfied when $ k_{6}=k_{7}\; \text{and}\; k_{1}=-2k_{7},$ where the wave numbers $k_{1},\,k_{6},\,k_{7} $ correspond to the entropy wave and left and right moving magnetoacoustic waves, respectively. Using the restrictions $ (i),(ii)~\text{and}~ (iii),$ we get the following system of asymptotic equations \begin{eqnarray}\label{b.2.20} \begin{aligned} &\sigma_{6t}- c_{f}\sigma_{6x} - k_{6}E_{f}\sigma_{6}\sigma_{6\theta} - k_{6}\mu_{761}M_{f}\frac{1}{2\pi} \int_{0}^{2\pi}\sigma_{1}(-\theta-\zeta) \sigma_{6\zeta}(\zeta)\,d\zeta= k_{6}^2\Omega_{f}\sigma_{6\theta\theta}-k_{6}^3\Lambda_{f}\sigma_{6\theta\theta\theta},\\ &\sigma_{1t}(\theta)=k_{1}^2\Omega_{e}\sigma_{1\theta\theta},\\ &\sigma_{7t} + c_{f}\sigma_{7x} + k_{7}E_{f}\sigma_{7}\sigma_{7\theta} + k_{7}\mu_{167}M_{f}\frac{1}{2\pi} \int_{0}^{2\pi}\sigma_{6}(-\theta-\zeta) \sigma_{1\zeta}(\zeta)\,d\zeta= k_{7}^2\Omega_{f}\sigma_{7\theta\theta}+k_{7}^3\Lambda_{f}\sigma_{7\theta\theta\theta},\\ \end{aligned} \end{eqnarray} where the coefficients are \begin{equation}\label{b.2.21} \setlength{\jot}{5pt} \begin{aligned} &G_{0}=\left(1+\frac{\rho_{0}}{c_{0}}c_{\rho 0}\right),\qquad H_{0}=\frac{1}{2}\frac{\rho_{0}P_{\rho s 0}}{P_{s0}},\qquad \gamma_{f}=\frac{c_{f}^2-c_{0}^2}{c_{f}^2-c_{s}^2}, \qquad 
\gamma_{s}=\frac{c_{s}^2-c_{0}^2}{c_{s}^2-c_{f}^2},\\ & E_{f}=\left(G_{0}\,\gamma_{s} + \frac{3}{2} \,\gamma_{f}\right)c_{f}, \qquad M_{f}=\left((G_{0}-H_{0})\,\gamma_{s} + \frac{\gamma_{f}}{2}\right)c_{f} \,, \\ &\Omega_{f} =\frac{c_{a}^2}{2\rho_{0}}\left(\frac{4\,\gamma_{s}}{3\,c_{s}^2} + \frac{\gamma_{f} }{c_{f}^2} \right)\hat{\mu} +\left(\frac{\gamma_{f}\,}{2}\right)\hat{\eta}+ \left(\frac{\gamma_{s}\delta}{2\rho_{0}c_{p0}}\right)\hat{\kappa}, \qquad \Omega_{e} =\left(\frac{1}{\rho_{0}c_{p0}} \right)\hat{\kappa},\\ &\Lambda_{f} = \left(\frac{c_{a}^2}{c_{f}^2-c_{a}^2}\right)\rho_{0}\gamma_{f}c_{f}\hat{\chi}^2. \end{aligned} \end{equation} These equations, with the viscous and dispersive terms, are similar in form to the equations for wave interaction in elasticity \cite{MR1160152} and gas dynamics \cite{MR760229}. However, the MHD waves have a dispersive behavior that is not present in acoustic and elastic waves, and they encapsulate the KdV-type behavior in addition to the resonant interaction. The dynamics of the combination of resonant interaction and weak dispersion is complicated. To study the behaviour of these two effects we assume that there is no spatial modulation and that viscosity is absent; consequently the second equation implies that the entropy is independent of time and is given by the initial condition. Hence, our system is reduced to a pair of equations for the fast magnetosonic and entropy wave interaction. 
Introducing the new independent variables, \begin{equation}\label{b.2.22} \setlength{\jot}{5pt} \begin{aligned} &\alpha_{1}(x,t,\theta)=k_{6}\sigma_{6}(x,t,\theta),\\ &\alpha_{2}(x,t,\theta)=k_{7}\sigma_{7}(x,t,\theta),\\ & K(x,\theta)= k_{1}\sigma_{1\theta}(x,t,\theta), \end{aligned} \end{equation} the resulting pair of equations is given by \begin{equation}\label{b.2.23} \begin{aligned} &\alpha_{1t} + E_{f}\alpha_{1}\alpha_{1x} + M_{f}\frac{1}{2\pi} \int_{0}^{2\pi}K(\zeta-x)\, \alpha_{2}(\zeta)\,d\zeta+ \Lambda_{f}\alpha_{1xxx}=0,\\ &\alpha_{2t} + E_{f}\alpha_{2}\alpha_{2x} - M_{f}\frac{1}{2\pi} \int_{0}^{2\pi}K(x-\zeta)\, \alpha_{1}(\zeta)\,d\zeta+ \Lambda_{f}\alpha_{2 xxx}=0,\\ \end{aligned} \end{equation} where a change of variables from $ \theta $ to $ x $ and $ \alpha_{1}(x,t) $ to $ \alpha_{1}(-x,t) $ is used. Finally, if we take the amplitudes $ \alpha_{1}=\alpha_{2} $ and an odd kernel $ K(-x)=-K(x) $ our system \eqref{b.2.23} is symmetric with respect to $ \alpha_{1} $ and $ \alpha_{2}$ and it reduces to the following single equation \begin{equation}\label{b.2.24} \alpha_{t} + E_{f}\,\alpha \,\alpha_{x} + M_{f}\,\frac{1}{2\pi} \int_{0}^{2\pi}K(y-x)\, \alpha(y)\,dy+ \Lambda_{f}\,\alpha_{xxx}=0. \end{equation} Now, we introduce dimensionless variables, defined as \begin{equation*} \begin{aligned} &x^{*}=\frac{x}{l},\;\; t^{*}=\frac{t\sqrt{P_{0}/ \rho_{0}}}{l}, \;\; P^{*}=\frac{P}{P_{0}}, \;\; \rho^{*}=\frac{\rho}{\rho_{0}}, \;\; T^{*}=\frac{T}{T_{0}}, \;\; s^{*}=\frac{s}{s_{0}}, \;\; b^{*} = b \rho_{0},\\ &\;\; u^{*}=\frac{u}{\sqrt{P_{0}/ \rho_{0}}}, \;\; v^{*}=\frac{v}{\sqrt{P_{0}/ \rho_{0}}}, \;\; w^{*}=\frac{w}{\sqrt{P_{0}/ \rho_{0}}}, \;\; B_{1}^{*}=\frac{B_{1}}{B_{0}}, \;\; B_{2}^{*}=\frac{B_{2}}{B_{0}}, \;\; B_{3}^{*}=\frac{B_{3}}{B_{0}}; \end{aligned} \end{equation*} using these variables in \eqref{b.2.24}, and dropping the $ * $ sign, the equation remains unchanged in the dimensionless form. 
Using the equation of state \eqref{b.2.2}, the coefficients evaluated at the undisturbed state are given by \begin{equation}\label{b.2.25} \setlength{\jot}{5pt} \begin{aligned} &c_{0}=\sqrt{\frac{(\delta+1)}{(1-b)}},\qquad G_{0}=\frac{(\delta+2)}{2(1-b)},\qquad H_{0}=\frac{(\delta+1)}{2(1-b)},\quad \gamma_{f}=\frac{c_{f}^2-c_{0}^2}{c_{f}^2-c_{s}^2}, \quad \gamma_{s}=\frac{c_{s}^2-c_{0}^2}{c_{s}^2-c_{f}^2},\\ & E_{f}=\left(G_{0}\,\gamma_{s} + \frac{3}{2}\right)c_{f} \,\gamma_{f}, \qquad M_{f}=\left((G_{0}-H_{0})\,\gamma_{s} + \frac{\gamma_{f}}{2}\right)c_{f} \,,\quad \Lambda_{f} = \left(\frac{c_{a}^2}{c_{f}^2-c_{a}^2}\right)\gamma_{f}c_{f}\hat{\chi}^2.\\\notag \end{aligned} \end{equation} \section{Numerical solutions}\label{num1} In this section, we study the numerical solutions of the transport equation \eqref{b.2.24} and discuss the effects of various parameters in the light of the real gas background. Zabusky and Kruskal \cite{MR716190} have studied the solitary wave solution for the KdV-type equation \begin{equation}\label{key} u_{t}+uu_{x}+\beta^2u_{xxx}=0, \end{equation} with a small amount of dispersion $ \beta=0.022 $ and periodic initial data. We investigate the existence of solitary wave solutions of \eqref{b.2.24} in the light of real gas and magnetic field effects. For numerical computation, we use the pseudo-spectral method developed by Fornberg and Whitham \cite{MR497916}, which is suitable for the evaluation of certain operators, and can considerably speed up the calculations while using the fast Fourier transform. We use the trapezoidal rule to evaluate the integral and the temporal variable is discretized using a fourth-order Runge-Kutta method. The evolution equation \eqref{b.2.24} is composed of a Burgers equation type nonlinear term with a small amount of dispersion coefficient ($ \Lambda_{f}\approx 0.0013 $ ) and an integral term which is weakly dispersive \cite{MR1719749}. 
In numerical experiments, we have taken the convolution with the kernel $ K(x)= \sin x $ and the periodic initial data $ \alpha(x,0) = A \cos x ,$ with $ A=1,2. $ Since the dispersive effects are negligible, the corresponding terms can be neglected in \eqref{b.2.24} leading to $ \alpha_{t} + E_{f}\, \alpha\, \alpha_{x}=0 $; its solution is given by the implicit equation $ \alpha = A \cos (x- E_{f} \alpha t) $. Initially the nonlinear term dominates the solution, and a steepening is seen in the region of negative slope; we find that $ \alpha $ tends to become discontinuous at the breakdown time $ t_{b}=\left( \frac{1}{A\, E_{f}}\right) $ but after some time the dispersive term becomes dominant and instead of discontinuity, oscillations of small wavelength develop on the left part of the wave profile; their amplitudes increase, and each oscillation achieves steady amplitude after some time. We consider the effects of the van der Waals parameter $ b $ and magnetic field on the formation of these solitons. For case-(I), we perform three sets of numerical experiments each for the van der Waals parameter $ b=0,\,b=0.02,\,\text{and}\, b=0.04 $ with initial data $ \alpha (x,0)=\cos x $ and $ \delta =0.04 ,\,\chi=1,\, B_{01}= 0.1,\, B_{02}=0.1.$ Fig. \ref{fig41}(b) shows the wave profiles at breaking time $ t_{b} $, which is almost the same in all cases as depicted in Table (\ref{t1.1}); there appears a minute oscillatory behavior in each case which is due to the neglected dispersive term. Solitary wave train formations and their overlapping can be seen in Fig. \ref{fig41}(c) and Fig. \ref{fig41}(d), respectively; a space-time evolution of the wave profiles is displayed in Fig. (\ref{fig42}). 
\begin{figure} \caption{Case(I)- Three families of solitary wave profiles for $b=0,\,b=0.02,\,~\text{and} \label{fig41} \end{figure} \begin{table}[b] \caption {Evolution equation coefficients for case-(I).}\label{t1.1} \begin{center} \begin{tabular}{ |c|c|c|c|c|c|c| } \hline & $ E_{f} $ & $ M_{f}$ & $ \Lambda_{f}$ & $ t_{b}$ \\ \hline b=0 & 2.7756 & 1.0134 & 0.00139 & 0.360 \\ b=0.02&2.8096 & 1.0278 & 0.00137 & 0.355 \\ b=0.04& 2.8457 & 1.0430 & 0.00136 & 0.351 \\ \hline \end{tabular} \end{center} \end{table} For case-(II), we perform three sets of experiments with the same data as in case-(I) except the magnetic field parameter, which is $ B_{01}=0.05$. In this case, we find that the breakdown time is almost same as in the previous case depicted in Fig.(\ref{fig43})(b) and Table (\ref{t1.2}). Fig.(\ref{fig43})(c) shows the soliton formation; however the number of solitary waves increases and their width decreases as compared to the previous case, which can be explained by the fact that there is considerable decrease in the dispersion coefficient $ \Lambda $ with the decrease in $ B_{01} $ as shown in Table (\ref{t1.2}); Zabusky and Kruskal \cite{MR716190} had shown that, thickness of the pulse decreases with the decrease in the dispersion parameter and increase in the wave amplitude. 
\begin{table}[b] \caption {Evolution equation coefficients for case-(II).}\label{t1.2} \begin{center} \begin{tabular}{ |c|c|c|c|c|c|c| } \hline & $ E_{f} $ & $ M_{f}$ & $ \Lambda_{f}$ & $ t_{b}$ \\ \hline b=0 & 2.7725 & 1.0126 & 0.00034 & 0.366 \\ b=0.02&2.8066 & 1.0271 & 0.00034 & 0.356 \\ b=0.04& 2.8428 & 1.0423 & 0.00033 & 0.351 \\ \hline \end{tabular} \end{center} \end{table} \begin{figure} \caption{Space-time (top view) of the temporal development of solitons with initial data $ a(x,0)= \cos x $ for $ \delta =0.04 ,\,\chi=1,\, B_{1} \label{fig42} \end{figure} \begin{figure} \caption{Case(II)-Three families of solitary wave profiles for $b=0,\,b=0.02,\,~\text{and} \label{fig43} \end{figure} Numerical solution for case-(III), with the same set of parameters as in case-(I) except with initial condition $ a(x,0)=2 \cos x $, i.e., with twice the amplitude, are obtained with three sets of experiments. In this case, as is clear from the expression of $ t_{b} $ that the breakdown time decreases to one half that of the first case. Indeed, as the amplitude of the initial profile increases, the amplitude of the solitary wave increases, while, the number of pulses increases and their thickness decreases as depicted in the Figure (\ref{fig44}), which is again explained by the result mentioned in the last case. \begin{figure} \caption{Case(III)-Three families of solitary wave profiles for $b=0,\,b=0.02,\,~\text{and} \label{fig44} \end{figure} For case-(IV), we investigate the influence of magnetic field $ B_{01} $ on the formation of solitons at same time, with the same set of parameters as in case-(I) but with the decreasing magnetic field $ B_{01} $ which is exhibited in the Figure (\ref{fig45}). 
The number of solitons increases and their width decreases, which is illustrated by a small value of $ \Lambda_{f} $ shown in Table (\ref{t1.3}); and eventually with the decrease in the magnetic field, there is a breakdown of the solution, whereas the breakdown time $ t_{b} $ is almost the same in each case as displayed in Table (\ref{t1.3}). \begin{figure} \caption{Case(IV)-The solitary wave profiles with different values of magnetic fields $ B_{01} \label{fig45} \end{figure} \begin{table}[b] \caption {Evolution equation coefficients for case-(IV).}\label{t1.3} \begin{center} \begin{tabular}{ |c|c|c|c|c|c|c| } \hline & $ E_{f} $ & $ M_{f}$ & $ \Lambda_{f}$ & $ t_{b}$ \\ \hline $ B_{01}=0.1 $ & 1.9050 & 0.7465 & 0.00087 & 0.524 \\ $ B_{01}=0.075 $&1.9046& 0.7465 & 0.00049 & 0.524 \\ $ B_{01}=0.05 $& 1.9044 & 0.7465 & 0.00021 & 0.525 \\ $ B_{01}=0.02 $& 1.9042 & 0.7465 & 0.00011 & 0.525 \\ \hline \end{tabular} \end{center} \end{table} \begin{figure} \caption{Case(V)-The solitary wave profiles with different values of magnetic fields $ B_{02} \label{fig47} \end{figure} Finally, in case-(V) and case-(VI), we consider the influence of magnetic field $ B_{02} $ on the solitary wave solutions with the same set of parameters as in case-(I) but with the increasing magnetic field. For case-(V) in Figure (\ref{fig47}), we display the solitary wave profiles with increasing magnetic field and it is noticed that with the increase in the magnetic field there is a higher development in the solitary wave profile, which is explained by the decrease in the breaking time $ t_{b} $ with the increase in magnetic field as depicted in Table (\ref{t1.4}). 
\begin{table}[b] \caption {Evolution equation coefficients for case-(V) and case-(VI).}\label{t1.4} \begin{center} \begin{tabular}{ |c|c|c|c|c|c|c| } \hline & $ E_{f} $ & $ M_{f}$ & $ \Lambda_{f}$ & $ t_{b}$ \\ \hline $ B_{02}=0 $ & 1.8940 & 0.7431 & 0.00086 & 0.527 \\ $ B_{02}=0.5 $&2.1549& 0.8238 & 0.00112 & 0.464 \\ $ B_{02}=1.0 $& 2.8096 & 1.0278 & 0.00137 & 0.355 \\ $ B_{02}=2.0 $& 4.6079 & 1.5999 & 0.00190 & 0.217 \\ \hline \end{tabular} \end{center} \end{table} \begin{figure} \caption{Case(VI)- Three families of solitary wave profiles at the same time for different values of magnetic fields $ B_{02} \label{fig46} \end{figure} Similarly, in case-(VI), as portrayed in Figure (\ref{fig46}), three families of wave profiles are displayed at a particular time for three different values of magnetic field; in the first figure it is observed that for a strong magnetic field, i.e., $ B_{02}=1, $ oscillations begin to set in, whereas for the other two values they are still not seen; with the increase in time the evolution of solitons is always ahead of the wave profiles that corresponds to the largest amount of magnetic field and this behaviour is due to the smallest value of $ t_{b} $ corresponding to the most significant value of $ B_{02} $ as depicted in Table (\ref{t1.4}). \section{Conclusions}\label{con1} We have studied, using a perturbation method, a magnetohydrodynamics model in the real gas background with viscosity, magnetic diffusion, thermal conductivity, and magnetic dispersion; we derive a system of evolution equations for the resonant interaction among the characteristics of MHD. To illustrate the effects of the interplay between the nonlinearity, dispersion, and resonance, we focus on a single triad composed of two oppositely moving fast magnetoacoustic waves and an entropy wave with certain assumptions. 
The resulting single equation has a Burgers type nonlinear term with coefficient $ E_{f} $ (always positive for the real gas considered), a dispersion term with the coefficient $ \Lambda_{f} $ and a linear integral term which corresponds to the resonant interaction among the waves in the considered triad with coefficient $ M_{f} $. In all the numerical experiments we perform, the dispersion coefficient is small, hence at the initial stage the quadratic nonlinear term dominates the behavior of the solution profile, but after some time the dispersion and integral terms come into play and solitary wave formation takes place. We investigate how the real gas and magnetic field effects influence the wave formation, shape and amplitude of the solitary wave profiles for periodic initial data. The effects of the van der Waals parameter $ b $ are considered in cases (I),(II), and (III); solitary wave formation takes place in each case and it is displayed in the Figs. (\ref{fig41}),(\ref{fig43}), and (\ref{fig44}), showing thereby that they have a small effect on the breakdown time $ t_{b} $ and on the development of the solitary wave profiles; however, an enhancement in the amplitude of the initial wave profile causes the resulting amplitude of the solitary wave profile to increase and the breakdown time to decrease, as shown in Fig. (\ref{fig44}). The effects of the magnetic field on the wave evolution are considered in cases (II),(IV),(V), and (VI). An increase in the number of solitons and a decrease in their thickness with a reduction in the value of $ B_{01} $ is shown in Fig. (\ref{fig45}), which is explained from the entries of $ \Lambda_{f} $ in Table (\ref{t1.3}) for different values of $ B_{01} .$ There is a lagging in the evolution of the wave profiles for smaller values of $ B_{02} $ as illustrated in Fig.(\ref{fig47}); the evolution of three different wave profiles (at the same time) corresponding to three different values of $ B_{02} $ is depicted in Fig. 
(\ref{fig46}) and it is noticed that the breaking time decreases with an increase in the value of $ B_{02} ,$ whereas, the number of solitons and their width remain the same in each case. \section*{References} \end{document}
\begin{document} \articletype{} \title{On the open sea propagation of two-dimensional rotational water waves generated by a moving bed} \author{ \name{Frederick Moscatelli\textsuperscript{a}\thanks{CONTACT Frederick Moscatelli. Email: [email protected]}} \affil{\textsuperscript{a}Faculty of Mathematics, University of Vienna\\ Oskar-Morgenstern-Platz 1, 1090 Vienna, Austria} } \maketitle \begin{abstract} We study the propagation of two-dimensional tsunami waves triggered by a seaquake in the open sea in the presence of underlying wind-generated currents, corresponding to background flows of constant vorticity. A suitable scaling of the governing equations introduces dimensionless parameters, of particular interest being the setting of linear waves that only depend on the vertical movement of the sea bed. We use Fourier analysis methods to extract formulae for the function $f$ which describes the vertical displacement of the water's free surface. We show that the results are particularly useful in the physically relevant shallow-water regime: in the irrotational case the predictions fit well with the observed behaviour of some historical tsunamis. In other situations, the stationary-phase principle gives insight into the asymptotic behaviour of $f$. \end{abstract} \begin{keywords} water waves, free boundary, vorticity \end{keywords} \begin{amscode} 45G99, 58J32, 76B03 \end{amscode} \section{Introduction} The aim of this paper is to pursue a mathematical analysis of tsunami waves triggered by an earthquake in the open sea. Tsunamis are water waves which are generated by an impulsive disturbance that vertically displaces the water column, most commonly by tectonic activity, but sometimes by other means as well, such as underwater landslides or asteroid impact. Hundreds of tsunamis were confirmed since 1900, about 80\% of which occurred in the Pacific Ocean. 
Most of these tsunami waves have small amplitude, being nondestructive and barely detectable in the ocean (less than 0.3 m high, the average wave height in the Pacific Ocean being about 2.5 m). Large, destructive tsunamis, causing loss of life and major coastal destruction, take place typically once every several decades. The highest tsunami wave that was ever measured exceeded 500 m and occurred on 9 July 1958, in the Lituya Bay inlet of the Gulf of Alaska, caused by a massive rock dislocated by an earthquake and plunging 900 m down into the water (see \cite{Con11}). Let us also mention that 66 million years ago, an asteroid with a diameter of about 10 km struck the waters of the Gulf of Mexico near the Yucat\'an Peninsula, and it is estimated that the impact generated tsunami waves more than 1.5 km high that caused sea levels to rise in all corners of the Earth (see \cite{r}); the subsequent cataclysmic events -- plumes of aerosol, soot and dust filling the air, and wildfires starting as flaming pieces of material blasted from the impact re-entered the atmosphere and rained down -- ended the era of the dinosaurs and triggered a mass extinction of about 75\% of animal and plant life on Earth. Since it appears \cite{as} that up to 75\% of all recorded tsunamis were generated by undersea earthquakes, in this paper we focus on this type. Three events stand out in the last 100 years: the 22 May 1960 Chile tsunami (caused by the largest earthquake ever recorded and with waves propagating across the Pacific Ocean, causing havoc in Hawaii and in Japan), the 26 December 2004 tsunami (that killed more than 200,000 people around the shores of the Indian Ocean), and the tsunami off the coast of Japan on 11 March 2011 (that killed thousands of people and triggered a nuclear accident). 
In all three cases the earthquake was undersea, along a fault line, in which case the generated waves are typically two-dimensional and with long wavelengths, of the order of 100 km (see the discussions in \cite{as,Con11,dd}). Given that the average ocean depth is about 4 km, this means that tsunami waves in the open ocean are shallow water waves. While near the shore, where the water depth gradually diminishes, nonlinear effects become dominant (and induce wave-breaking), a long-standing issue was whether in the open sea the dominant linear behaviour of the flow is greatly affected by the accumulation of nonlinear factors. Some authors (see \cite{cr, la, se}) advocated that KdV-like models apply, but even in the case of propagation over the maximal distance that is possible (across the Pacific Ocean, which was the case for the 1960 Chile tsunami), the scale analysis reveals that weakly nonlinear theory is not adequate (see the discussion in \cite{as, Con09, Con11, cj, st}): the effects remain negligible in the open sea. Advances in the modelling of the tsunami wave propagation in the open sea, triggered by an undersea earthquake, were obtained in \cite{CG12, dd} for the setting of a still sea prior to the perturbation. In this paper we extend these considerations to accommodate the presence of underlying currents. These are typically modelled by constant-vorticity flows (see the discussion in \cite{TDSP88, Ewi90}) but even the irrotational setting (flows with zero vorticity) might admit uniform tidal currents. \section{The governing equations} We consider an analogous setup as in \cite{CG12}: we consider two-dimensional surface waves with the unbounded $X$-direction corresponding to the direction of wave propagation. The water's free surface is $Y = d + F(X,T)$, where $d$ is the average depth of the sea, the seabed being $Y = H(X,T)$. At time $T=0$, we assume that $F(X,0)=0$ and $H(X,0)=0$. 
Then the seabed moves for a short time in some compact region $X \in [-L,L]$ and is flat again after that. We are ultimately interested in the deformation of the free surface, i.e. in $F(X,T)$. To do this, we study a partial differential equation that describes its evolution in time. Let $(U,V)$ be the velocity field of the flow in rectangular Cartesian coordinates $(X,Y)$. We assume that the water is inviscid and that the resulting flow has constant vorticity. Note that the presence of non-uniform currents makes the hypothesis of irrotational flow inappropriate; moreover, in a setting in which the waves are long compared to the water depth, it is the existence of a non-zero mean vorticity that is important, rather than its specific distribution \cite[cf.][]{TDSP88}. This is not merely a mathematical simplification: wind-generated currents in shallow waters with nearly flat beds have been shown to be accurately described as flows with constant vorticity \cite{Ewi90}. We now present the governing equations for water flows \cite{Con11}. We have the incompressible Euler equations \begin{align}\label{Euler PDE} \begin{rcases} U_X + V_Y &= 0,\\ \rho (U_T + U U_X + V U_Y) &= - P_X,\\ \rho (V_T + U V_X + V V_Y) &= - P_Y - \rho g, \end{rcases} \end{align} in $H(X,T) < Y < d + F(X,T)$, where $\rho$ is the constant density, $g$ is the constant acceleration of gravity and $P$ is the pressure. The effects of surface tension are negligible for wavelengths greater than a few centimetres \cite{BK75,Lig96}, hence the major factor governing the wave motion is the balance between gravity and the inertia of the system. We also have the kinematic boundary conditions \begin{align}\label{bc on surface, physical} V = F_T + U F_X \quad \text{on the free surface }Y=d + F(X,T) \end{align} and \begin{align}\label{bc on seabed, physical} V = H_T + U H_X \quad \text{on the rigid bed }Y=H(X,T), \end{align} which ensure that particles on these boundaries are confined to them at all times. 
We also have the dynamic boundary condition \begin{align}\label{bc pressure, physical} P - P_\text{atm} = 0\quad \text{on the free surface }Y=d + F(X,T), \end{align} where $P_{\text{atm}}$ is the constant atmospheric pressure. This decouples the motion of the water from the motion of the air above it \cite{Joh97}. The system \eqref{Euler PDE}-\eqref{bc pressure, physical} is to be solved with the following initial conditions \begin{align}\label{ic, physical} F(X,0)=0,\quad U(X,Y,0) = A Y + B,\quad V(X,Y,0) = 0. \end{align} These initial conditions express the assumption that initially, at time $T=0$, there is only a horizontal flow whose speed depends linearly on the depth. This is a generalization of \cite{CG12}, where it was assumed that the water is completely at rest at $T=0$, a case which corresponds to $A = B = 0$. A two-dimensional water flow that has constant vorticity at some instant will have this feature at all other times (see the discussion in Chapter 1 of \cite{Con11}). By the initial condition \eqref{ic, physical} this implies \begin{align}\label{vorticity, physical} U_Y - V_X = A \end{align} for $H(X,T) < Y < d + F(X,T)$. Fig \ref{Model at time 0} illustrates our set-up in the case $A,B >0$. \begin{figure} \caption{Pure current flow (in the absence of waves) at the initial time $T=0$} \label{Model at time 0} \end{figure} The first step in the analysis of the system \eqref{Euler PDE}-\eqref{vorticity, physical} will be to non-dimensionalize and scale the equations. First, it will be convenient to introduce the non-dimensional excess pressure $p$ relative to the hydrostatic pressure distribution by \begin{align}\label{def of p} P = P_\text{atm} + \rho g (d-Y) + \rho g d p. \end{align} To obtain meaningful scales, we introduce the average or typical wavelength $\lambda$ of the wave, we use $\sqrt{ g d }$ as a scale for the wave speed and $\lambda/\sqrt{g d}$ as a time scale. 
We use the change of variables \begin{align}\label{first scaling 1} X = \lambda x,\quad Y = d y,\quad T = \frac{\lambda}{\sqrt{gd}}t,\quad U = \sqrt{g d}u,\quad V = \frac{d\sqrt{gd}}{\lambda}v \end{align} with \begin{align}\label{first scaling 2} F = a f,\quad H = a h, \end{align} where $a$ is a typical, perhaps maximal, amplitude of the wave. Note that \eqref{first scaling 2} should be interpreted as ensuring that the variations of the wave and of the seabed are of comparable size. We introduce the parameters \begin{align}\label{def of eps and delta} \varepsilon = \frac{a}{d},\quad \delta = \frac{d}{\lambda}\,, \end{align} where $\varepsilon$ measures the relative size of the amplitude to the average water depth and $\delta$ measures the average water depth to the wavelength. Now we get the non-dimensional equations \begin{align}\label{non-dim orig} \begin{rcases} &\begin{cases} u_x + v_y = 0\\ u_t + u u_x + v u_y = -p_x\\ \delta^2(v_t + u v_x + v v_y) = -p_y\\ u_y - \delta^2 v_x = \sqrt{\frac{d}{g}}A \end{cases}\quad \text{in }\varepsilon h(x,t) < y < 1 + \varepsilon f(x,t)\\ &p = \varepsilon f \qquad \qquad \quad \text{on }y = 1 + \varepsilon f(x,t)\\ &v =\varepsilon(f_t + u f_x)\quad \text{on }y = 1 + \varepsilon f(x,t)\\ &v =\varepsilon(h_t + u h_x) \quad \text{on }y=\varepsilon h(x,t)\\ &f(x,0)=0,\quad u(x,y,0) =\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}},\quad v(x,y,0) = 0 \end{rcases}. \end{align} The magnitudes of $\varepsilon$ and $\delta$ correspond to the different general types of water wave problem: The limits $\delta \to 0$ and $\delta \to \infty$ produce the shallow water and deep water regime, respectively. On the other hand, $\varepsilon \to 0$ corresponds to regime of waves of small amplitude (see the discussion in \cite{Joh97,CJ08}). We want to linearize the problem and in this case we will want to let $\varepsilon \to 0$ and keep $\delta$ fixed. 
But the system \eqref{non-dim orig} shows that $v$ and $p$ are of order $\varepsilon$ and hence also $u$. Hence we use the following scaling: \begin{align}\label{second scaling} u \mapsto \sqrt{\frac{d}{g}}Ay + \frac{B}{\sqrt{g d}} +\varepsilon u, \quad v \mapsto \varepsilon v, \quad p\mapsto \varepsilon p, \end{align} where we avoid a new notation. We then obtain the system \begin{align}\label{non-dim new 1} \begin{rcases} u_x + v_y &= 0\\ u_t + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}} + \varepsilon u\right)u_x + v\left(\sqrt{\frac{d}{g}}A + \varepsilon u_y\right) &= - p_x\\ \delta^2 \left(v_t + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}} + \varepsilon u\right)v_x + \varepsilon v v_y\right) &= -p_y\\ u_y - \delta^2 v_x &= 0 \end{rcases} \end{align} in $\varepsilon h(x,t) < y < 1 + \varepsilon f(x,t)$ and \begin{align}\label{non-dim new 2} \begin{rcases} &p = f \quad \text{on }y = 1 + \varepsilon f(x,t)\\ &v = f_t + \left(\sqrt{\frac{d}{g}}A (1 + \varepsilon f(x,t)) + \frac{B}{\sqrt{gd}} + \varepsilon u\right)f_x\quad \text{on }y = 1 + \varepsilon f(x,t)\\ &v= h_t + \left(\sqrt{\frac{d}{g}}A \varepsilon h(x,t) + \frac{B}{\sqrt{gd}} + \varepsilon u\right)h_x \quad \text{on }y=\varepsilon h(x,t)\\ &f(x,0) = 0,\quad u(x,y,0) = 0, \quad v(x,y,0) = 0 \end{rcases}. \end{align} We now linearize this system by taking $\varepsilon \to 0$ and obtain \begin{align}\label{reduced eq} \begin{rcases} &\begin{cases} u_x + v_y = 0\\ u_t + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)u_x + \sqrt{\frac{d}{g}}A v = -p_x\\ \delta^2\left(v_t + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)v_x\right) = -p_y\\ u_y - \delta^2 v_x = 0 \end{cases}\quad \text{in }0 < y < 1\\ &p = f \quad \text{on }y = 1\\ &v = f_t + \left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)f_x \quad \text{on }y=1\\ &v = h_t + \frac{B}{\sqrt{gd}} h_x \quad \text{on }y=0\\ &f(x,0)=0,\quad u(x,y,0) = 0,\quad v(x,y,0)=0 \end{rcases}. 
\end{align} By the first equation in \eqref{reduced eq}, there exists a stream function $\psi$ such that \begin{align}\label{def psi} u = \psi_y,\quad v = -\psi_x \quad \text{in }0<y<1. \end{align} If we plug this in \eqref{reduced eq}, we get \begin{align}\label{reduced eq psi} \begin{rcases} &\begin{cases} \psi_{yy} + \delta^2 \psi_{xx} = 0\\ \psi_{yt} + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)\psi_{xy} - \sqrt{\frac{d}{g}}A \psi_{x} = -p_x\\ \delta^2\left(\psi_{x t} + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)\psi_{x x}\right) = p_y \end{cases}\quad \text{in }0 < y <1 \\ &p = f \quad \text{on }y = 1\\ &\psi_x = -f_t - \left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)f_x \quad \text{on }y=1\\ &\psi_x = -h_t - \frac{B}{\sqrt{gd}} h_x \quad \text{on }y=0\\ &f(x,0)=0,\quad \psi_y(x,y,0) = 0,\quad \psi_x(x,y,0)=0 \end{rcases}. \end{align} Note that we want to eventually find $f$. But $p$ and $\psi$ are also unknown, only $h$ is given. We will now derive equations only involving $\psi$ and $h$. We first differentiate the second equation in \eqref{reduced eq psi} with respect to $x$ and $t$, respectively, to obtain \begin{align}\label{second x} \psi_{xyt} + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)\psi_{xxy} - \sqrt{\frac{d}{g}}A \psi_{xx} = -p_{xx} \end{align} and \begin{align}\label{second t} \psi_{ytt} + \left(\sqrt{\frac{d}{g}}A y + \frac{B}{\sqrt{gd}}\right)\psi_{xyt} - \sqrt{\frac{d}{g}}A \psi_{xt} = -p_{xt}. \end{align} Furthermore, we can differentiate the fifth equation in \eqref{reduced eq psi} with respect to $x$ to obtain \begin{align}\label{fifth x} \psi_{xx} &= -f_{x t} - \left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)f_{xx} \quad \text{on }y=1. \end{align} Observe that the fourth equation in \eqref{reduced eq psi} implies that we can exchange the derivatives of $f$ in \eqref{fifth x} by the corresponding derivatives of $p$. 
But then we can insert the equations \eqref{second x} and \eqref{second t} in \eqref{fifth x} to obtain \begin{align*} \psi_{xx} &=\psi_{y t t} + 2 \left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)\psi_{xyt}+ \left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)^2\psi_{x x y} \\&- \sqrt{\frac{d}{g}}A\left(\sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}\right)\psi_{x x} - \sqrt{\frac{d}{g}}A \psi_{x t} \end{align*} on $y = 1$. We introduce the constant \begin{align}\label{def C} C = \sqrt{\frac{d}{g}}A + \frac{B}{\sqrt{gd}}, \end{align} which is the non-dimensional horizontal velocity of the flow at the surface. This allows us to rewrite the previous equation as \begin{align*} \left(1 + \sqrt{\frac{d}{g}}A C\right)\psi_{xx} = \psi_{y t t} + 2 C \psi_{x y t} + C^2 \psi_{x x y} - \sqrt{\frac{d}{g}}A \psi_{x t} \end{align*} on $y = 1$. By observing the quadratic expression on the right-hand side and introducing the differential operator \begin{align}\label{def T} S = C \partial_x + \partial_t, \end{align} we can rewrite this again as \begin{align*} \left(1 + \sqrt{\frac{d}{g}}A C\right)\psi_{xx} &= S^2 \psi_y - \sqrt{\frac{d}{g}}A \psi_{x t}. \end{align*} So we obtain \begin{align}\label{reduced eq final} \begin{rcases} \psi_{yy} + \delta^2\psi_{xx} &= 0,\quad \text{in } 0< y < 1\\ \left(1 + \sqrt{\frac{d}{g}}A C\right)\psi_{xx} &= S^2 \psi_y - \sqrt{\frac{d}{g}}A \psi_{x t},\quad \text{on }y=1\\ \psi_x &= -h_t - \frac{B}{\sqrt{gd}} h_x, \quad \text{on } y=0\\ \psi_x(x,y,0) = 0&,\quad \psi_y(x,y,0)=0, \end{rcases} \end{align} which only involves $\psi$ and $h$. We can then finally recover $f$ by the following procedure: We know by the fourth and fifth equation in \eqref{reduced eq psi} that \begin{align*} f_x = p_x = -\psi_{y t} + C \psi_{x y} - \sqrt{\frac{d}{g}}A \psi_x \end{align*} on $y = 1$ and hence \begin{align}\label{t derivative of f} f_t = - \psi_x- C f_x = C\psi_{y t} - C^2 \psi_{x y} + \left(\sqrt{\frac{d}{g}}AC -1\right)\psi_x \end{align} on $y = 1$. 
Together with the initial condition \begin{align*} f(x,0) = 0 \end{align*} we can recover $f$ by \begin{align*} f(x,t) = \int_{0}^t f_t(x,\tau)d \tau\,, \end{align*} and $p$ can be reconstructed similarly. \section{General solution formulae for linear waves} We will consider the space and space-time Fourier transform with notations \begin{align*} \hat{\varphi}(\xi, y , t ) = \frac{1}{\sqrt{2 \pi}}\int_\R \varphi(x,y,t)e^{- i x \xi } dx \end{align*} and \begin{align*} \tilde{\varphi}(\xi,y,\omega) = \frac{1}{2 \pi} \int_\R \int_\R \varphi(x,y,t) e^{- i (x \xi + t\omega)}dx dt. \end{align*} We also define Fourier multipliers: Let $m :\R \rightarrow \cc$ be some function. We define $m(D)$ by \begin{align*} \left(m(D)\varphi\right)(x) = \frac{1}{\sqrt{2 \pi}}\int_\R m(\xi)\hat{\varphi}(\xi)e^{i x \xi }d\xi, \end{align*} or equivalently \begin{align*} \left(\widehat{m(D)\varphi}\right)(\xi) = m(\xi) \hat{\varphi}(\xi), \end{align*} where $D = - i\partial_x$, whenever this is defined. Note that $m(D)$ maps real-valued functions to real-valued functions whenever the condition \begin{align}\label{FM condition} m(-\xi) = \overline{m(\xi)} \end{align} is satisfied. In particular, this is satisfied if $m$ is real-valued and even. We can apply the space-time Fourier transform to the first three equations of system \eqref{reduced eq final} to obtain \begin{align}\label{FT of main eq} \begin{rcases} \tilde{\psi}_{yy} - \delta^2\xi^2 \tilde{\psi} &= 0,\quad \text{in } 0< y < 1\\ \left(1 + \sqrt{\frac{d}{g}}A C\right)\xi^2\tilde{\psi} &= Q^2 \tilde{\psi}_y - \sqrt{\frac{d}{g}}A \xi\omega\tilde{\psi},\quad \text{on }y=1\\ \xi\tilde{\psi} &= - \omega \tilde{h} - \frac{B}{\sqrt{gd}} \xi\tilde{h}, \quad \text{on } y=0 \end{rcases}, \end{align} where \begin{align} Q(\xi,\omega) = C \xi + \omega \end{align} with the relation \begin{align}\label{T and Q} \widetilde{S\phi} = i Q\Tilde{\phi}. 
\end{align} Since \eqref{FT of main eq} is just a linear ODE of second order in $y$ for fixed $\xi$ and $\omega$, we can, somewhat explicitly, write down the solution: \begin{align}\label{FT of psi by D1 and D2} \tilde{\psi}(\xi,y,\omega) = D_1(\xi,\omega)e^{\delta \xi y} + D_2(\xi,\omega)e^{-\delta \xi y}, \end{align} where $D_1,D_2$ are determined by the boundary conditions. We can then reconstruct $\tilde{f}$ by applying the Fourier transform on the first equation in \eqref{t derivative of f} to obtain \begin{align*} \omega \tilde{f}(\xi,\omega) = - \xi \tilde{\psi}(\xi,1,\omega) - C \xi \tilde{f}(\xi,\omega) \end{align*} and hence \begin{align}\label{FT of f by FT of psi} \tilde{f}(\xi,\omega) = \frac{-\xi}{ C\xi+\omega}\tilde{\psi}(\xi,1,\omega) = \frac{-\xi}{Q(\xi,\omega)}\tilde{\psi}(\xi,1,\omega). \end{align} We ignore the potential singularity for the time being. We know that the functions $D_1$ and $D_2$ are uniquely determined. Once we have formulae for them, we get a formula for $\tilde{\psi}$ by inserting in \eqref{FT of psi by D1 and D2}. Then we can insert this formula in \eqref{FT of f by FT of psi} to obtain an expression for $\tilde{f}$ depending on $\tilde{h}$. We insert \eqref{FT of psi by D1 and D2} in \eqref{FT of main eq} to obtain \begin{align*} \xi\left(D_1(\xi,\omega) + D_2(\xi,\omega)\right) = -\left(\omega + \frac{B}{\sqrt{g d}}\xi\right)\Tilde{h}(\xi,\omega) \end{align*} and \begin{align*} \left(\xi + \sqrt{\frac{d}{g}}A Q(\xi,\omega)\right)\left(D_1(\xi,\omega)e^{\delta \xi} + D_2(\xi,\omega)e^{-\delta \xi}\right) \\= \delta Q(\xi,\omega)^2 \left(D_1(\xi,\omega) e^{\delta \xi} - D_2(\xi,\omega) e^{-\delta \xi}\right). 
\end{align*} This linear equation in $D_1(\xi,\omega)$ and $D_2(\xi,\omega)$ is solved easily and by \eqref{FT of f by FT of psi} we have \begin{align*} \Tilde{f}(\xi,\omega) &= \frac{-\xi}{Q(\xi,\omega)}\Tilde{\psi}(\xi,1,\omega)\\ &=\frac{\delta Q(\xi,\omega)\left(\omega + \frac{B}{\sqrt{g d}}\xi\right)}{\left(\delta Q(\xi,\omega)^2\cosh(\delta \xi) - \left(\xi + \sqrt{\frac{d}{g}}A Q(\xi,\omega)\right)\sinh(\delta \xi)\right)}\Tilde{h}(\xi,\omega), \end{align*} which we will write as \begin{align}\label{FT of f by FT of h} \Tilde{f}(\xi,\omega) = \frac{\delta^2 Q(\xi,\omega)\left(\omega + \frac{B}{\sqrt{g d}}\xi\right)\Tilde{h}(\xi,\omega)}{\left(\delta^2 Q(\xi,\omega)^2\cosh(\delta \xi) - \left(\delta\xi + \sqrt{\frac{d}{g}}A \delta Q(\xi,\omega)\right)\sinh(\delta \xi)\right)}. \end{align} We will also write this as \begin{align}\label{f by h with multipliers} \left( S^2 + \frac{D -i \sqrt{\frac{d}{g}}A S}{\delta}\tanh(\delta D)\right)f = \frac{ S\left(\partial_t + i\frac{B}{\sqrt{g d}}D\right)}{\cosh(\delta D)}h. \end{align} One would need to consider the roots of the denominator in \eqref{FT of f by FT of h} in order to justify \eqref{f by h with multipliers} rigorously. However, for the case $A=0$, it turns out that the singularity cancels. In the case $A = 0$ formula \eqref{f by h with multipliers} has the simpler form \begin{align}\label{f by h with multipliers, A=0} \left(S^2 + \frac{D}{\delta}\tanh(\delta D)\right)f = \frac{S^2}{\cosh(\delta D)}h. \end{align} We will extract a formula for $f$ whenever it solves the following type of equation. Let $\tau:\R \rightarrow \R$ be an even function such that $\tau(\xi) > 0$ for all $\xi\not=0$. Note that in particular \eqref{FM condition} is satisfied. We try to derive a formula for the solution $f$ of \begin{align}\label{f by theta with multipliers} \begin{rcases} \left(S^2 + \tau(D)\right)f &= \theta\\ \lim_{t \to -\infty} f &= 0 \end{rcases}, \end{align} where $\theta$ is a given forcing term.
We will assume $f$ and $\theta$ to be in $\S(\R^2,\R)$ and the limit in \eqref{f by theta with multipliers} is adequate for functions in the Schwartz class \cite{Str94}. We can decompose the operator $S^2 + \tau(D)$ into \begin{align}\label{factorizing T and tau} S^2 + \tau(D) = (S + i\sqrt{\tau(D)})(S - i\sqrt{\tau(D)}), \end{align} since $S$ and $i\sqrt{\tau(D)}$ commute. If we now set \begin{align*} f_1 = (S + i\sqrt{\tau(D)})f \end{align*} then we see by \eqref{factorizing T and tau} and the fact that we can commute the two factors that \begin{align*} (S - i\sqrt{\tau(D)})f_1 = \theta. \end{align*} We can now filter $f_1$ by the group $e^{i t \sqrt{\tau(D)}}$: Let $f_2 = e^{- i t \sqrt{\tau(D)}}f_1$. A calculation shows that \begin{align*} Sf_2 = (C\partial_x + \partial_t)f_2 = e^{- i t \sqrt{\tau(D)}}\theta. \end{align*} This is an inhomogeneous transport equation with solution \begin{align*} f_2(x,t) = \int_{- \infty}^t e^{- i s \sqrt{\tau(D)}}\theta(x + C(s - t),s)ds. \end{align*} This yields \begin{align*} \left(\sqrt{\tau(D)}f\right)(x,t) = \Imag \left(\int_{- \infty}^t e^{ i (t-s) \sqrt{\tau(D)}}\theta(x + C(s - t),s)ds\right). \end{align*} If we write out the Fourier multipliers explicitly, we get \begin{align}\label{explicit FM, A=0} f(x,t) = \frac{1}{\sqrt{2 \pi}} \Imag\left(\int_{-\infty}^t \int_\R \frac{e^{i(t-s)\sqrt{\tau(\xi)}}}{\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} \hat{\theta}(\xi,s)d\xi ds\right). \end{align} We will deal with the possible singularity in the for us relevant case: We have $\tau(\xi) = \frac{\xi}{\delta}\tanh(\delta \xi)$ and $\theta = \frac{S^2}{\cosh(\delta D)}h$. We see that $\tau$ is even and $\tau(\xi) >0$ for $\xi \not=0$. We have furthermore that $\tau$ is smooth with derivatives of at most polynomial growth at infinity. We will use that \begin{align}\label{limit of tau near zero} \lim_{\xi \to 0}\frac{|\xi|}{\sqrt{\tau(\xi)}}= 1. 
\end{align} Now observe that the function $\frac{1}{\sqrt{\tau(D)}}\theta$ is real-valued and hence we can subtract \\ $\left(\frac{1}{\sqrt{\tau(D)}}\theta\right)(0,\cdot)$ in \eqref{explicit FM, A=0} to obtain \begin{align}\label{formula for f with cancelled sing, A=0} f(x,t) = \frac{1}{\sqrt{2 \pi}} \Imag\left(\int_{-\infty}^t \int_\R \frac{e^{i(t-s)\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} - 1}{\sqrt{\tau(\xi)}} \hat{\theta}(\xi,s)d\xi ds\right). \end{align} Clearly, $\hat{\theta}(\xi,s)$ remains bounded around 0. For the quotient, note that we can rewrite it as follows: \begin{align*} \left|\frac{e^{i(t-s)\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} - 1}{\sqrt{\tau(\xi)}} \right| =\left|\frac{e^{i(t-s)\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} - 1}{\xi}\right|\left|\frac{\xi}{\sqrt{\tau(\xi)}}\right| \end{align*} The second factor converges to 1 by \eqref{limit of tau near zero}. For the first factor, note that the function \begin{align*} \xi \mapsto e^{i(t-s)\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} \end{align*} is left and right differentiable in 0 and hence the first factor remains bounded as $\xi \to 0$ for any $s$. We conclude that \eqref{formula for f with cancelled sing, A=0} holds (at least pointwise). Note that we cannot do the same for the case $A\not=0$: The operator on the left hand side of \eqref{f by h with multipliers} is given by \begin{align*} S^2 + (1 - C^2)\frac{D}{\delta}\tanh(\delta D) - \frac{iC}{\delta}\partial_t \tanh(\delta D) \end{align*} and there does not seem to be an apparent way to obtain a root for \begin{align*} (1 - C^2)\frac{D}{\delta}\tanh(\delta D) -\frac{iC}{\delta}\partial_t \tanh(\delta D). \end{align*} In fact, it is unclear whether this operator is even positive. \section{Behaviour in different regimes} We want to investigate some regimes for which our considerations give useful predictions. 
We will consider on the one hand the case where $\delta$ is small, which is justified considering the model as $\delta \to 0$. On the other hand, we will discuss the case where $\delta$ is finite and non-vanishing. The case where $\delta$ is large (corresponding to the formal limit $\delta \to \infty$) is not relevant for us since deep-water tsunamis are rare. \subsection{The shallow water regime} For simplicity, we use the model in the case $A=0$: As $\delta \to 0$, \eqref{f by h with multipliers, A=0} becomes \begin{align}\label{shallow water pde} (S^2 - \partial_x^2)f = S^2 h. \end{align} The initial conditions are on the one hand $f(x,0)=0$ from \eqref{reduced eq}. On the other hand, this gives $f_x(x,0)=0$ and inserting this in the sixth relation in \eqref{reduced eq} for $t=0$ yields $f_t(x,0) = 0$. By using Duhamel's principle (similar to \cite[p. 80--81]{Eva10}), we get the formula \begin{align}\label{formula for f in shallow water} f(x,t) = \frac{1}{2} \int_0^t \int_{x - (t-s)(C+1)}^{x - (t-s)(C-1)}S^2 h(r,s) dr ds. \end{align} We will assume that $h$ can be separated in the following way: $h(x,t) = a(t)b(x)$ for $a \in C^2(\R,[0,\infty))$, $b \in C^2(\R,\R)$ such that $a(t)=0$ for $t \leq 0$ and $a(t) = 1$ for $t > t_0$ (where $t_0$ represents the duration of the earthquake), and with $b(x) = 0$ for $x \notin (-L,L)$ modelling a localized tsunami source. Inserting this into \eqref{formula for f in shallow water} yields the formula \begin{align*} f(x,t) &= a(t)b(x) \\ &+\frac{1}{2}\int_0^t a(s)\left[b'(x - (t-s)(C-1)) - b'(x - (t-s)(C+1))\right]ds, \end{align*} which is a lot simpler. 
In the limiting case $t_0 \searrow 0$, $a$ becomes the Heaviside step function (modelling an instantaneous upward thrust of the seabed near to the earthquake's epicentre) and the formula further simplifies to \begin{align}\label{final formula for shallow water} f(x,t) =\frac{C^2}{C^2-1}b(x) + \frac{b(x - t(1+C))}{2(1+C)} + \frac{b(x + t(1-C))}{2(1-C)}, \end{align} for $x \in \R$ and $t >0$. We can draw some insightful conclusions: \begin{itemize} \item at each instant $t >0$ after initiation, the generated wave is localized as $f(x,t) = 0$ for $|x| \geq L + t(1+C)$; \item the surface wave consists of one stationary part (which can be disregarded, since $C \ll 1$) and two travelling waves, one moving to the right and the other moving to the left; \item the wave travelling to the left moves with non-dimensionalized speed $1-C$ (corresponding to $\sqrt{gd} - B$ in the physical variables) and the wave travelling to the right moves with non-dimensionalized speed $1+C$ (corresponding to $\sqrt{gd} + B$ in the physical variables); \item the shapes of the waves travelling to the left and to the right remain unchanged and are precisely that of the bed deformation at a scale of $\frac{1}{2(1-C)}$ and $\frac{1}{2(1+C)}$, respectively. \end{itemize} \begin{figure} \caption{Model shortly after the instantaneous upward and downward thrust} \label{Model at time t small} \end{figure} \begin{figure} \caption{Expected long-term behaviour} \label{Model at time t large} \end{figure} This behaviour is illustrated in Figure \ref{Model at time t small} and Figure \ref{Model at time t large}. On the one hand, the wave travelling to the right is faster for bigger $C$, but the maximal amplitude decreases. On the other hand, the wave travelling to the left becomes slower for bigger $C$, but the maximal amplitude increases. 
For $B=0$ and hence $C=0$, this model gives good predictions for the maximal amplitude and speed of the propagating wave for the two largest historical tsunamis: the December 2004 and the May 1960 tsunamis, see \cite{CG12}. Since $B \ll \sqrt{gd}$, i.e. $C \ll 1$, the model for $B \not= 0$ will give a very similar prediction, which might be slightly more precise, if one were in a situation, where $B$ is known somewhat precisely, say due to a current. \subsection{The finite, non-vanishing regime} Here we cannot expect to be able to write down $f$ in any explicit fashion. However, we will discuss a method to analyze the asymptotic behaviour of $f$ in \eqref{formula for f with cancelled sing, A=0}. Ultimately, we will see that this approach does not work, since the method only works for too large $t$, see \eqref{stationary-phase analysis takes too long}. We cannot do the same for $B = 0$ or the general case, since we lack a formula which would need to correspond to \eqref{explicit FM, A=0} or \eqref{formula for f with cancelled sing, A=0}. If such a formula were available, stationary-phase analysis might then be appropriate. \subsubsection{Principle of stationary-phase} We want to analyse the asymptotic behaviour of \eqref{formula for f with cancelled sing, A=0}. For this we try to use the stationary-phase principle. Recall that we set $\tau(\xi) = \frac{\xi}{\delta}\tanh(\delta \xi)$ and $\theta = \frac{S^2}{\cosh(\delta D)}h$. We first rewrite \eqref{formula for f with cancelled sing, A=0} as \begin{align*} &f(x,t) = \frac{1}{\sqrt{2 \pi}} \Imag\left(\int_{-\infty}^t \int_\R \frac{e^{i(t-s)\sqrt{\tau(\xi)}}e^{i x \xi} e^{iC(s-t)\xi} - 1}{\sqrt{\tau(\xi)}} \hat{\theta}(\xi,s)d\xi ds\right) \\ &= \frac{1}{\sqrt{2 \pi}} \Imag\left(\int_{-\infty}^t \int_\R \frac{e^{-i s \left( \sqrt{\tau(\xi)} - C \xi\right)}e^{it\left(\sqrt{\tau(\xi)} + (\mathcal{X}-C)\xi)\right)} - 1}{\sqrt{\tau(\xi)}} \hat{\theta}(\xi,s) d\xi ds\right), \end{align*} where $\X = \frac{x}{t}$. 
The derivative of the phase factor $\sqrt{\tau(\xi)} + (\mathcal{X}-C)\xi$ is given by \begin{align*} [\sqrt{\tau}]'(\xi) + \X - C \end{align*} and one can check that \begin{align}\label{first derivative of root tau} \partial_\xi\sqrt{\tau(\xi)}=\frac{\sinh(\delta \xi)\cosh(\delta \xi) + \delta \xi}{2\delta\cosh^2(\delta \xi)} \sqrt{\frac{\delta\cosh(\delta \xi)}{\xi\sinh(\delta \xi)}} \end{align} and \begin{align}\label{second derivative of root tau} \partial_\xi^2\sqrt{\tau(\xi)} = -\frac{1}{\tau^\frac{3}{2}(\xi)}\frac{\left(\sinh(\delta \xi)\cosh(\delta \xi) - \delta \xi\right)^2 + 4\delta^2\xi^2 \sinh^2(\delta \xi)}{4 \delta^2 \cosh^4(\delta \xi)} < 0. \end{align} One computes the limits \begin{align*} &\lim_{\xi \searrow 0}\partial_\xi\sqrt{\tau(\xi)} = 1,\\ &\lim_{\xi \nearrow 0}\partial_\xi\sqrt{\tau(\xi)} = -1,\\ &\lim_{\xi \to \infty}\partial_\xi\sqrt{\tau(\xi)} = 0,\\ &\lim_{\xi \to -\infty}\partial_\xi\sqrt{\tau(\xi)} = 0\,, \end{align*} and concludes that $\xi \mapsto \partial_\xi\sqrt{\tau(\xi)}$ is strictly decreasing on $(0,\infty)$ from the asymptotic value 1 towards the asymptotic value 0 and on $(-\infty,0)$ from the asymptotic value $0$ to the asymptotic value $-1$. So we conclude that \begin{align*} [\sqrt{\tau}]'(\xi_0) + \X - C = 0 \end{align*} is possible if $\X - C \in (-1,0)\cup(0,1)$ for exactly one $\xi_0$ for given $\X$. So as long as $\X$ is in the corresponding range, we can write $\xi_0 = \xi_0(\X)$. The stationary-phase principle gives \begin{align*} f(x,t)\sim &\frac{1}{\sqrt{t \tau(\xi_0) |[\sqrt{\tau}]''(\xi_0)|}}\cdot \\&\Imag\left(\int_{-\infty}^t e^{-i s \left( \sqrt{\tau(\xi_0)} - C \xi_0\right)} \hat{\theta}(\xi_0,s) e^{it\left(\sqrt{\tau(\xi_0)} + (\mathcal{X}-C)\xi_0 + \sigma\frac{\pi}{4})\right)} ds\right) \end{align*} for large $t$, where $\sigma$ is the sign of $[\sqrt{\tau}]''(\xi_0)$, which is just $-1$ by \eqref{second derivative of root tau}. 
We see that this is a Fourier transform (with respect to time) and hence get \begin{align*} f(x,t) \sim &\frac{\sqrt{2\pi}}{\sqrt{t \tau(\xi_0) |[\sqrt{\tau}]''(\xi_0)|}}\cdot \\&\Imag\left( \Tilde{\theta}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) e^{it\left(\sqrt{\tau(\xi_0)} + (\mathcal{X}-C)\xi_0 -\frac{\pi}{4})\right)} \right). \end{align*} We have \begin{align*} \Tilde{\theta}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) &= -\frac{Q(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0)^2}{\cosh(\delta \xi_0)}\Tilde{h}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) \\ &=-\frac{\tau(\xi_0)}{\cosh(\delta \xi_0)}\Tilde{h}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) \end{align*} and hence get \begin{align}\label{f stationary-phase asymptotic} f(x,t) \sim &\frac{-\sqrt{2\pi \tau(\xi_0)}}{\cosh(\delta \xi_0)\sqrt{t |[\sqrt{\tau}]''(\xi_0)|}}\cdot \nonumber\\&\Imag\left( e^{it\left(\sqrt{\tau(\xi_0)} + (\mathcal{X}-C)\xi_0 -\frac{\pi}{4})\right)} \Tilde{h}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) \right). \end{align} Finally, we try to extract the asymptotics with respect to $\delta$: We fix a $\xi > 0$. A look at \eqref{first derivative of root tau} gives that $\delta \mapsto \partial_\xi \sqrt{\tau(\xi)}$ is $C^\infty$ and clearly even in $\delta$. Since we have $\lim_{\delta \searrow 0}\partial_\xi \sqrt{\tau(\xi)} = 1$, the Taylor expansion with respect to $\delta$ around 0 has the form \begin{align*} \partial_\xi \sqrt{\tau(\xi)} = 1 + \alpha(\xi)\delta^2 + O(\delta^3), \end{align*} for some coefficient $\alpha(\xi)$. One could find this coefficient through tedious calculations, but one can instead notice that $2 \alpha(\xi)$ is the coefficient of $\delta^2$ of the expansion of $\left(\partial_\xi \sqrt{\tau(\xi)}\right)^2$. 
We have \begin{align*} \left(\partial_\xi \sqrt{\tau(\xi)}\right)^2 &=\frac{\sinh(\delta \xi)}{4\delta \xi\cosh(\delta \xi)} + \frac{1}{2\cosh^2(\delta \xi)} + \frac{\delta \xi}{4\sinh(\delta \xi)\cosh^3(\delta \xi)}\\ &= 1 - \delta^2 \xi^2 + O(\delta^3) \end{align*} and hence we conclude $\partial_\xi \sqrt{\tau(\xi)} = 1 - \frac{1}{2}\delta^2\xi^2 + O(\delta^3)$. We conclude that for $\X - C \in (-1,0)$ the stationary-phase point $\xi_0 >0$ with $[\sqrt{\tau}]'(\xi_0) + \X - C = 0$ satisfies $\delta \xi_0 = O(1)$. Inserting this into \eqref{second derivative of root tau} yields \begin{align}\label{second derivative at station} |\partial_\xi^2\sqrt{\tau(\xi_0)}| = O(\delta). \end{align} \subsubsection{Some typical physical parameters} We want to see whether stationary-phase analysis, using the formula \begin{align*} f(x,t) \sim &\frac{-\sqrt{2\pi \tau(\xi_0)}}{\cosh(\delta \xi_0)\sqrt{t |[\sqrt{\tau}]''(\xi_0)|}}\cdot \\&\Imag\left( e^{it\left(\sqrt{\tau(\xi_0)} + (\mathcal{X}-C)\xi_0 -\frac{\pi}{4})\right)} \Tilde{h}(\xi_0,\sqrt{\tau(\xi_0)} - C \xi_0) \right), \end{align*} could be justified here. Since the stationary-phase principle applies for large $t$ we insert typical values of the physical parameters for tsunamis propagating at open sea (see \cite{Con09}): \begin{align*} a = 1\text{m},\quad d=4 \text{km},\quad \lambda = 200 \text{km}. \end{align*} This leads to $\varepsilon = 0.00025$ and $\delta = 0.02$ which point to linear wave approximation. Applying the stationary-phase principle is justified if \begin{align} t |\sqrt{\tau}''(\xi)| \gg 1. \end{align} We have by \eqref{second derivative at station}, $|\sqrt{\tau}''(\xi)| = O(\delta)$, which translates to \begin{align}\label{t gg delta squared} t \gg \frac{1}{\delta^2}. 
\end{align} In physical variables this means \begin{align}\label{stationary-phase analysis takes too long} T \gg \frac{\lambda}{\sqrt{gd}\delta^2} \approx 2.5 \times 10^6 \text{ s} \approx 700 \text{ h}, \end{align} which takes way too long to be applicable. If one were to relax \eqref{t gg delta squared} to a condition of the form \begin{align*} t \geq \frac{1}{\delta}, \end{align*} one would arrive at \begin{align*} T \geq \frac{\lambda}{\sqrt{gd}\delta} \approx 5 \times 10^4 \text{ s} \approx 14 \text{ h} \end{align*} which would barely be applicable in the case where the epicentre of the seaquake is far off the shore. Nevertheless, relaxing \eqref{t gg delta squared} is quite dubious. We conclude that using stationary-phase analysis is not an effective tool in the case $A = 0$. \end{document}
\begin{document} \begin{abstract} Let $ \lambda ^2 \in \mathbb N $, and in dimensions $ d\geq 5$, let $ A _{\lambda } f (x)$ denote the average of $ f \;:\; \mathbb Z ^{d} \to \mathbb R $ over the lattice points on the sphere of radius $\lambda$ centered at $x$. We prove $ \ell ^{p}$ improving properties of $ A _{\lambda }$. \begin{equation*} \lVert A _{\lambda }\rVert _{\ell ^{p} \to \ell ^{p'}} \leq C _{d,p, \omega (\lambda ^2 )} \lambda ^{d ( 1-\frac{2}p)}, \qquad \tfrac{d+1}{d-1} < p \leq \frac{d} {d-2}. \end{equation*} It holds in dimension $ d =4$ for odd $ \lambda ^2 $. The dependence is in terms of $ \omega (\lambda ^2 )$, the number of distinct prime factors of $ \lambda ^2 $. These inequalities are discrete versions of a classical inequality of Littman and Strichartz on the $ L ^{p}$ improving property of spherical averages on $ \mathbb R ^{d}$. In particular they are scale free, in a natural sense. The proof uses the decomposition of the corresponding multiplier whose properties were established by Magyar-Stein-Wainger, and Magyar. We then use a proof strategy of Bourgain, which dominates each part of the decomposition by an endpoint estimate. \end{abstract} \maketitle \section{Introduction} The subject of this paper is in discrete harmonic analysis, in which continuous objects are studied in the setting of the integer lattice. Relevant norm properties are much more intricate, with novel distinctions from the continuous case arising. In the continuous setting, $ L ^{p}$-improving properties of averages over lower dimensional surfaces are widely recognized as an essential property of such averages \cites{MR0358443,MR1969206,MR1654767,MR0256219}. It continues to be a very active subject of investigation. In the discrete setting, these questions are largely undeveloped.
They are implicit in work on discrete fractional integrals by several authors \cites{MR2872554,MR1945293,MR1825254,MR1771530}, as well as two recent papers \cites{MR3933540,MR3892403} on sparse bounds for discrete singular integrals. Our main results concern $\ell^p$ improving estimates for averages over discrete spheres, in dimensions $ d \geq 5$, and in dimension $ d=4$, for certain radii. We recall the continuous case. For dimensions $d\geq 2$, let $ d\sigma $ denote Haar measure on the sphere of radius one, and let $ \mathcal A_1 f = d\sigma \ast f $ be convolution with respect to $ d\sigma $. The classical result of Littman \cite{MR0358443} and Strichartz \cite{MR0256219} gives the sharp $ L ^{p}$ improving property of this average. Here, we are stating the result in a restrictive way, but the full strength is obtained by interpolating with the obvious $ L ^{1} \to L ^{1}$ bound. \begin{priorResults}\label{t:LS} \cites{MR0358443,MR0256219} For dimensions $ d \geq 2$, we have $ \lVert \mathcal A_1 \rVert _{ \frac{d+1}{d} \to d+1} < \infty $. \end{priorResults} We study the discrete analog of $ \mathcal A_1 f $ in higher dimensions. For $ \lambda ^2 \in \mathbb N $, let $\mathbb S^d_\lambda := \{ n\in \mathbb{Z}^d \;:\; \lvert n\rvert = \lambda\}$. For a function $ f$ on $ \mathbb Z ^{d}$, define \begin{equation*} A _{\lambda } f (x) = \lvert \mathbb S^d_\lambda \rvert ^{-1} \sum_{n \in \mathbb S^d_\lambda } f (x-n). \end{equation*} The study of the harmonic analytic properties of these averages was initiated by Magyar \cite{MR1617657}, with Magyar, Stein and Wainger \cite{MR1888798} proving a discrete variant of the Stein spherical maximal function theorem \cite{MR0420116}. This result holds in dimensions $ d \geq 5$, as irregularities in the number of lattice points on spheres present obstructions to a positive result in dimensions $ d =2,3,4$. In particular, they proved the result below.
See Ionescu \cite{MR2053347} for an endpoint result, and the work of several others which further explore this topic \cites{MR1925339,MR2346547,160904313,MR3819049,MR3960006}. \begin{priorResults}[Magyar, Stein, Wainger, \cite{MR1888798}]\label{t:MSW} For $ d \geq 5$, there holds \begin{equation*} \bigl\lVert \sup _{\lambda } \lvert A _{\lambda } f \rvert \bigr\rVert _{p} \lesssim \lVert f\rVert _{p}, \qquad \tfrac{d} {d-2} < p < \infty . \end{equation*} \end{priorResults} \noindent We will refer to $ p _{\textup{MSW}}= \frac{d} {d-2}$ as the Magyar Stein Wainger index. Our first main result is a discrete variant of the result of Littman and Strichartz above. First note that $ A _{\lambda }$ is clearly bounded from $ \ell ^{p}$ to $ \ell ^{p}$, for all $ 1\leq p \leq \infty $. Hence, it trivially improves any $ f \in \ell ^{p} (\mathbb Z ^{d})$ to an $ \ell ^{\infty }(\mathbb Z ^{d})$ function. But, proving a \emph{scale-free version} of the inequality is not at all straightforward. In dimension $ d=4$, there is an arithmetical obstruction, namely for certain radii $ \lambda $, the number of points on the sphere of radius $ \lambda $ can be very small. To address this, let $\Lambda_d := \{ 0 < \lambda < \infty \;:\; \lambda^2\in \mathbb{N}\}$, for $d\geq 5$, and for $d=4$, \begin{equation}\label{e:Lam} \Lambda_4 := \{ 0 < \lambda < \infty \;:\; \lambda^2\in \mathbb{N}\setminus 4\mathbb{N}\}. \end{equation} Following the work of Magyar \cite{MR1925339}, we will address the case of dimension $ d=4$ below. And, we will prove results \emph{below the Magyar Stein Wainger index.} \begin{theorem}\label{t:improve} In dimensions $ d \geq 4$, the inequality below holds for all $ \lambda \in \Lambda _d$. \begin{equation} \label{e:improve} \lVert A _{\lambda }\rVert _{p \to p'} \leq C_{d,p, \omega (\lambda ^2 )} \lambda ^{d \left( 1 - \frac2{p} \right)}, \qquad \tfrac{ d+1} {d - 1} < p \leq 2.
\end{equation} Above, $ \omega (\lambda ^2 )$ is the number of distinct prime factors of $ \lambda ^2 $. In order that \eqref{e:improve} hold, it is necessary that $ p \geq \frac{d +1}{d}$, for $ d\geq 5$. \end{theorem} This Theorem was independently proved by Hughes \cite{180409260H}. The proof herein uses the same elements, but optimizes the interpolation part of the argument. It is short, and simple enough that one can give concrete estimates for the dependence on $ \lambda $, which we indicate below. To explain our use of the phrase `scale free' we make this definition. For a cube $ Q \subset \mathbb R ^{d}$ of volume at least one, we set localized and normalized norms to be \begin{equation} \label{e:localNorm} \langle f \rangle _{Q, p} := \Bigl[ \lvert Q\rvert ^{-1} \sum_{n \in Q \cap \mathbb Z ^{d}} \lvert f (n)\rvert ^{p} \Bigr] ^{1/p}, \qquad 0< p \leq \infty . \end{equation} An equivalent way to phrase our theorem above is the following corollary. Note that in this language, the inequalities in \eqref{e:Improve} are uniform in the choice of $ \lambda $. \begin{corollary}\label{c:fixed} Let $ d\geq 4$, and set $ \mathbf I_d$ to be the open triangle with vertices $ (0,1)$, $ (1,0)$, and $ (\frac{d-1} {d+1}, \frac{d-1} {d+1})$. (See Figure~\ref{f:IS}.) For $ (1/p_1, 1/p_2) \in \mathbf I_d$, there is a finite constant $ C = C _{d, p_1 ,p_2, \omega (\lambda ^2 )}$ so that \begin{equation}\label{e:Improve} \langle A _{\lambda } f _1, f_2 \rangle \leq C \langle f_1 \rangle _{Q,p_1} \langle f_2\rangle _{Q,p_2} \lvert Q\rvert, \qquad \lambda \in \Lambda_d. \end{equation} \end{corollary} \begin{figure} \caption{The triangle $ \mathbf I_d$ of Corollary~\ref{c:fixed}} \label{f:IS} \end{figure} Our main inequality is only of interest for $ \tfrac{d+1} {d-1} < p < \frac {d} {d-2} = p _{\textup{MSW}}$, in the case of $ d\geq 5$. Indeed, at $ p _{\textup{MSW}}$, we know a substantially better result.
For indicator functions $ f = \mathbf 1_{F}$ and $ g = \mathbf 1_{G}$ supported in a cube $ E$ of side length $ \lambda _0$, we have \cite{181002240} this restricted maximal estimate at the index $ p _{\textup{MSW}}$. \begin{equation} \label{e:KLM} \bigl\langle \sup _{\lambda _0/2 < \lambda < \lambda _0 } A _{\lambda } f , g \bigr\rangle \lesssim \langle f \rangle _{E, \frac{d} {d-2}} \langle g \rangle _{E, \frac{d} {d-2}} \lvert E\rvert . \end{equation} The proof of \eqref{e:improve} requires a circle method decomposition of $ A _{\lambda }$ in terms of its Fourier multiplier. The key elements here were developed by Magyar, Stein and Wainger \cite{MR1888798}, with additional observations of Magyar \cite{MR2287111}. We recall this in \S\ref{s:decompose}. The short proof in \S\ref{s:proof} uses indicator functions, following work of Bourgain \cite{MR812567}, and in the discrete setting Ionescu \cite{I}, and Hughes \cite{MR3671577}. We comment briefly on sharpness in the last section of the paper. We acknowledge useful conversations with Alex Iosevich and Francesco Di Plinio on the topics of this paper. Fan Yang and the referee suggested several improvements of the paper. \section{Decomposition} \label{s:decompose} Throughout $ e (x)= e ^{2 \pi i x}$. The Fourier transform on $ \mathbb Z ^{d}$ is given by \begin{equation} \label{e:FT} \widehat f (\xi ) = \sum_{x\in \mathbb Z ^{d}} e (-\xi \cdot x) f (x), \qquad \xi \in \mathbb T ^{d} \equiv [0,1] ^{d}. \end{equation} We will write $ \widecheck \phi $ for the inverse Fourier transform. The Fourier transform on $ \mathbb R ^{d}$ is \begin{equation} \label{e:FR} \widetilde \phi (\xi ) = \int _{\mathbb R ^{d}} e (- \xi \cdot x ) \phi (x) \; dx . \end{equation} We work exclusively with convolution operators $ K : f \mapsto \int _{\mathbb T ^{d}} k (\xi ) \widehat f (\xi ) e ( \xi \cdot x )\;d \xi $. In this notation, $ k $ is the multiplier, and the convolution is $ \widecheck k \ast f $.
Lower case letters are frequently, but not exclusively, used for the multipliers, and capital letters for the corresponding convolution operators. The following estimate for the number of lattice points on a sphere holds. \begin{equation}\label{e:simeq} \lvert \mathbb{S}_\lambda^d\rvert = \lvert \{ n \in \mathbb Z ^{d } \;:\; \lvert n\rvert = \lambda \}\rvert \simeq \lambda ^{d-2}, \qquad \lambda \in \Lambda_d. \end{equation} Redefine the discrete spherical averages $ A _{ \lambda } f $ to be \begin{align} \label{e:normalA} A _{\lambda } f (x) &= \lambda ^{-d+2 } \sum_{n \in \mathbb Z ^{d } \;:\; \lvert n\rvert = \lambda } f (x-n) \\ &= \int _{\mathbb T ^{d}} a _{\lambda } (\xi ) \widehat f (\xi ) e (\xi \cdot x )\; d \xi \\ \textup{where} \quad a _{\lambda } (\xi ) & = \lambda ^{-d+2} \sum_{n \in \mathbb Z ^{d } \;:\; \lvert n\rvert = \lambda } e (\xi \cdot n). \end{align} The decomposition of $ a _{ \lambda } $ into a `main' term $ c _{\lambda } $ and a `residual' term $ r _{\lambda } = a _{\lambda } - c _{\lambda }$ follows the development of Magyar, Stein and Wainger \cite{MR1888798}*{\S5}, Magyar \cite{MR2287111}*{\S4} and Hughes \cite{160904313}*{\S4}. We will be very brief. For integers $ q$, set $ \mathbb Z_q ^{d} = (\mathbb Z / q \mathbb Z )^{d}$. Set $ \mathbb Z _q ^{\times } = \{a \in \mathbb Z _q \;:\; (a,q)=1\}$ to be the multiplicative group. We have \begin{align}\label{e:a1} c _{\lambda } (\xi ) &= \sum_{1\leq q \leq \lambda } c _{\lambda ,q} (\xi ) , \\ \label{e:Cdef} c _{\lambda , q } (\xi )&= \sum_{\ell \in \mathbb Z_q^d} K (\lambda , q, \ell ) \Phi _{q} (\xi - \ell /q) \widetilde {d \sigma _{\lambda }} ( \xi - \ell /q), \\ \label{e:Kdef} K (\lambda , q, \ell ) & = q ^{-d} \sum_{ a \in \mathbb Z^\times_q } \sum_{n\in \mathbb Z_q^d} e_q \bigl(-a\lambda^2 + \lvert n\rvert ^2 a + n \cdot \ell \bigr) . \end{align} Above, $ \Phi $ is a smooth non-negative radial bump function, $ \mathbf 1_{[-1/8,1/8] ^{d}} \leq \Phi \leq \mathbf 1_{[-1/4,1/4] ^{d}}$.
Further, $ \Phi _{q} (\xi ) = \Phi (q \xi )$. Throughout we use $ e _{q} (x) = e (x/q) = e ^{2 \pi i x/q}$. The term in \eqref{e:Kdef} is a Kloosterman sum, a fact that is hidden in the expression above, but becomes clear after exact summation of the quadratic Gauss sums. In addition, $ d \sigma _{\lambda }$ is the continuous unit Haar measure on the sphere of radius $ \lambda $ in $ \mathbb R ^{d}$. Recall the stationary phase estimate \begin{equation}\label{e:stationary} \lvert \widetilde {d \sigma _{\lambda }} (\xi ) \rvert \lesssim \lvert \lambda \xi \rvert ^{- \frac{d-1}2}. \end{equation} Essential here is the \emph{Kloosterman refinement}. The estimate below goes back to the work of Kloosterman \cite{MR1555249} and Weil \cite{MR0027006}. Magyar \cite{MR2287111}*{\S4} used it in this kind of setting. (It is essential to the proof of Lemma~\ref{l:Akos}.) \begin{lemma}\label{l:K} \cite{MR2287111}*{Proposition 7} For all $ \eta >0$, and all $ 1 \leq q \leq \lambda $, $ \lambda \in \Lambda_d$, \begin{equation}\label{e:K<} \sup _{\ell } \lvert K (\lambda , q, \ell ) \rvert \lesssim _{\eta } q ^{- \frac{d-3}2 + \eta } \rho (q, \lambda ) , \end{equation} where we write $ q = q_1 2 ^{r}$, with $ q_1$ odd, so that $ \rho (q, \lambda ) = \sqrt { (q_1, \lambda ^2 ) 2 ^{r}}$, where $ (q_1, \lambda ^2 )$ is the greatest common divisor of $ q_1 $ and $ \lambda ^2 $. The implied constant only depends upon $ \eta >0$. \end{lemma} Concerning the terms $ \rho (q, \lambda ) $, we need this Proposition. \begin{proposition}\label{p:rho} We have for $ N< \lambda $ and $ a>1$, and all $ \eta >0$ \begin{align} \label{e:rho} \sum_{ q \;:\; N \leq q } q ^{-a} \rho (\lambda ,q) &\lesssim N ^{1-a} \sigma _{-1/2} (\lambda ^2 ) , \\ \label{e:rho2} \sum_{ 1\leq q \leq N} q ^{\eta } \rho (\lambda ,q) &\lesssim N ^{1+ \eta } \sigma _{ - 1/2 } (\lambda ^2 ). \end{align} Above $ \sigma _{b} (n ) = \sum_{d \;:\; d\,\vert\,n} d ^{b}$ is the generalized sum of divisors function. 
\end{proposition} \begin{proof} Write $ q = 2 ^{r}s t$, where $ s $ and $ t$ are odd, $ r \geq 0$ and $ ( s , \lambda ^2 )=1$. With this notation, $ \rho (\lambda ,q) = \sqrt{ t 2 ^{r}}$. For \eqref{e:rho}, the sum we need to estimate is \begin{align*} \sum_{ t\;:\; t\,|\, \lambda ^2 } \sum_{s=1} ^{\infty } \sum_{\substack{r=0\\ 2 ^{r} st\geq N }} ^{\infty } \frac {[ t 2 ^{r} ] ^{\frac{1 }2} } { [t s 2 ^{r}] ^{a}} . \end{align*} We will sum over $ r$ first. There are first the cases in which $ st \leq N$: \begin{align*} \sum_{t \;:\; t\,|\, \lambda ^2 } \sum_{s=1} ^{\infty } \sum_{\substack{r=0\\ 2 ^{r} st\geq N , \ st \leq N}} ^{\infty } \frac {[ t 2 ^{r} ] ^{\frac{1 }2} } { [ st 2 ^{r}] ^{a}} & \lesssim \sum_{ t\;:\; t\,|\, \lambda ^2 } \sum_{ \substack{s=1\\ st \leq N} } ^{\infty } \Bigl(\frac{st}N \Bigr) ^{a - 1/2} \frac{1} {s ^{a} t ^{a - 1/2}} \\ & \lesssim N ^{ 1/2 -a }\sum_{t \;:\; t\,|\, \lambda ^2 } \bigl(N/t \bigr) ^{1/2} = N ^{1-a} \sum_{t \;:\; t\,|\, \lambda ^2 } t ^{-1/2} \\ & \lesssim N ^{1-a} \sigma _{-1/2} (\lambda ^2 ). \end{align*} The second case of $ st > N$ imposes no restriction on $ r$. The sum over $ r \geq 0$ is just a geometric series, therefore we have to bound \begin{align} \sum_{t \;:\; t\,|\, \lambda ^2 } \sum_{ \substack{s=1\\ st > N} } ^{\infty } \frac{1} {s ^{a} t ^{a- 1/2}} & \lesssim \sum_{t \;:\; t\,|\, \lambda ^2 } \Bigl( \frac{t}N \Bigr) ^{a-1} \frac{1} { t ^{a - 1/2}} \\ \label{e:loglog} & \lesssim N ^{1-a} \sum_{t \;:\; t\,|\, \lambda ^2 } \frac1{\sqrt t } \lesssim N ^{1-a } \sigma _{-1/2} (\lambda ^2 ). \end{align} We turn to \eqref{e:rho2} using the notation above.
We estimate \begin{align*} \sum_{t \;:\; t\,|\, \lambda ^2 } \sum_{s=1} ^{\infty } \sum_{\substack{r=0\\ 2 ^{r} st\leq N }} ^ \infty [ 2 ^{r}st] ^{\eta } [ 2 ^{r}t] ^{ \frac{1}2} & \lesssim \sum_{t \;:\; t\,|\, \lambda ^2 } \sum_{ \substack{ s=1\\ st \leq N}} ^{\infty } [st] ^{\eta } t ^{\frac{1}2} (N/st) ^{ \frac{1}2+ \eta } \\ & \lesssim N ^{\frac{1}2 + \eta } \sum_{t \;:\; t\,|\, \lambda ^2 } \sum_{ \substack{ 1\leq s \leq N/t }} s ^{ - \frac{1}2} \\ & \lesssim N ^{1+ \eta } \sum_{t \;:\; t\,|\, \lambda ^2} t ^{ - \frac{1}2 } \lesssim N ^{1+ \eta } \sigma _{-1/2} (\lambda ^2 ). \end{align*} \end{proof} The `main' term is $ C _{\lambda } f$, and the `residual' term is $ R _{\lambda } = A _{\lambda } - C _{\lambda } $. This is a foundational estimate for us. (The reader should note that the normalizations here and in \cite{MR2287111} are different.) \begin{lemma}\label{l:Akos}\cite{MR2287111}*{ Lemma 1, page 71} We have, for all $ \epsilon >0$, uniformly in $ \lambda \in \Lambda_d $, $ \lVert R _{\lambda }\rVert_ {2\to 2} \lesssim _{\epsilon} \lambda ^{\frac{1-d}2+ \epsilon }$. \end{lemma} For a multiplier $ m$ on $ \mathbb T ^{d}$, define a family of related multipliers by \begin{equation}\label{e:mq} m _{\lambda , q } = \sum_{\ell \in \mathbb Z^d_q} K (\lambda , q, \ell ) m (\xi - \ell /q). \end{equation} We estimate the Fourier transform here. \begin{proposition}\label{p:mq} For a multiplier $ m_{\lambda , q}$ as in \eqref{e:mq}, we have \begin{equation} \label{e:Mq} \lvert \widecheck m _{\lambda ,q} (n) \rvert \leq q \lvert \widecheck m (n) \rvert . \end{equation} \end{proposition} We include a proof for convenience. \begin{proof} Our needs here are no different than those of \cites{MR1888798,I}. See for instance the argument after \cite{I}*{(2.9)}. 
Rewrite the Kloosterman sum in \eqref{e:Kdef} in terms of Gauss sums, namely \begin{align} \label{e:Gauss1} K (\lambda , q, \ell ) & = \sum_{ a \in \mathbb Z^\times_q } e_q (-a\lambda^2 )G (a/q, \ell ) , \\ \label{e:Gauss2} \textup{where} \quad G (a/q, \ell ) & := q ^{-d} \sum_{n\in \mathbb Z_q^d} e_q \bigl( \lvert n\rvert ^2 a + n \cdot \ell \bigr) . \end{align} Observe that $ G (a/, \cdot )$ is a Fourier transform on the group $ \mathbb Z^d_q$. Namely, if $ \phi ( \ell ) = e ( \lvert \ell \rvert ^2 a/q)$ is the function on $ \mathbb Z^d_q$, we have $ \widehat \phi (- \ell ) = \widehat \phi (\ell ) = G (a/q, \ell ) $. Using the version formula on that group we have \begin{equation} \label{e:G2} \sum_{\ell \in \mathbb Z^d_q } G (a/q, \ell ) e_q ( y \cdot \ell ) = e_q (\lvert y\rvert ^2 a), \qquad y \in \mathbb Z ^{d}_q . \end{equation} Define \begin{equation} \label{e:m} m ^{a/q} (\xi ) = e _q (-\lambda ^2 a) \sum_{ \ell \in \mathbb Z^d_q} G (a/q, \ell ) m (\xi - \ell /q), \qquad a \in \mathbb Z _q ^{\times }. \end{equation} By \eqref{e:G2}, we have \begin{align*} \widecheck {m ^{a/q}} (n) & = \int _{\mathbb T ^{d}} m ^{a/q} (\xi ) e (- \xi \cdot n) \; d \xi \\ & = e_q (-\lambda ^2 a) \int _{\mathbb T ^{d}} \sum_{ \ell \in \mathbb Z^d_q} G (a/q, \ell ) m (\xi - \ell /q) e (- \xi \cdot n) \; d \xi \\ &= e_q ( (\lvert n \rvert ^2 -\lambda ^2 ) a) \widecheck m (n) \end{align*} Take the absolute value, and sum over $ q\in \mathbb Z ^{\times }_q$ to conclude the Proposition. \end{proof} \section{Proof}\label{s:proof} It suffices to show this. 
For $ f = \mathbf 1_{F}$ with $ F \subset E = [0, \lambda ] ^{d}\cap \mathbb Z ^{d}$,
We have \begin{equation*} \sigma _{- 1/2 } (n) \leq \prod _{j=1} ^{\omega (n )} (1 - \tfrac1{ \sqrt{p_j} }) ^{-1} \lesssim e ^{c \frac{\sqrt {\omega (n)}} {\log \omega (n)}}, \end{equation*} where $ 2= p_1 < p_2 < \cdots $ is the increasing ordering of the primes. This is at most a constant depending upon $ \omega (n)$, the number of distinct prime factors of $ n$. We turn to the construction of $ M_1$ and $ M_2$. If $ \lambda \leq N$, we set $ M_1 = A _{\lambda } f$. Since we normalize the spherical averages by $ \lambda ^{d-2}$, \eqref{e:xM1} is immediate. Proceed under the assumption that $ N < \lambda $, and write $ A _{\lambda } = C _{\lambda } + R_{\lambda }$, with $ c _{\lambda }$ defined in \eqref{e:a1}. The first contribution to $ M_2$ is $ M _{2,1} = R _{\lambda } f$. By Lemma~\ref{l:Akos}, this satisfies \eqref{e:xM2}. (We do not need the arithmetic function $ \sigma _ {-1/2} (\lambda ^2 )$ in this case.) Turn to $ C _{\lambda }$. The second contribution to $ M_2$ is the `large $ q$' term \begin{equation} \label{e:M22} M _{2,2} = \sum_{N\leq q \leq \lambda } C _{\lambda ,q} f . \end{equation} By the Weil estimates for Kloosterman sums \eqref{e:K<}, and Plancherel, we have \begin{align*} \langle M _{2,2} \rangle _{E,2}& \lesssim _{\epsilon } \langle f \rangle _{E} ^{1/2} \sum_{N\leq q \leq \lambda } q ^{ \frac{1-d}2 + \epsilon } \rho (q, \lambda ) \lesssim _{\epsilon } \langle f \rangle _E ^{1/2} N ^{\epsilon + \frac{3-d}2} \sigma _{-1/2} (\lambda ^2 ) . \end{align*} The last estimate uses \eqref{e:rho}. Turn to the `small $ q$' term. This requires additional contributions to the $ M_1$ and $ M_2$ terms. Write $ c _{\lambda , q} = c _{\lambda , q} ^{1} + c _{\lambda , q} ^2 $, where \begin{equation}\label{e:xcq1} c _{\lambda , q} ^1 (\xi ) = \sum_{\ell \in \mathbb Z_q^d} K (\lambda , q, \ell ) \Phi _{ \lambda q/N} (\xi - \ell /q) \widetilde {d \sigma _{\lambda }} ( \xi - \ell /q). 
\end{equation} We have inserted an additional cutoff term $\Phi _{ \lambda q/N} $ above. Then, our third contribution to $ M _{2}$ is the high frequency term $ M _{2,3} = \sum_{q \leq N } C _{\lambda ,q} ^{2}f$. Using the stationary decay estimate \eqref{e:stationary} and the Kloosterman refinement \eqref{e:K<} to see that \begin{align*} \langle M _{2,3} \rangle _{E,2} & \lesssim _{\epsilon } \langle f \rangle_E ^{1/2} \sum_{q\leq N} (q/N) ^{ \frac{d-1}2} q ^{\epsilon +\frac{1-d}2} \rho (\lambda ^2 , q) \\ & \lesssim _{\epsilon } \langle f \rangle_E ^{1/2} N ^{\frac{1-d}2} \sum_{q\leq N} q ^{\epsilon } \rho (\lambda ^2 ,q) \lesssim _{\epsilon }\langle f \rangle_E ^{1/2} N ^{\epsilon + \frac{3-d}2} \sigma _{- 1/2 } (\lambda ^2 ) . \end{align*} The last estimate follows from \eqref{e:rho2}. Then the main point is the last contributions to $ M _{1}$ below. The definition of $ M _{1,2}$ is of the form to which \eqref{p:mq} applies. \begin{align} M _{1,2}(n) & \leq \sum_{q \leq N} q \cdot \widecheck \Phi _{ \lambda q/N} \ast d \sigma _{\lambda } \ast f (n) \\ &\lesssim N \langle f \rangle _{E} \sum_{q\leq N} 1 \lesssim N ^2 \langle f \rangle_E. \end{align} Observe that $\Phi _{ \lambda q/N} \ast d \sigma _{\lambda } \ast f $ is an average of $ f$ over an annulus of radius $ \lambda $, and width $ \lambda q/N$. This is compared to $ \langle f \rangle_E$, with loss of $ N/q$. Our proof of \eqref{e:xM1} and \eqref{e:xM2} is complete. \section{Complements to the Main Theorems} \label{s:complements} Concerning sharpness of the $ \ell ^{p}$ improving estimates in Theorem~\ref{t:improve}, the best counterexample we have been able to find shows that if one has the inequality below, \begin{equation} \label{e:assume} \lVert A _{\lambda } f\rVert _{p'} \lesssim \lambda ^{ d (1- \frac{2}p)} \lVert f\rVert _{p}, \end{equation} valid for all $ \lambda $, then necessarily $ p \geq \frac{d+2}{d}$. provided $ d \geq 5$. 
Indeed, take $ \lambda ^2 $ to be odd, and let $ f $ be the indicator of the sphere of radius $ \lambda $. Use the fact that $ A _{\lambda } f (0) \simeq 1$. But, in the case of $ d\geq 5$, also take $ g$ to be the indicator of the set $ G_ \lambda = \{ A _{\lambda } f > c/ \lambda \}$, for appropriate choice of constant $ c$. That is, $ G_ \lambda $ is the set of $ x$'s for which $ \mathbb S _{\lambda } \cap x+ \mathbb S _{\lambda }$ has about the expected cardinality of $ \lambda ^{d-3}$. We claim that $ \lvert G_ \lambda \rvert \gtrsim \lambda $. For an choice of $ 0< x_1 < \lambda /2$ divisible by $ 4$, note that there are about $ \lambda ^{d-3}$ points $ (x_2 ,\dotsc, x _{d}) \in \mathbb Z ^{d-1}$ of magnitude $ \sqrt {\lambda ^2 - (x_1/2) ^2 }$. From this, we see that \begin{equation*} \lVert (x_1 , 0,\dotsc, 0) - (x_1/2, y_2 ,\dotsc, y_d)\rVert = \lambda . \end{equation*} That is, $ (x_1, 0 ,\dotsc, 0) \in G_ \lambda $. We also have an upper bound for $ G$. Apply the $ \ell ^{p}$ improving inequality \eqref{e:improve} to $ f = \mathbf 1_{S _{\lambda }}$ to see that for $ 0< \epsilon < 1$, \begin{equation} \label{e:generic} \lvert G \rvert = \lvert \{ A _{\lambda } \mathbf 1_{\mathbb S _{\lambda }} > c / \lambda \}\rvert \lesssim \lambda ^{ \frac{d+3} {2} + \epsilon }, \qquad \lambda^2 \in \mathbb{N}. \end{equation} Is this estimate sharp? Notice that this estimate concerns the set of solutions $ n$ to a \emph{pair of} quadratic equations below in which $ x = (x_1 ,\dotsc, x_d)$ is fixed. \begin{align*} n_1 ^2 + \cdots + n _d ^2 &= \lambda ^2 , \\ (n_1 -x_1) ^2 + \cdots + (n _d -x_d) ^2 &= \lambda ^2 , \end{align*} Moreover, we require of $ x$ that the set of possible solutions $ n$ should be of about the expected cardinality. We could not find this estimate in the literature. 
\begin{bibdiv} \begin{biblist} \bib{MR3819049}{article}{ author={Anderson, Theresa}, author={Cook, Brian}, author={Hughes, Kevin}, author={Kumchev, Angel}, title={Improved $\ell^p$-boundedness for integral $k$-spherical maximal functions}, journal={Discrete Anal.}, date={2018}, pages={Paper No. 10, 18}, issn={2397-3129}, review={\MR{3819049}}, } \bib{MR812567}{article}{ author={Bourgain, Jean}, title={Estimations de certaines fonctions maximales}, date={1985}, ISSN={0249-6291}, journal={C. R. Acad. Sci. Paris S\'er. I Math.}, volume={301}, number={10}, pages={499\ndash 502}, review={\MR{812567}}, } \bib{MR1654767}{article}{ author={Christ, Michael}, title={Convolution, curvature, and combinatorics: a case study}, date={1998}, ISSN={1073-7928}, journal={Internat. Math. Res. Notices}, number={19}, pages={1033\ndash 1048}, url={http://dx.doi.org.prx.library.gatech.edu/10.1155/S1073792898000610}, review={\MR{1654767}}, } \bib{MR3960006}{article}{ author={Cook, Brian}, title={Maximal function inequalities and a theorem of Birch}, journal={Israel J. Math.}, volume={231}, date={2019}, number={1}, pages={211--241}, issn={0021-2172}, review={\MR{3960006}}, doi={10.1007/s11856-019-1853-y}, } \bib{MR3892403}{article}{ author={Culiuc, Amalia}, author={Kesler, Robert}, author={Lacey, Michael T.}, title={Sparse bounds for the discrete cubic Hilbert transform}, journal={Anal. PDE}, volume={12}, date={2019}, number={5}, pages={1259--1272}, issn={2157-5045}, review={\MR{3892403}}, doi={10.2140/apde.2019.12.1259}, } \bib{MR3671577}{article}{ author={Hughes, Kevin}, title={Restricted weak-type endpoint estimates for k-spherical maximal functions}, journal={Math. 
Z.}, volume={286}, date={2017}, number={3-4}, pages={1303--1321}, issn={0025-5874}, review={\MR{3671577}}, doi={10.1007/s00209-016-1802-y}, } \bib{160904313}{article}{ author={{Hughes}, K.}, title={{The discrete spherical averages over a family of sparse sequences}}, date={2016-09}, journal={ArXiv e-prints}, eprint={1609.04313}, } \bib{180409260H}{article}{ author={{Hughes}, K.}, title={{$\ell^p$-improving for discrete spherical averages}}, date={2018-04}, journal={ArXiv e-prints}, eprint={1804.09260}, } \bib{MR2053347}{article}{ author={Ionescu, Alexandru~D.}, title={An endpoint estimate for the discrete spherical maximal function}, date={2004}, ISSN={0002-9939}, journal={Proc. Amer. Math. Soc.}, volume={132}, number={5}, pages={1411\ndash 1417}, url={https://doi-org.prx.library.gatech.edu/10.1090/S0002-9939-03-07207-1}, review={\MR{2053347}}, } \bib{I}{article}{ author={Ionescu, Alexandru~D.}, title={An endpoint estimate for the discrete spherical maximal function}, date={2004}, ISSN={0002-9939}, journal={Proc. Amer. Math. Soc.}, volume={132}, number={5}, pages={1411\ndash 1417}, url={https://doi-org.prx.library.gatech.edu/10.1090/S0002-9939-03-07207-1}, review={\MR{2053347}}, } \bib{181002240}{article}{ author={{Kesler}, R.}, author={{Lacey}, M.~T.}, author={{Mena Arias}, D.}, title={{Sparse Bound for the Discrete Spherical Maximal Functions}}, date={2018-10}, journal={Pure Appl. Analy., to appear}, eprint={1810.02240}, } \bib{MR3933540}{article}{ author={Kesler, Robert}, author={Arias, Dar\'{\i}o Mena}, title={Uniform sparse bounds for discrete quadratic phase Hilbert transforms}, journal={Anal. Math. 
Phys.}, volume={9}, date={2019}, number={1}, pages={263--274}, issn={1664-2368}, review={\MR{3933540}}, doi={10.1007/s13324-017-0195-3}, } \bib{MR1555249}{article}{ author={Kloosterman, H.~D.}, title={On the representation of numbers in the form {$ax^2+by^2+cz^2+dt^2$}}, date={1927}, ISSN={0001-5962}, journal={Acta Math.}, volume={49}, number={3-4}, pages={407\ndash 464}, url={https://doi-org.prx.library.gatech.edu/10.1007/BF02564120}, review={\MR{1555249}}, } \bib{MR0358443}{article}{ author={Littman, Walter}, title={{$L\sp{p}-L\sp{q}$}-estimates for singular integral operators arising from hyperbolic equations}, date={1973}, pages={479\ndash 481}, review={\MR{0358443}}, } \bib{MR1888798}{article}{ author={Magyar, A.}, author={Stein, E.~M.}, author={Wainger, S.}, title={Discrete analogues in harmonic analysis: spherical averages}, date={2002}, ISSN={0003-486X}, journal={Ann. of Math. (2)}, volume={155}, number={1}, pages={189\ndash 208}, url={https://doi-org.prx.library.gatech.edu/10.2307/3062154}, review={\MR{1888798}}, } \bib{MR1617657}{article}{ author={Magyar, Akos}, title={{$L^p$}-bounds for spherical maximal operators on {$\bold Z^n$}}, date={1997}, ISSN={0213-2230}, journal={Rev. Mat. Iberoamericana}, volume={13}, number={2}, pages={307\ndash 317}, url={https://doi-org.prx.library.gatech.edu/10.4171/RMI/222}, review={\MR{1617657}}, } \bib{MR1925339}{article}{ author={Magyar, Akos}, title={Diophantine equations and ergodic theorems}, date={2002}, ISSN={0002-9327}, journal={Amer. J. Math.}, volume={124}, number={5}, pages={921\ndash 953}, url={http://muse.jhu.edu.prx.library.gatech.edu/journals/american_journal_of_mathematics/v124/124.5magyar.pdf}, review={\MR{1925339}}, } \bib{MR2287111}{article}{ author={Magyar, Akos}, title={On the distribution of lattice points on spheres and level surfaces of polynomials}, date={2007}, ISSN={0022-314X}, journal={J. 
Number Theory}, volume={122}, number={1}, pages={69\ndash 83}, url={https://doi-org.prx.library.gatech.edu/10.1016/j.jnt.2006.03.006}, review={\MR{2287111}}, } \bib{MR2346547}{article}{ author={Magyar, Akos}, author={Stein, Elias~M.}, author={Wainger, Stephen}, title={Maximal operators associated to discrete subgroups of nilpotent {L}ie groups}, date={2007}, ISSN={0021-7670}, journal={J. Anal. Math.}, volume={101}, pages={257\ndash 312}, url={https://doi-org.prx.library.gatech.edu/10.1007/s11854-007-0010-4}, review={\MR{2346547}}, } \bib{MR1825254}{article}{ author={Oberlin, Daniel~M.}, title={Two discrete fractional integrals}, date={2001}, ISSN={1073-2780}, journal={Math. Res. Lett.}, volume={8}, number={1-2}, pages={1\ndash 6}, url={https://doi-org.prx.library.gatech.edu/10.4310/MRL.2001.v8.n1.a1}, review={\MR{1825254}}, } \bib{MR2872554}{article}{ author={Pierce, Lillian~B.}, title={Discrete fractional {R}adon transforms and quadratic forms}, date={2012}, ISSN={0012-7094}, journal={Duke Math. J.}, volume={161}, number={1}, pages={69\ndash 106}, url={https://doi-org.prx.library.gatech.edu/10.1215/00127094-1507288}, review={\MR{2872554}}, } \bib{MR1771530}{article}{ author={Stein, E.~M.}, author={Wainger, S.}, title={Discrete analogues in harmonic analysis. {II}. {F}ractional integration}, date={2000}, ISSN={0021-7670}, journal={J. Anal. Math.}, volume={80}, pages={335\ndash 355}, url={https://doi-org.prx.library.gatech.edu/10.1007/BF02791541}, review={\MR{1771530}}, } \bib{MR0420116}{article}{ author={Stein, Elias~M.}, title={Maximal functions. {I}. {S}pherical means}, date={1976}, ISSN={0027-8424}, journal={Proc. Nat. Acad. Sci. U.S.A.}, volume={73}, number={7}, pages={2174\ndash 2175}, review={\MR{0420116}}, } \bib{MR1945293}{article}{ author={Stein, Elias~M.}, author={Wainger, Stephen}, title={Two discrete fractional integral operators revisited}, date={2002}, ISSN={0021-7670}, journal={J. Anal. 
Math.}, volume={87}, pages={451\ndash 479}, url={https://doi-org.prx.library.gatech.edu/10.1007/BF02868485}, note={Dedicated to the memory of Thomas H. Wolff}, review={\MR{1945293}}, } \bib{MR0256219}{article}{ author={Strichartz, Robert~S.}, title={Convolutions with kernels having singularities on a sphere}, date={1970}, ISSN={0002-9947}, journal={Trans. Amer. Math. Soc.}, volume={148}, pages={461\ndash 471}, url={http://dx.doi.org.prx.library.gatech.edu/10.2307/1995383}, review={\MR{0256219}}, } \bib{MR1969206}{article}{ author={Tao, Terence}, author={Wright, James}, title={{$L\sp p$} improving bounds for averages along curves}, date={2003}, ISSN={0894-0347}, journal={J. Amer. Math. Soc.}, volume={16}, number={3}, pages={605\ndash 638}, url={http://dx.doi.org.prx.library.gatech.edu/10.1090/S0894-0347-03-00420-X}, review={\MR{1969206}}, } \bib{MR0027006}{article}{ author={Weil, Andr\'e}, title={On some exponential sums}, date={1948}, ISSN={0027-8424}, journal={Proc. Nat. Acad. Sci. U. S. A.}, volume={34}, pages={204\ndash 207}, review={\MR{0027006}}, } \end{biblist} \end{bibdiv} \end{document}
\begin{document} \title{Rado Numbers of Regular Nonhomogeneous Equations} \author{Thotsaporn ``Aek'' Thanatipanonda\\ Mahidol University International College\\ Nakhon Pathom, Thailand} \date{August 28, 2019} \maketitle \thispagestyle{empty} \begin{abstract} We consider Rado numbers of the regular equations $\mathcal{E}(b)$ of the form \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k + b, \] where $b \in \mathbb{Z}$ and $c_i \in \mathbb{Z}^{+}$ for all $i$. We give the upper bounds and the sufficient condition for the lower bounds for $t$-color Rado numbers $r(\mathcal{E}(b);t)$ in terms of $r(\mathcal{E}(0);t)$ for both $b>0$ and $b<0$. We also give examples where the exact values of Rado numbers are obtained from these results. \end{abstract} \section{Introduction} In 1916 Issai Schur \cite{Schur} showed that for any $t$ colors, $t \geq 1$, there is a least positive integer $s(t)$ such that for any $t$-coloring on the interval $[1,s(t)]$, there must be a monochromatic solution to $x+y=z$ where $x,y$ and $z$ are positions on the interval. This result is part of Ramsey Theory. The numbers $s(t)$ are called \textit{Schur numbers}. For example $s(2) =5$ and the longest possible interval that avoids the mono solution to $x+y=z$ is $[1,2,2,1]$ (1 represents red color and 2 represents blue color, for example). For 3 colors, $s(3)=14$ and one of the longest interval that avoids the mono solution to $x+y=z$ is $[1, 2, 2, 1, 3, 3, 3, 3, 3, 1, 2, 2, 1]$. It is also known that $s(4)=45$ and $s(5)=161.$ We call the equation \textit{$t$-regular} if $s(t)$ exists for a given $t$ and \textit{regular} if $s(t)$ exists for all $t, \;\ t \geq 1$. Later on, Richard Rado, a Ph.D. student of Schur, generalized Schur's work to a linear homogeneous equation $\sum_{i=1}^k c_ix_i =0$ and found the condition for regularity of these equations, \cite{Rado1,Rado2}. \begin{thm}[Rado's Single Equation Theorem] Let $k \geq 2.$ Let $c_i \in \mathbb{Z}-\{0\}, 1 \leq i \leq k,$ be constants. 
Then \[ \sum_{i=1}^k c_ix_i =0\] is regular if and only if there exists a nonempty set $D \subseteq \{c_i, \;\ 1 \leq i \leq k \}$ such that $\sum_{d \in D} d =0$. \end{thm} As with Schur numbers, for a linear equation $\mathcal{E}$, we denote by $r(\mathcal{E};t)$ the minimal integers, if it exists, such that any $t$-coloring of $[1,r(\mathcal{E};t)]$ must admit a monochromatic solution to $\mathcal{E}.$ The numbers $r(\mathcal{E};t)$ are called \textit{$t$-color Rado numbers for equation $\mathcal{E}$.} An analog to Rado's Theorem which gives the regularity condition for a linear non-homogeneous equation is given below. \begin{thm} \label{9.10} Let $k \geq 2$ and let $b, c_1, c_2, \dots, c_k$ be nonzero integers. Let $\mathcal{E}(b)$ be the equation \[ \sum_{i=1}^k c_ix_i = b,\] and let $s = \sum_{i=1}^k c_i.$ Then $\mathcal{E}(b)$ is regular if and only if one of the following conditions holds: \begin{enumerate}[(i)] \item $\dfrac{b}{s} \in \mathbb{Z^{+}};$ \item $\dfrac{b}{s}$ is a negative integer and $\mathcal{E}(0)$ is regular. \end{enumerate} \end{thm} We note that it is possible that an equation does not have a mono solution for a coloring on $\mathbb{Z^+}.$ For example, the coloring $[1,2,1,2,1,2,\dots]$ avoids the mono solution to the equation $x+y=2b+1$ for any $b \geq 0.$ Also some equations are $t$-regular but not regular. For example, $3x+y-z=2$ is 2-regular with $r(\mathcal{E}; 2)= 8$ but not regular according to Theorem \ref{9.10}. In this paper, we partially quantify Theorem \ref{9.10} by giving Rado numbers to equations $\mathcal{E}(\tilde{b})$ of the form \begin{equation} \label{main} c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k +\tilde{b}, \end{equation} where $c_i \in \mathbb{Z}^{+}$ for all $i$ and $\tilde{b}$ satisfies the condition $(i)$ or $(ii)$ of Theorem \ref{9.10}. 
The Rado numbers of \eqref{main} will be written in terms of the Rado numbers
\end{proof} Next we define a sufficient condition for the lower bounds. \begin{defi}[excellence condition] The coloring on an interval $[1,n]$ satisfies an excellence condition if it does not contain any mono solution to \[c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1}+j = x_k,\] for each $j, \;\ 0 \leq j \leq s= \sum_{i=1}^{k-1} c_i-1$. \end{defi} \begin{thm} \label{low1} Consider the equation $\mathcal{E}(\tilde{b})=\mathcal{E}(-b)$ of the form \begin{equation} \label{negb} c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k - b, \;\ \;\ \text{ where } c_i > 0,\, b > 0. \end{equation} Let $s = \sum_{i=1}^{k-1} c_i-1$. If $s|b$ and there is a coloring on the interval $[1,n]$ which satisfies an excellence condition then \[ r(\mathcal{E}(-b); t) \geq \left(\dfrac{b}{s} +1\right)\cdot n +1.\] \end{thm} \begin{proof} Assume $s|b$ and let $\chi$ be the coloring on $[1, n]$ that satisfies an excellence condition to the equation \[c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} + j = x_k, \;\ \;\ 0 \leq j \leq s.\] Let $r= (\frac{b}{s}+1)\cdot n +1.$ We show that there is a ``good coloring'' to $\mathcal{E}(-b)$ on the interval $[1, r-1] = [ 1, \left(\frac{b}{s}+1\right)\cdot n ].$ We define the coloring $\alpha$ on $[1, \left(\frac{b}{s}+1\right)\cdot n ]$ by \[ \alpha(i) = \chi \left( \left\lceil \frac{i}{\frac{b}{s}+1} \right\rceil \right) . \] Basically, we create the coloring by repeating each point of the original coloring on the $[1,n]$ interval $\frac{b}{s}+1$ times. 
We now prove the statement by contradiction: Assume there is a mono $k$-tuple on $[1, \left(\frac{b}{s}+1\right)\cdot n ]$ to equation \eqref{negb} written in the form \[ \left(d_1 \left(\frac{b}{s}+1\right)-e_1, d_2 \left(\frac{b}{s}+1\right)-e_2, \dots, d_{k-1} \left(\frac{b}{s}+1\right)-e_{k-1}, \left(\frac{b}{s}+1\right)\cdot \sum_{i=1}^{k-1}c_id_i -\sum_{i=1}^{k-1} c_ie_i +b \right) , \] where $ 1 \leq d_i \leq n $ for all $i$ and $0 \leq e_i \leq b/s.$ Notice that $\alpha( d_i(\frac{b}{s}+1)-e_i) = \chi(d_i).$ However, by this mapping, we have the mono $k$-tuple in $\chi$ as \[ \left( d_1, d_2, \dots, d_{k-1}, \sum_{i=1}^{k-1} c_id_i + \left\lceil \frac{b-\sum_{i=1}^{k-1} c_ie_i}{\dfrac{b}{s}+1} \right\rceil \right) \] But this is a mono solution to \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} + j= x_k, \] for some $j, \;\ 0 \leq j \leq \left\lceil \frac{sb}{b+s}\right\rceil$ which contradicts the excellence condition of $\chi$ we assumed it to have. \end{proof} We note that the upper bounds and lower bounds meet if there is a good coloring of length $n=R_C(t)-1$ that satisfies the excellence condition. \begin{cor} \label{cor1} Consider the equation $\mathcal{E}(-b),$ \[ x_1+x_2+\dots+ x_{k-1} = x_k- b , \;\ \;\ \text{with } k \geq 2, \;\ b >0 \text{ and } (k-2)|b. \] We let $m= b/(k-2).$ Then \[ r(\mathcal{E}(-b);2) = (m+1)(k^2-k-2)+1. \] \end{cor} \begin{proof} It is known (i.e. Theorem 8.23 of \cite{LR}) that \[ r(x_1+x_2+\dots+ x_{k-1} = x_k ;2) = k^2-k-1, \;\ \;\ \text{ for } k\geq 2. \] The coloring $\chi = [1^{k-2},2^{(k-1)(k-2)},1^{k-2}]$ satisfies the excellence condition for each $k$. The result follows from Theorems \ref{up1} and \ref{low1}. \end{proof} This result agrees with Theorems 9.14 and 9.26 of \cite{LR} which applies to any 2-coloring but for a more general $b$ (not only $(k-2)|b$). However, our result applies to any $t$-coloring. 
\begin{cor} For $m > 0,$ \begin{align*} r(x+y-z = -m ;3) &= 13m+14, \\ r(x+y+z-w = -2m ;3) &= 42m+43, \\ r(x_1+x_2+x_3+x_4-x_5 = -3m ;3) &= 93m+94, \\ r(x_1+x_2+x_3+x_4+x_5-x_6 = -4m ;3) &= 172m+173. \end{align*} \end{cor} The first result was also mentioned in \cite{RS} and \cite{Schaal2}. The good colorings (that also satisfy the excellence condition) of the first two equations can be found by the accompanying program \texttt{Schaal}. The good colorings (that also satisfy the excellence condition) of the equations $x_1+x_2+x_3+x_4=x_5$ and $x_1+x_2+x_3+x_4+x_5=x_6$ were given in \cite{Schaal}. \section{Main Results; case $\tilde{b} > 0$} We consider the Rado numbers of \eqref{main} where the constant $\tilde{b}$ is positive. Theorem \ref{up2} gives the upper bounds and Theorem \ref{low2} gives a sufficient condition for the lower bounds. \begin{thm} \label{up2} Consider the equation $\mathcal{E}(\tilde{b})=\mathcal{E}(b)$ of the form \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k + b, \;\ \;\ \text{ where } c_i > 0, \;\ b > 0. \] Let $s = \sum_{i=1}^{k-1} c_i-1$. If $s|b$ and $\mathcal{E}(0)$ is $t$-regular then \[ r(\mathcal{E}(b); t) \leq \dfrac{b}{s}-\left\lceil\dfrac{b}{s \cdot R_C(t)}\right\rceil+1.\] \end{thm} \begin{proof} Assume $s|b$ and $\mathcal{E}(0)$ is $t$-regular. Let $r = \dfrac{b}{s}-\left\lceil\dfrac{b}{s \cdot R_C(t)}\right\rceil+1.$ We write $b$ as $b = s\left( R_C(t) \cdot m - q \right)$ where $ m = \left\lceil \dfrac{b}{s\cdot R_C(t)} \right\rceil$ and $0 \leq q \leq R_C(t)-1$. Then $r = (R_C(t)-1)\cdot m -q +1$. We show that there is no good coloring on the interval $[1,r].$ \textbf{Case 1:} $m=1$.\\ Then $r=b/s.$ We have a trivial mono solution to $\mathcal{E}(b)$ via $x_1=x_2=x_3=\dots=x_k=r.$ \textbf{Case 2:} $m>1$.\\ Define an injective map $f$ from $[1,R_C(t)]$ to $[1,r]$ by \[ f(w) = (R_C(t)-w)\cdot m -q +w. 
\] Notice that a tuple $(w_1, w_2, \dots, w_{k-1}, \sum_{i=1}^{k-1}c_iw_i)$ of the equation \[c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k\] is made to correspond to the tuple $ (f(w_1), f(w_2), \dots, f(w_{k-1}), f(\sum_{i=1}^{k-1}c_iw_i))$ in \[c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k + b.\] Please check it yourself!!! Now, given any coloring of $\alpha$ on $[1,r]$, we define the coloring $\chi$ on the interval $[1,R_C(t)]$ by \[ \chi(w) = \alpha( f(w) ) , \;\ \;\ w = 1,2,\dots, R_C(t). \] From the definition of the Rado number, any coloring on $[1,R_C(t)]$ must contain a mono tuple to $\mathcal{E}(0)$. Hence there is also a mono tuple on $[1, r]$ to $\mathcal{E}(b)$. In both cases, there is no good coloring on $[1,r].$ \end{proof} The lower bounds can be stated in similar way to Theorem \ref{low1}. \begin{thm} \label{low2} Consider the equation $\mathcal{E}(\tilde{b})=\mathcal{E}(b)$ of the form \begin{equation} \label{posb} c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k + b, \;\ \;\ \text{ where } c_i > 0, \;\ b > 0. \end{equation} Let $s = \sum_{i=1}^{k-1} c_i-1$. If $s|b$ and there is a coloring on the interval $[1,n]$ which satisfies the excellence condition then \[ r(\mathcal{E}(b); k) \geq \dfrac{b}{s}-\left\lceil\dfrac{b}{s \cdot (n+1)}\right\rceil+1.\] \end{thm} \begin{proof} We invoke the result of Theorem \ref{low1} by rewriting \eqref{posb} in the form of \eqref{negb}. Since $s|b,$ we can write $b$ in the form $b =s\left[ (n+1)m-q \right]$ where $m=\left\lceil \dfrac{b}{s \cdot (n+1)} \right\rceil$ and $0 \leq q \leq n.$ Then $r= \dfrac{b}{s}-\left\lceil\dfrac{b}{s \cdot (n+1)}\right\rceil+1 = (n+1)m-q-m+1 = nm-q+1.$ We show that there is a ``good coloring'' on the interval $[1, r-1] = [ 1, nm-q ]$ to \eqref{posb}. First we rewrite \eqref{posb} as \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k + s\left[ (n+1)m-q \right]. 
\] We then rewrite this equation again as \begin{align*} & c_1\left[(n+1)m-q-x_1\right]+c_2\left[(n+1)m-q-x_2\right]+\dots + c_{k-1}\left[(n+1)m-q-x_{k-1}\right] \\ &= (n+1)m-q-x_k. \end{align*} Next we add $-s(m-1)$ to both sides of the equation, \begin{align*} & c_1\left[nm-q+1-x_1\right]+c_2\left[nm-q+1-x_2\right]+\dots + c_{k-1}\left[nm-q+1-x_{k-1}\right] \\ &= [nm-q+1-x_k ]-s(m-1). \end{align*} We let $x'_i = nm-q+1-x_i$ for each $i.$ Observe that $x'_i$ is $x_i$ after reversing the interval $[1, nm-q].$ The equation after substitution is \begin{equation} \label{last} c_1x'_1+c_2x'_2+\dots+ c_{k-1}x'_{k-1} = x'_k -s(m-1). \end{equation} We now invoke the result from Theorem \ref{low1} that there is a good coloring $\alpha$ on the interval $[1,mn]$ to \eqref{last}. We can then make a good coloring to \eqref{posb} from this interval by taking the elements $1$ to $mn-q$ of $\alpha$ and reversing the interval. \end{proof} Below are some applications of Theorems \ref{up2} and \ref{low2}. \begin{cor} Consider the equation $\mathcal{E}(b)$ of the form \[ x_1+x_2+\dots+ x_{k-1} = x_k + b , \;\ \;\ \text{with } k \geq 2, \;\ b \geq 1 \text{ and } (k-2)|b. \] We let $m= b/(k-2).$ Then \[ r(\mathcal{E}(b);2) = m- \left\lceil \dfrac{m}{k^2-k-1} \right\rceil+1. \] \end{cor} \begin{proof} The proof is the same as for Corollary \ref{cor1} except that this time we apply Theorems \ref{up2} and \ref{low2}. \end{proof} Note that the above result when $k=3$ was mentioned in \cite{BL}. \begin{cor} For $b \geq 1$, \[ r(x+y-z=b; 3) = b-\left\lceil\frac{b}{14} \right\rceil +1. \] \end{cor} \begin{proof} The proof is straightforward. It can be checked that the original coloring $[1, 2, 2, 1, 3, 3, 3, 3, 3, 1, 2, 2, 1]$ satisfies the excellence condition. Then we apply Theorems \ref{up2} and \ref{low2}. \end{proof} This result was a part of Theorem 9.15 in \cite{LR}. 
However, it was wrongly claimed there that $ r(x+y-z=b; 3) = b-\left\lceil\frac{b-1}{14} \right\rceil. $ For the situation when the equation $\mathcal{E}(0)$ given by \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k, \;\ \;\ c_i > 0,\] is not $t$-regular, the trivial bounds of the Rado numbers to \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k+b, \;\ \;\ b > 0, \, s|b,\] are \[ \left\lceil \dfrac{b+1}{s+1} \right\rceil \leq r(\mathcal{E}(b); t) \leq \dfrac{b}{s}, \;\ \text{ for any } t \geq 1.\] The mono solution for the upper bound arises from the tuple $(\frac{b}{s},\frac{b}{s},\dots,\frac{b}{s})$. \section{Final Remarks} So far, our results were obtained by checking the excellence condition of each good coloring. For the 2-coloring and 3-coloring, it seems that there are always colorings of length $n = R_C(t)-1$ to the equations \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k, \;\ \;\ \text{ where } c_i > 0 \] that satisfy the excellence condition. Thus it makes sense to make the following conjecture. \begin{conj} For $t =2$ or $3$, fix constants $c_1, c_2, \dots, c_{k-1}.$ Consider the equation $\mathcal{E}(\tilde{b})$ of the form \[ c_1x_1+c_2x_2+\dots+ c_{k-1}x_{k-1} = x_k +\tilde{b}, \;\ \;\ \text{ where } c_i > 0. \] Let $s = \sum_{i=1}^{k-1} c_i-1$. If $s|\tilde{b}$ and $\mathcal{E}(0)$ is $t$-regular then \[ r(\mathcal{E}(\tilde{b}); t) = \begin{cases} \dfrac{\tilde{b}}{s}-\left\lceil\dfrac{\tilde{b}}{s \cdot R_C(t)}\right\rceil+1, & \text{ for } \tilde{b} >0, \\ -\dfrac{\tilde{b}}{s}\cdot (R_C(t)-1) +R_C(t) , & \text{for } \tilde{b}< 0. \end{cases}\] \end{conj} For $t$-colorings where $t \geq 4$, our Maple program is too slow to give any tangible observations. A faster program could be used to verify whether this conjecture still holds. Lastly, the reader might wonder about the other type of equations that we did not consider, i.e. \[ \sum_{i=1}^{k-1}c_ix_i= c_kx_k+b, \;\ \;\ \text{where } c_i \geq 1 , \;\ \text{ for } 1 \leq i \leq k-1 \text{ and } c_k \geq 2. 
\] It turns out that the Rado numbers of these equations exhibit more complicated patterns than those discovered in this paper. \end{document}
\begin{document} \begin{abstract}Suppose $Y$ is a continuum, $x\in Y$, and $X$ is the union of all nowhere dense subcontinua of $Y$ containing $x$. Suppose further that there exists $y\in Y$ such that every connected subset of $X$ limiting to $y$ is dense in $X$. And, suppose $X$ is dense in $Y$. We prove $X$ is homeomorphic to a composant of an indecomposable continuum, even though $Y$ may be decomposable. An example establishing the latter was given by Christopher Mouron and Norberto Ordoñez in 2016. If $Y$ is chainable or, more generally, an inverse limit of identical topological graphs, then we show $Y$ is indecomposable and $X$ is a composant of $Y$. For homogeneous continua we explore similar problems which are related to a 2007 question of Janusz Prajs and Keith Whittington. \end{abstract} \begin{frontmatter} \title{Singularities of meager composants and filament composants} \author{David Sumner Lipham} \ead{[email protected]} \address{Department of Mathematics, Auburn University, Auburn, AL 36849} \begin{keyword}continuum; meager composant; filament composant; singular; strongly indecomposable; homogeneous \MSC[2010] 54F15, 54D35, 54H15 \end{keyword} \end{frontmatter} \section{Introduction}\label{s1} \subsection{Terminology} By a \textit{continuum} (plural form \textit{continua}) we shall mean a connected compact metrizable space with more than one point. A continuum $Y$ is \textit{decomposable} if there are two proper subcontinua $H,K\subsetneq Y$ such that $Y=H\cup K$. If $Y$ is not decomposable, then $Y$ is \textit{indecomposable}. We will say, more generally, that a connected space $X$ is \textit{indecomposable} if $X$ cannot be written as the union of two proper closed connected subsets. Equivalently, $X$ is indecomposable if $X$ is the only closed connected subset of $X$ with non-void interior \cite[\S48 V Theorem 2]{kur}. 
A connected space $X$ is \textit{strongly indecomposable} if for every two non-empty disjoint open sets $U$ and $V$ there are two disjoint closed sets $A$ and $B$ such that $X\setminus U=A\cup B$, $A\cap V\neq\varnothing$, and $B\cap V\neq\varnothing$. This term was introduced by the author in \cite{lip}. Strong indecomposability requires that the quasi-components of proper closed subsets of $X$ are nowhere dense in $X$, whereas indecomposability only requires that the connected components of proper closed subsets of $X$ are nowhere dense in $X$. Let $Y$ be a continuum and $x\in Y$. The \textit{composant} of $x$ in $Y$ is the union of all proper subcontinua of $Y$ containing $x$. Following \cite{mou}, the \textit{meager composant} of $x$ in $Y$ is the union of all nowhere dense subcontinua of $Y$ containing $x$. A subcontinuum $K$ of $Y$ is said to be \textit{filament} if there exists a neighborhood of $K$ in which the connected component of $K$ is nowhere dense \cite{pra}. The \textit{filament composant} of $x$ in $Y$ is the union of all filament subcontinua of $Y$ containing $x$. Given a connected subset $X$ of a continuum $Y$, and a point $y\in Y$, then $X$ is said to be \textit{singular with respect to $y$} if $\overline C=X$ for every connected $C\subseteq X$ with $y\in \doverline{{C}}$.\footnote{Whenever $Y$ is a space of which $X$ is a subspace, and $A\subseteq X$, then we write $\overline A$ for the closure of $A$ in $X$, and $\doverline{A}$ for the closure of $A$ in $Y$.} If there exists $y\in Y$ such that $X$ is singular with respect to $y$, then $X$ is \textit{singular} in $Y$. And $X$ is \textit{singular dense} in $Y$ if $X$ is both singular and dense in $Y$. This formulation is easily seen to be equivalent to the one in \cite{mou}. A subset $X$ of a continuum $Y$ is called a \textit{filament set} if each continuum in $X$ is a filament subcontinuum of $Y$. 
A continuum is \textit{filament additive} if the union of every two intersecting filament subcontinua is filament \cite{pra2}. This property implies the filament composants partition the continuum into pairwise disjoint sets. In homogeneous continua, filament additivity is equivalent to filament composants being filament sets \cite[Corollary 3.6]{pra2}.\footnote{A continuum is \textit{homogeneous} if for every two points $x$ and $y$ in the space there is a homeomorphism which maps the entire space onto itself, and maps $x$ to $y$.} A continuum $Y$ is \textit{filamentable} if there is a filament subcontinuum $L\subseteq Y$ such that $Y\setminus L$ is a filament set. \subsection{Motivation and Summary of Results}There is no difference among composants, meager composants, and filament composants in an indecomposable continuum; \cite[Exercise 6.19]{nad} and \cite[Proposition 1.9]{pra}. And if $Y$ is an indecomposable continuum, and $X$ is any composant of $Y$, then $X$ is singular with respect to each point of the dense $G_\delta$-set $Y\setminus X$ \cite[Theorems 11.15 \& 11.17]{nad}. The composant $X$ is also necessarily dense in $Y$ \cite[Theorem 5.4]{nad}. By contrast, an example in \cite[Section 5]{mou} shows the first two types of composants can differ quite dramatically inside of a decomposable continuum. There was constructed a plane continuum $\mathfrak Y$ with only one traditional composant versus uncountably many meager composants, each singular dense. 
\begin{figure} \caption{The ``bucket-handle'' $\mathfrak Z$ and the Mouron--Ordoñez continuum $\mathfrak Y$ \textit{(graphics extracted from \cite{mou})}.} \label{f1} \end{figure} That $\mathfrak Y$ has only one composant follows easily from the fact that $\mathfrak Y\cap ([0,3]\times [-1,1])$ is connected.\footnote{The continuum $\mathfrak Y\cap ([0,3]\times [-1,1])$ is known as the ``Cajun accordion'' \cite{rog}.} The (singular dense) meager composants of $\mathfrak Y$, on the other hand, are in one-to-one correspondence with the composants of the indecomposable ``bucket-handle'' continuum $\mathfrak Z$. There is a continuous surjection $\mathfrak f:\mathfrak Z\to \mathfrak Y$ witnessing this fact. Letting $\mathfrak X\subseteq \mathfrak Y$ be the image of the $\langle 0,-1\rangle$ endpoint composant of $\mathfrak Z$, we can see that $\mathfrak f\restriction (\mathfrak Z\setminus\mathfrak f^{-1}[\mathfrak X])$ is a homeomorphism (the sets $\mathfrak f^{-1} [\mathfrak X]\subseteq \mathfrak Z$ and $\mathfrak X\subseteq \mathfrak Y$ are indicated by the solid lines in Figure \ref{f1}). In particular, every meager composant of $\mathfrak Y$ other than $\mathfrak X$ is homeomorphic to a composant of $\mathfrak Z$.\footnote{And these non-endpoint composants of $\mathfrak Z$ are mutually homeomorphic \cite{band}.} By the first theorem of this paper, $\mathfrak X$ is also homeomorphic to a traditional composant. 
\begin{ut}\label{t1}Every singular dense meager composant is homeomorphic to a composant of an indecomposable continuum.\end{ut} Proving Theorem \ref{t1} will demonstrate that if $Y$ is a continuum and $X$ is a singular dense meager composant of $Y$, then there is an indecomposable continuum $Z$ and homeomorphic embedding $\xi:X\hookrightarrow Z$ such that $Z$ has the same dimension as $X$; $\xi[X]$ is a composant of $Z$; and there is a mapping $f:Z\to Y$ such that $f\restriction \xi[X]=\xi^{-1}$ is a homeomorphism onto $X$. This has the following corollary. \begin{uc}\label{cor3}If $Y$ is a continuum with a singular dense meager composant, then each meager composant of $Y$ is dense.\end{uc} Singularity is critical to Corollary \ref{cor3}. For instance, $[-1,0]^2\cup \mathfrak Z$ has both dense and non-dense meager composants. The next result applies to chainable and circularly-chainable continua. It builds on a theorem of Mouron \cite[Theorem 32]{mou2} stating that such a continuum is indecomposable if a sequence of disjoint subcontinua converges to the entire space in the Hausdorff metric. \begin{uc}Let $Y$ be a continuum which is the inverse limit of mutually homeomorphic topological graphs. Then $Y$ is indecomposable if and only if some (every) meager composant of $Y$ is singular dense.\label{t4}\end{uc} Thus, there is no graph-like continuum like $\mathfrak Y$. Observe also that the meager composants of $\mathfrak Y$ are not filament sets. For instance, the subcontinuum $\{0\}\times [-1,1]$ is non-filament. \begin{ut}\label{18}A continuum $Y$ is indecomposable if and only if $Y$ has a meager composant which is also a singular dense filament set.\end{ut} The final results concern homogeneous continua. 
\begin{ut}\label{t6}A homogeneous continuum $Y$ is indecomposable if and only if $Y$ is filament additive, filamentable, and has singular dense filament composants.\end{ut} \begin{uc}Let $Y$ be a filament additive, filamentable, homogeneous continuum with dense filament composants. Then $Y$ is indecomposable if and only if the filament composants of $Y$ are indecomposable.\label{c7} \end{uc} The sharpness of the last two results is evidenced by the product of a circle with a (non-circle) solenoid. That continuum is homogeneous, filament additive, filamentable, and decomposable. Its filament composants are products of the circle with composants of the solenoid \cite[Theorem 4.4]{pra2}. These sets are dense, but are neither singular nor indecomposable. \section{Properties of indecomposable meager composants} We begin by showing singular dense connected sets are indecomposable. \begin{ul}\label{p1}Let $X$ be a connected subset of a continuum $Y$. If $X$ is singular dense in $Y$, then $X$ is indecomposable.\end{ul} \begin{proof}Suppose $X$ is singular dense in $Y$. Let $y\in Y$ be such that $X$ is singular with respect to $y$. For a contradiction suppose $X$ is the union of two proper closed connected subsets $H$ and $K$. By $\doverline X=Y$ we have $y\in \doverline{{H}}$ or $y\in \doverline{{K}}$. Neither ${\overline{H}}$ nor ${\overline{K}}$ is equal to $X$, so this contradicts $X$ being singular with respect to $y$. Therefore $X$ is indecomposable.\end{proof} \begin{ur} \cite[Theorem 7.6]{mou} says that every irreducible continuum with a singular dense meager composant is indecomposable. The proof uses the idea of minimal decompositions. Alternatively, combine Proposition \ref{p1} with \cite[Theorem 6(iii)]{lip}. These results show that if $Y$ is an irreducible continuum in which any connected set is singular dense, then $Y$ is indecomposable. In particular, a continuum is indecomposable if (and only if) its composants are singular. 
\end{ur} The next proposition shows that meager composants partition a continuum into pairwise disjoint sets (cf. \cite[Proposition 2.5]{mou}). \begin{ul}\label{p2}Let $X$ be the meager composant of a point $x$ in a continuum $Y$. Let $K$ be a nowhere dense subcontinuum of $Y$. If $K\cap X\neq\varnothing$, then $K\subseteq X$.\end{ul} \begin{proof}Suppose $K\cap X\neq\varnothing$. Let $x'\in K\cap X$. There is a nowhere dense subcontinuum $L\supseteq \{x,x'\}$. Then $K\cup L$ is a nowhere dense subcontinuum of $Y$ containing $x$, so $K\subseteq X$. \end{proof} For every topological space $A$ and point $x\in A$, we let $\cnt(x,A)$ denote the connected component of $x$ in $A$. That is, $\cnt(x,A)=\bigcup\{C\subseteq A:C\text{ is connected and }x\in C\}.$ When $A$ is a subset of a topological space $X$, then $A$ is always given the subspace topology. \begin{ul}\label{p3}Let $X$ be a meager composant of a continuum $Y$. If $X$ is indecomposable, then: \begin{enumerate} \item[\textnormal{i.}] every proper closed connected subset of $X$ is compact; \item[\textnormal{ii.}] either $X$ is compact or $X$ is of the first category of Baire; and \item[\textnormal{iii.}] for every $X$-closed set $A\subseteq X$, the component decomposition $\mathfrak A:=\{\cnt(x,A):x\in X\}$ is metrizable and zero-dimensional.\footnote{The set $\mathfrak A$ is given the quotient topology; $\mathfrak U$ is open (closed) in $\mathfrak A$ if and only if $\bigcup \mathfrak U$ is open (closed) in $A$. We say that a space is \textit{zero-dimensional} if it has a basis of clopen (closed-and-open) sets.} \end{enumerate}\end{ul} \begin{proof}Suppose $X$ is indecomposable. (i): Let $C$ be a proper closed connected subset of $X$. Then $\doverline{{C}}$ is a nowhere dense subcontinuum of $Y$ by indecomposability of $X$. By Proposition \ref{p2}, $\doverline{{C}}\subseteq X$, so $\doverline{{C}}=\overline C=C$ is compact. (ii): Suppose $X$ is non-compact. 
Let $x\in X$, and let $\{U_n:n<\omega\}$ be a basis for $X\setminus \{x\}$ consisting of non-empty open sets. Clearly $X\supseteq \bigcup\{\cnt(x ,X\setminus U_n):n<\omega\}$. Conversely, let $x'$ be any point in $X$. There is a continuum $L\subseteq X$ with $\{x,x'\}\subseteq L$. We know $L\neq X$ because $X$ is not compact, so there exists $n<\omega$ such that $L\cap U_n=\varnothing$. Then $x'\in \cnt(x,X\setminus U_n)$. This shows $X\subseteq \bigcup\{\cnt(x ,X\setminus U_n):n<\omega\}$. Thus $X= \bigcup\{\cnt(x ,X\setminus U_n):n<\omega\}$. Each $\cnt(x, X\setminus U_n)$ is closed and nowhere dense by indecomposability of $X$. Therefore $X$ is of the first category of Baire. (iii): Let $A$ be a closed subset of $X$. If $A=X$, then $\mathfrak A=\{X\}$ is clearly metrizable and zero-dimensional. Let us assume for the remainder of the proof that $A\neq X$. First we will show $\mathfrak A$ is metrizable. This will be useful in proving $\mathfrak A$ is zero-dimensional. Let $\varphi:A\to \mathfrak A$ be the canonical epimorphism defined by $\varphi(x)=\cnt(x,A)$. To prove $\mathfrak A$ is metrizable, it suffices to show $\varphi$ is perfect \cite[Theorem 4.2.13]{eng}. Well, each member of $\mathfrak A$ is compact by Proposition \ref{p3}.i. It remains to show $\varphi$ is closed. To that end, let $C\subseteq A$ be closed. To prove $\varphi[C]=\{A'\in \mathfrak A:A'\cap C\neq\varnothing\}$ is closed in $\mathfrak A$ we must show $\bigcup\varphi[C]$ is closed in $X$. Suppose $x\in \overline{\bigcup\varphi[C]}$. Then there exists $(x_n)\in (\bigcup\varphi[C])^\omega$ such that $x_n\to x$. For each $n<\omega$ let $A_n=\cnt(x_n,A)$. \textit{Case 1: A subsequence of $(A_n)$ has connected union.} Let $(A_{n_k})$ be a subsequence whose union is connected. Then by maximality of the connected component $A_{n_0}$ we have $x_{n_k}\in A_{n_k}=A_{n_{0}}$ for each $k<\omega$. Since $A_{n_{0}}$ is closed we have $x\in A_{n_{0}}\subseteq \bigcup\varphi[C]$. 
\textit{Case 2: No subsequence of $(A_n)$ has connected union.} By compactness of the hyperspace $K(Y)$ \cite[Corollary 4.13]{nad}, the sequence $(A_n)\in [K(Y)]^\omega$ has an accumulation point $M\in K(Y)$.\footnote{Here $K(Y)$ is the set of non-empty compact subsets of $Y$ equipped with the Vietoris topology \cite[2.7.20]{eng}. For metrizable $Y$, the Vietoris topology coincides with the topology generated by any Hausdorff metric \cite[4.5.23]{eng}.} Necessarily, $x\in M\subseteq \doverline A$ and $M$ is a continuum \cite[Corollary 4.18]{nad}. $M$ is also nowhere dense in $Y$. For suppose otherwise that $M^\mathrm{o}\neq\varnothing$. Then $\Omega\coloneqq \{n<\omega:A_n\cap M^\mathrm{o}\neq\varnothing\}$ is infinite. By hypothesis $\Sigma \coloneqq \bigcup \{A_n:n\in \Omega\}$ is not connected. So there are $Y$-open sets $U$ and $V$ such that $U\cap \Sigma\neq\varnothing$, $V\cap \Sigma\neq\varnothing$, $U\cap V\cap \Sigma=\varnothing$, and $\Sigma\subseteq U\cup V$. The Vietoris open set $\{K\in K(Y):K\cap U\cap M^\mathrm{o}\neq\varnothing\text{ and }K\cap V\cap M^\mathrm{o}\neq\varnothing\}$ contains $M$ as an element but has empty intersection with $\{A_n:n<\omega\}$. This is a contradiction. Therefore $M^\mathrm{o}=\varnothing$, i.e. $M$ is nowhere dense. By Proposition \ref{p2}, $M\subseteq A$. Further, $M\cap C\neq\varnothing$ (otherwise, letting $W$ be a $Y$-open set such that $W\cap X=X\setminus C$ we find that $M\subseteq W$ yet $W$ contains no $A_n$ because $A_n\cap C\neq\varnothing$). Since $M\subseteq \cnt(x,A)$, this implies $ \cnt(x,A)\in \varphi[C]$. So $x\in \bigcup\varphi[C]$. In each of the two possible cases we found $x\in \bigcup\varphi[C]$. Therefore $\bigcup\varphi[C]=\overline{\bigcup\varphi[C]}$, so that $\varphi[C]$ is closed in $\mathfrak A$. We conclude that $\varphi$ is perfect, so $\mathfrak A$ is metrizable. Next we show $\mathfrak A$ is zero-dimensional. 
Note that if $X$ is compact then so is $A$, and in this case $\mathfrak A$ is already known to be zero-dimensional \cite[Theorem 6.2.24]{eng}. So assume $X$ is non-compact. Let $U$ be an open subset of $Y$ such that $X\setminus U=A$. By Proposition \ref{p3}.ii there exists $y\in U\setminus X$. Let $\varepsilon>0$ such that $B(y,\varepsilon)\subseteq U$, and for each $n<\omega$ put $W_n=B(y,\varepsilon/2^n)$. Fix $x'\in A$. For each $x\in A$ there is a nowhere dense continuum $L\subseteq X$ such that $\{x,x'\}\subseteq L$. Since $y\notin L$ there exists $n<\omega$ such that $W_n\cap L=\varnothing$. Then $x\in \cnt(x',X\setminus W_n)$. This shows \begin{equation}\label{e21}A\subseteq \bigcup\{\cnt(x',X\setminus W_n):n<\omega\}.\end{equation} We also claim that \begin{equation}\label{e22} \cnt(x,A)=\cnt(x,K_n) \text{ for every } x\in K_n\coloneqq \cnt(x',X\setminus W_n)\cap A.\end{equation} Well, suppose $x\in K_n$. Then $\cnt(x,A)\supseteq \cnt(x,K_n)$ because $A\supseteq K_n$. Conversely, $\cnt(x,A)\subseteq\cnt(x,X\setminus W_{n})= \cnt(x',X\setminus W_{n})$ implies $$\cnt(x,A)=\cnt(x,\cnt(x,A)\cap A)\subseteq \cnt(x,\cnt(x',X\setminus W_{n})\cap A)=\cnt(x,K_n).$$ Therefore $\cnt(x,A)=\cnt(x,K_n)$. For each $n<\omega$ let $\mathfrak K_n=\{\cnt(x,K_n):x\in K_n\}$ be the component decomposition of $K_n$. By (\ref{e21}) and (\ref{e22}) we have $$\mathfrak A=\bigcup \{\mathfrak K_n:n<\omega\}.$$ Endow the sets $\mathfrak A$ and $\mathfrak K_n$ with the quotient topologies relative to $A$ and $K_n$, respectively, and observe that each $\mathfrak K_n$ is a subspace of $\mathfrak A$. For if $\mathfrak S$ is any subset of $\mathfrak K_n$, then: \begin{align*} \mathfrak S\text{ is closed in }\mathfrak K_n &\Leftrightarrow \bigcup \mathfrak S \text{ is closed in }K_n\\ &\Leftrightarrow \bigcup \mathfrak S\text{ is closed in }A \\ & \Leftrightarrow \mathfrak S\text{ is closed in }\mathfrak A. 
\end{align*} The first equivalence is the definition of the quotient topology on $\mathfrak K_n$. The second equivalence holds because $K_n$ is a closed subset of $A$. The third holds by the inclusion $\mathfrak S\subseteq \mathfrak K_n\subseteq \mathfrak A$ and the definition of the quotient topology on $\mathfrak A$. Note that $K_n$ is compact by Proposition \ref{p3}.i, so $\mathfrak K_n$ is zero-dimensional by \cite[Theorem 6.2.24]{eng}. Thus, $\mathfrak A$ is a separable metrizable union of countably many closed (compact) zero-dimensional subspaces. By \cite[Theorem 1.3.1]{eng2}, $\mathfrak A$ is zero-dimensional. \end{proof} The next proposition shows that singularity of dense meager composants can be expressed using various familiar properties of connected sets. And for dense meager composants, being singular with respect to one point implies having a full complementary set of singularities. \begin{ul}\label{p5}Let $X$ be a meager composant of a continuum $Y$. If $X$ is dense in $Y$, then the following are equivalent: \begin{enumerate} \item[\textnormal{i.}] $X$ is indecomposable; \item[\textnormal{ii.}] $X$ is strongly indecomposable; \item[\textnormal{iii.}] there exists $y\in Y$ such that $X$ is singular with respect to $y$; \item[\textnormal{iv.}] $Y\setminus X\neq\varnothing$ and $X$ is singular with respect to each point of $Y\setminus X$; \item[\textnormal{v.}] there exists $y\in Y$ such that the connected set $X\cup \{y\}$ is irreducible; \item[\textnormal{vi.}] $Y\setminus X\neq\varnothing$ and $X\cup \{y\}$ is irreducible for every $y\in Y\setminus X$; \item[\textnormal{vii.}] $X\cup \{y\}$ is indecomposable for every $y\in Y$. \end{enumerate} \end{ul} \begin{proof}Suppose $\doverline X=Y$. First we will show (ii)$\Rightarrow$(i)$\Rightarrow$(vi)$\Rightarrow$(v)$\Rightarrow$(iii)$\Rightarrow$(i)$\Rightarrow$(ii), establishing the equivalence of all items other than (iv) and (vii). 
Then, to incorporate (iv) and (vii) we will prove (vi)$\Rightarrow$(iv)$\Rightarrow$(iii) and (vi)$\Rightarrow$(vii)$\Rightarrow$(i). (ii)$\Rightarrow$(i): Fairly obvious; see the second paragraph of \cite[Section 2]{lip}. (i)$\Rightarrow$(vi): Suppose $X$ is indecomposable. Then $X\neq Y$ because every indecomposable continuum has more than one meager composant. Let $y\in Y\setminus X$ and fix $x\in X$. For a contradiction suppose $X\cup \{y\}$ is reducible between $x$ and $y$. Let $C\supseteq \{x,y\}$ be a proper closed connected subset of $X\cup \{y\}$. By Proposition \ref{p3}.iii the decomposition of $C\cap X$ into connected components is metrizable and zero-dimensional, so there is a decreasing sequence of $(C\cap X)$-clopen sets $E_0\supseteq E_1\supseteq E_2\supseteq...$ such that $\cnt(x,C\cap X)=\bigcap\{E_n:n<\omega\}$. Each $E_n\cup \{y\}$ is connected, so $K:=\bigcap\{\doverline{E_n\cup \{y\}}:n<\omega\}$ is the intersection of a decreasing sequence of continua. Then $K$ is a continuum. Further, $K$ has non-empty interior because $X$ is a meager composant of $Y$, $\{x,y\}\subseteq K$, and $y\notin X$. Let $U$ be a non-empty $Y$-open set such that $U\subseteq \doverline{E_n\cup \{y\}}$ for each $n<\omega$. Then $U\cap X\subseteq \doverline{E_n\cup \{y\}}\cap X=E_n$ for each $n<\omega$. So $\cnt(x,C\cap X)$, which is a proper closed connected subset of $X$, contains the non-empty $X$-open set $U\cap X$. This contradicts indecomposability of $X$. Therefore $X\cup \{y\}$ is irreducible (between $x$ and $y$). (vi)$\Rightarrow$(v): Trivial. (v)$\Rightarrow$(iii): Suppose $y\in Y$ is such that $X\cup \{y\}$ is irreducible. For every two points $x$ and $x'$ in $X$ there is a continuum $L\subseteq X$ which contains $\{x,x'\}$ and is nowhere dense in $Y$. Since $X$ is dense in $Y$, we know $L$ is also nowhere dense in $X$. Therefore $X$ is reducible, so there exists $x\in X$ such that $X\cup \{y\}$ is irreducible between $x$ and $y$. 
Let $C$ be any connected subset of $X$ such that $y\in \doverline C$. There is a nowhere dense (in $X$) continuum $L\subseteq X$ such that $L\cap C\neq\varnothing$ and $x\in L$. Then $\overline C\cup L\cup \{y\}$ is a proper closed connected subset of $X\cup \{y\}$ containing $x$ and $y$. By irreducibility it must be that $\overline C\cup L\cup \{y\}=X\cup \{y\}$, whence $\overline C=X$ and $X$ is singular with respect to $y$. (iii)$\Rightarrow$(i): Proposition \ref{p1}. (i)$\Rightarrow$(ii): Suppose $X$ is indecomposable. Toward showing $X$ is strongly indecomposable, let $U$ and $V$ be non-empty disjoint open subsets of $X$. We will exhibit a relatively clopen subset of $X\setminus U$ which intersects $V$ but does not contain $V$. Well, by indecomposability of $X$ there are two connected components $A_0\neq A_1$ of $X\setminus U$ such that $A_0\cap V\neq\varnothing$ and $A_1\cap V\neq\varnothing$. The component decomposition of $X\setminus U$ is zero-dimensional by Proposition \ref{p3}.iii. So in $X\setminus U$ there is a clopen set which contains $A_0$ and misses $A_1$. (vi)$\Rightarrow$(iv): Similar to (v)$\Rightarrow$(iii). (iv)$\Rightarrow$(iii): Trivial. (vi)$\Rightarrow$(vii): For a contradiction suppose (vi) and the negation of (vii). Let $y\in Y$ be such that $X\cup \{y\}$ is decomposable. Let $H$ and $K$ be proper closed connected subsets of $X\cup \{y\}$ such that $H\cup K=X\cup \{y\}$. We have already established (vi)$\Rightarrow$(i), so $y\in (H\cap K)\setminus X$. By (vi), $X\cup \{y\}$ is irreducible. Since $X$ is reducible, this means $X\cup \{y\}$ is irreducible between some $x\in X$ and $y$. But for each $x\in X$ one of the sets $H$ or $K$ will show that $X\cup \{y\}$ is reducible between $x$ and $y$. This is a contradiction. (vii)$\Rightarrow$(i): Trivial.\end{proof} \begin{ur}From the proof of (i)$\Rightarrow$(ii) we see that indecomposable meager composants are strongly indecomposable. 
It remains an open problem to determine whether there is an indecomposable connected set which is \textit{not} strongly indecomposable, but we suspect there is such an example. Some variations of this problem appear in \cite[Section 5]{lip}. \end{ur} \begin{ur}Regarding (i)$\Rightarrow$(vii), in \cite[Example 1]{lip} there was shown to be a locally compact indecomposable connected plane set whose one-point compactification is decomposable. On the other hand, Mary Ellen Rudin \cite{rud} proved: If $X$ is any connected plane set and $Y$ is the plane closure of $X$, then (i)$\Rightarrow$(vii). We noticed that Rudin's proof could be dramatically simplified if every indecomposable connected plane set were known to be strongly indecomposable. This suggests proving (i)$\Rightarrow$(ii) in general, or just for connected plane sets, could be difficult.\end{ur} \begin{ur}The implication (i)$\Rightarrow$(v) holds more generally when $X$ is any connected set and $Y$ is any compactification of $X$ \cite[Theorem 3]{lip2}. But (i)$\Rightarrow$(vi) is generally false by the example in the previous remark.\end{ur} \section{Proof of Theorem \ref{t1}} \noindent Suppose $X$ is a singular dense meager composant of a continuum $Y$. We will construct an indecomposable continuum $Z$ with a composant homeomorphic to $X$. By strong indecomposability of $X$ (Proposition \ref{p5}.ii) there is a homeomorphic embedding $\iota:X \hookrightarrow [0,1]^\omega$ such that $I\coloneqq \overline{\iota[X]}$ is an indecomposable continuum \cite[Theorem 9]{lip}. Let $\Gamma:Y\hookrightarrow [0,1]^\omega$ be a homeomorphic embedding of $Y$, and put $\gamma\coloneqq \Gamma\restriction X$. Let $\pi_n:[0,1]^\omega\to [0,1]$ be the $n$-th coordinate projection. 
There is a homeomorphic embedding $\xi:X\hookrightarrow [0,1]^\omega$ such that all of the maps \begin{align*} \varphi_n\coloneqq \pi_n\circ \iota\circ \xi^{-1}:\;&\xi[X]\to [0,1]\text{; and}\\ \psi_n\coloneqq \pi_n\circ \gamma\circ \xi^{-1}:\;&\xi[X]\to [0,1] \end{align*} continuously extend to \begin{equation}Z:=\overline{\xi[X]}. \label{e41}\end{equation} By \cite[Exercise 1.7.C]{eng3}, $\xi$ can even be constructed to obtain $\dim(Z)=\dim(X)$. For each $n<\omega$ let $\Phi_n:Z\to [0,1]$ and $\Psi_n:Z\to [0,1]$ be the continuous extensions of $\varphi_n$ and $\psi_n$, respectively. Define $\Phi:Z\to [0,1]^\omega$ by $\pi_n\circ \Phi=\Phi_n$, and likewise for $\Psi$. Then $\Phi$ maps onto $I$ and $\Psi$ maps onto $\Gamma[Y]$. Since $\Phi\restriction \xi[X]=\iota\circ \xi^{-1}$ and $\Psi\restriction \xi[X]=\gamma\circ \xi^{-1}$ are homeomorphisms and $\xi[X]$ is dense in $Z$ (\ref{e41}), we have \begin{align}\Phi^{-1}[ \iota[X]]&=\xi[X];\text{ and}\label{e42}\\ \Psi^{-1}[ \gamma[X]]&=\xi[X].\label{e43}\end{align} \begin{figure} \caption{Commutative diagrams for $\Phi$ and $\Psi$.} \end{figure} \textit{$Z$ is indecomposable}: By (\ref{e41}) and (\ref{e42}), $\Phi$ maps onto $I$ and maps proper subcontinua of $Z$ to proper subcontinua of $I$. Indecomposability of $I$ therefore implies $Z$ is indecomposable. For if $Z$ were the union of two proper subcontinua $H$ and $K$, then $I$ would be the union of proper subcontinua $\Phi[H]$ and $\Phi[K]$. \textit{$\xi[X]$ is contained in a composant of $Z$}: Since $X$ is singular dense in $Y$ we know that $X$ is not compact. Therefore $\xi[X]\neq Z$. Also $X$ is continuum-wise connected, thus $\xi[X]$ is contained in a composant of $Z$. \textit{$\xi [X]$ contains a composant of $Z$}: Let $x\in X$, and let $N\ni \xi(x)$ be a proper subcontinuum of $Z$. We will show $N\subseteq \xi[X]$. Well, since $Z$ is indecomposable $N$ is nowhere dense in $Z$. Therefore $\Psi[N]$ is a nowhere dense subcontinuum of $\Gamma[Y]$. 
For otherwise, $\Psi[N]$ contains a $\gamma[X]$-open set $U\neq\varnothing$. By (\ref{e43}), $N$ contains the non-empty $\xi[X]$-open set $\xi\circ\gamma^{-1}[U]$. Since $N$ is closed in $Z$ and (\ref{e41}) holds, this implies $N$ has non-void interior in $Z$, a contradiction. Thus $\Psi[N]$ is nowhere dense. Since $\gamma[X]$ is a meager composant of $\Gamma[Y]$ we have $\Psi[N]\subseteq \gamma[X]$. By (\ref{e43}), $N\subseteq \xi[X]$. The composants of $Z$ are pairwise disjoint, so the two containments show that $\xi[X]$ is equal to a composant of $Z$. This completes the proof of Theorem \ref{t1}. \qed \begin{ur}Define $f= \Gamma^{-1}\circ \Psi$ for a surjection $f:Z\to Y$ such that $f[\xi[X]]=X$. By monotone-light factorization \cite[Theorem 13.3]{nad}, $f$ is equal to a monotone mapping of $Z$ onto some continuum $M$, followed by a surjective mapping $l:M\to Y$ such that $l^{-1}\{y\}$ is totally disconnected for every $y\in Y$. We see that $M$ is also an indecomposable continuum containing $l^{-1}[X]\simeq X$ as a composant. \end{ur} \section{Proof of Corollary \ref{cor3}} \noindent If continuum $Y$ has a singular dense meager composant, we have shown that a continuum $Z$ maps onto $Y$ so that each meager composant of $Y$ contains the image of a composant of $Z$. Each composant of $Z$ is dense, therefore each meager composant of $Y$ is dense. \qed \begin{ur}We now see that if $Y$ is a continuum with a singular dense meager composant, then $Y$ has at least two (disjoint) dense meager composants. In particular, for each $x\in Y$ there exists $y\in Y$ such that the union of all continua in $Y\setminus \{x\}$ containing $y$ is dense in $Y$. This is enough to imply $Y$ is indecomposable if $Y$ is chainable \cite[Corollary 3.6]{oo}. \end{ur} \section{Proof of Corollary \ref{t4}} \noindent We prove only the non-standard implication. Suppose $X$ is a singular dense meager composant of the graph-like continuum $Y$. 
To show $Y$ is indecomposable, by \cite[Theorem 32]{mou2} it suffices to show there is a sequence $(X_i)$ of pairwise disjoint continua in $Y$ such that $d_H(X_i,Y)\to 0$, where $d_H$ is the Hausdorff distance induced by a metric $d$ on $Y$. It is unknown whether $Y$ must have infinitely many meager composants (see Question~\ref{q1} in Section 7), but in any case the continua $X_i$ can be selected from $X$. Identify $X$ with a composant of an indecomposable continuum $Z$ which maps onto $Y$ (Theorem \ref{t1}). Let $\varrho_H$ be the Hausdorff metric generated by a metric $\varrho$ on $Z$, and define $\varrho_{\inf}(z,A)=\inf\{\varrho(x,z):x\in A\}$ for each $z\in Z$ and $A\subseteq Z$. Let $z\in Z\setminus X$, and let $(x_n)\in X^\omega$ such that $x_n\to z$. Recursively define $X_i$ as follows. Put $X_0=\{x_0\}$. There exists a positive integer $n_1$ such that $\varrho(x_0,x_{n_1})>1/n_1$ and $\varrho_H(\cnt(x_{n_1},Z\setminus B_{\varrho}(x_0,1/n_1)),Z)<1.$ If there were no such integer, then `boundary bumping' \cite[Lemma 6.1.25]{eng} and compactness of the hyperspace $K(Z)$ would reveal a proper subcontinuum of $Z$ containing both $x_0$ and $z$. Set $X_1=\cnt(x_{n_1},Z\setminus B_{\varrho}(x_0,1/n_1))$. Suppose $i>1$ and $X_j\subseteq X$ has been defined for each $j<i$. No proper subcontinuum of $Z$ containing $z$ also meets the compact set $X_0\cup X_1\cup ...\cup X_{i-1}$, so there is a sufficiently large integer $n_i$ such that $\varrho_{\inf}(x_{n_i},X_0\cup X_1\cup ...\cup X_{i-1})>1/n_i$ and $$\varrho_H(\cnt(x_{n_i},Z\setminus B_{\varrho_{\inf}}(X_0\cup X_1\cup ...\cup X_{i-1},1/n_i)),Z)<1/i.$$ Let $X_i=\cnt(x_{n_i},Z\setminus B_{\varrho_{\inf}}(X_0\cup X_1\cup ...\cup X_{i-1},1/n_i))$. The terms of the sequence $(X_i)$ are pairwise disjoint continua in $X$, and $\varrho_H(X_i,Z)\to 0$. Since $Z$ maps continuously onto $Y$, we have $d_H(X_i,Y)\to 0$. \qed \section{Proof of Theorem \ref{18}} \noindent Let $X$ be a meager composant of a continuum $Y$.
If $Y$ is indecomposable, then by elementary continuum theory $X$ is both a composant of $Y$ and a singular dense filament subset of $Y$. Now suppose $X$ is a singular dense filament set. Let $x\in X$, and let $A\subseteq Y$ be a minimal non-filament subcontinuum containing $x$ provided by \cite[Corollary 1.13]{pra}. Since $X$ is a filament set there exists $y\in A\setminus X$. Let $C$ be the composant of $x$ in $A$. Then $C\subseteq X$ by minimality of $A$ and the fact that filament subcontinua are nowhere dense. Further $\doverline C=A$ \cite[Exercise 5.20]{nad}, so $y\in \doverline C$. By Proposition \ref{p5}, $X$ is singular with respect to $y$, so $\overline C= X$. By density of $X$ in $Y$ we have $\doverline{{C}}= Y$. Therefore $A=Y$, so $Y$ is indecomposable. \qed \section{Questions} \begin{uq}\label{q1}Let $Y$ be a continuum with a singular dense meager composant. Must $Y$ have at least three meager composants?\end{uq} Compare Question \ref{q1} with \cite[Problem 8.8]{mou} on whether there is a continuum $Y$ with a point $x$ such that the meager composants of $Y$ are $\{x\}$ and $Y\setminus \{x\}$. A counterexample to Question \ref{q1} would also have exactly two meager composants: a dense first category $F_\sigma$-set and its complement, a dense $G_\delta$-set. This $G_\delta$ would not be $F_\sigma$, contrary to \cite[Conjecture 8.4]{mou}. Therefore, we conjecture a positive answer to Question \ref{q1}. We would like to know if there is a homogeneous example like $\mathfrak Y$. \begin{uq}\label{q3a}Is there a decomposable homogeneous continuum with singular dense meager composants?\end{uq} Analogous questions for filament composants are also of interest. \begin{uq}\label{q2}Is there a decomposable continuum with singular dense filament composants?
\end{uq} \begin{uq}\label{q3}Is every filament additive homogeneous continuum with singular dense filament composants necessarily indecomposable?\end{uq} The next section contains some results toward answering Question 4 in the affirmative. \section{Filament singularities in filament additive homogeneous continua} Here we will prove Theorem \ref{t6} and Corollary \ref{c7}, and show that Question \ref{q3} is related to a question of Prajs \& Whittington. Throughout this section, \textit{$Y$ is assumed to be a filament additive homogeneous continuum with dense filament composants.} For each point $x\in Y$ let $\fcs(x)$ denote the filament composant of $x$. Let $$\sng(x)=\{y\in Y:(\forall\text{ connected }C\subseteq \fcs(x)\text{ with }x\in C)(y\in \doverline{C}\Rightarrow \doverline C=Y)\}$$ be the set of filament singularities of $x$. \begin{ul}$y\in \sng(x)$ if and only if $\fcs(x)$ is singular with respect to $y$. \label{81} \end{ul} \begin{proof}Suppose $\fcs(x)$ is singular with respect to $y$. This means if $C$ is any connected subset of $\fcs(x)$ (with or without the base point $x$) and $y\in \doverline C$, then $\overline C=\fcs(x)$. Further, $\doverline C=Y$ by the standing assumption that $\fcs(x)$ is dense in $Y$. This shows $y\in \sng(x)$. Now suppose $\fcs(x)$ is \textit{not} singular with respect to $y$. Then there is a connected set $C\subseteq \fcs(x)$ such that $y\in \doverline C$ and $\overline C\neq\fcs(x)$. Let $x'\in C$, and let $L\supseteq \{x,x'\}$ be a filament subcontinuum of $Y$. Then $C\cup L$ is a connected subset of $\fcs(x)$, $x\in C\cup L$, and $y\in \doverline{C\cup L}\neq Y$. Thus $y\notin \sng(x)$.\end{proof} Similar to Proposition \ref{p5}: \begin{ul}\label{pi} The following are equivalent: \begin{enumerate} \item[\textnormal{i.}] $\sng(x)\neq\varnothing$; \item[\textnormal{ii.}] $\fcs(x)$ is singular (as defined in Section \ref{s1}); \item[\textnormal{iii.}] $\fcs(x)$ is indecomposable. 
\end{enumerate}\end{ul} \begin{proof}(i)$\Rightarrow$(ii): Proposition \ref{81}. (ii)$\Rightarrow$(iii): Proposition \ref{p1}. (iii)$\Rightarrow$(i): Suppose $\fcs(x)$ is indecomposable. Let $\{V_n:n<\omega\}$ be a basis of non-empty open sets for $Y\setminus \{x\}$. By $\doverline{\fcs(x)}=Y$ and indecomposability of $\fcs(x)$ each $\cnt(x,\fcs(x)\setminus V_n)$ is nowhere dense, so there exists $$y\in Y\setminus \bigcup \big\{\doverline{{\cnt(x,\fcs(x)\setminus V_n)}}:n<\omega\big\}.$$ Then $y\in \sng(x)$. \end{proof} \begin{ul}If $x'\in \fcs(x)$, then $\sng(x)=\sng(x')$.\label{p8}\end{ul} \begin{proof}Let $x'\in \fcs(x)$. By filament additivity $\fcs(x)=\fcs(x')$. So for every $y\in Y$, $\fcs(x)$ is singular with respect to $y$ if and only if $\fcs(x')$ is singular with respect to $y$. By Proposition \ref{81}, $\sng(x)=\sng(x')$. \end{proof} A subcontinuum $A\subseteq Y$ is \textit{ample} if $\cnt(A,U)$ is a neighborhood of $A$ for each open set $U\supseteq A$ \cite{pra}. In homogeneous continua, \textit{ample} and \textit{non-filament} are equivalent \cite[Proposition 2.3]{pra}. Minimal ample subcontinua of $Y$ exist by \cite[Corollary 2.5]{pra}. \begin{ul}\label{p66}Let $A$ be a minimal ample subcontinuum of $Y$. If $A\neq Y$, then $A\cap \sng(x)=\varnothing$ for each $x\in A$. \end{ul} \begin{proof}Let $x\in A$. Let $C$ be the composant of $x$ in $A$. Then $C$ is connected, $C\subseteq \fcs(x)$, and $\doverline{{C}}=A$. Thus $A\neq Y$ implies $A\cap \sng(x)=\varnothing$. \end{proof} \begin{ul}\label{t3}$Y$ is indecomposable if and only if $\fcs(x)\cup \sng(x)=Y$ for some (every) $x\in Y$.\end{ul} \begin{proof}If $Y$ is indecomposable then the equation holds for each $x$ because the filament composants and traditional composants of $Y$ coincide and partition $Y$. Conversely, if $Y$ is decomposable then there is a minimal ample subcontinuum $A\neq Y$. Let $x\in A$. Since $\fcs(x)$ is a filament set \cite[Corollary 3.6]{pra2}, $A\setminus \fcs(x)\neq\varnothing$.
Also, $A\cap \sng(x)=\varnothing$ by Proposition \ref{p66}. Therefore $A\setminus [\fcs(x)\cup \sng(x)]\neq\varnothing$, whence $\fcs(x)\cup \sng(x)\neq Y$. Since $Y$ is homogeneous we have $\fcs(x)\cup \sng(x)\neq Y$ for every $x\in Y$.\end{proof} By Propositions \ref{81} and \ref{t3}, $Y$ is indecomposable if and only if the filament composants of $Y$ are singular with respect to all points in their respective complements. Density of filament composants is critical to this result. For example, the circle of pseudoarcs is homogeneous, filament additive, and decomposable. Its filament composants are singular with respect to all points in their complements, but are not dense. Let us now examine the non-singularity relation $$\neg\sng=\{\langle x,y\rangle \in Y^2:y\notin \sng(x)\}.$$ Write $\neg\sng\langle x,y\rangle$ for $\langle x,y\rangle\in \neg\sng$. \begin{ul}$\neg\sng$ is an equivalence relation.\label{p7}\end{ul} \begin{proof}We need to show $\neg\sng$ is reflexive, symmetric, and transitive. \textit{Reflexive:} The standing assumption $\doverline{\fcs(x)}=Y$ implies $\{x\}\subsetneq \fcs(x)$. Therefore $\neg\sng\langle x,x\rangle$. \textit{Symmetric:} Suppose $\neg\sng\langle x,y\rangle$. We will show $\neg\sng\langle y,x\rangle$. To that end, let $C\ni x$ be a connected subset of $\fcs(x)$ such that $y\in \doverline{{C}}\neq Y$. Let $p\in Y\setminus \doverline{C}$, and let $\varepsilon>0$ such that $B(p,2\varepsilon)\cap \doverline{{C}}=\varnothing$. For each $n<\omega$: let $\delta_n$ be an Effros number\footnote{If $Y$ is a homogeneous continuum, then for every $\varepsilon>0$ there is a positive number $\delta$, called an \textit{Effros number} for $\varepsilon$, such that for each pair of points $x$ and $y$ with $d(x, y)<\delta$ there is an onto homeomorphism $h:Y\to Y$ such that $h(x)=y$ and $d(z, h(z)) <\varepsilon$ for each $z \in Y$. This is called the Effros Theorem. 
It follows from the more general \cite[Theorem 2]{eff}.} for $\varepsilon/2^n$; let $x_n\in C$ such that $d(x_n,y)<\delta_n$; and let $h_n:Y\to Y$ be a surjective homeomorphism such that $h_n(x_n)=y$ and $d(z,h_n(z))<\varepsilon/2^n$ for all $z\in Y$. The connected set $E:=\bigcup \{h_n[C]:n<\omega\}$ shows $\neg\sng\langle y,x\rangle$. Indeed, $y\in E$, and $E\subseteq \fcs(y)$ by filament additivity and the fact that homeomorphisms respect filament composants. Further, $x\in \doverline{{E}}$ because $h_n(x)\to x$, and $\doverline{{E}}\cap B(p,\varepsilon)=\varnothing$. Therefore $\neg\sng\langle y,x\rangle$. \textit{Transitive:} Suppose $\neg\sng\langle x,y\rangle$ and $\neg\sng\langle y,z\rangle$. We will show $\neg\sng\langle x,z\rangle$. If $\sng(x)=\varnothing$ then clearly $\neg\sng\langle x,z\rangle$. Now suppose $\sng(x)\neq\varnothing$. Then $\fcs(x)$ is indecomposable by Proposition \ref{pi}. By $\neg\sng\langle x,y\rangle$ there is a connected set $C\subseteq \fcs(x)$ such that $x\in C$ and $y\in \doverline{{C}}\neq Y$. By $\neg\sng\langle y,z\rangle$ and symmetry of $\neg\sng$, there is also a connected set $D\subseteq \fcs(z)$ such that $z\in D$ and $y\in \doverline{{D}}\neq Y$. Indecomposability of $\fcs(x)$ implies $\doverline C$ is nowhere dense, so $\doverline{C}\cup \doverline{D}\neq Y$. There exists $q\in Y\setminus \doverline{C\cup D}$ and $\varepsilon>0$ such that $B(q,2\varepsilon)\cap \doverline{C\cup D}=\varnothing$. For each $n<\omega$ let $\delta_n$ be an Effros number for $\varepsilon/2^n$. Let $x_n\in C \cap B(y,\delta_n/2)$ and $z_n\in D\cap B(y,\delta_n/2)$, so that $d(x_n,z_n)<\delta_n$. Let $h_n:Y\to Y$ be a surjective homeomorphism such that $h_n(z_n)=x_n$ and $d(w,h_n(w))<\varepsilon/2^n$ for all $w\in Y$. The connected set $C\cup\bigcup\{ h_n[D]:n<\omega\}\subseteq \fcs(x)$ witnesses $\neg\sng\langle x,z\rangle$. 
\end{proof} \begin{figure} \caption{Symmetry (left) and Transitivity (right) of $\neg\sng$ (Proposition \ref{p7}).} \label{f3} \end{figure} \begin{ur}By Proposition \ref{p7}, the non-singularity relation partitions $Y$ into pairwise disjoint sets. By Proposition \ref{p8} and symmetry of $\neg\sng$, the partition $Y/\neg\sng=\{Y\setminus \sng(x) :x\in Y\}$ is coarser than the partition of $Y$ into filament composants. Likewise, each $\sng(x)$ is a union of filament composants.\end{ur} \begin{ur}Proposition \ref{t3} says $Y$ is indecomposable if and only if $\neg\sng=\fcs$, i.e. $$Y/\neg\sng=\{\fcs(x) :x\in Y\}.$$\end{ur} \begin{ur}Question \ref{q3} asks whether $|Y/\neg\sng|>1$ implies $Y$ is indecomposable.\end{ur} Let us now restate and prove the last two items from Section 1.2. \begin{ut6}A homogeneous continuum $Y$ is indecomposable if and only if $Y$ is filament additive, filamentable, and has singular dense filament composants.\end{ut6} \begin{proof}Suppose the homogeneous continuum $Y$ is filament additive, filamentable, and has singular dense filament composants. Let $L$ be a filament subcontinuum of $Y$ such that $Y\setminus L$ is a filament set. By singularity and Proposition \ref{p7}, there exists $x\in Y$ such that $L\cap \neg\sng[x]=\varnothing$. Then $\neg\sng[x]=\bigcup\{\doverline C:C\subseteq \fcs(x)\text{ is connected, } x\in C \text{, and }\doverline C\neq Y\}$ is a continuum-wise connected filament set, so $\neg\sng[x]=\fcs(x)$. By Proposition \ref{t3}, $Y$ is indecomposable.\end{proof} \begin{uc7}Let $Y$ be a filament additive, filamentable, homogeneous continuum with dense filament composants. Then $Y$ is indecomposable if and only if the filament composants of $Y$ are indecomposable.\end{uc7} \begin{proof}If the filament composants of $Y$ are indecomposable, then so is $Y$ by Proposition \ref{pi} and Theorem \ref{t6}. The converse follows from the fact that dense connected subsets of indecomposable continua are indecomposable.
\end{proof} \begin{ur}Based on Proposition \ref{p66}, the existence of one minimal ample subcontinuum of $Y$ which meets two non-singularity classes would imply $Y$ is indecomposable. Since each non-singularity class is a union of filament composants, a positive answer to the question of Prajs and Whittington below would imply a positive answer to Question 4. \end{ur} \begin{uq}[{Question 7 in }\cite{pra2}] Let $Y$ be a filament additive homogeneous continuum with dense filament composants. Does each ample subcontinuum of $Y$ intersect every filament composant of $Y$?\end{uq} \small \end{document}
\begin{document} \title[Article Title]{Delving into Identify-Emphasize Paradigm for\\ Combating Unknown Bias} \author[1]{\fnm{Bowen} \sur{Zhao}}\email{[email protected]} \author[2]{\fnm{Chen} \sur{Chen}}\email{[email protected]} \author[1,3]{\fnm{Qian-Wei} \sur{Wang}}\email{[email protected]} \author[2]{\fnm{Anfeng} \sur{He}}\email{[email protected]} \author[1,3]{\fnm{Shu-Tao} \sur{Xia}}\email{[email protected]} \affil[1]{\orgdiv{Tsinghua Shenzhen International Graduate School}, \orgname{Tsinghua University}, \orgaddress{\country{China}}} \affil[2]{\orgdiv{TEG AI}, \orgname{Tencent}, \orgaddress{ \country{China}}} \affil[3]{\orgdiv{Research Center of Artificial Intelligence}, \orgname{Peng Cheng Laboratory}, \orgaddress{ \country{China}}} \abstract{ Dataset biases are notoriously detrimental to model robustness and generalization. The identify-emphasize paradigm appears to be effective in dealing with unknown biases. However, we discover that it is still plagued by two challenges: A, the quality of the identified bias-conflicting samples is far from satisfactory; B, the emphasizing strategies only produce suboptimal performance. In this paper, for challenge A, we propose an effective bias-conflicting scoring method (ECS) to boost the identification accuracy, along with two practical strategies --- peer-picking and epoch-ensemble. For challenge B, we point out that the gradient contribution statistics can be a reliable indicator to inspect whether the optimization is dominated by bias-aligned samples. Then, we propose gradient alignment (GA), which employs gradient statistics to balance the contributions of the mined bias-aligned and bias-conflicting samples dynamically throughout the learning process, forcing models to leverage intrinsic features to make fair decisions. Furthermore, we incorporate self-supervised (SS) pretext tasks into training, which enable models to exploit richer features rather than the simple shortcuts, resulting in more robust models. 
Experiments are conducted on multiple datasets in various settings, demonstrating that the proposed solution can mitigate the impact of unknown biases and achieve state-of-the-art performance. } \keywords{Unknown Bias, Identify-Emphasize, Bias-Conflicting Scoring, Gradient Alignment, Self-Supervision} \maketitle \section{Introduction} Deep Neural Networks (DNNs) have made significant advances in a variety of visual tasks. DNNs tend to learn \textbf{intended} decision rules to accomplish target tasks commonly. However, they may follow \textbf{unintended} decision rules based on the easy-to-learn shortcuts to ``achieve" target goals in some scenarios~\citep{bahng2020learning}. For instance, when training a model to classify digits on Colored MNIST~\citep{kim2019learning}, where the images of each class are primarily dyed by one pre-defined color respectively (\textit{e.g.}, most `0' are red, `1' are yellow, see examples in Figure~\ref{fig:examples}), the intended decision rules classify images based on the shape of digits, whereas the unintended decision rules utilize color information instead. Following~\cite{nam2020learning}, sample $x$ that can be ``correctly" classified by unintended decision rules is denoted as a \textbf{bias-aligned} sample $\underline{x}$ (\textit{e.g.}, red `0' in Colored MNIST) and vice versa a \textbf{bias-conflicting} sample $\overline{x}$ (\textit{e.g.}, green `0'). \begin{figure*} \caption{(a) Effective bias-Conflicting Scoring (ECS) helps identify real bias-conflicting samples in stage \uppercase\expandafter{\romannumeral1} \label{fig:intro_effect} \end{figure*} There are many similar scenarios in the real world. For example, an animal-centric image set may be biased by the habitats in the background, and a human-centric set may be biased by gender or racial information. 
Models blinded by biased datasets usually perform poorly in mismatched distributions (\textit{e.g.}, a red `8' may be incorrectly classified as `0' by the model trained on Colored MNIST). Worse, models with racial or gender bias, \textit{etc.} can cause severe negative social impacts. Furthermore, in most real-world problems, the bias information (both bias type and precise labels of bias attribute) is unknown, making debiasing more challenging. Therefore, combating unknown biases is urgently demanded when deploying AI systems in realistic applications. One major issue that leads to biased models is that the training objective (\textit{e.g.}, vanilla empirical risk minimization) can be accomplished through only unintended decision rules~\citep{sagawa2020investigation}. Accordingly, some studies~\citep{nam2020learning,kim2021learning} attempt to identify and emphasize the bias-conflicting samples. Nevertheless, we find that the debiasing effect is hampered by the low identification accuracy and the suboptimal emphasizing strategies. In this work, we build an enhanced two-stage debiasing scheme to combat unknown dataset biases. We present an Effective bias-Conflicting Scoring (ECS) function to mine bias-conflicting samples in stage \uppercase\expandafter{\romannumeral1}. On top of the off-the-shelf method, we propose a peer-picking mechanism to consciously pursue seriously biased auxiliary models and employ epoch-ensemble to obtain more accurate and stable scores. In stage \uppercase\expandafter{\romannumeral2}, we propose Gradient Alignment (GA), which balances the gradient contributions across the mined bias-aligned and bias-conflicting samples to prevent models from being biased. In order to achieve dynamic balance throughout optimization, the gradient information is served as an indicator to down-weight (up-weight) the mined bias-aligned (bias-conflicting) samples. 
Furthermore, to avoid the models relying solely on simple shortcuts to accomplish the learning objective, we introduce Self-Supervised (SS) pretext tasks in stage \uppercase\expandafter{\romannumeral2}, encouraging richer features to be considered when making decisions. Figure~\ref{fig:intro_effect} depicts the effects of ECS, GA, and SS. In comparison to other debiasing techniques, the proposed solution (i) does not rely on comprehensive bias annotations~\citep{tartaglione2021end,zhu2021learning,li2019repair,Sagawa*2020Distributionally,goel2021model,kim2019learning} or a pre-defined bias type~\citep{bahng2020learning,clark2019don,UtamaDebias2020,geirhos2018imagenet,wang2018learning}; (ii) does not require disentangled representations~\citep{tartaglione2021end,kim2021learning,kim2021biaswap,bahng2020learning}, which may fail in complex scenarios where disentangled features are hard to extract; (iii) does not introduce heavy data augmentations~\citep{geirhos2018imagenet,kim2021biaswap,kim2021learning,goel2021model}, avoiding additional training complexity such as in generative models; (iv) does not involve modification of model backbones~\citep{kim2021learning}, making it easy to be applied to other networks. (v) significantly improves the debiasing performance. The main contributions of this work are summarized as follows: \begin{enumerate}[(1)] \item To combat unknown dataset biases, we present an enhanced two-stage approach (illustrated in Figure~\ref{fig:intro}) in which an effective bias-conflicting scoring algorithm equipped with peer-picking and epoch-ensemble in stage \uppercase\expandafter{\romannumeral1} (in Section~\ref{sec:det}), and gradient alignment in stage \uppercase\expandafter{\romannumeral2} (in Section~\ref{sec:ga}) are proposed. 
\item In stage \uppercase\expandafter{\romannumeral2} (in Section~\ref{sec:ss}), we introduce self-supervised pretext tasks to demonstrate the ability of the unsupervised learning paradigm to alleviate bias in supervised learning. \item Broad experiments on commonly used datasets are conducted to compare several debiasing methods in a fair manner (overall, we train more than 700 models), among which the proposed method achieves state-of-the-art performance (in Section~\ref{sec:exp}). \item We undertake comprehensive analysis (in Section~\ref{sec:further_ana}), including the efficacy of each component, the solution's effectiveness in various scenarios, the sensitivity of the hyper-parameters, and so on. \end{enumerate} A preliminary version of this work has been accepted by a conference~\citep{zhao2023debias}, but we extend this work with the following additions: (i) we further introduce self-supervised pretext tasks to help the models leverage abundant features and investigate their effectiveness with extended experiments (in Section~\ref{sec:ss} and Section~\ref{sec:quan_com}); (ii) a more detailed description and analysis of the datasets and the compared methods are provided (in Section~\ref{sec:datasets} and Section~\ref{sec:com_methods}); (iii) we present and analyze the results measured on the bias-aligned and bias-conflicting test samples separately (in Section~\ref{sec:quan_com}); (iv) we include more detailed results, such as the performance of the last epoch (in Table~\ref{tab:last_comp}), the precision-recall curves of different bias-conflicting scoring strategies (in Figure~\ref{fig:pr_curves}), the precision and recall of our mined bias-conflicting samples (in Table~\ref{tab:complete_pr}), the final debiasing results of GA with different bias-conflicting scoring methods (in Table~\ref{tab:bga_diff_mining}); (v) the analysis and discussion are extended, such as the number of auxiliary biased models (in Section~\ref{sec:hyperparameters}), when there are 
only a few bias-conflicting samples (in Section~\ref{app:rho_analysis}), when the training data is unbiased (in Section~\ref{app:safe}), the connection to curriculum learning (in Section~\ref{app:curriculum}); (vi) the limitation and future work are further discussed (in Section~\ref{sec:discussion}). \section{Related work} \label{sec:related} \textbf{Combating biases with known types and labels.} Many debiasing approaches require explicit bias types and bias labels for each training sample. A large group of strategies aims at disentangling spurious and intrinsic features~\citep{moyer2018invariant}. For example, EnD~\citep{tartaglione2021end} designs regularizers to disentangle representations with the same bias label and entangle features with the same target label; BiasCon~\citep{hong2021unbiased} pulls samples with the same target label but different bias labels closer in the feature space based on contrastive learning; and some other studies learn disentangled representation by mutual information minimization~\citep{zhu2021learning,kim2019learning,ragonesi2021learning}. Another classic approach is to reweigh/resample training samples based on sample number or loss of different explicit groups~\citep{li2018resound,sagawa2020investigation,li2019repair}, or even to synthesize samples~\citep{agarwal2020towards}. Besides, \cite{Sagawa*2020Distributionally} and \cite{goel2021model} intend to improve the worst-group performance through group distributionally robust optimization~\citep{goh2010distributionally} and Cycle-GAN~\citep{zhu2017unpaired} based data augmentation, respectively. Furthermore, IRM~\citep{arjovsky2019invariant} is designed to learn a representation that performs well in all environments; domain-independent classifiers are introduced by~\cite{wang2020towards} to accomplish target tasks in each known bias situation. \begin{figure*} \caption{Our debiasing scheme. 
\textbf{Stage \uppercase\expandafter{\romannumeral1} \label{fig:intro} \end{figure*} \textbf{Combating biases with known types.} To alleviate expensive bias annotation costs, some bias-tailored methods relax the demands by requiring only the bias types~\citep{geirhos2018imagenet}. \cite{bahng2020learning} elaborately design specific networks based on the bias types to obtain biased representations on purpose (\textit{e.g.}, using 2D CNNs to extract static bias in action recognition). Then, the debiased representation is learned by encouraging it to be independent of the biased one. \cite{wang2018learning} try to project the model's representation onto the subspace orthogonal to the texture-biased representation. SoftCon~\citep{hong2021unbiased} serves as an extension of BiasCon to handle cases where only the bias type is available. In addition, the ensemble approach that consists of a bias-type customized biased model and a debiased model is employed in natural language processing as well~\citep{he2019unlearn,clark2019don,cadene2019rubi,UtamaDebias2020,clark2020learning}. \textbf{Combating unknown biases.} Despite the effectiveness of the methodologies described above, the assumptions limit their applications, as manually discovering bias types heavily relies on experts' knowledge and labeling bias attributes for each training sample is even more laborious. As a result, recent studies~\citep{le2020adversarial,kim2019multiaccuracy,hashimoto2018fairness} try to obtain debiased models with unknown biases, which are more realistic. \cite{nam2020learning} mine bias-conflicting samples with generalized cross entropy (GCE) loss~\citep{zhang2018generalized} and emphasize them by using a designed weight assignment function. \cite{kim2021learning} further synthesize diverse bias-conflicting samples via feature-level data augmentation, whereas \cite{kim2021biaswap} directly generate them with SwapAE~\citep{park2020swapping}. 
RNF~\citep{du2021fairness} uses the neutralized representations from samples with the same target label but different bias labels (generated by GCE-based biased models, the version that accesses real bias labels is called RNF-GT) to train the classification head alone. Besides GCE loss, feature clustering~\citep{sohoni2020no}, early-stopping~\citep{liu2021just}, forgettable examples~\citep{yaghoobzadeh2021increasing} and limited network capacity~\citep{sanh2020learning,Utama2020TowardsDN} are involved to identify bias-conflicting samples. Furthermore,~\cite{creager2021environment} and~\cite{lahoti2020fairness} alternatively infer dataset partitions and enhance domain-invariant feature learning by min-max adversarial training. In addition to the identify-emphasize paradigm,~\cite{pezeshki2020gradient} introduces a novel regularization method for decoupling feature learning dynamics in order to improve model robustness. \begin{algorithm*}[t] \caption{\textbf{E}ffective bias-\textbf{C}onflicting \textbf{S}coring (ECS)} \label{alg:det} \KwIn{$\mathcal{D}$=$\{(x^{i},y^{i})\}_{i=1}^N$; initial models $\dot{f}^0$, $\ddot{f}^0$ and b-c scores $\{s^i\gets 0 \}_{i=1}^N$; loss function $\ell$; threshold $\eta$.} \For{$t=0$ \KwTo $T-1$}{ $\mathcal{B} = \{(x^{j}, y^{j})\}_{j=1}^B \gets \text{FetchBatch}(\mathcal{D})$ \tcp{batch size $B$} $ \{ p( y^{j} \vert \dot{f}^{t}(x^{j}) ) \}, \ \{p( y^{j} \vert \ddot{f}^{t}(x^{j}) )\} \gets \text{Forward}(\mathcal{B}, \dot{f}^{t}, \ddot{f}^{t})$ \; $\dot{l}^t \gets 0; \quad \ddot{l}^t \gets 0;$ \tcp{initialize loss} \For{$j=1$ \KwTo $B$}{ \uIf{$p(y^{j} \vert \dot{f}(x^{j})) >\eta \enspace \text{and} \enspace p(y^{j} \vert \ddot{f}(x^{j})) >\eta$}{ $\dot{l}^t \mathrel{+}= \ell(\dot{f}^{t}(x^{j}), y^{j}); \quad \ddot{l}^t \mathrel{+}= \ell(\ddot{f}^{t}(x^{j}), y^{j})$ \; } \uElseIf{$p(y^{j} \vert \dot{f}(x^{j})) >\eta \enspace \text{and} \enspace p(y^{j} \vert \ddot{f}(x^{j})) \leq\eta$}{ $\dot{l}^t \mathrel{-}= \ell(\dot{f}^{t}(x^{j}), 
y^{j})$ \; } \ElseIf{$p(y^{j} \vert \dot{f}(x^{j})) \leq\eta \enspace \text{and} \enspace p(y^{j} \vert \ddot{f}(x^{j})) >\eta$}{ $\ddot{l}^t \mathrel{-}= \ell(\ddot{f}^{t}(x^{j}), y^{j})$ \; } } $\dot{f}^{t+1} \gets \text{Backward\&Update} (\dot{f}^{t}, \frac{\dot{l}^t}{B})$ ;\ \quad $\ddot{f}^{t+1} \gets \text{Backward\&Update} (\ddot{f}^{t}, \frac{\ddot{l}^t}{B})$ \; \If{$(t+1) \% T' =0$}{ \For{$i=1$ \KwTo $N$}{ $s^{i} \mathrel{+}= \frac{T'}{T} [1-\frac{p(y^{i} \vert \dot{f}^{t+1}(x^{i}))+p(y^{i} \vert \ddot{f}^{t+1}(x^{i}))}{2}]$ } } } \KwOut{the estimated b-c scores $\{s^i\}_{i=1}^N$.} \end{algorithm*} \textbf{Self-supervised learning.} In recent years, self-supervised learning has achieved significant success in vision tasks. For applications, self-supervised learning has been employed in object recognition/detection/segmentation~\citep{he2020momentum}, video tasks~\citep{tong2022videomae}, few-shot learning~\citep{gidaris2019boosting}, manipulation detection~\citep{zeng2022towards}, \textit{etc}. For pretext tasks in self-supervised training, position prediction~\citep{doersch2015unsupervised}, Jigsaw puzzles~\citep{noroozi2016unsupervised}, rotation prediction~\citep{gidaris2018unsupervised}, clustering~\citep{van2020scan,caron2020unsupervised}, contrastive learning~\citep{chen2020simple,he2020momentum}, mask and reconstruct~\citep{he2022masked}, \textit{etc.} are adopted to extract transferable representations from the unlabeled data. For the training data, besides learning on unlabeled data, self-supervised learning has also been utilized to pursue more general features with labeled~\citep{khosla2020supervised}, partial labeled~\citep{wang2022towards} or mixed data~\citep{zhai2019s4l}. \section{Methodology} \label{sec:method} The whole debiasing solution is illustrated in Figure~\ref{fig:intro}. 
We present peer-picking, epoch-ensemble for stage \uppercase\expandafter{\romannumeral1} (in Section~\ref{sec:det}), gradient alignment and self-supervised pretext tasks for stage \uppercase\expandafter{\romannumeral2} (in Section~\ref{sec:ga} and Section~\ref{sec:ss}, respectively). \subsection{Effective bias-conflicting scoring} \label{sec:det} Since the explicit bias information is not available, we try to describe how likely input $x$ is a bias-conflicting sample via the \textbf{bias-conflicting (b-c) score}: $s(x,y) \in [0,1]$, where $y \in \{1,2,\cdots,C\}$ stands for the target label. A larger $s(x,y)$ indicates that $x$ is harder to recognize via unintended decision rules. As models are prone to fitting shortcuts, previous studies~\citep{kim2021biaswap,liu2021just} resort to the model's output probability on the target class to define $s(x,y)$ as $ 1 - p(y \vert \dot{f}(x) ), $ where $ p(c\vert \dot{f}(x)) = \frac{e^{\dot{f}(x)[c]}}{\sum_{c'=1}^C e^{\dot{f}(x)[c']}} $, $\dot{f}$ is an auxiliary biased model and $\dot{f}(x)[c]$ denotes the $c^{\text{th}}$ index of logits $\dot{f}(x)$. Despite this, over-parameterized networks tend to ``memorize'' all samples, resulting in low scores for the real bias-conflicting samples as well. To avoid it, we propose the following two strategies. The whole scoring framework is summarized in Algorithm~\ref{alg:det} (noting that the ``for'' loop is used for better clarification, which can be avoided in practice). \textbf{Training auxiliary biased models with peer-picking.} Deliberately amplifying the auxiliary model's bias seems to be a promising strategy for better scoring~\citep{nam2020learning}, as heavily biased models can assign high b-c scores to bias-conflicting samples. We achieve this by \textbf{confident-picking} --- only picking samples with confident predictions (which are more like bias-aligned samples) to update auxiliary models.
Nonetheless, a few bias-conflicting samples can still be overfitted and the memorization will be strengthened with continuous training. Thus, with the assistance of a \textbf{peer model}, we propose \textbf{peer-picking}, a co-training-like~\citep{han2018co} paradigm, to train auxiliary biased models. Our method maintains two auxiliary biased models $\dot{f}$ and $\ddot{f}$ simultaneously (identical structure here). Considering a training set $\mathcal{D}$ = $\{(x^{i},y^{i})\}^N_{i=1}$ with $B$ samples in each batch, with a threshold $\eta \in (0,1)$, each model divides samples into confident and unconfident groups relying on the output probabilities on target classes. Consequently, four clusters are formed as shown in Figure~\ref{fig:intro}. For the red cluster ($\mathcal{O}_1$), since both models are confident on them, it is reasonable to believe that they are indeed bias-aligned samples; we therefore pick them up to update the models via gradient descent as usual (Line 7,12 of Algorithm~\ref{alg:det}). The gray cluster ($\mathcal{O}_2$), on which both models are unconfident, will be discarded outright as they might be bias-conflicting samples. The remaining purple clusters ($\mathcal{O}_3$ and $\mathcal{O}_4$) indicate that some samples may be bias-conflicting, but they are memorized by one of the auxiliary models. Inspired by the work for handling noisy labels~\citep{pmlr-v119-han20c}, we endeavor to force the corresponding model to forget the memorized suspicious samples via gradient ascent (Line 9,11,12). We average the output results of the two heavily biased models $\dot{f}$ and $\ddot{f}$ to obtain b-c scores (Line 15). \textbf{Collecting results with epoch-ensemble.} During the early stage of training, b-c scores $\{s^i\}$ ($s^i$:=$s(x^{i}, y^{i})$) of real bias-conflicting samples are usually higher than those of bias-aligned ones, while the scores may be indistinguishable at the end of training due to overfitting.
Unfortunately, selecting an optimal moment for scoring is strenuous. To avoid tedious hyper-parameter tuning, we collect results every $T'$ iterations (typically every epoch in practice, \textit{i.e.}, $T'=\lfloor \frac{N}{B} \rfloor$) and adopt the ensemble averages of multiple results as the final b-c scores (Line 15). We find that the ensemble can alleviate the randomness of a specific checkpoint and achieve superior results without using tricks like early-stopping. \subsection{Gradient alignment} \label{sec:ga} Then, we attempt to train the debiased model $f$. We focus on an important precondition of the presence of biased models: the training objective can be achieved through unintended decision rules. To avoid it, one should develop a new learning objective that cannot be accomplished by these rules. The most straightforward inspiration is the use of plain reweighting (Rew) to intentionally rebalance sample contributions from different domains~\citep{sagawa2020investigation}: \begin{small} \begin{equation} \mathcal{L}_{Rew} = \sum_{i=1}^{\underline{N}} \frac{\overline{N}}{\gamma \cdot \underline{N}} \cdot \ell(f(\underline{x}^{i}), y^{i}) + \sum_{j=1}^{\overline{N}} \ell(f(\overline{x}^{j}), y^{j}), \label{eq:reweight} \end{equation} \end{small}where $\overline{N}$ and $\underline{N}$ are the numbers of bias-conflicting and bias-aligned samples respectively, $\gamma \in (0, \infty)$ is a reserved hyper-parameter to conveniently adjust the tendency: when $\gamma \rightarrow 0$, models tend to exploit bias-aligned samples more and when $\gamma \rightarrow \infty$, the behavior is reversed. As depicted in Figure~\ref{fig:acc_c_mnist}, assisted with Rew, unbiased accuracy skyrockets in the beginning, indicating that the model tends to learn intrinsic features in the first few epochs, while it declines gradually afterwards, manifesting that the model is biased progressively (adjusting $\gamma$ cannot reverse the tendency).
The above results show that the static ratio between $\overline{N}$ and $\underline{N}$ is not a good indicator to show how balanced the training is, as the influence of samples can fluctuate during training. Accordingly, we are inspired to directly choose gradient statistics as a metric to indicate whether the training is overwhelmed by bias-aligned samples. Let us revisit the commonly used cross-entropy loss: \begin{equation} \ell(f(x), y) = -\sum_{c=1}^{C} \mathbb{I}_{c=y} \log p(c \vert f(x)). \end{equation} For a sample $(x,y)$, the gradient on logits $f(x)$ is given by \begin{equation} \begin{aligned} &\nabla_{f(x)} \ell(f(x), y) =\\ &[ \frac{\partial \ell(f(x), y)}{\partial f(x)[1]}, \frac{\partial \ell(f(x), y)}{\partial f(x)[2]}, \cdots, \frac{\partial \ell(f(x), y)}{\partial f(x)[C]} ]^{\mathsf{T}}. \end{aligned} \end{equation} We define the current gradient contribution of sample $(x,y)$ as \begin{equation} \begin{aligned} g(x,y \vert f) &= \parallel \nabla_{f(x)} \ell(f(x), y) \parallel_1\\ &=\sum_{c=1}^C \vert \frac{\partial \ell(f(x), y)}{\partial f(x)[c]} \vert\\ &= 2 \vert \frac{\partial \ell(f(x), y)}{\partial f(x)[y]} \vert = 2 - 2p(y \vert f(x)). \end{aligned} \end{equation} Assuming within the $t^{\text{th}}$ iteration ($t \in [0, T-1]$), the batch is composed of $\underline{B}^t$ bias-aligned and $\overline{B}^t$ bias-conflicting samples ($B$ in total, $\underline{B}^t \gg \overline{B}^t$ under our concerned circumstance). The accumulated gradient contributions generated by bias-aligned samples are denoted as \begin{equation} \underline{g}^t = \sum_{i=1}^{\underline{B}^t} g(\underline{x}^i,y^i \vert f^t), \end{equation} similarly for the contributions of bias-conflicting samples: $\overline{g}^t$. 
We present the statistics of $\{ \overline{g}^t \}_{t=0}^{T-1}$ and $\{ \underline{g}^t \}_{t=0}^{T-1}$ when learning with the standard ERM learning objective (Vanilla) and Equation~\eqref{eq:reweight} (Rew) respectively in Figure~\ref{fig:grad_c_mnist}. For vanilla training, we find the gradient contributions of bias-aligned samples overwhelm that of bias-conflicting samples at the beginning, thus the model becomes biased towards spurious correlations rapidly. Even though at the late stage, the gap in gradient contributions shrinks, it is hard to rectify the already biased model. For Rew, we find the contributions of bias-conflicting and bias-aligned samples are relatively close at the beginning (compared to those under Vanilla), thus both of them can be well learned. Nonetheless, the bias-conflicting samples are memorized soon due to their small quantity, and the gradient contributions from the bias-conflicting samples become smaller than that of the bias-aligned samples gradually, leading to biased models step by step. \begin{figure} \caption{Unbiased accuracy on Colored MNIST.} \label{fig:acc_c_mnist} \end{figure} \begin{figure} \caption{Statistics of $\{ \overline{g} \label{fig:grad_c_mnist} \end{figure} The above phenomena are well consistent with the accuracy curves in Figure~\ref{fig:acc_c_mnist}, indicating that the gradient statistics can be a useful ``barometer'' to reflect the optimization process. Therefore, the core idea of gradient alignment is to rebalance bias-aligned and bias-conflicting samples according to their currently produced gradient contributions. 
Within the $t^{\text{th}}$ iteration, we define the contribution ratio $r^t$ as: \begin{equation} r^t = \frac{ \overline{g}^t} {\gamma \cdot \underline{g}^t} = \frac{\sum_{j=1}^{ \overline{B}^t } [1 - p(y^j \vert f^{t}(\overline{x}^{j}) ) ]} {\gamma \cdot \sum_{i=1}^{\underline{B}^t} [1 - p(y^i \vert f^{t}(\underline{x}^{i}) ) ]}, \label{eq:r_ga} \end{equation} where $\gamma$ plays a similar role as in Rew. Then, with $r^t$, we rescale the gradient contributions derived from bias-aligned samples to achieve alignment with those from bias-conflicting ones, which can be simply implemented by reweighting the learning objective for the $t^{\text{th}}$ iteration: \begin{equation} \mathcal{L}_{GA}^t = \sum_{i=1}^{\underline{B}^t} r^t \cdot \ell(f^{t}(\underline{x}^{i}), y^{i}) + \sum_{j=1}^{\overline{B}^t} \ell(f^{t}(\overline{x}^{j}), y^{j}), \label{eq:GA} \end{equation} \textit{i.e.}, the modulation weight is adaptively calibrated in each iteration. As shown in Equations~\eqref{eq:r_ga} and~\eqref{eq:GA}, GA only needs negligible extra computational cost (1$\times$ forward and backward as usual, only increases the cost of computing $r^t$). As shown in Figure~\ref{fig:grad_c_mnist}, GA can dynamically balance the contributions throughout the whole training process. Correspondingly, it obtains optimal and stable predictions as demonstrated in Figure~\ref{fig:acc_c_mnist} and multiple other challenging datasets in Section~\ref{sec:exp}. Note that as bias-conflicting samples are exceedingly scarce, it is unrealistic to ensure that every class can be sampled in one batch, thus all classes share the same ratio in our design.
\begin{figure*} \caption{The illustrations of contrastive learning (left) and dense contrastive learning (right).} \label{fig:dcl} \end{figure*} To handle unknown biases, we simply utilize the estimated b-c score $\{s^i\}_{i=1}^N$ and a threshold $\tau$ to assign input $x$ as bias-conflicting ($s(x,y)$ $\geq$ $\tau$) or bias-aligned ($s(x,y)$ \textless $\tau$) here. For clarity, GA with the pseudo annotations (bias-conflicting or bias-aligned) produced by ECS will be denoted as `ECS+GA' (similarly, `ECS+$\triangle$' represents combining ECS with method $\triangle$). \subsection{Self-supervised pretext tasks} \label{sec:ss} The skewed feature representation is an important factor for the biased model. So, if we can help the model learn richer representation, the bias can be alleviated to some extent. Self-supervised learning has received a lot of attention and made significant progress in recent years, allowing the model to learn transferable feature representations based on various image regions. Inspired by the desiderata of debiasing and the ability of self-supervised learning, in this work, we investigate the efficacy of self-supervised learning on labeled data for debiasing. Specifically, we further exploit self-supervision as an auxiliary task in the debiased training scheme to pursue unbiased representations. The workflow is illustrated in Figure~\ref{fig:intro}. As examples, in this work, we employ the dense contrastive learning~\citep{wang2021dense} and the rotation prediction task~\citep{gidaris2018unsupervised} as the pretext tasks. We detail the two tasks below. Other advanced self-supervision techniques can be incorporated into the pipeline similarly. \begin{figure*} \caption{Training examples. 
The height of the cylinder reflects the number of samples, \textit{i.e.} \label{fig:eg_mnist_app} \label{fig:eg_cifar} \label{fig:eg_bird} \label{fig:eg_celeba} \label{fig:example_multi_bias} \label{fig:examples} \end{figure*} \textbf{Dense contrastive learning.} Contrastive learning has achieved considerable success in self-supervised learning, which encourages the features of the positive pair to be close while pushing the representations of the negative pair away. The positive pair is typically formed by the two augmentation views ($\mathcal{A}_0$ and $\mathcal{A}_1$, $\mathcal{A}_a$ stands for the augmentations, here random crop and horizontal flip are employed) of the same image. Following MoCo~\citep{he2020momentum}, contrastive learning can be considered as training an encoder for a dictionary look-up task as shown in Figure~\ref{fig:dcl} (left). For an encoded query $q$ (derived from $\mathcal{A}_0(x)$) and its positive key $k_p$ (derived from $\mathcal{A}_1(x)$), negative keys $\{k_{n_1}, k_{n_2}, \cdots\}$ (maintained in the queue), the contrastive learning loss is formed as: \begin{equation} \ell_{cl} = -\log \frac{e^{q \cdot k_p}}{e^{q \cdot k_p} + \sum_{i} e^{q \cdot k_{n_i}}}. \end{equation} We omit the temperature here for brevity. Commonly, the query and keys are encoded at the level of global feature. To compel the model to use richer features, we adopt a ``dense" version~\citep{wang2021dense} here which considers a dense pairwise contrastive learning task (at the level of local feature) instead of the global image classification. By replacing the global projection head with the dense projection head as depicted in Figure~\ref{fig:dcl} (right), we can obtain a $Z \times Z$ dense feature map. 
The $z^{\text{th}}$ query out of $Z^2$ encoded queries is denoted as $q^z$, its positive key is denoted as $k^z_p$ and a negative key is denoted as $k^z_{n_i}$, then the dense contrastive learning loss is formed as: \begin{equation} \ell_{dcl} = \frac{1}{Z^2} \sum_{z=1}^{Z^2} -\log \frac{e^{q^z \cdot k^z_p}}{e^{q^z \cdot k^z_p} + \sum_{i} e^{q^z \cdot k^z_{n_i}}}. \end{equation} The pair construction of dense contrastive learning follows~\cite{wang2021dense} and~\cite{he2020momentum}. The negative keys are the encoded local features stored in the queue. For the positive key, considering the two views' extracted feature maps before the projection head, by downsampling (average pooling) the pre-project features to also have the shape of $Z\times Z$, a similarity matrix with dimension ${Z^2\times Z^2}$ can be calculated. Suppose that the $j^{\text{th}}$ pre-project feature vector from $\mathcal{A}_1(x)$ is most similar to the $i^{\text{th}}$ pre-project feature vector from $\mathcal{A}_0(x)$. Then for the features after the projection head, we can treat the corresponding $j^{\text{th}}$ post-project feature vector from $\mathcal{A}_1(x)$ as the positive key for the $i^{\text{th}}$ post-project feature vector from $\mathcal{A}_0(x)$. \textbf{Rotation prediction.} The main idea of image rotation prediction is to predict the rotation degree of the deliberately rotated input images, resulting in a 4-class classification problem. The loss function for each sample is formulated by: \begin{equation} \ell_{rot} = \frac{1}{4} \sum_{a=0}^3 \ell(f_{rot}(\mathcal{A}_a (x)), a), \end{equation} where $\{\mathcal{A}_0, \mathcal{A}_1, \mathcal{A}_2, \mathcal{A}_3\}$ is the set of transformations with 4 rotation degrees $\{0^{\circ}, 90^{\circ}, 180^{\circ}, 270^{\circ}\}$ and $\ell$ is the cross-entropy loss. \section{Experiments} \label{sec:exp} \subsection{Datasets} \label{sec:datasets} We mainly conduct experiments on five benchmark datasets.
Some examples from the used datasets are exhibited in Figure~\ref{fig:examples}. For Colored MNIST (C-MNIST), the task is to recognize digits (0 - 9), in which the images of each target class are dyed by the corresponding color with probability $\rho \in \{95\%, 98\%, 99\%, 99.5\%\}$ and by other colors with probability $1-\rho$ (a higher $\rho$ indicates more severe biases). Similarly, for Corrupted CIFAR10, each object class in it holds a spurious correlation with a corruption type. Two sets of corruption protocols are utilized, leading to two biased datasets~\citep{nam2020learning}: Corrupted CIFAR10$^1$ and CIFAR10$^2$ (C-CIFAR10$^1$, C-CIFAR10$^2$) with $\rho \in \{95\%, 98\%, 99\%, 99.5\%\}$. Following previous work~\citep{nam2020learning}, Corrupted CIFAR10$^1$ is constructed with corruption types \{\textit{Snow, Frost, Fog, Brightness, Contrast, Spatter, Elastic, JPEG, Pixelate, Saturate}\}; Corrupted CIFAR10$^2$ is constructed with corruption types \{\textit{GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise, GaussianBlur, DefocusBlur, GlassBlur, MotionBlur, ZoomBlur, Original}\}. In Biased Waterbirds (B-Birds)\footnote{The data is available at \url{https://nlp.stanford.edu/data/dro/waterbird_complete95_forest2water2.tar.gz}.} which is a composite dataset that superimposes foreground bird images from CUB~\citep{welinder2010caltech} onto background environment images from Places~\citep{zhou2017places}, ``waterbirds'' and ``landbirds'' are highly correlated with ``wet'' and ``dry'' habitats (95\% bias-aligned samples, \textit{i.e.}, $\rho=95\%$). Consequently, the task aiming to distinguish images as ``waterbird" or ``landbird" can be influenced by background. 
In Biased CelebA (B-CelebA), which is established for face recognition and in which each image contains multiple attributes~\citep{liu2015deep}\footnote{The data is available at \url{http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html}.}, blond hair is predominantly found in women, whereas non-blond hair mostly appears in men ($\rho=99\%$). When the goal is to classify the hair color as ``blond'' or ``non-blond'', the information of gender (``male'' or ``female'' in this dataset) can serve as a shortcut~\citep{nam2020learning}. To focus on the debiasing problem, we balance the number of images per target class in B-Birds and B-CelebA. \subsection{Compared methods} \label{sec:com_methods} We choose various methods for comparison: standard ERM (Vanilla), Focal loss~\citep{lin2017focal}, plain reweighting~\citep{sagawa2020investigation} (Rew and ECS+Rew), REBIAS~\citep{bahng2020learning}, BiasCon~\citep{hong2021unbiased}, RNF-GT~\citep{du2021fairness}, GEORGE~\citep{sohoni2020no}, LfF~\citep{nam2020learning}, DFA~\citep{kim2021learning}, SD~\citep{pezeshki2020gradient}, ERew~\citep{clark2019don} and PoE~\citep{clark2019don} (ECS+ERew and ECS+PoE)\footnote{As the auxiliary biased models used in ERew and PoE are designed for NLP tasks, here, we combine our ECS with them.}. Among them, REBIAS requires bias types; Rew, BiasCon, RNF-GT, and GA are performed with real bias-conflicting or bias-aligned annotations\footnote{As stated in the original papers, BiasCon and RNF-GT have variations that do not require real annotations, with the assistance of various auxiliary biased models. We only provide the upper bound of these strategies when combating unknown biases, as we found that the auxiliary models have a significant impact on the outcomes.}. We present a brief analysis of these debiasing approaches as follows based on technique categories. \textbf{Reweighting-based strategies}. Rew is a straightforward static reweighting strategy based on the number of samples per group.
Both LfF and ERew reassign sample weights assisted with a biased model but differ in weight assignment functions. ERew is also a static reweighting approach that employs output scores of a pre-trained biased model as the weight indicator. LfF applies dynamic weight adjustments during training. LfF and ERew just reweight a sample with information from the sample itself, whereas Rew uses global information within one minibatch to obtain sample weights\footnote{LfF is implemented at~\url{https://github.com/alinlab/LfF} and ERew is implemented at~\url{https://github.com/UKPLab/emnlp2020-debiasing-unknown}.}. \textbf{Feature disentanglement}. REBIAS designs specific networks according to the bias type for obtaining biased representations intentionally (for our experiments, we employ CNNs with smaller receptive fields for capturing texture bias according to the original paper). Then the debiased representation is learned by encouraging it to be independent of the biased one, during which the Hilbert-Schmidt Independence Criterion (HSIC) is employed to measure the degree of independence between the two representations. Building on LfF, DFA further introduces disentangled representations to augment bias-conflicting samples at the feature level. The methods try to explicitly extract disentangled feature representations, which is difficult to achieve in complex datasets and tasks. BiasCon directly uses contrastive learning to pull the same target class but different bias class sample pairs closer than the other pairs\footnote{ REBIAS is implemented at~\url{https://github.com/clovaai/rebias}. DFA is implemented at~\url{https://github.com/kakaoenterprise/Learning-Debiased-Disentangled}. BiasCon is implemented at~\url{https://github.com/grayhong/bias-contrastive-learning}. }. \textbf{Distributionally robust optimization (DRO)}. Many previous studies resort to DRO to achieve model fairness.
GEORGE performs clustering based on the feature representations of the auxiliary biased models first and then expects to obtain fair models by using DRO with the pseudo groups. However, due to overfitting, we find that clustering with features generated from vanilla biased models is not robust and accurate, resulting in substantially inferior performance when performing DRO using the imprecise clusters\footnote{ We adopt the clustering methods utilized in GEORGE referring to~\url{https://github.com/HazyResearch/hidden-stratification}. }. \textbf{Ensemble approaches}. Product-of-Experts (PoE) is widely adopted in NLP-related debiasing tasks, which tries to train a debiased model in an ensemble manner with an auxiliary biased model, by combining the softmax outputs produced from the biased and debiased models\footnote{The method is implemented at~\url{https://github.com/UKPLab/emnlp2020-debiasing-unknown}. }. \textbf{Regularization methods}. In addition, SD directly replaces the common $l_2$ regularization with an $l_2$ penalty on the model's logits. The optimal strength of the regularization term can be hard to search, which may be very different for various datasets and tasks\footnote{It is implemented at~\url{https://github.com/mohammadpz/Gradient_Starvation}. }. \subsection{Evaluation metrics} Following~\cite{nam2020learning}, we mainly report the overall unbiased accuracy, alongside the accuracy of bias-aligned and bias-conflicting test samples individually. For experiments on Colored MNIST, Corrupted CIFAR10$^1$ and CIFAR10$^2$, we evaluate models on the unbiased test sets in which the bias attributes are independent of the target labels. For Biased Waterbirds and CelebA, to evaluate unbiased accuracy with the official test sets which are biased and imbalanced, the accuracies of each (\textit{target}, \textit{bias}) group are calculated separately and then averaged to generate the overall accuracy~\citep{nam2020learning}. 
We also show the fairness performance in terms of DP and EqOdd~\citep{reddy2021benchmarking}. For the definitions of DP and EqOdd, following~\citet{reddy2021benchmarking}, let $x$, $y$, $b$, $y'$ denote the input, target label, the bias label, and the model's prediction respectively, Demographic Parity (DP) is defined as $1 - \vert p(y'=1 \vert b=1) - p(y'=1 \vert b=0) \vert$; Equality of Opportunity \textit{w.r.t} $y=1$ (EqOpp1) is defined as $1 - \vert p(y' = 1 \vert y = 1, b = 0) - p(y' = 1 \vert y = 1, b = 1) \vert$ and Equality of Opportunity \textit{w.r.t} $y = 0$ (EqOpp0) is defined as $1 - \vert p(y' = 1 \vert y = 0, b = 0) - p(y' = 1 \vert y = 0, b = 1) \vert$, Equality of Odds (EqOdd) is defined as $0.5 \times$(EqOpp0 + EqOpp1). \subsection{Implementation} The studies for the previous debiasing approaches are usually conducted with varying network architectures and training schedules. We run the representative methods with identical configurations to make fair comparisons. We use an MLP with three hidden layers (each hidden layer comprises 100 hidden units) for C-MNIST, except for the biased models in REBIAS (using CNN). ResNet-20~\citep{he2016deep} is employed for C-CIFAR10$^1$ and C-CIFAR10$^2$. ResNet-18 is utilized for B-Birds and B-CelebA. We implement all methods with PyTorch~\citep{paszke2019pytorch} and run them on a Tesla V100 GPU. For experiments on Colored MNIST, we use Adam optimizer to train models for 200 epochs with learning rate 0.001, batch size 256, without any data augmentation techniques. For Corrupted CIFAR10$^1$ and CIFAR10$^2$, models are trained for 200 epochs with Adam optimizer, learning rate 0.001, batch size 256, image augmentation including only random crop and horizontal flip. For Biased Waterbirds and CelebA, models are trained from imagenet pre-trained weights (Pytorch torchvision version) for 100 epochs with Adam optimizer, learning rate 0.0001, batch size 256, and horizontal flip augmentation technique. 
Dense contrastive learning is utilized on B-Birds and rotation prediction is employed on C-CIFAR10$^1$ and C-CIFAR10$^2$ (as we find dense prediction is not suitable for images with very small resolution). The code and README are provided in the supplementary material. \begin{sidewaystable*}[htbp] \centering \begin{minipage}{0.99\textheight} \caption{Overall unbiased accuracy (\%) and standard deviation over three runs. Best results with unknown biases are shown in bold. $^\dag$ indicates that the method requires prior knowledge regarding bias.} \resizebox{\textwidth}{!}{ \begin{tabular}{l | c c c c | c c c c | c c c c | c | c} \toprule & \multicolumn{4}{c|}{Colored MNIST} & \multicolumn{4}{c|}{Corrupted CIFAR10$^1$} & \multicolumn{4}{c|}{Corrupted CIFAR10$^2$} & B-Birds & B-CelebA \\ \multicolumn{1}{c|}{$\rho$} & 95\% & 98\% & 99\% & 99.5\% & 95\% & 98\% & 99\% & 99.5\% & 95\% & 98\% & 99\% & 99.5\% & 95\% & 99\% \\ \midrule Vanilla & 85.7$_{\pm 0.1}$ & 73.6$_{\pm 0.5}$ & 60.7$_{\pm 0.6}$ & 45.4$_{\pm 0.8}$ & 44.9$_{\pm 1.0}$ & 30.4$_{\pm 1.0}$ & 22.4$_{\pm 0.8}$ & 17.9$_{\pm 0.9}$ & 42.7$_{\pm 0.9}$ & 27.2$_{\pm 0.6}$ & 20.6$_{\pm 0.5}$ & 17.4$_{\pm 0.8}$ & 77.1$_{\pm 1.5}$ & 77.4$_{\pm 1.6}$\\ Focal & 86.7$_{\pm 0.2}$ & 75.8$_{\pm 0.6}$ & 62.4$_{\pm 0.3}$ & 45.9$_{\pm 0.9}$ & 45.5$_{\pm 1.0}$ & 30.7$_{\pm 1.1}$ & 22.9$_{\pm 1.1}$ & 17.8$_{\pm 0.5}$ & 41.9$_{\pm 0.5}$ & 26.9$_{\pm 0.5}$ & 21.0$_{\pm 0.6}$ & 17.0$_{\pm 0.2}$ & 78.6$_{\pm 0.7}$ & 78.1$_{\pm 1.0}$ \\ GEORGE & 87.0$_{\pm 0.5}$ & 76.2$_{\pm 0.9}$ & 62.4$_{\pm 0.6}$ & 46.4$_{\pm 0.2}$ & 44.6$_{\pm 1.0}$ & 29.5$_{\pm 1.0}$ & 21.8$_{\pm 0.3}$ & 17.9$_{\pm 0.6}$ & 44.2$_{\pm 1.9}$ & 27.3$_{\pm 1.6}$ & 20.7$_{\pm 1.2}$ & 17.7$_{\pm 0.3}$ & 79.3$_{\pm 0.9}$ & 78.2$_{\pm 0.9}$ \\ LfF & 88.2$_{\pm 0.9}$ & 86.7$_{\pm 0.6}$ & 80.3$_{\pm 1.2}$ & 73.2$_{\pm 0.9}$ & 59.6$_{\pm 0.8}$ & 50.4$_{\pm 0.5}$ & 42.9$_{\pm 2.8}$ & 34.6$_{\pm 2.3}$ & 58.5$_{\pm 0.8}$ & 49.0$_{\pm 0.4}$ & 42.2$_{\pm 1.1}$ & 33.4$_{\pm 1.2}$ 
& 80.4$_{\pm 1.1}$ & 84.4$_{\pm 1.5}$ \\ DFA & 89.8$_{\pm 0.2}$ & 86.9$_{\pm 0.4}$ & 81.8$_{\pm 1.1}$ & 74.1$_{\pm 0.8}$ & 58.2$_{\pm 1.8}$ & 50.0$_{\pm 2.3}$ & 41.8$_{\pm 4.7}$ & 35.6$_{\pm 4.6}$ & 58.6$_{\pm 0.2}$ & 48.7$_{\pm 0.6}$ & 41.5$_{\pm 2.2}$ & 35.2$_{\pm 1.9}$ &79.5$_{\pm 0.7}$ & 84.3$_{\pm 0.6}$ \\ SD & 86.7$_{\pm 0.3}$ & 73.9$_{\pm 0.2}$ & 59.7$_{\pm 0.5}$ & 42.4$_{\pm 1.1}$ & 43.1$_{\pm 0.5}$ & 28.6$_{\pm 1.5}$ & 21.6$_{\pm 0.9}$ & 17.7$_{\pm 0.6}$ & 41.4$_{\pm 0.3}$ & 27.0$_{\pm 0.8}$ & 20.0$_{\pm 0.2}$ & 17.5$_{\pm 0.3}$ & 76.8$_{\pm 1.3}$ & 77.8$_{\pm 1.1}$ \\ ECS+Rew & 91.8$_{\pm 0.2}$ & 88.6$_{\pm 0.7}$ & 84.2$_{\pm 0.3}$ & 78.9$_{\pm 0.9}$ & 58.5$_{\pm 0.0}$ & 47.5$_{\pm 0.6}$ & 38.6$_{\pm 1.1}$ & 33.4$_{\pm 1.2}$ & 61.4$_{\pm 0.7}$ & 53.2$_{\pm 0.3}$ & 47.4$_{\pm 1.2}$ & 40.3$_{\pm 0.6}$ & 82.7$_{\pm 0.7}$ & 88.3$_{\pm 0.4}$ \\ ECS+ERew & 91.0$_{\pm 0.2}$ & 87.5$_{\pm 0.2}$ & 81.4$_{\pm 0.9}$ & 71.3$_{\pm 2.2}$ & 59.8$_{\pm 0.5}$ & 47.9$_{\pm 1.0}$ & 38.5$_{\pm 0.2}$ & 30.2$_{\pm 1.3}$ & 62.2$_{\pm 0.5}$ & 51.1$_{\pm 0.2}$ & 41.4$_{\pm 0.9}$ & 25.9$_{\pm 1.6}$ & 84.9$_{\pm 0.9}$ & 80.5$_{\pm 0.6}$ \\ ECS+PoE & 80.2$_{\pm 1.5}$ & 75.4$_{\pm 1.4}$ & 64.4$_{\pm 2.7}$ & 50.0$_{\pm 3.0}$ & 54.4$_{\pm 0.2}$ & 48.7$_{\pm 1.3}$ & \textbf{45.6}$_{\pm 1.3}$ & \textbf{42.7}$_{\pm 0.8}$ & 47.9$_{\pm 0.8}$ & 40.3$_{\pm 1.3}$ & 36.8$_{\pm 2.5}$ & \textbf{42.4}$_{\pm 2.3}$ & 85.8$_{\pm 0.6}$ & 81.1$_{\pm 0.1}$ \\ ECS+GA & \textbf{92.1}$_{\pm 0.1}$ & \textbf{89.5}$_{\pm 0.4}$ & \textbf{86.4}$_{\pm 0.5}$ & \textbf{79.9}$_{\pm 0.8}$ & \textbf{61.0}$_{\pm 0.1}$ & \textbf{51.7}$_{\pm 0.5}$ & 42.6$_{\pm 0.7}$ & 35.0$_{\pm 0.5}$ & \textbf{64.1}$_{\pm 0.3}$ & \textbf{57.0}$_{\pm 0.6}$ & \textbf{50.0}$_{\pm 1.5}$ & 41.8$_{\pm 0.8}$ & \textbf{86.1}$_{\pm 0.5}$ & \textbf{89.5}$_{\pm 0.5}$\\ \hline \hline $^\dag$REBIAS & 85.5$_{\pm 0.6}$ & 74.0$_{\pm 0.7}$ & 61.1$_{\pm 0.8}$ & 44.5$_{\pm 0.4}$ & 44.8$_{\pm 0.3}$ & 29.9$_{\pm 0.7}$ & 22.4$_{\pm 1.1}$ & 17.7$_{\pm 0.3}$ & 
41.5$_{\pm 1.0}$ & 27.0$_{\pm 0.6}$ & 20.6$_{\pm 0.6}$ & 17.9$_{\pm 0.3}$ & 77.5$_{\pm 0.6}$ & 78.1$_{\pm 1.2}$ \\ $^\dag$Rew & 91.5$_{\pm 0.0}$ & 87.9$_{\pm 0.4}$ & 83.8$_{\pm 0.6}$ & 77.6$_{\pm 0.7}$ & 59.1$_{\pm 0.2}$ & 48.9$_{\pm 0.8}$ & 40.4$_{\pm 0.4}$ & 33.4$_{\pm 1.4}$ & 61.1$_{\pm 0.2}$ & 53.1$_{\pm 0.8}$ & 46.9$_{\pm 1.1}$ & 41.2$_{\pm 0.6}$ & 86.0$_{\pm 0.4}$ & 90.7$_{\pm 0.4}$ \\ $^\dag$RNF-GT & 84.3$_{\pm 4.1}$ & 75.9$_{\pm 3.6}$ & 66.3$_{\pm 8.2}$ & 59.1$_{\pm 5.7}$ & 52.1$_{\pm 0.7}$ & 39.1$_{\pm 1.2}$ & 30.6$_{\pm 1.3}$ & 22.2$_{\pm 0.4}$ & 50.3$_{\pm 1.0}$ & 34.9$_{\pm 0.5}$ & 27.9$_{\pm 0.6}$ & 19.8$_{\pm 0.4}$ & 81.2$_{\pm 1.3}$ & 85.1$_{\pm 2.7}$ \\ $^\dag$BiasCon & 90.9$_{\pm 0.1}$ & 86.7$_{\pm 0.1}$ & 83.0$_{\pm 0.0}$ & 79.0$_{\pm 1.5}$ & 59.0$_{\pm 0.6}$ & 48.6$_{\pm 0.6}$ & 39.0$_{\pm 0.4}$ & 32.4$_{\pm 0.3}$ & 60.0$_{\pm 0.3}$ & 49.9$_{\pm 0.3}$ & 43.0$_{\pm 0.4}$ & 37.4$_{\pm 0.8}$ & 84.1$_{\pm 0.6}$ & 90.4$_{\pm 1.2}$ \\ $^\dag$GA & 92.4$_{\pm 0.3}$ & 89.1$_{\pm 0.2}$ & 85.7$_{\pm 0.4}$ & 80.4$_{\pm 0.5}$ & 61.5$_{\pm 0.8}$ & 52.9$_{\pm 0.3}$ & 43.5$_{\pm 1.6}$ & 33.9$_{\pm 0.8}$ & 64.5$_{\pm 0.2}$ & 56.9$_{\pm 0.2}$ & 51.1$_{\pm 0.3}$ & 43.6$_{\pm 0.8}$ & 87.9$_{\pm 0.5}$ & 92.3$_{\pm 0.2}$ \\ \bottomrule \end{tabular} } \label{tab:overall_acc} \end{minipage} \\ \begin{minipage}{0.99\textheight} \caption{Overall unbiased accuracy and standard deviation of the last epoch over 3 runs (\%). Best results with unknown biases are in bold. 
$^\dag$ indicates that they require prior knowledge regarding biases.} \resizebox{\textwidth}{!}{ \begin{tabular}{l | c c c c | c c c c | c c c c | c | c} \toprule & \multicolumn{4}{c|}{Colored MNIST} & \multicolumn{4}{c|}{Corrupted CIFAR10$^1$} & \multicolumn{4}{c|}{Corrupted CIFAR10$^2$} & B-Birds & B-CelebA \\ \multicolumn{1}{c|}{$\rho$} & 95\% & 98\% & 99\% & 99.5\% & 95\% & 98\% & 99\% & 99.5\% & 95\% & 98\% & 99\% & 99.5\% & 95\% & 99\% \\ \midrule Vanilla & 85.3$_{\pm 0.2}$ & 73.5$_{\pm 0.6}$ & 59.5$_{\pm 0.6}$ & 43.2$_{\pm 1.0}$ & 42.6$_{\pm 0.4}$ & 27.7$_{\pm 1.0}$ & 19.8$_{\pm 1.0}$ & 15.6$_{\pm 0.8}$ & 39.3$_{\pm 0.6}$ & 25.3$_{\pm 1.3}$ & 18.5$_{\pm 0.5}$ & 14.2$_{\pm 0.3}$ & 75.7$_{\pm 0.8}$ & 71.0$_{\pm 1.0}$\\ Focal & 86.7$_{\pm 0.2}$ & 75.2$_{\pm 0.4}$ & 61.7$_{\pm 0.8}$ & 44.2$_{\pm 0.7}$ & 43.6$_{\pm 1.4}$ & 27.6$_{\pm 2.0}$ & 20.9$_{\pm 0.9}$ & 14.8$_{\pm 0.7}$ & 39.2$_{\pm 1.0}$ & 25.3$_{\pm 1.4}$ & 18.2$_{\pm 0.1}$ & 14.5$_{\pm 0.2}$ & 76.4$_{\pm 0.3}$ & 71.8$_{\pm 0.8}$ \\ GEORGE & 86.7$_{\pm 0.2}$ & 74.3$_{\pm 0.8}$ & 60.1$_{\pm 0.9}$ & 43.8$_{\pm 1.2}$ & 41.3$_{\pm 1.7}$ & 27.3$_{\pm 0.3}$ & 19.1$_{\pm 0.1}$ & 14.7$_{\pm 1.0}$ & 41.4$_{\pm 1.1}$ & 25.4$_{\pm 2.4}$ & 18.5$_{\pm 1.6}$ & 14.8$_{\pm 0.9}$ & 76.3$_{\pm 0.3}$ & 70.5$_{\pm 0.4}$ \\ LfF & 78.0$_{\pm 1.8}$ & 75.1$_{\pm 0.3}$ & 68.8$_{\pm 3.1}$ & 67.8$_{\pm 1.5}$ & 56.7$_{\pm 2.1}$ & 49.4$_{\pm 0.7}$ & 39.8$_{\pm 1.9}$ & 32.1$_{\pm 2.0}$ & 57.8$_{\pm 0.8}$ & 47.3$_{\pm 0.2}$ & 40.5$_{\pm 1.4}$ & 31.3$_{\pm 1.0}$ & 76.9$_{\pm 2.0}$ & 61.0$_{\pm 1.2}$ \\ DFA & 83.6$_{\pm 0.9}$ & 81.2$_{\pm 1.9}$ & 76.0$_{\pm 3.2}$ & 65.7$_{\pm 0.8}$ & 54.8$_{\pm 0.5}$ & 47.9$_{\pm 1.8}$ & 39.5$_{\pm 5.1}$ & 33.4$_{\pm 4.0}$ & 56.5$_{\pm 0.9}$ & 46.0$_{\pm 1.2}$ & 39.1$_{\pm 2.7}$ & 33.5$_{\pm 2.6}$ &74.5$_{\pm 1.1}$ & 73.2$_{\pm 3.7}$ \\ SD & 86.3$_{\pm 0.3}$ & 73.6$_{\pm 0.3}$ & 58.4$_{\pm 0.2}$ & 39.9$_{\pm 0.9}$ & 40.5$_{\pm 0.7}$ & 25.0$_{\pm 0.3}$ & 19.3$_{\pm 1.2}$ & 14.9$_{\pm 0.2}$ & 38.5$_{\pm 
0.5}$ & 23.3$_{\pm 0.2}$ & 17.8$_{\pm 0.9}$ & 14.5$_{\pm 0.1}$ & 76.0$_{\pm 0.9}$ & 70.6$_{\pm 0.4}$ \\ ECS+Rew & 87.7$_{\pm 0.2}$ & 79.6$_{\pm 0.2}$ & 67.4$_{\pm 0.7}$ & 58.3$_{\pm 6.0}$ & 56.0$_{\pm 0.4}$ & 42.6$_{\pm 1.1}$ & 34.2$_{\pm 0.5}$ & 29.4$_{\pm 1.8}$ & 56.6$_{\pm 0.4}$ & 48.8$_{\pm 1.1}$ & 39.5$_{\pm 2.8}$ & 31.1$_{\pm 1.4}$ & 77.2$_{\pm 0.6}$ & 80.5$_{\pm 5.9}$ \\ ECS+ERew & 88.8$_{\pm 0.2}$ & 81.7$_{\pm 2.6}$ & 68.5$_{\pm 3.6}$ & 51.4$_{\pm 1.5}$ & 57.2$_{\pm 1.2}$ & 44.1$_{\pm 0.3}$ & 34.6$_{\pm 1.2}$ & 24.0$_{\pm 0.6}$ & 59.7$_{\pm 0.6}$ & 45.6$_{\pm 1.0}$ & 35.2$_{\pm 2.6}$ & 20.8$_{\pm 0.7}$ & 80.8$_{\pm 2.0}$ & 74.9$_{\pm 0.4}$ \\ ECS+PoE & 80.1$_{\pm 1.5}$ & 75.0$_{\pm 1.5}$ & 64.1$_{\pm 2.5}$ & 49.1$_{\pm 2.3}$ & 52.8$_{\pm 0.6}$ & 45.3$_{\pm 2.2}$ & 40.4$_{\pm 1.0}$ & \textbf{39.4}$_{\pm 0.9}$ & 46.7$_{\pm 1.5}$ & 37.9$_{\pm 0.3}$ & 35.7$_{\pm 1.6}$ & 39.6$_{\pm 1.8}$ & 83.2$_{\pm 2.2}$ & 74.5$_{\pm 0.1}$ \\ ECS+GA & \textbf{91.8}$_{\pm 0.2}$ & \textbf{88.9}$_{\pm 1.1}$ & \textbf{84.6}$_{\pm 0.7}$ & \textbf{78.7}$_{\pm 0.5}$ & \textbf{59.6}$_{\pm 0.5}$ & \textbf{50.8}$_{\pm 1.0}$ & \textbf{40.9}$_{\pm 0.3}$ & 34.3$_{\pm 0.4}$ & \textbf{62.2}$_{\pm 0.6}$ & \textbf{55.4}$_{\pm 2.6}$ & \textbf{49.3}$_{\pm 1.2}$ & \textbf{40.5}$_{\pm 0.5}$ & \textbf{85.5}$_{\pm 0.9}$ & \textbf{87.4}$_{\pm 1.8}$\\ \hline \hline $^\dag$REBIAS & 85.3$_{\pm 0.5}$ & 73.4$_{\pm 0.3}$ & 60.8$_{\pm 0.9}$ & 42.8$_{\pm 0.5}$ & 42.8$_{\pm 1.1}$ & 28.5$_{\pm 1.0}$ & 20.2$_{\pm 0.6}$ & 14.9$_{\pm 1.3}$ & 38.9$_{\pm 1.0}$ & 23.8$_{\pm 1.1}$ & 18.3$_{\pm 1.0}$ & 14.3$_{\pm 0.3}$ & 75.5$_{\pm 0.4}$ & 70.4$_{\pm 0.3}$ \\ $^\dag$Rew & 88.1$_{\pm 0.4}$ & 79.2$_{\pm 0.2}$ & 65.5$_{\pm 0.9}$ & 51.8$_{\pm 1.2}$ & 55.9$_{\pm 0.3}$ & 45.3$_{\pm 1.8}$ & 34.7$_{\pm 0.5}$ & 28.5$_{\pm 1.9}$ & 56.6$_{\pm 1.0}$ & 47.5$_{\pm 1.8}$ & 40.3$_{\pm 0.6}$ & 32.7$_{\pm 2.1}$ & 78.8$_{\pm 0.5}$ & 82.8$_{\pm 5.1}$ \\ $^\dag$RNF-GT & 83.0$_{\pm 3.8}$ & 73.8$_{\pm 3.3}$ & 63.8$_{\pm 1.4}$ & 48.8$_{\pm 
1.7}$ & 50.7$_{\pm 0.7}$ & 36.1$_{\pm 1.0}$ & 27.6$_{\pm 1.8}$ & 21.9$_{\pm 0.5}$ & 47.5$_{\pm 0.8}$ & 32.8$_{\pm 1.0}$ & 25.4$_{\pm 0.7}$ & 19.4$_{\pm 0.6}$ & 79.0$_{\pm 1.2}$ & 74.6$_{\pm 2.9}$ \\ $^\dag$BiasCon & 88.0$_{\pm 0.7}$ & 79.4$_{\pm 0.7}$ & 70.9$_{\pm 0.9}$ & 56.3$_{\pm 0.9}$ & 56.2$_{\pm 0.8}$ & 40.9$_{\pm 0.3}$ & 31.6$_{\pm 0.8}$ & 27.1$_{\pm 0.6}$ & 56.3$_{\pm 0.8}$ & 42.0$_{\pm 1.1}$ & 33.4$_{\pm 1.1}$ & 28.3$_{\pm 0.2}$ & 78.5$_{\pm 1.1}$ & 75.2$_{\pm 1.2}$ \\ $^\dag$GA & 92.1$_{\pm 0.4}$ & 88.6$_{\pm 0.3}$ & 84.4$_{\pm 0.4}$ & 77.7$_{\pm 1.3}$ & 59.1$_{\pm 1.3}$ & 49.9$_{\pm 1.6}$ & 41.8$_{\pm 2.2}$ & 32.8$_{\pm 1.0}$ & 62.8$_{\pm 1.0}$ & 55.8$_{\pm 0.3}$ & 50.1$_{\pm 0.8}$ & 42.6$_{\pm 0.9}$ & 87.7$_{\pm 0.5}$ & 91.8$_{\pm 0.4}$ \\ \bottomrule \end{tabular} } \label{tab:last_comp} \end{minipage} \end{sidewaystable*} \subsection{Main results} \label{sec:quan_com} We present the main experimental results in this section. Since the self-supervised pretext tasks increase the training cost (but add no inference latency), we split the comparison into two parts: without the self-supervised pretext tasks (in Section~\ref{sec:res_wo_ss}) and with them (in Section~\ref{sec:res_w_ss}). \subsubsection{Without self-supervision} \label{sec:res_wo_ss} \textbf{The proposed method achieves better performance than others.} The overall unbiased accuracy is reported in Table~\ref{tab:overall_acc}. Vanilla models commonly fail to produce acceptable results on unbiased test sets, and the phenomenon is aggravated as $\rho$ grows larger. Different debiasing methods moderate bias propagation with varying degrees of capability. When compared to other SOTA methods, the proposed approach achieves competitive results on C-CIFAR10$^1$ and noticeable improvements on other datasets across most values of $\rho$. For instance, the vanilla model trained on C-CIFAR10$^2$ ($\rho=99\%$) only achieves 20.6\% unbiased accuracy, indicating that the model is heavily biased. 
In contrast, ECS+GA leads to 50.0\% accuracy, and exceeds other prevailing debiasing methods by 3\%--30\%. When applied to the real-world dataset B-CelebA, the proposed scheme also shows superior results, demonstrating that it can effectively deal with subtle actual biases. Though the main purpose of this work is to combat unknown biases, we find GA also achieves better performance compared to the corresponding competitors when the prior information is available. \begin{figure*} \caption{Bias-aligned accuracy (horizontal-axis) and bias-conflicting accuracy (vertical-axis).} \label{fig:acc_aligned_conflicting} \end{figure*} We provide the accuracy measured on the bias-aligned and bias-conflicting test samples separately in Figure~\ref{fig:acc_aligned_conflicting}. We find ECS+GA can achieve high bias-conflicting accuracy as well as bias-aligned accuracy mostly, leading to superior overall unbiased performance. Note that too high bias-aligned accuracy is not always good. Though the vanilla model can obtain a very high illusory bias-aligned accuracy assisted with biases, it does not learn intrinsic features as shown in Figure~\ref{fig:cam_supp1}, leading to extremely poor out-of-distribution generalization. For instance, although the vanilla model trained on Corrupted CIFAR10$^2$ ($\rho=99\%$) achieves high bias-aligned accuracy (99.6\%), the value (99.6\%) actually reflects the model's ability to discriminate the bias attribute rather than the target attribute. In fact, when training on an unbiased training set ($\rho=10\%$), the corresponding accuracy is only 79.5\%, reflecting that the real target task is harder to learn than the spurious one. \textbf{Plain reweighting is an important baseline.} We find Rew (and ECS+Rew) can achieve surprising results compared with recent state-of-the-art methods, while it is overlooked by some studies. The results also indicate that explicitly balancing the bias-aligned and bias-conflicting samples is extremely important and effective. 
\begin{table}[t] \centering \caption{Performance in terms of DP and EqOdd on Biased Waterbirds (left) and Biased CelebA (right).} \setlength\tabcolsep{3pt} \resizebox{0.999\columnwidth}{!}{ \begin{tabular}{l|cc|cc} \toprule & DP $\uparrow$ & EqOdd $\uparrow$ & DP $\uparrow$ & EqOdd $\uparrow$\\ \midrule Vanilla & 0.57$_{\pm 0.01}$ & 0.57$_{\pm 0.01}$ & 0.43$_{\pm 0.01}$ & 0.43$_{\pm 0.02}$ \\ LfF & 0.63$_{\pm 0.03}$ & 0.61$_{\pm 0.03}$ & \textbf{0.80}$_{\pm 0.06}$ & 0.76$_{\pm 0.07}$ \\ DFA & 0.55$_{\pm 0.01}$ & 0.55$_{\pm 0.02}$ & 0.69$_{\pm 0.01}$ & 0.76$_{\pm 0.06}$ \\ ECS+Rew & 0.61$_{\pm 0.02}$ & 0.60$_{\pm 0.01}$ & 0.59$_{\pm 0.01}$ & 0.68$_{\pm 0.18}$ \\ ECS+GA & \textbf{0.99}$_{\pm 0.01}$ & \textbf{0.99}$_{\pm 0.01}$ & 0.73$_{\pm 0.02}$ & \textbf{0.91}$_{\pm 0.02}$ \\ \bottomrule \end{tabular} } \label{tab:fairness} \end{table} \begin{table*}[t] \centering \caption{The effectiveness of self-supervision. The overall unbiased accuracy and standard deviation of the last epoch over 3 runs (\%) are reported. Best results with unknown biases are in bold. 
$^\dag$ indicates that they require prior knowledge regarding biases.} \resizebox{0.97\textwidth}{!}{ \begin{tabular}{l|cccc|cccc|c} \toprule & \multicolumn{4}{c|}{Corrupted CIFAR10$^1$} & \multicolumn{4}{c|}{Corrupted CIFAR10$^2$} & B-Birds \\ \multicolumn{1}{c|}{$\rho$} & 95\% & 98\% & 99\% & 99.5\% & 95\% & 98\% & 99\% & 99.5\% & 95\% \\ \midrule Vanilla & 42.6$_{\pm0.4}$ & 27.7$_{\pm1.0}$ & 19.8$_{\pm1.0}$ & 15.6$_{\pm0.8}$ & 39.3$_{\pm0.6}$ & 25.3$_{\pm1.3}$ & 18.5$_{\pm0.5}$ & 14.2$_{\pm0.3}$ & 75.7$_{\pm0.8}$ \\ \quad+SS & 51.8$_{\pm0.8}$ & 38.5$_{\pm1.2}$ & 29.8$_{\pm0.9}$ & 23.7$_{\pm0.5}$ & 47.9$_{\pm1.4}$ & 34.5$_{\pm1.0}$ & 26.4$_{\pm0.5}$ & 20.2$_{\pm0.2}$ & 80.8$_{\pm0.6}$ \\ ECS+GA & 59.6$_{\pm0.5}$ & 50.8$_{\pm1.0}$ & 40.9$_{\pm0.3}$ & 34.3$_{\pm0.4}$ & 62.2$_{\pm0.6}$ & 55.4$_{\pm2.6}$ & 49.3$_{\pm1.2}$ & 40.5$_{\pm0.5}$ & 85.5$_{\pm0.9}$ \\ \quad+SS & \textbf{63.5}$_{\pm0.6}$ & \textbf{55.7}$_{\pm2.2}$ & \textbf{50.1}$_{\pm0.6}$ & \textbf{44.5}$_{\pm0.4}$ & \textbf{65.1}$_{\pm1.9}$ & \textbf{59.0}$_{\pm1.2}$ & \textbf{55.0}$_{\pm1.1}$ & \textbf{48.6}$_{\pm0.8}$ & \textbf{86.9}$_{\pm0.3}$ \\ \hline \hline $^\dag$GA & 59.1$_{\pm1.3}$ & 49.9$_{\pm1.6}$ & 41.8$_{\pm2.2}$ & 32.8$_{\pm1.0}$ & 62.8$_{\pm1.0}$ & 55.8$_{\pm0.3}$ & 50.1$_{\pm0.8}$ & 42.6$_{\pm0.9}$ & 87.7$_{\pm0.5}$ \\ \quad+SS & 63.6$_{\pm0.6}$ & 57.0$_{\pm0.5}$ & 52.2$_{\pm1.2}$ & 45.6$_{\pm1.1}$ & 64.7$_{\pm0.9}$ & 59.9$_{\pm0.5}$ & 56.5$_{\pm0.4}$ & 50.6$_{\pm0.7}$ & 89.5$_{\pm0.0}$ \\ \bottomrule \end{tabular} } \label{tab:res_ss} \end{table*} \textbf{Early-stopping is not necessary for GA to select models.} Plain reweighting requires strong regularizations such as early-stopping to produce satisfactory results~\citep{byrd2019effect,Sagawa*2020Distributionally}, implying that the results are not stable. 
Due to the nature of combating unknown biases, the unbiased validation set is not available, thus recent studies choose to report the best results among epochs~\citep{nam2020learning,kim2021learning} for convenient comparison. We follow this evaluation protocol in Table~\ref{tab:overall_acc}. However, in the absence of prior knowledge, deciding when to stop can be troublesome, thus some results in Table~\ref{tab:overall_acc} are excessively optimistic. We claim that if the network can attain dynamic balance throughout the training phase, such early-stopping may not be necessary. We further provide the last epoch results in Table~\ref{tab:last_comp} to validate it. We find that some methods suffer from serious performance degradation. On the contrary, GA achieves steady results (with the same and fair training configurations). In other words, our method shows superiority under two model selection strategies simultaneously. \textbf{The proposed method has strong performance on fairness metrics as well.} As shown in Table~\ref{tab:fairness}, the proposed method also obtains significant improvement in terms of DP and EqOdd. These results further demonstrate that the proposed method is capable of balancing bias-aligned and bias-conflicting samples, as well as producing superior and impartial results. \subsubsection{With self-supervision} \label{sec:res_w_ss} \textbf{Self-supervision improves vanilla training.} As shown in Table~\ref{tab:res_ss}, the self-supervised pretext tasks achieve obvious improvement over vanilla training, demonstrating the effectiveness of self-supervision in the context of debiasing. \textbf{Self-supervision also promotes advanced debiasing methods.} As shown in Table~\ref{tab:res_ss}, the self-supervised pretext tasks also lead to significant gains on the basis of different debiasing methods and on a variety of datasets. 
When the training is heavily biased, the improvement is very significant, \textit{e.g.}, 10.2\% and 8.1\% gains on C-CIFAR10$^1$ and C-CIFAR10$^2$ ($\rho = 99.5\%$) beyond our method ECS+GA, respectively. Due to the low diversity of the bias-conflicting samples within the severely biased training data, the gain of ECS+GA may be limited, but self-supervision helps the model discover more general characteristics from the adequate bias-aligned examples. \section{Further analysis} \label{sec:further_ana} \subsection{ECS shows superior ability to mine bias-conflicting samples} We separately verify the effectiveness of each component of ECS on C-MNIST ($\rho=98\%$) and B-CelebA. A good bias-conflicting scoring method should prompt superior precision-recall curves for the mined bias-conflicting samples, \textit{i.e.}, give real bias-conflicting (aligned) samples high (low) scores. Therefore, we provide the average precision (AP) in Table~\ref{tab:variants} (P-R curves are illustrated in Figure~\ref{fig:pr_curves}). When comparing \#0, \#4, \#5, and \#6, we observe that epoch-ensemble, confident-picking and peer model all can improve the scoring method. In addition, as shown in Table~\ref{tab:overall_acc}, ECS+GA achieves results similar to GA with the help of ECS; ERew, PoE, and Rew combined with ECS also successfully alleviate biases to some extent, demonstrating that the proposed ECS is feasible, robust, and can be adopted in a variety of debiasing approaches. \begin{table*}[t] \centering \captionof{table}{Average precision (\%) of the mined bias-conflicting samples. 
VM: scoring with vanilla model.} \resizebox{0.85\textwidth}{!}{ \begin{tabular}{l|l|c c c|c c} \toprule & & Epoch-Ensemble & Confident-Picking & Peer Model & C-MNIST & B-CelebA \\ \midrule \#0 & VM & & & & 27.0 & 13.3 \\ \#1 & ES (in JTT) & & & & 45.6 & 47.9 \\ \#2 & GCE (in LfF) & & & & 37.0 & 27.8 \\ \#3 & GCE + EE & \checkmark & & & 89.3 & 52.1 \\ \hline \hline \#4 & ECS (Ours) & \checkmark & & & 53.8 & 46.5 \\ \#5 & ECS (Ours) & \checkmark & \checkmark & & 95.0 & 61.5 \\ \#6 & ECS (Ours) & \checkmark & \checkmark & \checkmark & \textbf{98.8} & \textbf{67.6} \\ \bottomrule \end{tabular} } \label{tab:variants} \end{table*} We further compare the methods: \#1 collecting results with early-stopping (ES) in JTT~\citep{liu2021just}, \#2 training auxiliary biased model with GCE loss in LfF (and \#3 collecting results with epoch-ensemble on top of it). When comparing \#1 and \#4, both early-stopping and epoch-ensemble can reduce the overfitting to bias-conflicting samples when training biased models, yielding more accurate scoring results. However, early-stopping is laborious to tune~\citep{liu2021just}, whereas epoch-ensemble is more straightforward and robust. From \#2 and \#3, we see that epoch-ensemble can also enhance other strategies. Comparing \#3 and \#5, GCE loss is helpful, while confident-picking gains better results. Noting that though co-training with peer model raises some costs, it is not computationally complex and can yield significant benefits (\#6), and even without peer model, \#5 still outperform previous ways. Peer models are expected to better prevent bias-conflicting samples from affecting the training, so we can get better auxiliary biased models. Though the only difference between peer models is initialization in our experiments, as DNNs are highly nonconvex, different initializations can lead to different local optimal~\citep{han2018co}. 
We provide the visualizations of the predictions of peer models (during training) in Figure~\ref{fig:peer_vis}. \begin{figure} \caption{Precision-recall curves of different bias-conflicting scoring methods on Colored MNIST ($\rho=98\%$, left) and Biased CelebA (right).} \label{fig:pr_curves} \end{figure} \begin{figure} \caption{Visualizations of the predictions of peer models (during training).} \label{fig:peer_vis} \end{figure} \begin{table}[t] \centering \caption{Results (precision, recall, and unbiased accuracy) of GA combined with different bias-conflicting scoring methods.} \setlength\tabcolsep{3pt} \resizebox{0.49\textwidth}{!}{ \begin{tabular}{ c | c c c | c c c} \toprule & \multicolumn{3}{ c|}{Colored MNIST ($\rho=98\%$)} & \multicolumn{3}{ c}{Biased CelebA} \\ & P (\%) & R (\%) & Acc (\%) & P (\%) & R (\%) & Acc (\%) \\ \midrule \#0+GA & 90.9 & 0.8 & 49.2 & 77.8 & 1.5 & 78.9 \\ \#1+GA & 96.4 & 2.2 & 58.9 & 79.2 & 20.6 & 86.2 \\ \#2+GA & 98.6 & 23.4 & 76.6 & 79.6 & 16.9 & 84.2 \\ \#3+GA & 98.9 & 45.7 & 86.3 & 79.3 & 27.3 & 86.2 \\ \#4+GA & 98.5 & 5.5 & 62.7 & 79.8 & 16.2 & 82.8 \\ \#5+GA & 99.8 & 67.9 & 88.9 & 79.2 & 39.6 & 87.9 \\ \#6+GA & 99.9 & \textbf{84.8} & \textbf{89.5} & 79.1 & \textbf{50.0} & \textbf{89.1} \\ \hline \hline Vanilla & - & - & 73.6 & - & - & 77.4 \\ \bottomrule \end{tabular} } \label{tab:bga_diff_mining} \end{table} We also provide the results of GA combined with the above bias-conflicting scoring variants in Table~\ref{tab:bga_diff_mining} (for fairness, all methods are compared under a similar precision), which show all the proposed components contribute to a more robust model in stage \uppercase\expandafter{\romannumeral2}. Finally, we provide the precision and recall of our mined bias-conflicting samples with the help of ECS and the typical value of $\tau$ (0.8) in Table~\ref{tab:complete_pr}. 
\begin{table}[t] \centering \caption{Precision and recall (\%) of the mined bias-conflicting samples with ECS.} \begin{tabular}{llcc} \toprule & $\rho$ & Precision & Recall \\ \midrule & 95\% & 99.5 & 70.0 \\ Colored & 98\% & 99.9 & 84.8 \\ MNIST & 99\% & 99.8 & 89.3 \\ & 99.5\% & 99.6 & 92.7 \\ \midrule & 95\% & 96.3 & 92.4 \\ Corrupted & 98\% & 92.3 & 94.2 \\ CIFAR10$^1$ & 99\% & 87.6 & 93.2 \\ & 99.5\% & 76.0 & 94.8 \\ \midrule & 95\% & 99.2 & 94.2 \\ Corrupted & 98\% & 98.3 & 94.9 \\ CIFAR10$^2$ & 99\% & 97.3 & 95.0 \\ & 99.5\% & 93.3 & 94.8 \\ \midrule B-Birds & 95\% & 77.7 & 65.2 \\ \midrule B-CelebA & 99\% & 79.1 & 50.0 \\ \bottomrule \end{tabular} \label{tab:complete_pr} \end{table} \begin{figure*} \caption{Ablation on thresholds $\eta$, $\tau$ and balance ratio $\gamma$.} \label{fig:vary_param} \end{figure*} \subsection{Differences between GA and its counterparts} Focal loss, LfF, DFA, and ERew just reweight a sample with the information from itself (individual information), different from them, GA, as well as Rew, use global information within one batch to obtain modulation weight. Correspondingly, the methods based on individual sample information can not maintain the contribution balance between bias-aligned and bias-conflicting samples, which is crucial for this problem as presented in Section~\ref{sec:quan_com}. Different from the static rebalance method Rew, we propose a dynamic rebalance training strategy with aligned gradient contributions throughout the learning process, which enforces models to dive into intrinsic features instead of spurious correlations. Learning with GA, as demonstrated in Figure~\ref{fig:acc_c_mnist} and Table~\ref{tab:last_comp}, produces improved results with no degradation. The impact of GA on the learning trajectory presented in Figure~\ref{fig:grad_c_mnist} also shows that GA can schedule the optimization processes and take full advantage of the potential of different samples. 
Besides, unlike the methods for class imbalance~\citep{cui2019class,tan2021equalization,zhao2020maintaining}, we try to rebalance the contributions of implicit groups rather than explicit categories. \subsection{The sensitivity of the introduced hyper-parameters} \label{sec:hyperparameters} Though the hyper-parameters are critical for methods aimed at combating unknown biases, recent studies~\citep{nam2020learning,kim2021learning} did not include an analysis for them. Here, we present the ablation studies on C-MNIST ($\rho=98\%$) for the hyper-parameters ($\eta$, $\tau$, $\gamma$) in our method as shown in Figure~\ref{fig:vary_param}. We find that the method performs well under a wide range of hyper-parameters. Specifically, for the confidence threshold $\eta$ in ECS, when $\eta$ $\rightarrow$ 0, most samples will be used to train the auxiliary biased models, including the bias-conflicting ones, resulting in low b-c scores for bias-conflicting samples (\textit{i.e.}, low recall of the mined bias-conflicting samples); when $\eta$ $\rightarrow$ 1, most samples will be discarded, including the relative hard but bias-aligned ones, leading to high b-c scores for bias-aligned samples (\textit{i.e.}, low precision). The determination of $\eta$ is related to the number of categories and the difficulty of tasks, \textit{e.g.}, 0.5 for C-MNIST, 0.1 for C-CIFAR10$^1$ and C-CIFAR10$^2$ (10-class classification tasks), 0.9 for B-Birds and B-CelebA (2-class) here. As depicted in Figure~\ref{fig:vary_param}, ECS achieves consistent strong mining performance around the empirical value of $\eta$. We also investigate ECS+GA with varying $\tau$. High precision of the mined bias-conflicting samples guarantees that GA can work in stage \uppercase\expandafter{\romannumeral2}, and high recall further increases the diversity of the emphasized samples. Thus, to ensure the precision first, $\tau$ is typically set to 0.8 for all experiments. 
From Figure~\ref{fig:vary_param}, ECS+GA is insensitive to $\tau$ around the empirical value, however, a too high or too low value can cause low recall or low precision, resulting in inferior performance finally. For the balance ratio $\gamma$, though the results are reported with $\gamma$ = 1.6 for all settings on C-MNIST, C-CIFAR10$^1$ and C-CIFAR10$^2$, 1.0 for B-Birds and B-CelebA, the proposed method is not sensitive to $\gamma$ $\in$ $[1.0, 2.0]$, which is reasonable as $\gamma$ in such region makes the contributions from bias-conflicting samples close to that from bias-aligned samples. \begin{table}[t] \centering \caption{Average precision of the mined bias-conflicting samples on Colored MNIST ($\rho=98\%$).} \begin{tabular}{cccc} \toprule \#auxiliary biased models & 1 & 2 & 4 \\ \midrule AP (\%) & 95.0 & 98.8 & 99.0\\ \bottomrule \end{tabular} \label{tab:num_biased_models} \end{table} We further add an ablation on the number of auxiliary models in Table~\ref{tab:num_biased_models}, showing more auxiliary biased models ($>2$) can get slightly better results. However, more auxiliary models will raise costs simultaneously, so we choose to use two auxiliary models in our design. \begin{table}[t] \centering \caption{Accuracies (\%) on four test groups of Multi-Color MNIST. `$\infty$' states the reported results from DebiAN. 
The first line of the header \textit{w.r.t.} left color bias, the second one \textit{w.r.t.} right color bias.} \setlength\tabcolsep{2pt} \resizebox{\columnwidth}{!}{ \begin{tabular}{lccccc} \toprule & aligned & aligned & conflicting & conflicting & \multirow{2}[1]{*}{Avg.} \\ & aligned & conflicting & aligned & conflicting & \\ \midrule LfF$^{\infty}$ & 99.6 & 4.7 & \textbf{98.6} & 5.1 & 52.0 \\ PGI$^{\infty}$ & 98.6 & 82.6 & 26.6 & 9.5 & 54.3 \\ EIIL$^{\infty}$ & \textbf{100.0} & \textbf{97.2} & 70.8 & 10.9 & 69.7 \\ DebiAN$^{\infty}$ & \textbf{100.0} & 95.6 & 76.5 & 16.0 & 72.0 \\ \midrule ECS+GA & \textbf{100.0} & 89.7 & 96.1 & \textbf{24.3} & \textbf{77.5} \\ \bottomrule \end{tabular} } \label{tab:multi_bias} \end{table} \begin{table}[t] \centering \caption{Unbiased Accuracy (\%) on Colored MNIST with few bias-conflicting samples.} \begin{tabular}{cccc} \toprule $\rho$ & 99.7\% & 99.9\% & 99.95\% \\ \#bias-conflicting samples & 180 & 60 & 30 \\ \midrule Vanilla & 32.8 & 18.3 & 14.1 \\ Rew & 56.0 & 27.3 & 22.9 \\ GA & \textbf{68.0} & \textbf{60.0} & \textbf{53.9} \\ \bottomrule \end{tabular} \label{tab:rho_to1} \end{table} \subsection{When there are multiple biases} Most debiasing studies~\citep{nam2020learning,kim2021learning} only discussed single bias. However, there may be multiple biases, which are more difficult to analyze. To study the multiple biases, we further adopt the Multi-Color MNIST dataset following~\cite{Li_2022_ECCV} which holds two bias attributes: left color ($\rho=99\%$) and right color ($\rho=95\%$), see examples in Figure~\ref{fig:examples}. In such training sets, though it seems more intricate to group a sample as bias-aligned or bias-conflicting (as a sample can be aligned or conflicting \textit{w.r.t.} left color bias or right color bias separately), we still simply train debiased models with GA based on the b-c scores obtained via ECS. We evaluate ECS+GA on four test groups separately and present them in Table~\ref{tab:multi_bias}. 
We find the proposed method also can manage the multi-bias situation. \begin{table}[t] \centering \caption{Accuracy (\%) on the unbiased training data (Colored MNIST, $\rho=10\%$).} \begin{tabular}{ccc} \toprule Vanilla & LfF & ECS+GA \\ \midrule \textbf{97.8} & 95.1 & 96.8 \\ \bottomrule \end{tabular} \label{tab:bal_train} \end{table} \begin{table}[t] \centering \caption{Performing GA with ordered learning on Colored MNIST ($\rho=98\%$). Here, ``Easy'' or ``Hard'' means only using bias-aligned or bias-conflicting training samples to update model.} \resizebox{\columnwidth}{!}{ \begin{tabular}{cc|c} \toprule 1-100 epochs & 101-200 epochs & Unbiased Accuracy (\%) \\ \midrule Vanilla & Vanilla & 73.6 \\ Easy & GA & 80.9 \\ Hard & GA & 87.5 \\ GA & GA & \textbf{89.1} \\ \bottomrule \end{tabular} } \label{tab:cu_learning} \end{table} \subsection{When there are only a few bias-conflicting samples} \label{app:rho_analysis} If the collected training set is completely biased (\textit{i.e.}, $\rho=100\%$), GA is not applicable. So, we want to know how GA performs when there are only a few bias-conflicting samples (\textit{i.e.}, $\rho \rightarrow 100\%$). The results are provided in Table~\ref{tab:rho_to1}, from which we find GA can achieve noticeable improvement even with few bias-conflicting samples. \subsection{When training data is unbiased} \label{app:safe} It is important that the debiasing method is safe, \textit{i.e.}, can achieve comparable results to Vanilla when the training data is unbiased. We conduct experiments on Colored MNIST with $\rho=10\%$ (\textit{i.e.}, an unbiased training set) and the results are shown in Table~\ref{tab:bal_train}. From which, our method degrades slightly and still surpasses the debiasing method LfF. Actually, under an unbiased training set, our method tends to regard hard samples as bias-conflicting in stage \uppercase\expandafter{\romannumeral1}, and emphasize them in stage \uppercase\expandafter{\romannumeral2}. 
\begin{figure*} \caption{Visualized activation maps of different models (last epoch) on Biased Waterbirds.} \label{fig:cam_supp1} \end{figure*} \subsection{Connection to curriculum learning} \label{app:curriculum} Curriculum learning claims that using easy samples first can be superior; on the contrary, anti-curriculum learning argues that employing hard samples first is useful in some situations. We investigate the strategies of ordered learning in the context of debiasing. As presented in Table~\ref{tab:cu_learning}, both learning easy and hard samples first lead to worse results than performing GA throughout the training process, showing that it is important to achieve a balance between bias-conflicting and bias-aligned samples in the whole learning stage. \subsection{Visualization results} We visualize the activation maps via CAM~\citep{zhou2016learning} in Figure~\ref{fig:cam_supp1}. Vanilla models usually activate regions related to biases when making predictions, \textit{e.g.}, the background in B-Birds. LfF and ECS+PoE can focus attention on key areas in some situations, but there are still some deviations. Meanwhile, the proposed ECS+GA and ECS+GA+SS mostly utilize compact essential features to make decisions. \section{Limitation and future work} \label{sec:discussion} Despite the promising results achieved, the debiasing method can be further improved in some aspects. First, our method and many previous approaches (such as LfF, DFA, BiasCon, RNF-GT \textit{etc.}) are based on the assumption that there exist bias-conflicting samples in the training set. Although the assumption is in line with most actual situations, it should be noted that there are some cases where the collected training sets are completely biased (\textit{i.e.}, $\rho=100\%$), in which these methods are not applicable. For these cases, we should pay attention to methods that aim to directly prevent models from only pursuing easier features, such as SD. 
Second, though the proposed ECS achieves significant improvement when compared with previous designs, we find that the bias-conflicting sample mining is not trivial, especially in complex datasets. The precision and recall achieved by our method on Biased Waterbirds and CelebA are still significantly lower than that on simple datasets like Colored MNIST and Corrupted CIFAR10 as shown in Table~\ref{tab:complete_pr}. For extreme cases, if the bias-conflicting scoring system fails, then the effect of GA can be influenced. Therefore, a better bias-conflicting scoring method is helpful and worth continuing to explore. \section{Conclusions} \label{sec:dis} Biased models can cause poor out-of-distribution performance and even negative social impacts. In this paper, we focus on combating unknown biases which is urgently required in realistic applications, and propose an enhanced two-stage debiasing method. In the first stage, an effective bias-conflicting scoring approach containing peer-picking and epoch-ensemble is proposed. Then we derive a new learning objective with the idea of gradient alignment in the second stage, which dynamically balances the gradient contributions from the mined bias-conflicting and bias-aligned samples throughout the learning process. We also incorporate self-supervision into the second stage, assisting in the extraction of features. Extensive experiments on synthetic and real-world datasets reveal that the proposed solution outperforms previous methods. \backmatter \bmhead{Acknowledgments} This work is supported in part by the National Natural Science Foundation of China under Grant 62171248, the R\&D Program of Shenzhen under Grant JCYJ20220818101012025, the PCNL KEY project (PCL2021A07), and Shenzhen Science and Technology Innovation Commission (Research Center for Computer Network (Shenzhen) Ministry of Education). \end{document}
\begin{document} \title{Paraconformal structures, ordinary differential equations and totally geodesic manifolds} \begin{abstract} We construct point invariants of ordinary differential equations that generalise the Cartan invariants of equations of order two and three. The vanishing of the invariants is equivalent to the existence of a totally geodesic paraconformal structure which consists of a paraconformal structure, an adapted $GL(2,\mathbb{R})$-connection and a two-parameter family of totally geodesic hypersurfaces on the solution space. The structures coincide with the projective structures in dimension 2 and with the Einstein-Weyl structures of Lorentzian signature in dimension 3. We show that the totally geodesic paraconformal structures in higher dimensions can be described by a natural analogue of the Hitchin twistor construction. We present a general example of Veronese webs which correspond to the hyper-CR Einstein-Weyl structures in dimension 3. The Veronese webs are described by a hierarchy of integrable systems. \end{abstract} \section{Introduction} A paraconformal structure, or a $GL(2,\mathbb{R})$-structure, on a manifold $M$ is a smooth field of rational normal curves in the tangent bundle $TM$. The structures have been investigated since the seminal paper of Bryant \cite{B} who has related the geometry of four-dimensional $GL(2,\mathbb{R})$-structures to the contact geometry of ordinary differential equations (ODEs) of order four and consequently constructed examples of spaces with exotic holonomies. The result of Bryant can be seen as a generalisation of the paper of Chern \cite{Ch} who has proved that the conformal Lorentzian metrics on three-dimensional manifolds can be obtained from ODEs of order three (see also \cite{FKN}). The higher dimensional cases have been treated by many authors, for example in \cite{Db,DT,GN,N}. 
It is proved that the solution space of an ODE has a canonical paraconformal structure if and only if the W\"unschmann invariants vanish. In the present paper we consider paraconformal structures admitting the following additional structure: an adapted connection $\nabla$ and a 2-parameter family of hypersurfaces totally geodesic with respect to $\nabla$. The structures will be referred to as the \emph{totally geodesic paraconformal structures}. The structures are very well known in low dimensions. Indeed, in dimension 2 the structures coincide with the projective structures \cite{BDE} and in dimension 3 the structures coincide with the Einstein-Weyl structures of Lorentzian signature \cite{D,T}. A unified approach to the projective structures on a plane and to the three-dimensional Einstein-Weyl structures was given in the complex setting by Hitchin \cite{H} in terms of a twistor construction. Much earlier, it was proved by E.~Cartan that in both cases the geometry is related to the point geometry of ODEs \cite{C1,C2}. The solution space of an ODE of order 2 or 3, respectively, has a canonical projective structure or an Einstein-Weyl structure, respectively, if and only if the Cartan invariant or the Cartan and the W\"unschmann invariants, respectively, vanish. The Cartan invariant of the second order ODEs has already been known to Tresse \cite{Kr,Tr}. Our first aim in the present paper is to provide a unified approach to the Cartan invariants of second and third order ODEs given up to point transformations and generalise them to higher order ODEs. The second aim is to analyse the geometry of the totally geodesic paraconformal structures. Finally we consider a general example based on special families of foliations, called Veronese webs, introduced by Gelfand and Zakharevich \cite{GZ} in connection to bi-Hamiltonian structures on odd dimensional manifolds. 
Our first new result is Theorem \ref{thm1b} that gives a characterisation of those paraconformal structures that can be constructed from ODEs. This result concerns the contact geometry of ODEs and, in a sense, completes results of \cite{Db,DT,GN}. Sections \ref{sec_ODEcon}--\ref{sec_gen} concern point geometry of ODEs and are the core of the paper. In particular Theorems \ref{thm_ord2} and \ref{thm_ord3} provide a new approach to the Cartan invariants of ODEs of order 2 and 3 and give new, simpler formulae for the invariants. Theorems \ref{thm_ord4} and \ref{thm_gen} generalise the Cartan invariants to higher dimensions. Section \ref{sec_twistor} is devoted to a natural generalisation of the Hitchin twistor construction. The Hitchin construction involves a two-dimensional manifold and a curve with a normal bundle $O(1)$ or $O(2)$. Clearly one can consider curves with normal bundles $O(k)$, $k>2$. We argue that so-obtained structures correspond to higher-dimensional totally geodesic paraconformal structures. This should be compared to \cite{MP} where the authors are interested in torsion-free connections. On the contrary, generic totally geodesic paraconformal structures considered in the present paper have non-trivial torsion. In Hitchin's paper there is no construction of invariants on the side of ODEs. We concentrate on this issue in the present paper and our invariants characterise those equations for which the solutions are curves with self-intersection number $k$. Section \ref{sec_ricci} is devoted to the Ricci curvature tensor of a totally geodesic paraconformal connection. We prove that the symmetric part of the Ricci curvature tensor is a section of the bundle of symmetric 2-tensors annihilating all null directions of the structure. In dimension 3 the condition is equivalent to the Einstein-Weyl equation. The last section of the paper is devoted to Veronese webs. 
We show that any Veronese web defines a totally geodesic paraconformal structure such that the associated twistor space fibres over $\mathbb{R} P^1$. In particular, Veronese webs in dimension 3 give an alternative description of the hyper-CR Einstein-Weyl structures \cite{D,DK1,DK2}. We prove that in the general case the Veronese webs, or equivalently the totally geodesic paraconformal structures such that the corresponding twistor space fibres over $\mathbb{R} P^1$, are in one-to-one correspondence with the solutions to the system \begin{equation}\label{eq_int_sys} (a_i-a_j)\partial_0w\partial_i\partial_jw+ a_j\partial_iw\partial_j\partial_0w- a_i\partial_jw\partial _i\partial_0w=0,\qquad i,j=1,\ldots,k, \end{equation} where $a_i$ are distinct constants and $w\colon\mathbb{R}^{k+1}\to\mathbb{R}$. In this way we give a geometric meaning to the hierarchy of integrable systems introduced in \cite{DK1}. Other applications of our results to the Veronese webs include: a construction of the canonical connections for the Veronese webs (Theorem \ref{thm3}) and a local characterisation of the flat Veronese webs in terms of the torsion of the canonical connection (Corollary \ref{cor_webs}). Moreover, we give a new, elementary proof of the so-called Zakharevich conjecture \cite{P} (Corollary \ref{cor_webs2}). All these results translate to bi-Hamiltonian structures via the Gelfand-Zakharevich reduction \cite{GZ}. \section{Paraconformal structures and connections}\label{sec_paraconf} Let $M$ be a manifold of dimension $k+1$. A paraconformal structure on $M$ is a vector bundle isomorphism $$ TM\simeq \underbrace{S\odot S\odot\cdots\odot S}_k $$ where $S$ is a rank-two vector bundle over $M$ and $\odot$ denotes the symmetric tensor product. It follows that any tangent space $T_xM$ is identified with the space of homogeneous polynomials of degree $k$ in two variables. 
The natural action of $GL(2,\mathbb{R})$ on $S$ extends to the irreducible action on $TM$ and reduces the full frame bundle to a $GL(2,\mathbb{R})$-bundle. Therefore the paraconformal structures are sometimes called $GL(2,\mathbb{R})$-geometries. We refer to \cite{B,DT} for more detailed descriptions of the paraconformal structures. A paraconformal structure defines the following cone $$ C(x)=\{v\odot\cdots\odot v\ |\ v\in S(x)\}\subset T_xM $$ at each point $x\in M$ and it is an easy exercise to show that the field of cones $x\mapsto C(x)$ defines the paraconformal structure uniquely. If a basis $e_0,e_1$ in $S(x)$ is chosen then any $v\in S(x)$ can be written as $v=se_0+te_1$ and then $$ C(x)=\{s^kV_0+s^{k-1}tV_1+\cdots+t^kV_k\ |\ (s,t)\in\mathbb{R}^2\} $$ where $V_i=\binom{k}{i}e_0^{\odot k-i}\odot e_1^{\odot i}$. We shall denote $$ V(s,t)=s^kV_0+s^{k-1}tV_1+\cdots+t^kV_k $$ and refer to the vectors as null vectors. The cone $C(x)$ defines a rational normal curve $(s:t)\mapsto \mathbb{R} V(s,t)$ of degree $k$ in the projective space $P(T_xM)$. Sometimes, for convenience, we will use an affine parameter $t=(1:t)$ and denote $V(t)=V_0+tV_1+\cdots+t^kV_k$. Derivatives of $V(t)$ with respect to $t$ will be denoted $V'(t)$, $V''(t)$ etc. Let us stress that the parameter $t$ depends on the choice of a basis in $S$. However we shall use it in order to have a convenient description of the paraconformal structure. We will consider connections $\nabla$ on $M$ which are compatible with the projective structure in a sense that the parallel transport preserves the null vectors i.e. it preserves the field of cones $x\mapsto C(x)$. Precisely we have \begin{definition} A connection $\nabla$ is called \emph{paraconformal} for a given paraconformal structure $x\mapsto C(x)$ on a manifold $M$ if $$ \nabla_YV(t)\in\mathrm{span}\{V(t),V'(t)\} $$ for any $t\in\mathbb{R}$ and any vector field $Y$ on $M$. 
\end{definition} From the point of view of $GL(2,\mathbb{R})$-structures, the connections satisfying the above condition are in one-to-one correspondence with the principal $GL(2,\mathbb{R})$-connections. We are interested in the properties of the geodesics of $\nabla$. Therefore, at least at this point, we will not impose any additional assumptions on the torsion of a connection. Let us only remark here that in low dimensions ($k=1,2$) there are plenty of torsion-free connections adapted to a paraconformal structure. On the other hand, already in the case $k=3$ any connection adapted to a \emph{generic} paraconformal structure has torsion but in the most interesting case related to ODEs there is a unique torsion-free connection \cite{B}. Let us fix a point $x\in M$. We define the following 1-parameter family of $i$-dimensional subspaces of $T_xM$ for any number $i\in\{1,\ldots,k\}$ \begin{equation}\label{eq_Vi} \mathcal{V}_i(t)(x)=\mathrm{span}\{V(t)(x),V'(t)(x),V''(t)(x),\ldots,V^{(i-1)}(t)(x)\}. \end{equation} The family $\{\mathcal{V}_i(t)(x)\ |\ t\in\mathbb{R},\ x\in M\}$, for any $i$, is canonically defined by the paraconformal structure itself, although the choice of the parameter $t$ is not canonical. The hyperplanes $\mathcal{V}_k(t)(x)$ will be referred to as $\alpha$-planes of the structure. In what follows we will consider paraconformal structures with an adapted connection such that the $\alpha$-planes are tangent to totally geodesic submanifolds of $M$. Two problems arise. First of all, the subspaces $\mathcal{V}_k$ have to be tangent to submanifolds of $M$ and this issue does not depend on $\nabla$. The second problem is how to make a submanifold totally geodesic with respect to some connection. We will show that there are obstructions for the existence of such connections. In terms of ODEs the obstructions are expressed by new point invariants. 
In order to guarantee the integrability of $\mathcal{V}_k$ we shall consider the following notions \begin{definition} A co-dimension one submanifold $N\subset M$ is called an \emph{$\alpha$-submanifold} of a paraconformal structure on $M$ if all tangent spaces $T_xN$, $x\in N$, are $\alpha$-planes of the paraconformal structure. A paraconformal structure is \emph{$\alpha$-integrable} if any $\alpha$-plane is tangent to some $\alpha$-submanifold of $M$. \end{definition} In the next section we shall prove that all $\alpha$-integrable paraconformal structures can be defined in terms of special ODEs. \section{ODEs and paraconformal structures}\label{sec_ODEinv} Paraconformal structures can be constructed out of ODEs. We will consider ODEs in the following form $$ x^{(k+1)}=F(t,x,x',\ldots,x^{(k)}).\eqno{(F)} $$ The following theorem is a compilation of results of Chern \cite{Ch}, Bryant \cite{B}, Dunajski and Tod \cite{DT} (see also \cite{FKN,GN,N}). \begin{theorem}\label{thm1} If the W\"unschmann invariants of $(F)$ vanish then the solution space of $(F)$ possesses a canonical paraconformal structure. \end{theorem} To explain the meaning of the theorem and give an insight into its proof we recall that the geometry of an ODE of order $k+1$ is described on a manifold of $k$-jets, denoted $J^k(\mathbb{R},\mathbb{R})$. There is a canonical projection $\pi$ from $J^k(\mathbb{R},\mathbb{R})$ to the solution space $M_F$ with one-dimensional fibre tangent to the total derivative vector field $$ X_F=\partial_t+x_1\partial_0+x_2\partial_1+\cdots+F\partial_k, $$ where $t,x_0,x_1,\ldots,x_k$ are standard coordinates on the space of jets and $\partial_i=\frac{\partial}{\partial x_i}$. It follows that $M_F=J^k(\mathbb{R},\mathbb{R})/X_F$. The term \emph{canonical} in the theorem means that the null vectors of the paraconformal structure are tangent to $\pi_*\partial_{k}$. An equation of order $k+1$ has $k-1$ W\"unschmann invariants (or strictly speaking relative invariants). 
In particular there are no invariants for equations of order 2. There is one invariant in order three. This invariant was originally defined by W\"unschmann \cite{Wu} and later used by Chern \cite{Ch}. The two invariants in order four were introduced by Bryant \cite{B}. The general case was treated by Dunajski and Tod \cite{DT}. We use the name W\"unschmann invariants in all cases for convenience and because all invariants have similar nature. Actually, in the linear case, all of them were defined already by Wilczynski \cite{W}. Doubrov \cite{Db,Db2} generalised the Wilczynski invariants to the non-linear case by computing Wilczynski invariants for the linearised equation. It appears that this procedure also gives the W\"unschmann invariants, cf.~\cite{DT}. In what follows we will sometimes say that the W\"unschmann condition (or Bryant condition in the case of order 4) holds if all W\"unschmann invariants (or equivalently the generalised Wilczynski invariants of Doubrov) vanish. In the present paper the following approach to the W\"unschmann invariants will be useful. One looks for sections of $\mathcal{V}=\mathrm{span}\{\partial_k\}$ and $\mathcal{X}_F=\mathrm{span}\{X_F\}$, which are necessarily of the form $g\partial_k$ and $fX_F$ for some functions $f$ and $g$, and imposes the condition \begin{equation}\label{eq_a} \mathrm{ad}_{fX_F}^{k+1}g\partial_k=0\mod g\partial_k,\mathrm{ad}_{fX_F}g\partial_k,\mathrm{ad}_{fX_F}^2g\partial_k,\ldots,\mathrm{ad}_{fX_F}^{k-2}g\partial_k,X_F, \end{equation} where $\mathrm{ad}_XY=[X,Y]$ is the Lie bracket of vector fields and $\mathrm{ad}_X^{i+1}Y=[X,\mathrm{ad}_X^iY]$. One can prove that such $f$ and $g$ always exist (see Proposition 4.1 \cite{K1}) and then $$ \mathrm{ad}_{fX_F}^{k+1}g\partial_k=L_0g\partial_k+L_1\mathrm{ad}_{fX_F}g\partial_k+\cdots+L_{k-2}\mathrm{ad}_{fX_F}^{k-2}g\partial_k\mod X_F. $$ for some coefficients $L_i$. 
Then, there exist rational numbers $c_{ij}\in\mathbb{Q}$ such that the W\"unschmann invariants are given by the formulae $$ W_i=L_i+\sum_{j>i}c_{ij}(fX_F)^{j-i}(L_j). $$ In particular the vanishing of all $W_i$ is equivalent to the vanishing of all $L_i$. The construction described above is a non-linear version of a construction of the Halphen normal form and reproduces the Wilczynski invariants for linear equations \cite{W}. Moreover, if the W\"unschmann invariants vanish then \begin{equation}\label{eq_wun} \mathrm{ad}_{fX_F}^{k+1}g\partial_k=0\mod \mathcal{X}_F \end{equation} and it follows that $g\partial_k$ depends polynomially on a parameter on integral curves of $\mathcal{X}_F$. It implies that the projection of $g\partial_k$ to the solution space $J^k(\mathbb{R},\mathbb{R})/X_F$ defines a field of rational normal curves in $P(TM_F)$. This completes a sketch of the proof of Theorem \ref{thm1}. The theorem can be strengthened to the following theorem \begin{theorem}\label{thm1b} If the W\"unschmann invariants of $(F)$ vanish then the corresponding paraconformal structure on the solution space is $\alpha$-integrable. Conversely, all $\alpha$-integrable paraconformal structures can be locally obtained in this way. \end{theorem} \begin{proof} Let \begin{equation}\label{eq_Dk} \mathcal{D}_k=\mathrm{span}\{\partial_k,\partial_{k-1},\ldots,\partial_{1}\} \end{equation} be an integrable corank 2 distribution on the space of jets $J^k(\mathbb{R},\mathbb{R})$. Note that $\mathcal{D}_k$ is tangent to the fibres of the projection $J^k(\mathbb{R},\mathbb{R})\to J^0(\mathbb{R},\mathbb{R})$. Equivalently $$ \mathcal{D}_k=\mathrm{span}\{g\partial_k,\mathrm{ad}_{fX_F}g\partial_k,\mathrm{ad}_{fX_F}^2g\partial_k,\ldots,\mathrm{ad}_{fX_F}^{k-1}g\partial_k \}\mod X_F, $$ for arbitrary nowhere vanishing functions $f$ and $g$. In particular one can take $f$ and $g$ as in \eqref{eq_wun}. 
Then, it follows from the construction presented above that the projection of $\mathcal{D}_k$ to the solution space $J^k(\mathbb{R},\mathbb{R})/X_F$ gives a 2-parameter family of $\alpha$-submanifolds and any $\alpha$-plane of the paraconformal structure is tangent to some submanifold from this family. Indeed, the null directions of the paraconformal structure are defined by $\pi_*g\partial_k$ and the manifold $M_F$ is the quotient space $J^k(\mathbb{R},\mathbb{R})/X_F$. Thus, $\pi_*\mathcal{D}_k$ are of the form \eqref{eq_Vi}, where as the parameter $t$ one takes a parametrisation of integral lines of $fX_F$. Therefore, the structure is $\alpha$-integrable. In order to prove the second part we shall use \cite{K1}. First of all we associate to a paraconformal structure a pair of distributions $(\mathcal{X},\mathcal{V})$ on the fibre bundle $P(C)$ over $M$, where $P(C)$ is the projectivisation of the null cone of the paraconformal structure. We define $\mathcal{X}$ as the distribution tangent to the fibres of $P(C)$. Thus $\mathrm{rk}\,\mathcal{X}=1$. $\mathcal{V}$ is the tautological distribution on $P(C)\subset P(TM)$. Then $\mathrm{rk}\,\mathcal{V}=2$ and $\mathcal{X}\subset\mathcal{V}$. According to \cite{K1} it is sufficient to prove that the pair $(\mathcal{X},\mathcal{V})$ is of equation type, i.e. $\mathcal{V}$ is locally diffeomorphic to the Cartan distribution on $J^k(\mathbb{R},\mathbb{R})$. 
We first note that due to the fact that $C(x)$ is a rational normal curve of degree $k$ the pair $(\mathcal{X},\mathcal{V})$ defines the following flag \begin{equation}\label{flag} \mathcal{V}\subset\mathrm{ad}_\mathcal{X}\mathcal{V}\subset\ldots\subset\mathrm{ad}^{k-1}_\mathcal{X}\mathcal{V}\subset\mathrm{ad}^k_\mathcal{X}\mathcal{V}=TP(C) \end{equation} where $\mathrm{rk}\,\mathrm{ad}^i_\mathcal{X}\mathcal{V}=i+2$ and for two distributions $\mathcal{Y}_1$ and $\mathcal{Y}_2$ we define $$ [\mathcal{Y}_1,\mathcal{Y}_2]=\mathrm{span}\{[Y_1,Y_2]\ |\ Y_1\in\Gamma(\mathcal{Y}_1),\ Y_2\in\Gamma(\mathcal{Y}_2)\} $$ and then inductively $\mathrm{ad}^{i+1}_{\mathcal{Y}_1}\mathcal{Y}_2=[\mathcal{Y}_1,\mathrm{ad}^i_{\mathcal{Y}_1}\mathcal{Y}_2]$ (c.f. \cite{K1}). In terms of \eqref{eq_Vi} $$ (\mathrm{ad}^i_\mathcal{X}\mathcal{V})(x,t)=\pi_*^{-1}(\mathcal{V}_{i+1}(x)(t)) $$ where $\pi\colon P(C)\to M$ is the projection, $x\in M$ and $t\in P(C)(x)$ is an affine coordinate. Further, since $P(C)$ parametrises all $\alpha$-planes, the $\alpha$-integrability of the structure gives a foliation of $P(C)$ of co-dimension 2. The tangent bundle of the foliation is an integrable sub-distribution $\mathcal{F}\subset \mathrm{ad}^{k-1}_\mathcal{X}\mathcal{V}$. We set $\mathcal{F}_{k-1}=\mathcal{F}$ and define $$ \mathcal{F}_{i-1}=\mathrm{span}\{Y\in\Gamma(\mathcal{F}_i)\ |\ [X,Y]\in\Gamma(\mathrm{ad}_\mathcal{X}^i\mathcal{V}),\ X\in\Gamma(\mathcal{X})\},\qquad i=k-1,\ldots,1. $$ Then $\mathcal{F}_{i-1}$ is a sub-distribution of $\mathrm{ad}_\mathcal{X}^{i-1}\mathcal{V}$ of co-rank 1, because $$ \mathrm{ad}_\mathcal{X}^{i-1}\mathcal{V}=\mathrm{span}\{Y\in\Gamma(\mathrm{ad}^i_\mathcal{X}\mathcal{V})\ |\ [X,Y]\in\Gamma(\mathrm{ad}_\mathcal{X}^i\mathcal{V}),\ X\in\Gamma(\mathcal{X})\} $$ and $\mathcal{F}$ is of co-rank 1 in $\mathrm{ad}_\mathcal{X}^{k-1}\mathcal{V}$. We shall prove that $\mathcal{F}_{i-1}$ is integrable provided that $\mathcal{F}_i$ is integrable. 
Let $Y_1, Y_2\in\Gamma(\mathcal{F}_{i-1})$. Then $[Y_1,Y_2]$ is a section of $\mathcal{F}_i$ because $\mathcal{F}_{i-1}\subset\mathcal{F}_i$. Moreover, since $Y_j\in\Gamma(\mathrm{ad}_\mathcal{X}^{i-1}\mathcal{V})$, $j=1,2$, we have $[X,Y_j]\in\Gamma(\mathrm{ad}^i_\mathcal{X}\mathcal{V})$. Hence, $[X,Y_j]=Z_j+f_jX$, $j=1,2$, where $Z_j\in\Gamma(\mathcal{F}_i)$ and $f_j$ is a function. The Jacobi identity reads $$ [X,[Y_1,Y_2]]=[Z_1, Y_2]+[Y_1, Z_2]+f_1[X,Y_2]-f_2[X,Y_1]\mod \mathcal{X}. $$ The right hand side is a section of $\mathrm{ad}_\mathcal{X}^i\mathcal{V}$. Thus, $[Y_1,Y_2]$ is a section of $\mathcal{F}_{i-1}$ and consequently $\mathcal{F}_{i-1}$ is integrable. We have proved that all $\mathrm{ad}^i_\mathcal{X}\mathcal{V}$ contain integrable, co-rank one sub-distributions $\mathcal{F}_i$ such that $\mathrm{ad}^i_\mathcal{X}\mathcal{V}=\mathcal{F}_i\oplus\mathcal{X}$. It follows that $[\mathrm{ad}^i_\mathcal{X}\mathcal{V},\mathrm{ad}^i_\mathcal{X}\mathcal{V}]=[\mathcal{X},\mathrm{ad}_\mathcal{X}^i\mathcal{V}]=\mathrm{ad}^{i+1}_\mathcal{X}\mathcal{V}$. Thus \eqref{flag} is a regular Goursat flag, cf.~\cite{A,MZ}, since $\mathrm{rk}\,\mathrm{ad}^i_\mathcal{X}\mathcal{V}=i+2$. This completes the proof. \end{proof} \begin{remark} Theorem \ref{thm1b} can be considered as a generalisation to higher dimensions of the 3-dimensional case \cite{Ch,FKN}. Indeed, in dimension 3 all paraconformal structures can be obtained from ODEs \cite{FKN} and in this dimension all paraconformal structures are $\alpha$-integrable. In higher dimensions one needs to assume that a paraconformal structure is $\alpha$-integrable in order to be defined by an ODE. \end{remark} The construction of the W\"unschmann invariants presented above can be split into two steps. One looks first for a function $g$ and then for $f$. The first step already gives interesting results. 
Namely, \eqref{eq_a} can be weakened to \begin{equation}\label{eq_b} \mathrm{ad}_{X_F}^{k+1}g\partial_k=0\mod g\partial_k,\mathrm{ad}_{X_F}g\partial_k,\mathrm{ad}_{X_F}^2g\partial_k,\ldots,\mathrm{ad}_{X_F}^{k-1}g\partial_k \end{equation} and such $g$ always exists. This gives $k$ coefficients $K_0,K_1,\ldots,K_{k-1}$ defined by the formula $$ \mathrm{ad}_{X_F}^{k+1}g\partial_k= -K_0g\partial_k+K_1\mathrm{ad}_{X_F}g\partial_k-K_2\mathrm{ad}_{X_F}^2g\partial_k+\ldots+(-1)^{k-1}K_{k-1}\mathrm{ad}_{X_F}^{k-1}g\partial_k $$ (we add the minus signs for convenience). The coefficients, called curvatures in \cite{J,JK}, have well defined geometric meaning. They are invariant with respect to contact transformations that do not change the independent variable $t$. The class of transformations was called time-preserving contact transformations (or contact-affine transformations) in \cite{JK}. The class gives a natural framework in the context of control mechanical systems \cite{J,JK}, Finsler geometry (in this case $K_0$ is the flag curvature) and webs \cite{K2}. In the present paper we will use the invariants $K_i$ to write down more complicated objects in a simple form (compare \cite{G} in the case of second order). The curvatures $K_i$ can be explicitly computed in terms of the original equation $(F)$ using \cite[Proposition 2.9]{JK}. We will provide the formulae in the case of equations of order 2, 3 and 4 in Appendix \ref{ap_formulae}. A function $g$ defined by \eqref{eq_b} is a non-trivial solution to \begin{equation}\label{eq_g} X_F(g)=\frac{g}{k+1}\partial_kF. \end{equation} We will use the notation $$ V=g\partial_k. $$ In the subsequent sections we will extensively use the Lie derivative $\mathcal{L}_{X_F}$ acting on different objects. If not mentioned otherwise the terms ``derivative'' or ``differentiation'' will refer to $\mathcal{L}_{X_F}$. Moreover, we will denote differentiations by adding primes to the objects. 
In particular we will have $$ V'=\mathrm{ad}_{X_F}V,\quad V''=\mathrm{ad}_{X_F}^2V,\quad\ldots\quad, V^{(j)}=\mathrm{ad}_{X_F}^jV $$ for the vector field $V$ or $$ K_i'=X_F(K_i),\quad K_i''=X_F^2(K_i),\quad\ldots\quad, K_i^{(j)}=X_F^j(K_i) $$ for the curvatures $K_i$. \section{ODEs and connections}\label{sec_ODEcon} We assume that an ODE $(F)$ defines a paraconformal structure on $M_F$ via Theorem~\ref{thm1}, i.e. all W\"unschmann invariants vanish. Let us introduce on the space of jets $J^k(\mathbb{R},\mathbb{R})$ the following integrable distributions (generalising \eqref{eq_Dk}) \begin{equation}\label{eq_Di} \mathcal{D}_i=\mathrm{span}\{\partial_k,\partial_{k-1},\ldots,\partial_{k-i+1}\}=\mathrm{span}\{V,V',\ldots,V^{(i-1)}\} \end{equation} which are tangent to the fibres of the natural projections $J^k(\mathbb{R},\mathbb{R})\to J^{k-i}(\mathbb{R},\mathbb{R})$, for $i=1,\ldots,k$. The distributions can be projected to the solution space $M_F$. The projections give exactly the subspaces $\mathcal{V}_i\subset TM_F$ defined before by formula \eqref{eq_Vi} for a paraconformal structure. Therefore, one can ask if the projection of leaves of $\mathcal{D}_k$ defines a two-parameter family of totally geodesic hypersurfaces in $M$. If yes, then we shall consider ODEs up to point transformations, i.e. transformations of variables $t$ and $x$ only, because in terms of jets we get precisely contact transformations preserving $\mathcal{D}_k$. It follows that there is a double fibration picture \begin{equation}\label{diag_fibr} M_F\longleftarrow J^k(\mathbb{R},\mathbb{R})\longrightarrow B \end{equation} where $B=J^0(\mathbb{R},\mathbb{R})$ is the space where $(F)$ is defined and $M_F$ is the solution space as before. 
\begin{definition} A class of point equivalent equations admits a \emph{totally geodesic paraconformal connection} if the projections of the integral manifolds of $\mathcal{D}_k$ to the solution space $M_F$ are totally geodesic submanifolds with respect to a paraconformal connection on $M_F$. \end{definition} In order to construct a paraconformal connection on $M_F$ we will construct a connection on $J^k(\mathbb{R},\mathbb{R})$ which is ``invariant'' along $\mathcal{X}_F$ and then we will project it to $M_F$. Precisely, if $\nabla$ is a connection on $J^k(\mathbb{R},\mathbb{R})$ then we would like to define a connection $\tilde \nabla$ on $M_F$ by the formula \begin{equation}\label{proj_nabla} \tilde\nabla_{Y_1}Y_2=\pi_*\nabla_{\pi_*^{-1}Y_1}\pi_*^{-1}Y_2. \end{equation} The definition is correct only for special $\nabla$. There are two difficulties. Firstly, the lifts $\pi_*^{-1}Y_i$ are given modulo $\mathcal{X}_F$ only. Secondly, $\nabla$ may depend on a point in the fibre of $\pi$. To overcome the difficulties we need several additional conditions. \begin{lemma}\label{lemma0} A connection $\nabla$ on $J^k(\mathbb{R},\mathbb{R})$ defines a connection $\tilde \nabla$ on $M_F$ via \eqref{proj_nabla} if and only if \begin{enumerate} \item $\nabla_YX=0\mod \mathcal{X}_F$, \item $\nabla_{X}Y=[X,Y]\mod \mathcal{X}_F$, \item $\mathcal{L}_X\nabla Y=\nabla [X,Y]\mod \Omega^1(J^k(\mathbb{R},\mathbb{R}))\otimes\mathcal{X}_F$, \end{enumerate} where $X$ is an arbitrary section of $\mathcal{X}_F$ and $Y$ is an arbitrary vector field on $J^k(\mathbb{R},\mathbb{R})$. \end{lemma} \begin{proof} The first two conditions are equivalent to the fact that $\nabla_{\pi_*^{-1}Y_1}\pi_*^{-1}Y_2 \mod\mathcal{X}_F$ does not depend on the lift of $Y_1$ or $Y_2$ to $J^k(\mathbb{R},\mathbb{R})$. The third condition (together with the first one) is equivalent to the fact that $\mathcal{L}_X\nabla Y=0\mod\mathcal{X}_F$ for $Y$ being a lift of a vector field on $M_F$. 
It means that $\pi_*\nabla Y$ is well defined independently of the point in the fibre of $\pi$, hence defines a connection on $M_F$. \end{proof} If we assume that equation $(F)$ satisfies the W\"unschmann condition and a connection $\nabla$ on $J^k(\mathbb{R},\mathbb{R})$ satisfies the three conditions given in Lemma \ref{lemma0}, then the connection $\tilde \nabla$ on $M_F$ will be compatible with the paraconformal structure on $M_F$ defined by $(F)$ if and only if \begin{equation}\label{cond_nabla} \nabla V=\alpha V+\beta V', \end{equation} for two one-forms $\alpha$ and $\beta$ on $J^k(\mathbb{R},\mathbb{R})$. \begin{lemma}\label{lemma1} The one-forms $\alpha$ and $\beta$ satisfy the following system of differential equations \begin{eqnarray} &&\alpha'+k\beta''=0,\nonumber\\ &&\left(\binom{k+1}{j-1}-\frac{k}{2}\binom{k+1}{j}\right)\beta^{(k-j+2)} +(-1)^{j+1}K_j'\beta+(-1)^{j+1}(k-j+1)K_j\beta'\nonumber \\ &&\qquad=(-1)^{j+1}dK_j +\sum_{l=j+1}^{k-1}(-1)^{l+1} \left(\binom{l}{j-1}-\frac{k}{2}\binom{l}{j}\right)K_l\beta^{(l-j+1)}\label{system} \end{eqnarray} for $j=0,\ldots,k-1$. \end{lemma} \begin{proof} A consecutive application of Lemma \ref{lemma0} gives $\mathcal{L}^i_{X_F}\nabla V=\nabla V^{(i)} \mod \mathcal{X}_F$. This written in terms of $\alpha$ and $\beta$ reads \begin{equation}\label{eq_nabla} \nabla V^{(i)}=\sum_{j=0}^{i+1}\left(\binom{i}{j}\alpha^{(i-j)}+ \binom{i}{j-1}\beta^{(i-j+1)}\right)V^{(j)}. \end{equation} The formula is valid for all $i$. For $i=1,\ldots,k$ it defines the connection uniquely (note that for $i=k$ the formula involves $K_j$'s via the last term $V^{(k+1)}=\sum_{j=0}^{k-1}(-1)^{j+1}K_jV^{(j)}$) and for $i=k+1$ it gives a set of conditions that should be satisfied by $\alpha$ and $\beta$. 
The conditions are as follows \begin{eqnarray*} &&\binom{k+1}{j}\alpha^{(k-j+1)}+\binom{k+1}{j-1}\beta^{(k-j+2)} +(-1)^{j+1}K_j'\beta+(-1)^{j+1}(k-j+1)K_j\beta' \\ &&\qquad=(-1)^{j+1}dK_j +\sum_{l=j+1}^{k-1}(-1)^{l+1} \left(\binom{l}{j}\alpha^{(l-j)}+ \binom{l}{j-1}\beta^{(l-j+1)}\right)K_l \end{eqnarray*} for $j=0,\ldots,k$. In particular, for $j=k$ we get $$ 2\alpha'=-k\beta'' $$ and using it we can eliminate derivatives of $\alpha$ from the remaining equations and obtain \eqref{system} as a result. \end{proof} We get the following result \begin{theorem}\label{thm2} An ODE of order $k+1$ with the vanishing W\"unschmann invariants admits a totally geodesic paraconformal connection if and only if there exists a one-form $\beta$ on $J^k(\mathbb{R},\mathbb{R})$ satisfying \begin{equation}\label{eq_gen} -\frac{1}{2}\binom{k+2}{3}\beta'''+(-1)^kK_{k-1}'\beta+2(-1)^kK_{k-1}\beta'=(-1)^kdK_{k-1} \end{equation} and \begin{equation}\label{cond_gen} \beta(V)=\beta(V')=\cdots=\beta(V^{(k-1)})=0. \end{equation} \end{theorem} \begin{proof} Assume first that an ODE admits a totally geodesic paraconformal connection. Then by Lemma \ref{lemma1} it satisfies System \eqref{system}. In particular, for $j=k-1$ one gets \eqref{eq_gen}. Moreover one should have $$ \nabla_{V^{(j)}}V^{(k-1)}\in \mathcal{D}_k\mod \mathcal{X}_F $$ for all $j\leq k-1$. But, it follows from \eqref{eq_nabla} that the coefficient of $V^{(k)}$ in $\nabla_{V^{(j)}}V^{(k-1)}$ is exactly the one-form $\beta$ evaluated on $V^{(j)}$. Therefore $\beta(V)=\beta(V')=\cdots=\beta(V^{(k-1)})=0$. In order to prove the theorem in the opposite direction it is sufficient to show that if \eqref{eq_gen} has a solution $\beta$ and the W\"unschmann invariants vanish then $\beta$ also solves all the other equations from the System \eqref{system}. But in Lemma \ref{lemma0} one can use an arbitrary section of $\mathcal{X}_F$ instead of $X_F$. 
It is convenient to make all computations using a multiple of $X_F$ by a function $f$ as in \eqref{eq_wun}. Such a function $f$ exists due to the W\"unschmann condition. If \eqref{eq_wun} is satisfied then all $K_i$ in \eqref{system} are zero and the System \eqref{system} takes the form $\beta^{(k-j+2)}=0$, $j=0,\ldots,k-1$. The system clearly has a solution. \end{proof} \begin{remark} A reasoning similar to the proof of Theorem \ref{thm2} implies that the projections to $M_F$ of the integral manifolds of $\mathcal{D}_i$ are totally geodesic for a paraconformal connection if $\beta(V)=\beta(V')=\cdots=\beta(V^{(i-1)})=0$. In particular, if projections of the integral manifolds of $\mathcal{D}_i$ are totally geodesic then also projections of the integral manifolds of $\mathcal{D}_j$ for $j<i$ are totally geodesic. Let us use \eqref{eq_nabla} again and compute the following torsion coefficient \begin{eqnarray*} &&T(\nabla)(V^{(i)},V^{(i+1)})=\nabla_{V^{(i)}}V^{(i+1)}-\nabla_{V^{(i+1)}}V^{(i)}-[V^{(i)},V^{(i+1)}]\\ &&\qquad=\beta(V^{(i)})V^{(i+2)}\mod\mathcal{D}_{i+2}. \end{eqnarray*} The last equality holds because $[V^{(i)},V^{(i+1)}]\in\mathcal{D}_{i+2}$. The expression has sense for $i=0,\ldots,k-2$ and it follows that the condition $\beta(V)=\beta(V')=\cdots=\beta(V^{(i)})=0$ is expressed in terms of the torsion $T(\nabla)(V^{(i)},V^{(i+1)})$ for $i=0,\ldots,k-2$. However, the condition \eqref{cond_gen} for $i=k-1$ has a different nature. \end{remark} \begin{remark} Instead of using in the proof of Theorem \ref{thm2} the vector field $fX_F$ satisfying \eqref{eq_wun} one can differentiate \eqref{eq_gen} sufficiently many times and subtract it from the remaining equations from \eqref{system} in such a way that the highest derivatives of $\beta$ are eliminated. Then one will recover the W\"unschmann condition as vanishing of coefficients next to the derivatives of $\beta$ of lower order. Conditions are given in terms of $K_i$'s. 
In particular in the case of an equation of order 3 we get \begin{equation}\label{wun_ord3} K_0+\frac{1}{2}K_1'=0 \end{equation} and it can be checked that $W_0=K_0+\frac{1}{2}K_1'$ is really the W\"unschmann invariant ($K_0$ and $K_1$ are given explicitly below in Appendix \ref{ap_formulae}). In the case of order 4 we get \begin{eqnarray} &&K_0+\frac{3}{10}K_1'-\frac{9}{100}K_2^2=0,\label{wun0_ord4}\\ &&K_1+K_2'=0.\label{wun1_ord4} \end{eqnarray} We have computed that the conditions coincide with the conditions in \cite[Theorem 1.3]{DT} and consequently with \cite{B} (again, $K_0$, $K_1$ and $K_2$ are given explicitly below in Appendix \ref{ap_formulae}). Namely \eqref{wun1_ord4} is exactly the second condition in \cite{DT} and $W_1=K_1+K_2'$ is the W\"unschmann invariant. The first condition in \cite{DT} has the form $$ K_0+K_1'+\frac{7}{10}K_2''-\frac{9}{100}K_2^2-\frac{1}{4}\partial_3F(K_1+K_2')=0 $$ which is \eqref{wun0_ord4} modulo \eqref{wun1_ord4} and the derivative of \eqref{wun1_ord4}. In the general case the simplest W\"unschmann condition has the form \begin{equation}\label{wun_gen} K_{k-2}+\frac{k-1}{2}K_{k-1}'=0. \end{equation} The others are more complicated, but we will not need them in the explicit form. \end{remark} \section{Twistor correspondence}\label{sec_twistor} The condition \eqref{cond_gen} means that the one-form $\beta$ is a pullback of a one-form defined on the space $B=J^0(\mathbb{R},\mathbb{R})$. One can call $B$ the twistor space. Indeed, due to the double fibration \eqref{diag_fibr} a point in $B$ can be considered as a hypersurface in $M_F$ and a point in $M_F$ is represented by a curve in $B$ which is a solution to $(F)$. There is a $(k+1)$-parameter family of such curves corresponding to different points in $M_F$. In the complex setting one can repeat the reasoning of \cite[Section 5]{H}. One considers a complex surface $B$ and a curve $\gamma\subset B$ with a normal bundle $N_\gamma\simeq O(k)$. 
Then $H^0(\gamma,N_\gamma)=\mathbb{C}^{k+1}$ and $H^1(\gamma,N_\gamma)=0$. Therefore by the Kodaira theorem one gets a $(k+1)$-dimensional complex manifold $M$ parametrising a family of curves in $B$ with self intersection number $k$. One can see a paraconformal structure in this picture, geodesics of an adapted connection and a set of totally geodesic surfaces. Indeed, if $\gamma$ is a curve in $B$ then due to $N_\gamma\simeq O(k)$ we get that for any collection of points $\{y_1,\ldots,y_k\}$, $y_i\in \gamma$, possibly with multiplicities, there is a one-parameter family of curves in $B$ which intersect $\gamma$ exactly at these points. The family of curves defines a geodesic in $M$ (the fact that such a definition gives geodesics of a connection can be proved exactly as in \cite{B} and follows from the fact $H^1(\gamma,N_\gamma)=0$). The null geodesics are defined by $y_i$'s such that $y_1=y_2=\cdots=y_k$. A totally geodesic hypersurface in $M$ corresponding to a point $y\in B$ is defined by all curves which pass through $y$. It follows automatically from the definition of the geodesics that such hypersurfaces are indeed totally geodesic. One gets from the twistor construction not a unique connection but rather a set of unparameterised geodesics i.e.~a class of connections sharing the geodesics. The connections have in general non-vanishing torsion if we impose that they are adapted to the paraconformal structure (see \cite{MP} for a twistor construction leading to torsion-free structures). We shall call the structure a \emph{projective structure with a torsion}. Clearly, one can consider torsion-free connections sharing the geodesics, but then the compatibility with the paraconformal structure is lost. 
Anyway, we shall prove later that starting from dimension 4 (the classical dimensions 2 and 3 considered in \cite{H} are slightly different) the one-form $\beta$ is unique and the set of projectively equivalent paraconformal connections depends on an arbitrary one-form $\alpha$ on $M$ as in the case of the torsion-free connections (Corollaries \ref{cor_ord4} and \ref{cor_ordgen}). \section{Second order}\label{sec_ord2} Let $$ x''=F(t,x,x') $$ be a second order ODE. The tangent bundle to a two-dimensional manifold has a natural $GL(2,\mathbb{R})$-structure. Hence, any second order equation defines a paraconformal structure on its solution space. However, the existence of a totally geodesic paraconformal connection is a more restrictive condition which is equivalent to the existence of a projective structure. A result due to Cartan \cite{C1} says that a class of point equivalent ODEs defines a projective structure on the solution space if and only if the Cartan invariant $C$ vanishes. In coordinates (see \cite{CS}) \begin{eqnarray*} C&=&\partial_0^2F-\frac{1}{2}F\partial_0\partial_1^2F-\frac{1}{2}\partial_0F\partial_1^2F -\frac{2}{3}\partial_t\partial_0\partial_1F +\frac{1}{6}\partial_t^2\partial_1^2F+\\ &&\frac{1}{3}x_1\partial_t\partial_0\partial_1^2F+ \frac{1}{6}\partial_tF\partial_1^3F+ \frac{1}{3}F\partial_t\partial_1^3F- \frac{2}{3}x_1\partial_0^2\partial_1F +\frac{1}{6}x_1^2\partial_0^2\partial_1^2F+\\ &&\frac{1}{6}x_1\partial_0F\partial_1^3F+ \frac{1}{3}x_1F\partial_0\partial_1^3F+ \frac{2}{3}\partial_1F\partial_0\partial_1F- \frac{1}{6}\partial_1F\partial_t\partial_1^2F-\\ &&\frac{1}{6}x_1\partial_1F\partial_0\partial_1^2F+ \frac{1}{6}F^2\partial_1^4F. 
\end{eqnarray*} On the other hand Theorem \ref{thm2} specified to $k=1$ implies that the existence of a totally geodesic paraconformal connection is equivalent to the existence of a solution to \begin{equation}\label{eq_ord2} -\frac{1}{2}\beta'''-K_0'\beta-2K_0\beta'=-dK_0 \end{equation} satisfying \begin{equation}\label{cond_ord2} \beta(V)=0. \end{equation} Thus, we reproduce Cartan's result in the following form \begin{theorem}\label{thm_ord2} A class of point equivalent second order ODEs defines a projective structure on its solution space if and only if \begin{equation}\label{cartan_ord2} 4V'(K_0)-V(K_0')=0. \end{equation} Additionally $C=4V'(K_0)-V(K_0')$. \end{theorem} \begin{proof} We are looking for a common solution to \eqref{eq_ord2} and \eqref{cond_ord2}. Let us denote $$ \beta(V')=b. $$ Taking into account that $V''=-K_0V$ and differentiating \eqref{cond_ord2} one finds $$ \beta'(V)=-b,\quad \beta''(V)=-2b',\quad \beta'''(V)=-3b''+K_0b $$ and $$ \beta'(V')=b',\quad \beta''(V')=b''-K_0b,\quad \beta'''(V')=b'''-K_0'b-3K_0b'. $$ Thus, evaluating \eqref{eq_ord2} on $V$ and $V'$ one gets \begin{eqnarray*} &&\frac{3}{2}b''+\frac{3}{2}K_0b=-V(K_0),\\ &&\frac{1}{2}b'''+\frac{1}{2}K_0'b+\frac{1}{2}K_0b'=V'(K_0). \end{eqnarray*} Differentiating the first equation and substituting to the second one one gets \eqref{cartan_ord2}. Besides one can check by direct computations using formulae in Appendix \ref{ap_formulae} that \eqref{cartan_ord2} coincides with $C$. \end{proof} \section{Third order}\label{sec_ord3} Let $$ x'''=F(t,x,x',x'') $$ be a third order ODE. Its solution space is a three dimensional manifold. A paraconformal structure on a three dimensional manifold is a conformal metric $[\mathbf{g}]$ of Lorentzian signature. Moreover, a torsion-free connection adapted to a paraconformal structure is a Weyl connection $\nabla$ for $[\mathbf{g}]$. 
We recall that if a representative $\mathbf{g}\in[\mathbf{g}]$ is chosen then a Weyl connection $\nabla$ is uniquely defined by a one-form $\varphi$ such that $$ \nabla\mathbf{g}=\varphi\mathbf{g}. $$ According to Cartan \cite{C2}, a Weyl connection $\nabla$ is totally geodesic in our sense if and only if the Einstein equation is satisfied $$ Ric(\nabla)_{sym}=\frac{1}{3}R_\mathbf{g}(\nabla)\mathbf{g} $$ where $Ric(\nabla)_{sym}$ is the symmetric part of the Ricci curvature of $\nabla$ and $R_\mathbf{g}(\nabla)$ is the scalar curvature with respect to $\mathbf{g}$. The pair $([\mathbf{g}],\nabla)$ is called an Einstein-Weyl structure in this case. Cartan also proved that there is a one to one correspondence between Einstein-Weyl structures and third order ODEs for which the W\"unschmann $W_0$ and Cartan $C$ invariants vanish (see \cite{C3,T}). In coordinates \begin{eqnarray*} W_0&=&\partial_0F -\frac{1}{2}X_F(\partial_1F) +\frac{1}{3}\partial_1F\partial_2F+\frac{1}{6}X_F^2(\partial_2F) -\frac{1}{3}X_F(\partial_2F)\partial_2F+\frac{2}{27}(\partial_2F)^3,\\ C&=&X_F^2(\partial_2^2F)-X_F(\partial_1\partial_2F)+\partial_0\partial_2F. \end{eqnarray*} On the other hand Theorem \ref{thm2} implies that if $W_0=0$ then the existence of a totally geodesic paraconformal connection is equivalent to the existence of a solution to \begin{equation}\label{eq_ord3} -2\beta'''+K_1'\beta+2K_1\beta'=dK_1 \end{equation} satisfying \begin{equation}\label{cond_ord3} \beta(V)=\beta(V')=0. \end{equation} We reproduce Cartan's result in the following way \begin{theorem}\label{thm_ord3} A class of point equivalent third order ODEs defines an Einstein-Weyl structure on its solution space if and only if $W_0=0$ and \begin{equation}\label{cartan_ord3} 2V'(K_1)+V(K_1')=0 \end{equation} Additionally $C=-\frac{3}{2}(V'(K_1)-V(K_0))$ and under the W\"unschmann condition $2V'(K_1)+V(K_1')=2(V'(K_1)-V(K_0))=-\frac{4}{3}C$. 
\end{theorem} \begin{proof} The formula for $C$ in terms of $K_0$ and $K_1$ can be verified by computations using the appropriate formulae given in Appendix \ref{ap_formulae}. The invariant meaning of this expression follows from our proof. We are looking for a common solution to \eqref{eq_ord3} and \eqref{cond_ord3}. Let us denote $$ \beta(V'')=b. $$ Taking into account that $V'''=-K_0V+K_1V'$ and differentiating \eqref{cond_ord3} one finds $$ \beta'(V)=0,\quad \beta''(V)=b,\quad \beta'''(V)=3b', $$ $$ \beta'(V')=-b,\quad \beta''(V')=-2b',\quad \beta'''(V')=-3b''-K_1b, $$ and $$ \beta'(V'')=b',\quad \beta''(V'')=b''+K_1b,\quad \beta'''(V'')=b'''+K_1'b+3K_1b'+K_0b. $$ Thus, evaluating \eqref{eq_ord3} on $V$, $V'$ and $V''$ one gets \begin{eqnarray*} &&6b'=-V(K_1),\\ &&6b''=V'(K_1),\\ &&2b'''+K_1'b+4K_1b'+2K_0b=-V''(K_1). \end{eqnarray*} The last equation reduces to $-2b'''-4K_1b'=V''(K_1)$ due to the W\"unschmann condition $W_0=0$ which is equivalent to $K_1'=-2K_0$. Then, differentiating the first equation one gets that a common solution $b$ exists if and only if $V'(K_1)+\frac{1}{2}V(K_1')=0$ and $K_1V(K_1)=2V''(K_1)+\frac{1}{2}V'(K_1')$. We get \eqref{cartan_ord3} and using the W\"unschmann condition again \begin{equation}\label{cartan_ord3'} K_1V(K_1)=2V''(K_1)-V'(K_0). \end{equation} Now, the theorem follows from the following \begin{lemma} If \eqref{cartan_ord3} holds and the W\"unschmann invariant vanishes for a third order ODE then also \eqref{cartan_ord3'} holds. \end{lemma} \begin{proof} We can write $[V,V']=AV+BV'$ for some functions $A$ and $B$. Taking the Lie brackets with $X_F$ and using the fact that $V'''=-K_0V+K_1V'$ we get formulae for $[V,V'']$, $[V',V'']$ in terms of $A$, $B$ and their derivatives. Namely $[V,V'']=A'V+(A+B')V'+BV''$ and $[V',V'']=(A''+V(K_0)-BK_0-AK_1)V+(2A'+B''-V(K_1))V'+(A+2B')V''$. 
One more Lie bracket and the Jacobi identity gives the following three equations \begin{eqnarray*} &&3A'+3B''-V(K_1)=0,\\ &&3A''+B'''-X_FV(K_1)+V(K_0)-2BK_0+2B'K_1-V'(K_1)=0,\\ &&A'''-3B'K_0-BK_0'-A'K_1+X_FV(K_0)+V'(K_0)=0. \end{eqnarray*} Differentiating the first equation and substituting to the second and third one we can eliminate $A$ and its derivatives. But due to the W\"unschmann condition we can also eliminate $B$ and its derivatives and get one relation $$ 2V'(K_0)+\frac{2}{3}X_F^2V(K_1)+2X_FV(K_0)-\frac{2}{3}K_1V(K_1)=0. $$ The subsequent use of the W\"unschmann condition and the relation $\mathrm{ad}_{X_F}^iV=V^{(i)}$ reads $$ K_1V(K_1)=2V'(K_0)+V''(K_1)+V(K_0'). $$ On the other hand, differentiating \eqref{cartan_ord3} we get $$ 0=V''(K_1)-3V'(K_0)-V(K_0') $$ and adding the last two equations we finally get \eqref{cartan_ord3'}. \end{proof} \end{proof} \section{Fourth order}\label{sec_ord4} Let $$ x^{(4)}=F(t,x,x',x'',x''') $$ be a fourth order ODE. This is the case considered by Bryant \cite{B}. However, the torsion-free connections of \cite{B} are not, in general, totally geodesic in the sense of the present paper. Bryant proved in \cite[Theorem 4.1]{B} that a paraconformal structure possesses a torsion-free connection (unique) if and only if every null plane, i.e.\ every subspace $\mathcal{V}_2(s:t)(x)\subset T_xM$ for $x\in M$ and $(s:t)\in \mathbb{R} P^1$ in the notation of Section \ref{sec_paraconf}, is tangent to a totally-geodesic surface in $M$. We argued in Section \ref{sec_ODEcon} (the first remark following Theorem \ref{thm2}) that this condition is really expressed in terms of the torsion. Actually, according to Bryant, this condition is also equivalent to the fact that a paraconformal structure is defined by an equation and it can be expressed as vanishing of a polynomial of degree 7 in $(s:t)$ (compare \cite{K1}). The corresponding ODE satisfies the Bryant-W\"unschmann condition and the geometry is related to the contact geometry of ODEs. 
On contrary, Theorem \ref{thm2} describes paraconformal structures satisfying more restrictive conditions related to point geometry of ODEs. Namely, we assume that any subspace $\mathcal{V}_3(s:t)(x)\subset T_xM$ for $x\in M$ and $(s:t)\in \mathbb{R} P^1$ is tangent to a totally-geodesic submanifold of $M$. We get that it happens if and only if the Bryant-W\"unschmann condition holds and additionally there is a solution to \begin{equation}\label{eq_ord4} 5\beta'''+K_2'\beta+2K_2\beta'=dK_2 \end{equation} satisfying \begin{equation}\label{cond_ord4} \beta(V)=\beta(V')=\beta(V'')=0. \end{equation} Of course, a torsion-free connection also exists in this case, but this connection is not necessarily totally geodesic in our sense as we will see in Section \ref{sec_Veronese}. We get the following new result \begin{theorem}\label{thm_ord4} A class of point equivalent fourth order ODEs admits a totally geodesic paraconformal connection if and only if the Bryant-W\"unschmann condition holds and \begin{equation}\label{cartan_ord4} 4V'(K_2)+3V(K_2')=0. \end{equation} Additionally under the Bryant-W\"unschmann condition $4V'(K_2)+3V(K_2')=4V'(K_2)-3V(K_1)$. \end{theorem} \begin{proof} The proof is similar to the proofs of Theorems \ref{thm_ord2} and \ref{thm_ord3}. Let us denote $$ \beta(V''')=b. $$ Taking into account that $V^{(4)}=-K_0V+K_1V'-K_2V''$ and differentiating \eqref{cond_ord4} one finds $$ \beta'(V)=0,\quad \beta''(V)=0,\quad \beta'''(V)=-b, $$ $$ \beta'(V')=0,\quad \beta''(V')=b,\quad \beta'''(V')=3b', $$ $$ \beta'(V'')=-b,\quad \beta''(V'')=-2b',\quad \beta'''(V'')=-3b''+K_2b. $$ and $$ \beta'(V''')=b',\quad \beta''(V''')=b''-K_2b,\quad \beta'''(V''')=b'''-K_2'b-3K_2b'-K_1b. $$ Thus, evaluating \eqref{eq_ord4} on $V$, $V'$, $V''$ and $V'''$ one gets \begin{eqnarray*} &&5b=-V(K_2),\\ &&15b'=V'(K_2),\\ &&15b''-3K_2b=-V''(K_2),\\ &&5b'''-4K_2'b-13K_2b'-5K_1b=V'''(K_2). 
\end{eqnarray*} If we differentiate the first equation and substitute it to the second one we get the condition \eqref{cartan_ord4}. From the last two equations we get two additional conditions of higher order. However, as in the case of order three, they are consequences of \eqref{cartan_ord4} and the W\"unschmann condition and do not give new conditions on the equation $(F)$ (we have checked it by direct computations in coordinates). The identity $4V'(K_2)+3V(K_2')=4V'(K_2)-3V(K_1)$ follows from \eqref{wun1_ord4}. \end{proof} \begin{corollary}\label{cor_ord4} If a fourth order ODE admits a totally geodesic paraconformal connection then a solution $\beta$ to \eqref{eq_ord4} and \eqref{cond_ord4} is unique. \end{corollary} \begin{proof} The one-form $\beta$ is uniquely defined by the condition $5b=-V(K_2)$. \end{proof} \begin{remark} It would be nice to have a characterisation of $(F)$ admitting a totally geodesic paraconformal structure in terms of the curvature of the associated torsion-free Bryant connection. The curvature was explicitly computed in \cite{N2}. However, the Bryant connection is an object invariant with respect to the group of contact transformations which is much bigger than the group of point transformations. In fact a class of point equivalent ODEs splits into several classes of point equivalent ODEs. Therefore the problem would be to determine if a given class of contact equivalent ODEs contains a subclass of point equivalent ODEs admitting a totally geodesic paraconformal connection (a priori, the subclass is not unique). The problem is similar, in spirit, to the problem considered in \cite{DK2} where we characterise hyper-CR Einstein-Weyl structures in terms of point invariants. One gets invariants of very high order and a similar result should hold in the present case. \end{remark} \section{General case}\label{sec_gen} In this section we consider an ODE in the form $(F)$. 
Our main result is as follows \begin{theorem}\label{thm_gen} A class of point equivalent ODEs of order $k+1$, $k\geq 4$, admits a totally geodesic paraconformal connection if and only if the W\"unschmann condition holds, \begin{equation}\label{cartan_gen0} V^{(i)}(K_{k-1})=0,\qquad i=0,\ldots,k-4, \end{equation} \begin{equation}\label{cartan_gen} 4V^{(k-2)}(K_{k-1})+3V^{(k-3)}(K_{k-1}')=0, \end{equation} and \begin{eqnarray} &&\left(\frac{2}{\gamma_k}-1\right)K_{k-1}V^{(k-3)}(K_{k-1})= (-1)^k\left(V^{(k-2)}(K_{k-1}')-2V^{(k-1)}(K_{k-1})\right),\label{cartan_gen2}\\ &&\left(\frac{2}{3\gamma_k}-1\right)K_{k-1}V^{(k-2)}(K_{k-1}) +\left(\frac{1}{\gamma_k}+\frac{k-3}{2}\right)K_{k-1}'V^{(k-3)}(K_{k-1})\nonumber\\ &&\qquad=(-1)^{k+1}\frac{1}{3}\left(V^{(k-2)}(K_{k-1}'')-V^{(k-1)}(K_{k-1}')+V^{(k)}(K_{k-1})\right),\label{cartan_gen3} \end{eqnarray} where $\gamma_k=-\frac{1}{2}\binom{k+2}{3}$. Additionally \eqref{cartan_gen} is equivalent to $$ 4V^{(k-2)}(K_{k-1})-\frac{6}{k-1}V^{(k-3)}(K_{k-2})=0. $$ \end{theorem} \begin{proof} Let $\theta_0,\theta_1,\ldots,\theta_k$ be one-forms dual to vector fields $V,V',\ldots,V^{(k)}$. Assume that $\beta(V_k)=b$. Then $\beta=b\theta_k$. We compute that $\theta_k'=-\theta_{k-1}$, $\theta_k''=\theta_{k-2}+(-1)^{k+1}K_{k-1}\theta_k$ and $\theta_k'''=-\theta_{k-3}+(-1)^kK_{k-1}\theta_{k-1}+(-1)^{k+1}(K_{k-1}'+K_{k-2})\theta_k$. Thus $$ \beta'=b'\theta_k-b\theta_{k-1} $$ and \begin{eqnarray*} &&\beta'''=\left(b'''+3(-1)^{k+1}b'K_{k-1}+(-1)^{k+1}b(K_{k-1}'+K_{k-2})\right)\theta_k+\\ &&\qquad\left(-3b''+(-1)^kK_{k-1}b\right)\theta_{k-1}+3b'\theta_{k-2}-b\theta_{k-3}. \end{eqnarray*} Substituting this to \eqref{eq_gen}, evaluating on $V_0,\ldots,V_k$, and using the W\"unschmann condition \eqref{wun_gen} we get the conditions \eqref{cartan_gen0}-\eqref{cartan_gen3} in a way analogous to lower dimensional cases. 
\end{proof} \begin{corollary}\label{cor_ordgen} If an ODE of order $k+1>4$ admits a totally geodesic paraconformal connection then a solution $\beta$ to \eqref{eq_gen} and \eqref{cond_gen} is unique. \end{corollary} \begin{proof} The one-form $\beta$ is uniquely defined by the condition $\binom{k+2}{3}b=(-1)^k2V^{(k-3)}(K_{k-1})$ which is obtained by evaluation of \eqref{eq_gen} on $V^{(k-3)}$. \end{proof} \begin{remark} Our conjecture is that the equations \eqref{cartan_gen2} and \eqref{cartan_gen3} are redundant and follow from \eqref{cartan_gen0}, \eqref{cartan_gen} and the W\"unschmann condition. However, we were unable to prove it in full generality. \end{remark} \section{Ricci curvature}\label{sec_ricci} Let $\nabla$ be a totally geodesic paraconformal connection associated to $(F)$ of order $k+1\geq 3$. Direct computations show $$ R(\nabla)(Y_1,Y_2)V=d\alpha(Y_1,Y_2)V+d\beta(Y_1,Y_2)V'-\beta\wedge\alpha'(Y_1,Y_2)V-\beta\wedge\beta'(Y_1,Y_2)V' $$ and one gets $$ Ric(\nabla)(V,V)=d\beta(V',V)-\beta\wedge\beta'(V',V). $$ But, if $\beta(V)=\beta(V')=0$ then the right hand side vanishes (we use here $[V,V']\in\mathrm{span}\{V,V'\}$) and therefore \begin{equation}\label{eq_einstein} Ric(\nabla)(V,V)=0. \end{equation} It follows that the symmetric part of the Ricci tensor of $\nabla$ is a section of the bundle of symmetric 2-tensors annihilating the field of null cones $x\mapsto C(x)$ of the paraconformal structure. The bundle has rank one in the case of dimension 3. In fact it coincides with the conformal class $[\mathbf{g}]$. It follows that \eqref{eq_einstein} is equivalent to the Einstein-Weyl equation in this case. The situation is more complicated in higher dimensions because the bundle of symmetric tensors annihilating the null cone has rank bigger than 1 (e.g. the rank is three in the case of dimension 4). 
It is an interesting question to determine if the condition \eqref{eq_einstein} implies that $\nabla$ is totally geodesic (it is the case in dimension 3). \section{Veronese webs}\label{sec_Veronese} A particularly simple example of paraconformal structures admitting totally geodesic connections can be obtained from special families of foliations, called \emph{Veronese webs}. We shall ultimately show that the structures are described by solutions to the integrable system \eqref{eq_int_sys}. The Veronese webs are one-parameter families of foliations introduced by Gelfand and Zakharevich \cite{GZ} in connection to bi-Hamiltonian systems on odd-dimensional manifolds. Precisely, a one-parameter family of foliations $\{\mathcal{F}_t\}_{t\in\mathbb{R}}$ of co-dimension 1 on a manifold $M$ of dimension $k+1$ is called Veronese web if any $x\in M$ has a neighbourhood $U$ such that there exists a co-frame $\omega_0,\ldots,\omega_k$ on $U$ such that $$ T\mathcal{F}_t=\ker\left(\omega_0+t\omega_1+\cdots+t^k\omega_k\right). $$ In \cite{K2} we proved that there is a one to one correspondence between Veronese webs and ODEs for which all curvatures $K_i$ vanish. The equations are given modulo time-preserving contact transformations, mentioned earlier in Section \ref{sec_ODEinv}. But if all $K_i=0$ then automatically all conditions given in Theorems \ref{thm_ord2}, \ref{thm_ord3}, \ref{thm_ord4} and \ref{thm_gen} are satisfied. Therefore all Veronese webs admit totally geodesic paraconformal connections. The paraconformal structures obtained in this way will be referred to as \emph{of Veronese type}. The structures are very specific. They correspond to projective structures defined by connections with skew-symmetric Ricci tensor in the case of order 2 (see \cite{K3}), and to Einstein-Weyl structures of hyper-CR type in the case of order 3 (see \cite{DK1}). The connections in the case of order 2 are projectively equivalent to the Chern connections of classical 3-webs \cite{K3}. 
The hyper-CR Einstein-Weyl structures are connected to integrable equations of hydrodynamic type and have Lax pairs with no terms in the direction of a spectral parameter \cite{D}. A characterisation of this special Einstein-Weyl structures in terms of point invariants of the related ODEs is complicated and involves 4 additional invariants of high order \cite{DK2}. In any case, Veronese webs define exactly those totally geodesic paraconformal structures for which the corresponding twistor space fibres over $\mathbb{R} P^1$ (c.f. \cite{D,DK1}). Let $\omega(t)=\omega_0+t\omega_1+\cdots+t^k\omega_k$. Then the curve $t\mapsto \mathbb{R}\omega(t)\in P(T^*M)$ is a Veronese curve dual to the curve $t\mapsto\mathbb{R} V(t)\in P(TM)$ $$ V(t)=V_0+tV_1+\ldots+t^kV_k $$ of null directions defining a paraconformal structure in Section \ref{sec_paraconf}. The duality means that $$ V(t)\in\ker\omega(t)\cap\ker\omega'(t)\cap\ldots\cap\ker\omega^{(k-1)}(t) $$ and conversely $$ \ker\omega(t)=\mathcal{V}_k(t)=\mathrm{span}\{V(t),V'(t),\ldots,V^{(k-1)}(t)\}. $$ In the case of Veronese webs the parameter $t$ is well defined globally uniquely modulo the M\"obius transformations $t\mapsto\frac{at+b}{ct+d}$, where $a,b,c,d\in\mathbb{R}$ and $ad-bc\neq 0$. Moreover, the distributions $\mathcal{V}_k(t)$ are integrable for any particular choice of $t$. It means that \begin{equation}\label{eq_integrability} \omega(t)\wedge d\omega(t)=0, \end{equation} for any $t$. In order to get useful formulae we note that due to the integrability condition one can choose local coordinates $x_0,\ldots,x_k$ on $M$ such that $T\mathcal{F}_{t_i}=\ker dx_i$ for some fixed $t_0,\ldots,t_k\in\mathbb{R}$. If we also assume that $$ T\mathcal{F}_{t_{k+1}}=\ker dw $$ for a function $w=w(x_0,\ldots,x_k)$ then one verifies that \begin{equation}\label{eq_omega} \omega(t)=\sum_{i=0}^k (t_{k+1}-t_i)\prod_{j\neq i}(t-t_j)\partial_iwdx_i \end{equation} and $\omega$ is given up to a multiplication by a function on $M$. 
The following theorem for $k=2$ was proved in \cite{Z} and \cite{DK1}. It is new for $k>2$. \begin{theorem}\label{thm3} Let $M$ be a manifold of dimension $k+1$ with local coordinates $x_0,\ldots,x_k$ and let $t_0,\ldots,t_{k+1}\in\mathbb{R}$ be distinct numbers. Then, any $w$ satisfying the system \begin{equation}\label{eq_hirota} \sum_{cycl(i,j,l)}a_{ij,l}\partial_i\partial_j w\partial_l w=0,\qquad 0\leq i<j<l\leq k, \end{equation} where $$ a_{ij,l}=(t_i-t_j)(t_{k+1}-t_l), $$ defines a paraconformal structure via \eqref{eq_omega}, and conversely, any paraconformal structure of Veronese type can be locally put in this form. Moreover: \begin{enumerate} \item If $k>2$ then all totally geodesic paraconformal connections for a Veronese web are given by the formula $$ \nabla\partial_i=\left(\frac{d(\partial_iw)}{\partial_iw}+\alpha\right)\partial_i $$ where $\alpha$ is an arbitrary one-form on $M$. Moreover, there is the unique $\alpha$ such that the torsion of the corresponding paraconformal connection satisfies $$ T(\nabla)(V(t),V'(t))\in\mathrm{span}\{V(t)\},\qquad t\in\mathbb{R}. $$ \item If $k=2$ then a Veronese web defines the following conformal metric $$ \mathbf{g}=\sum_{i,j=0}^2(t_3-t_i)(t_3-t_j)\left(t_i^2+t_j^2-t_it_j-\sum_{l=0}^2t_l^2\right) \partial_iw\partial_jwdx_idx_j $$ and there is the unique torsion-free totally geodesic paraconformal connection $\nabla$ such that $([\mathbf{g}], \nabla)$ is an Einstein-Weyl structure. The Weyl one-form for $\nabla$ is given by $$ \varphi=\left(\frac{\partial_0\partial_1w}{\partial_1w}+\frac{\partial_0\partial_2w}{\partial_2w}\right)dx_0 +\left(\frac{\partial_0\partial_1w}{\partial_0w}+\frac{\partial_1\partial_2w}{\partial_2w}\right)dx_1 +\left(\frac{\partial_0\partial_2w}{\partial_0w}+\frac{\partial_1\partial_2w}{\partial_1w}\right)dx_2. 
$$ \item If $k=1$ then for a given Veronese web there is the unique torsion-free paraconformal connection satisfying the additional condition $\nabla_Y V(t)\in\mathrm{span}\{V(t)\}$ for any vector field $Y$ and any $t\in\mathbb{R}$. The connection is given by $$ \nabla \partial_0=\left(\frac{\partial_0\partial_0w}{\partial_0w}-\frac{\partial_0\partial_1w}{\partial_1w}\right)dx_0\partial_0, \qquad\nabla \partial_1=\left(\frac{\partial_1\partial_1w}{\partial_1w}-\frac{\partial_0\partial_1w}{\partial_0w}\right)dx_1\partial_1. $$ \end{enumerate} \end{theorem} \begin{proof} The integrability condition \eqref{eq_integrability} written in coordinates and in terms of the function $w$ takes the form $$ \sum_{i<j<l}\left(\sum_{cycl(i,j,l)}(T_i-T_j)T_l\partial_i\partial_j w\partial_lw\right) dx_i\wedge dx_j\wedge dx_l=0 $$ where $$ T_i=(t_{k+1}-t_i)\prod_{j\neq i}(t-t_j). $$ Thus, for any $i<j<l$ one gets the equation \begin{equation}\label{eq_hirota0} \sum_{cycl(i,j,l)}(T_i-T_j)T_l\partial_i\partial_j w\partial_l w=0 \end{equation} which should be satisfied for any $t\in \mathbb{R}$. But the coefficient $(T_i-T_j)T_l$ equals $$ a_{ij,l}P_{ijl}(t,t_1,\ldots,t_k,t_{k+1}) $$ where $$ P_{ijl}=(t-t_i)(t-t_j)(t-t_l)(t-t_{k+1})\prod_{s\neq i,j,l}(t-t_s)^2 $$ is a polynomial that does not depend on the permutation of indices $(i,j,l)$ and has zeroes (with multiplicities) exactly at points $t_0,\ldots,t_{k+1}$. Thus, for $t\in\{t_0,\ldots,t_{k+1}\}$ we get that the condition \eqref{eq_hirota0} is void and for $t\notin\{t_0,\ldots,t_{k+1}\}$ the condition \eqref{eq_hirota0} reduces to \eqref{eq_hirota}. The converse statement follows from the fact that any Veronese web can be written down as \eqref{eq_omega} in some coordinate system. The formulae for $\nabla$ can be computed in the following way. We have $\nabla V(t)=\alpha V(t)+\beta V'(t)$ where the one-forms $\alpha$ and $\beta$, a priori, depend on $t$. 
However, in the Veronese case system \eqref{system} gives $\alpha'=-\frac{k}{2}\beta''$ and $\beta'''=0$. It follows that $\beta$ is a polynomial of degree 2 in $t$. Moreover, in the case of Veronese webs $V(t)$ considered on $M$ as in Section \ref{sec_paraconf} satisfies \eqref{cond_gen} and it means that $\beta=f\omega$ for some function $f$ on $M$. Hence, comparing the degrees of polynomials, we conclude $f=0$ for $k>2$. Consequently $\alpha$ does not depend on $t$. Therefore $\nabla V(t)=\alpha V(t)$ and in coordinates we get $\nabla\partial_i=\left(\frac{d(\partial_iw)}{\partial_iw}+\alpha\right)\partial_i$. Now we shall prove that the condition $T(\nabla)(V(t),V'(t))\in\mathrm{span}\{V(t)\}$ normalises $\alpha$ uniquely. First we prove that if $i<j$ then \begin{equation}\label{eq_vv} [V^{(i)}(t), V^{(j)}(t)]\in\mathrm{span}\{V^{(i)}(t),V^{(i+1)}(t),\ldots, V^{(j)}(t)\} \end{equation} for any fixed $t$. This follows from the following \begin{lemma}\label{lemma2} All distributions $\mathcal{V}_i(t)=\mathrm{span}\{V(t),V'(t),\ldots,V^{(i-1)}(t)\}$, where $t\in\mathbb{R}$ is fixed, are integrable. \end{lemma} \begin{proof} Note that $\mathcal{V}_i(t)=\ker\{\omega(t),\omega'(t),\ldots,\omega^{k-i}(t)\}$. Moreover, if $i=k$ then $\mathcal{V}_k(t)$ is integrable by definition. This is expressed by \eqref{eq_integrability}. Now, we proceed by induction and prove that $$ d\omega^{(i)}\wedge\omega\wedge\omega'\wedge\ldots\wedge\omega^{(i)}=0. $$ Assuming that the formula above is true and differentiating it we get $$ d\omega^{(i+1)}\wedge\omega\wedge\omega'\wedge\ldots\wedge\omega^{(i)}+ d\omega^{(i)}\wedge\omega\wedge\omega'\wedge\ldots\wedge\omega^{(i-1)}\wedge\omega^{(i+1)}=0. $$ Finally, multiplying by $\omega^{(i+1)}$ the second term vanishes and we get $$ d\omega^{(i+1)}\wedge\omega\wedge\omega'\wedge\ldots\wedge\omega^{(i+1)}=0. 
$$ \end{proof} Now, to prove \eqref{eq_vv} it is sufficient to consider $t=0$ only because $GL(2,\mathbb{R})$ acts on $t$ and all points in the projective line are equally good. We have $\mathcal{V}_i(0)=\mathrm{span}\{V_0,\ldots,V_{i-1}\}$. But also we have $\mathcal{V}_i(\infty)=\mathrm{span}\{V_k,V_{k-1},\ldots,V_{k-i+1}\}$. Thus, applying Lemma \ref{lemma2} to $\mathcal{V}_j(0)$ and $\mathcal{V}_i(\infty)$, we get that the intersection $\mathcal{V}_j(0)\cap\mathcal{V}_i(\infty)$ is integrable and \eqref{eq_vv} holds. Let us consider $T(\nabla)(V(t),V'(t))\mod V(t)$. We have $$ T(\nabla)(V(t),V'(t))=\alpha(V(t))V'(t)-h(t)V'(t)\mod V(t) $$ where $h(t)$ is defined by $[V(t),V'(t)]=h(t)V'\mod V(t)$. Since $\alpha$ is independent of $t$ and $V(t)$ is a polynomial of degree $k$ in $t$ it is sufficient to show that $h(t)$ is a polynomial of degree $k$ in $t$. If it is the case, then $\alpha(V(t))=h(t)$ fixes $\alpha$ uniquely. But it is easy to show that \eqref{eq_vv} implies that $h(t)$ is a polynomial of degree $k$ in $t$. Indeed, we can normalise $\omega$ such that $\omega(t)(V^{(k)}(t))=1$. Then a simple induction gives $\omega^{(i)}(t)(V^{(j)}(t))=\pm\delta_i^{k-j}$ for all $i,j=0,\ldots,k$. In particular $h(t)=\pm \omega^{(k-1)}(t)([V(t),V'(t)])$. Differentiating this equation $k+1$ times, using \eqref{eq_vv} and the fact that $\omega$ and $V$ are polynomials of degree $k$ in $t$ we get that $h^{(k+1)}(t)=0$. This completes the proof in the case $k>2$. In the case $k=2$ the theorem follows from \cite{DK1}. Here, we present a sketch of a different, more direct, proof. We have $\beta=f\omega$ and $\alpha=\tilde\alpha-ftk\omega''$, where $\tilde\alpha$ is a one-form on $M$. 
Simple but long computations prove that $\nabla$ is torsion-free if and only if $$ f=\frac{1}{4}\left(\frac{\partial_0\partial_1w}{\partial_0w\partial_1w}- \frac{\partial_1\partial_2w}{\partial_1w\partial_2w}\right)\frac{1}{(t_3-t_1)(t_0-t_2)} $$ and $$ \tilde\alpha=-\frac{1}{4}\sum_{cycl(0,1,2)}\left( \frac{t_1-3t_2}{t_1-t_2}\frac{\partial_0\partial_1w}{\partial_1w} +\frac{t_2-3t_1}{t_2-t_1}\frac{\partial_0\partial_2w}{\partial_2w}\right)dx_0. $$ The formula for $f$ does not depend on the permutation of indices $(0,1,2)$ due to \eqref{eq_hirota}. Having $f$ and $\tilde\alpha$ one has all ingredients necessary for the computation of $\nabla\mathbf{g}$ and consequently $\varphi$. The so-obtained connection satisfies the Einstein-Weyl equation due to results of Cartan. In the case $k=1$ the one-form $\beta$ is linear in $t$ and equals $f\omega$ for some function $f$. It follows that $\alpha$ does not depend on $t$ and the vanishing of the torsion gives $$ \alpha=-\left(\frac{\partial_0\partial_1w}{\partial_1w}-2f(t_2-t_0)\partial_0w\right)dx_0 -\left(\frac{\partial_0\partial_1w}{\partial_0w}-2f(t_2-t_1)\partial_1w\right)dx_1. $$ It can be shown that the so obtained connection satisfies $\nabla_Y V(t)\in\mathrm{span}\{V(t)\}$ for any $Y$ if and only if $f=0$. For $f\neq 0$ we only have $\nabla_{V(t)} V(t)\in\mathrm{span}\{V(t)\}$. \end{proof} We shall say that the unique connection from Theorem \ref{thm3} for $k>2$ is \emph{canonical}. Note that the unique connection in the case $k=1$ fits into the scheme $k>2$. On the other hand the unique torsion-free Weyl connection in the case $k=2$ is different since, in general, the associated one-form $\beta$ is non-trivial. However, in the case $k=2$ one can consider connections such that $\beta=0$ as well, and among them there is a unique one with the torsion normalised as in the case $k>2$. Another approach to canonical connections for Veronese webs has been recently proposed by A.~Panasyuk (personal communication). 
The connection for $k=1$ is exactly the Chern connection of a 3-web \cite{K2}. The connection for $k=2$ is exactly the hyper-CR connection from \cite{DK1} (in \cite{DK1} the conformal class is defined by our $\mathbf{g}$ multiplied by $(w_0w_1w_2)^{-1}$ and consequently the one-form $\phi$ from \cite{DK1} equals $\varphi-d\ln(w_0w_1w_2)$). If $k>2$ we get the following result, which in particular shows that the totally geodesic paraconformal connections are not the torsion free connections used in \cite{B}. \begin{corollary}\label{cor_webs} If $k>2$ then for a non-flat paraconformal structure of Veronese type all totally geodesic paraconformal connections have non-vanishing torsion. In particular a Veronese web is flat if and only if the corresponding canonical connection is torsion-free. \end{corollary} \begin{proof} We assume that a Veronese web $\{\mathcal{F}_t\}_{t\in\mathbb{R}}$ is described by a function $w$ as in Theorem \ref{thm3}. A connection $\nabla$ is defined by the formula $\nabla\partial_i=\left(\frac{d(\partial_iw)}{\partial_iw}+\alpha\right)\partial_i $. Thus, if the torsion of $\nabla$ vanishes then $\alpha(\partial_i)=-\frac{\partial_i\partial_jw}{\partial_jw}$ for any $j\neq i$. Taking $l\neq j$ and computing $\alpha(\partial_i)$ in two ways we get that $\partial_i\left(\frac{\partial_jw}{\partial_l w}\right)=0$. It implies that $\mathbf{grad}(w)$ is proportional to a vector field $(b_0,\ldots,b_k)$ where $b_i$ is a function of $x_i$ only. All $b_i$ are non-vanishing functions because any two foliations from the family $\{\mathcal{F}_t\}_{t\in\mathbb{R}}$ intersect transversally. If we change local coordinates $\tilde x_i:=\int b_idx_i$ then still $\ker d\tilde x_i=T\mathcal{F}_{t_i}$ and we get that in new coordinates the web is described by $w=\tilde x_0+\ldots+\tilde x_k$ which means that the corresponding paraconformal structure is flat. 
\end{proof} \paragraph{The Bryant connection.} As mentioned before, there is a unique torsion-free paraconformal connection (Bryant connection) in the case of paraconformal structures defined by equations of order 4. It follows from above that in the case of Veronese webs the one-form $\beta$, involving the torsion-free connection via $\nabla V(t)=\alpha V(t)+\beta V'(t)$, does not vanish unless the structure is flat. Precisely, $\beta=\beta_0+t\beta_1+t^2\beta_2$ for some one-forms $\beta_i$ which do not depend on $t$. The one-forms $\beta_i$ can be computed explicitly. We shall do this in order to show the difference between totally geodesic connections and the Bryant connection. Assume that a paraconformal structure is given by $V(t)=V_0+tV_1+t^2V_2+t^3V_3$ and let us introduce structural functions $c_{ij}^l$ by $$ [V_i,V_j]=\sum_{l=0}^3c_{ij}^lV_l $$ and let $\eta_0,\eta_1,\eta_2,\eta_3$ be the dual one-forms such that $\eta_i(V_j)=\delta_{ij}$. Then \begin{eqnarray*} &&\beta_0=\frac{1}{3}c_{02}^3\eta_0+\frac{1}{3}c_{12}^3\eta_1+(2c_{03}^2-c_{02}^1)\eta_2-c_{03}^1\eta_3,\\ &&\beta_1=(c_{03}^3-c_{02}^2)\eta_0+ \left(\frac{1}{3}c_{01}^0+\frac{1}{3}c_{13}^3-c_{03}^2\right)\eta_1+ \left(\frac{1}{3}c_{23}^3+\frac{1}{3}c_{02}^0-c_{03}^1\right)\eta_2+(c_{03}^0-c_{13}^1)\eta_3,\\ &&\beta_2=-c_{03}^2\eta_0+(2c_{03}^1-c_{13}^2)\eta_1+\frac{1}{3}c_{12}^0\eta_2+\frac{1}{3}c_{13}^0\eta_3, \end{eqnarray*} and additionally \begin{eqnarray*} \alpha=(3c_{02}^2-2c_{03}^3)\eta_0+(3c_{03}^2-c_{01}^0)\eta_1-c_{02}^0\eta_2-c_{03}^0\eta_3. \end{eqnarray*} To get these expressions one considers $\nabla V(t)=\alpha V(t)+\beta V'(t)$ which gives $\nabla V_i$ in terms of $\alpha$ and $\beta_i$. Then the vanishing of the torsion gives 24 linear equations for 16 unknown functions: $\beta_i(V_j)$ and $\alpha(V_j)$, $i=0,1,2$, $j=0,\ldots,3$. However, the structural functions $c_{ij}^l$ satisfy eight additional linear relations given explicitly in \cite{K1}. 
The additional relations are exactly obstructions for the vanishing of the torsion. In the case of Veronese webs $$ V_i=(-1)^{i+1}\binom{3}{i}\sum_{j=0}^3\frac{t_j^{3-i}}{\partial_j w}\left(\prod_{l\in\{0,1,2,3,4\}\setminus\{j\}}\frac{1}{t_l-t_j}\right)\partial_j $$ and $$ c_{ij}^l=\sum_{a,b=0}^3 d_{ij,ab}^l \frac{\partial_a\partial_b w}{\partial_a w\partial_b w} $$ where $d_{ij,ab}^l$ are certain constants depending on the $t_i$'s. \paragraph{The Zakharevich conjecture.} The proof of Theorem \ref{thm3} also gives an elementary proof of the following result, which was previously proven by Panasyuk \cite{P} in the analytic category and further generalised in \cite{BD} (the problem is called the Zakharevich conjecture in \cite{P}). \begin{corollary}\label{cor_webs2} The integrability condition \eqref{eq_integrability} is satisfied for any $t$ if and only if it is satisfied for $k+3$ distinct values of $t$. \end{corollary} \begin{proof} If $t_{k+2}\notin\{t_0,\ldots,t_{k+1}\}$ then $P_{ijl}(t_{k+2})\neq 0$ and we can divide \eqref{eq_hirota0} by $P_{ijl}(t_{k+2})$. Then we get that \eqref{eq_hirota} has to be satisfied if the integrability condition holds for $t_{k+2}$. But then it follows from Theorem \ref{thm3} that the integrability condition holds for any $t$. \end{proof} \paragraph{Integrable systems, bi-Hamiltonian systems and Lax tuples. } Note that the constants $a_{ij,l}$ in Theorem \ref{thm3} satisfy $\sum_{cycl(i,j,l)}a_{ij,l}=0$. Thus the equation \eqref{eq_hirota} is the dispersionless Hirota equation in the case $k=2$ (cf. \cite{DK1,FK,Z}). It follows that the first part of Theorem \ref{thm3} is a generalisation of \cite[Corollary 3.7]{Z}. Let us define the following $t$-dependent vector fields $$ L_i(t)=-\frac{\partial_iw}{\partial_0w}\partial_0+a_i(t-t_i)\partial_i $$ where $a_i=\frac{t_{k+1}-t_0}{t_{k+1}-t_i}$, for $i=1,\ldots,k$. The vector fields are linear in $t$ and satisfy $\omega(t)(L_i(t))=0$. 
Therefore they span the distribution tangent to $\mathcal{F}_t$, for any $t\in\mathbb{R}$. We will call $(L_1(t),\ldots, L_k(t))$ the \emph{Lax tuple} of the paraconformal structure of a Veronese type. One verifies that the Lax tuple commutes, i.e. $$ [L_i,L_j]=0, $$ if and only if \eqref{eq_int_sys} is satisfied. Thus, \eqref{eq_int_sys} is equivalent to \eqref{eq_hirota}. In this way we recover the hierarchy of integrable systems \cite[Equation 6]{DK1}. The following result extends \cite[Theorem 4.1]{DK1} to the case of arbitrary $k$. \begin{proposition} Let $N=M\times \mathbb{R}^k$ and let $y_1,\ldots,y_k$ be coordinates on $\mathbb{R}^k$. If $(L_i(t))_{i=1}^{k}$ is a Lax tuple on $M$ defined by a Veronese web then $$ t\mapsto \sum_{i=1}^{k}L_i(t)\wedge\partial_{y_i} $$ is a bi-Hamiltonian structure on $N$ and the construction is the converse of the Gelfand-Zakharevich reduction. \end{proposition} \appendix \section{Appendix: formulae}\label{ap_formulae} All formulae below are either taken from \cite{JK} or computed by hand using \cite[Proposition 2.9]{JK}. All vector fields $V^{(i)}$ are given up to a multiplicative factor $g$ from equation \eqref{eq_g} which can be neglected. \vskip 2ex {Order 2:} $$ K_0=-\partial_0F+\frac{1}{2}X_F(\partial_1F)-\frac{1}{4}(\partial_1F)^2 $$ $$ V=\partial_1,\qquad V'=-\partial_0-\frac{1}{2}\partial_1F\partial_1. $$ \vskip 2ex {Order 3:} \begin{eqnarray*} K_0&=&\partial_0F-X_F(\partial_1F) +\frac{1}{3}\partial_1F\partial_2F+ \frac{2}{3}X_F^2(\partial_2F)- \\ &&\frac{2}{3}X_F(\partial_2F)\partial_2F+ \frac{2}{27}(\partial_2F)^3,\\ K_1&=&\partial_1F-X_F(\partial_2F)+\frac{1}{3}(\partial_2F)^2. \end{eqnarray*} \begin{eqnarray*} V&=&\partial_2,\\ V'&=&-\partial_1-\frac{2}{3}\partial_2F\partial_2,\\ V''&=&\partial_0+\frac{1}{3}\partial_2F\partial_1+ \left(\partial_1F+\frac{4}{9}(\partial_2F)^2 -\frac{2}{3}X_F(\partial_2F)\right)\partial_2. 
\end{eqnarray*} \vskip 2ex {Order 4:} \begin{eqnarray*} K_0&=&-\partial_0F+X_F(\partial_1F)-X_F^2(\partial_2F)+\frac{3}{4}X_F^3(\partial_3F) -\frac{9}{16}X_F(\partial_3F)^2+\\ &&\frac{18}{64}X_F(\partial_3F)(\partial_3F)^2- \frac{3}{256}(\partial_3F)^4-\frac{1}{4}\partial_1F\partial_3F +\frac{1}{2}X_F(\partial_2F)\partial_3F -\\ &&\frac{3}{4}X_F^2(\partial_3F)\partial_3F+ \frac{1}{4}X_F(\partial_3F)\partial_2F-\frac{1}{16}\partial_2F(\partial_3F)^2,\\ K_1&=&-\partial_1F+2X_F(\partial_2F)-2X_F^2(\partial_3F)-\frac{1}{2}\partial_2F\partial_3F +\frac{3}{2}X(\partial_3F)\partial_3F-\frac{1}{8}(\partial_3F)^3,\\ K_2&=&-\partial_2F+\frac{3}{2}X_F(\partial_3F)-\frac{3}{8}(\partial_3F)^2. \end{eqnarray*} \begin{eqnarray*} V&=&\partial_3,\\ V'&=&-\partial_2-\frac{3}{4}\partial_3F\partial_3,\\ V''&=&\partial_1+\frac{1}{2}\partial_3F\partial_2+\left(\frac{9}{16}(\partial_3F)^2 -\frac{3}{4}X_F(\partial_3F)\right)\partial_3,\\ V'''&=&-\partial_0 -\frac{1}{4}\partial_3F\partial_1 +\left(\frac{5}{4}X_F(\partial_3F)-\frac{7}{16}(\partial_3F)^2\right)\partial_2+\\ &&\left(\frac{27}{16}X_F(\partial_3F)-\frac{3}{4}X_F^2(\partial_3F)-\partial_1F -\frac{1}{2}\partial_2F\partial_3F -\frac{27}{64}(\partial_3F)^3\right)\partial_3. \end{eqnarray*} \vskip 2ex {General case, equations of order $k+1$:} $$ K_{k-1}=(-1)^k\left(\partial_{k-1}F-\frac{k}{2}X_F(\partial_kF)+\frac{k}{2(k+1)}(\partial_kF)^2\right). $$ $$ V=\partial_k,\qquad V'=-\partial_{k-1}-\frac{k}{k+1}\partial_kF\partial_k, $$ $$ V^{(i)}=\sum_{j=0}^i\binom{i}{j}X_F^j(g)\mathrm{ad}_{X_F}^{i-j}\partial_k. $$ where $X_F^j(g)$ can be computed using \eqref{eq_g} several times. \paragraph{Acknowledgements.} The work has been partially supported by the Polish National Science Centre grant DEC-2011/03/D/ST1/03902. \end{document}
\begin{document} \begin{abstract} Let $X$ be a smooth complex projective variety of dimension $n$. We prove bounds on Fujita's basepoint freeness conjecture that grow as $n\operatorname{log}\operatorname{log}(n)$. \end{abstract} \title{Logarithmic bounds on Fujita's conjecture} \section{Introduction} The purpose of this paper is to prove the following result: \begin{theorem}\label{intro1} Let $X$ be a smooth projective variety of dimension $n$ defined over an algebraically closed field of characteristic zero and let $L$ be an ample line bundle on $X$. Then $K_X + mL$ is basepoint free for any positive integer $m\geqslant \operatorname{max}\{n+1, n(\operatorname{log}\operatorname{log}(n)+2.34)\}$. \end{theorem} A conjecture of Fujita \cite{fujita1} states that, in the hypotheses of Theorem \ref{intro1}, $K_X+mL$ is basepoint free for all $m\geqslant n+1$. Since maps to projective space are one of the main tools used in the study of projective varieties, Fujita's conjecture has received considerable attention. Reider proved the conjecture for surfaces in \cite{reider} shortly after its formulation by using Bogomolov's instability theorem for rank two vector bundles. Ein and Lazarsfeld proved it for threefolds in \cite{einlazarsfeld1} by introducing techniques from the Minimal Model Program. Later, Kawamata proved the conjecture for fourfolds in \cite{kawamata1} and Ye and Zhu recently proved it for fivefolds in \cite{yezhu1} and \cite{yezhu2}. For sixfolds, we prove the following: \begin{theorem}\label{intro2} Let $X$ be a smooth projective variety of dimension six defined over an algebraically closed field of characteristic zero and let $L$ be an ample line bundle on $X$. Then $K_X+mL$ is basepoint free for any positive integer $m\geqslant 8$. \end{theorem} While these sporadic cases may be considered as evidence for the conjecture to hold true in general, in higher dimensions much less is known. 
The first general result is due to Angehrn and Siu, who used techniques of analytic algebraic geometry to prove that $K_X+mL$ is basepoint free for all $m\geqslant (n^2+n+2)/2$ in \cite{angehrnsiu}. Koll\'{a}r adapted their proof to the algebraic setting in \cite{pairs}. Using a different idea, Helmke \cite{helmke1} later established a general method that essentially leads to a quadratic bound. Heier \cite{heier} combined Angehrn-Siu's approach and Helmke's approach to give a bound that is $O(n^{4/3})$. Once one knows that the linear series $|K_X+mL|$ gives a morphism to projective space, some questions naturally arise. For instance, it is interesting to know if the morphism is birational. More generally, we say that $K_X+mL$ separates $r$ points if the restriction morphism \[ H^0(X, K_X+mL)\rightarrow H^0(T, (K_X+mL)|_T) \] is surjective for any reduced subscheme $T$ of length $r$. Naturally, $K_X+mL$ separates two points if and only if the morphism defined by $|K_X+mL|$ is birational. Angehrn and Siu \cite{angehrnsiu} (see also \cite{pairs}) showed that $K_X+mL$ separates $r$ points for $m\geqslant (n^2 +2rn-n+2)/2$. In this direction we prove: \begin{theorem}\label{intro3} Let $X$ be a smooth projective variety of dimension $n$ defined over an algebraically closed field of characteristic zero and let $L$ be an ample line bundle. Then $K_X+mL$ separates $r$ points for any positive integer $m\geqslant r+n-1+\sqrt r \, n (\log \log n + 2.34)$. \end{theorem} Let us now briefly explain the ideas behind the proofs of Theorems \ref{intro1}-\ref{intro3}. Let $x$ be a point in a smooth projective variety $X$ and let $L$ be an ample line bundle. Suppose that we wish to find a section of $H^0(X,K_X+L)$ that does not vanish at $x$. If $L^n$ is large enough, a well established method in birational geometry is to find a divisor $D\in |L|_{\mathbb{Q}}$ that has large order of vanishing at $x$. 
In this way the pair $(X,D)$ has a non Kawamata log terminal center $Z$ containing $x$, and then one tries to use vanishing theorems to lift sections of $L|_Z$. If $Z$ is zero dimensional this is easily done, but $Z$ may very well be higher dimensional. Therefore one needs to cut down $Z$ in dimension. At this stage, two approaches are possible. Helmke's approach is to insist in finding a divisor $D'\in |L|_Z|_{\mathbb{Q}}$ with large order of vanishing at $x$ and then lifting $D'$ to $X$. Finding such $D'$ is now harder than it was finding $D$, as $Z$ may be singular at $x$. However, Helmke proved that if $Z$ is a log canonical center of dimension $d$, then $\operatorname{mult}_x Z\leqslantslant \binom{n-1}{n-d}$. By cutting down one step at a time, one eventually gets a zero dimensional log canonical center. Angehrn and Siu's method is instead to find a divisor $D'\in |L|_Z|_{\mathbb{Q}}$ highly singular at a smooth point $y$ near $x$, and then take the limit as $y$ approaches $x$. As we mentioned earlier, both methods give a quadratic bound on $m$. We will follow Helmke's approach. The crucial new ingredient, however, is to consider all steps simultaneously rather than one at a time. By doing so, we rephrase the problem of bounding $m$ into an optimization problem of a linear function on a compact convex polyhedron. This approach allows us to estimate very efficiently the maximum of the linear function, as it suffices to evaluate it at the vertices of the polyhedron. We illustrate this idea in the simple case of dimension two. Let $X$ be a smooth surface, let $L$ be an ample line bundle and fix a point $x\in X$. 
Let $D\in|L|_{\mathbb{Q}}$ be a $\mathbb{Q}$-divisor such that $\operatorname{ord}_x D\geqslantslant 1$ and consider the log canonical threshold: \[ t_1 = \operatorname{sup}\{c|(X, cD) \text{ is log canonical at $x$}\} \] Clearly \[ 0\leqslantslant t_1\leqslantslant 2\tag{1} \] Let $D_1 = t_1 D$ and let $Z$ be the minimal log canonical center of $(X, D_1)$ at $x$. If $Z$ is zero dimensional, then we are done. If not, then $Z$ is a curve which is smooth at $x$ by inversion of adjunction. At this point we introduce the following important definition, due to Helmke (see also \cite{ein}). \begin{definition} Let $(X,\Delta)$ be a log pair. Let $n$ be the dimension of $X$ and let $x$ be a smooth point in $X$. Let $\pi: Y \rightarrow X$ be the blowing up of $X$ at $x$ with exceptional divisor $E$. The local discrepancy $b_x(X,\Delta)$ of $(X,\Delta)$ over $x$ is: \[ \inf\{b\mid \text{There is a non klt center of $(Y, \pi^* \Delta - (n-1-b)E)$ in $E$}\} \] \end{definition} In our example, let $b_1 = b_x(X, D_1)$. It is then easy to show that \[ 0\leqslantslant b_1 \leqslantslant 2- t_1\tag{2} \] Also, since $Z$ is one dimensional, we have \[ 0\leqslantslant b_1\leqslantslant 1\tag{3} \] Next, we want to cut down $Z$. Let $D'\in |L|_Z|_{\mathbb{Q}}$ be a divisor such that $\operatorname{ord}_x D' \geqslantslant 1$ and let $D''$ be a general lifting of $D'$ to $X$. Finally, let \[ t_2 = \operatorname{sup}\{c\mid (X, D_1 + cD'') \text{ is log canonical at $x$}\} \] and set $D_2 = D_1 + t_2 D''$. It is again easy to see that \[ 0\leqslantslant t_2 \leqslantslant b_1 \tag{4} \] Notice that $x$ is a log canonical center of $(X,D_2)$ and that $D_2\sim_{\mathbb{Q}} (t_1+t_2)L$. It is then a standard argument to deduce that $K_X+mL$ has a section that does not vanish at $x$ for $m\geqslantslant \lceil t_1+t_2+\epsilon \rceil$, so now the problem is to bound $t_1+t_2$. 
Consider the set $C\subseteq \mathbb{R}^3$ consisting of points $(t_1, t_2, b_1)$ satisfying conditions $(1)-(4)$ above. Then $C$ is the convex hull of the points $(0,0,0)$, $(0,0,1)$, $(2,0,0)$, $(1,0,1)$, $(1,1,1)$, $(0,1,1)$. Therefore, for any point of $C$ we have $t_1+t_2\leqslantslant 2$, so that $K_X+mL$ is basepoint free for $m\geqslantslant 3$. This same idea applies to higher dimensions. However, the situation is considerably more complicated due to the presence of singularities of $Z$ and due to the fact that the geometry of $C$ becomes increasingly complex. In order to deal with the problem more efficiently, in higher dimensions we do not compute all the vertices of $C$, but only those for which $\sum_i t_i$ is large. In the above example, this amounts to noticing that one may rewrite $(2)$ as \[ t_1 \leqslantslant 2-b_1 \] By combining this with $(4)$, we get \[ t_1 + t_2 \leqslantslant (2-b_1) + b_1\leqslantslant 2 \] The generalization of the expression $(2-b_1)+b_1$ to higher dimensions is the function $f(\underline{b}, \underline{d}, n, 1)$ of Section \ref{sectionoptimization}. Much of the work of the paper is devoted to carefully estimating $f$ in terms of $n$ only, which then leads to the result. We would like to give here an idea on how this is done at least in the case when $X$ is a threefold. First, define $D_1$, $b_1$ and $t_1$ as above and suppose that $\operatorname{LLC}(X, D_1, x)=\{Z_1\}$, where $Z_1$ is an irreducible surface. By Theorem \ref{multiplicity} we have that \[ m_1 = \operatorname{mult}_x Z_1 \leqslantslant 3-\lceil b_1 \rceil \] For the next step, choose a $\mathbb{Q}$-divisor $D'\in |L|_{Z_1}|_{\mathbb{Q}}$ with large order of vanishing at $x$. Notice that, differently than above, the best bound we may hope for is $\operatorname{ord}_x D' \geqslantslant \frac{1}{\sqrt{m_1}}$, due to the fact that $Z_1$ is possibly singular (see Definition \ref{multdef} for the definition of order of vanishing in this context). 
Let $D''$ be a general lifting of $D'$ to $X$, let \[ t_2 = \operatorname{sup}\{c\mid (X, D_1 + cD'') \text{ is log canonical at $x$}\} \] and set $D_2 = D_1 + t_2 D''$. If we define $b_2 = b_x(X, D_2)$, then by an argument due to Helmke (see also Theorem \ref{cutlc}) we have that \[ b_2 \leqslantslant b_1 - t_2 \cdot \operatorname{ord}_x D' \leqslantslant b_1 - \frac{t_2}{\sqrt{m_1}} \] Now suppose that $\operatorname{LLC}(X, D_2, x)=\{Z_2\}$, where $Z_2$ is an irreducible curve. Then we have that $Z_2$ is smooth near $x$ by inversion of adjunction. Let $D'\in |L|_{Z_2}|_{\mathbb{Q}}$ be a divisor such that $\operatorname{ord}_x D' \geqslantslant 1$ and let $D''$ be a general lifting of $D'$ to $X$. Finally, let \[ t_3= \operatorname{sup}\{c\mid (X, D_2 + cD'') \text{ is log canonical at $x$}\} \] and set $D_3 = D_2 + t_3 D''$. We have that $t_3 \leqslantslant b_2$ and that $x$ is a log canonical center of $(X,D_3)$. Putting everything together, we get \begin{equation*} t_1 + t_2 + t_3\leqslantslant 3-b_1 + (b_1-b_2)\sqrt{3-\lceil b_1 \rceil} + b_2 \end{equation*} Notice that $b_1 \leqslantslant \operatorname{dim}(Z_1) = 2$. Therefore, $\lceil b_1 \rceil \neq 3$ and the above expression does not decrease if we decrease $b_2$. In particular, we may assume $b_2=0$. But then we get \[ t_1 + t_2 + t_3 \leqslantslant 3 - b_1 + b_1\sqrt{3-\lceil b_1 \rceil}\leqslantslant 2+\sqrt{2}<4 \] This proves Fujita's basepoint freeness conjecture in dimension three, at least in the case when the log canonical centers constructed inductively have dimension two and one respectively (the other cases being entirely analogous). As $n$ grows larger, however, bounding $f(\underline{b}, \underline{d}, n, 1)$ becomes increasingly difficult. 
For example, if $X$ is a fourfold and if there are four steps in the inductive process, the upper bound on $\sum_i t_i$ is \[ 4-b_1 + (b_1-b_2)\sqrt[3]{4-\lceil b_1 \rceil} + (b_2-b_3)\sqrt{\binom{4-\lceil b_2 \rceil}{2}}+ b_3 \] We refer to Section \ref{sectionoptimization} for the details of the estimates on $f(\underline{b}, \underline{d}, n, r)$ and we refer to Appendix \ref{appendix} for a proof of their optimality. \ \ \noindent \textbf{Acknowledgements:} We would like to thank Prof. James M\textsuperscript{c}Kernan and Prof. Bangere Purnaprajna for their encouragement and for helpful comments on the paper. We would also like to thank Fei Ye and Zhixian Zhu for interesting discussions on Section \ref{sectionsixfolds}. The research of LG is supported by the Alexander von Humboldt Research Fellowship for Postdoctoral Researchers. \section{Preliminaries} \subsection{Notation} We work over an algebraically closed field $k$ of characteristic zero. Most of the following notation is standard. $\mathbb{N}$ is the set of natural numbers, zero included. We denote the logarithmic function with natural base as $\operatorname{log}:\mathbb{R}^+ \rightarrow \mathbb{R}$. We denote by $W\colon \mathbb R_{\geqslant 0} \to \mathbb R_{\geqslant 0}$ the principal branch of Lambert's productlog function (see Definition \ref{definitionW}). A $\mathbb{Q}$-Cartier divisor $D$ on a normal variety $X$ is nef if $D\cdot C\geqslantslant 0$ for any curve $C\subseteq X$. We use the symbol $\sim_\mathbb{Q}$ to indicate $\mathbb{Q}$-linear equivalence and the symbol $\equiv$ to indicate numerical equivalence. We denote by $|D|_\mathbb{Q}$ the $\mathbb{Q}$-linear series of a $\mathbb{Q}$-Cartier divisor $D$. A pair $(X,\Delta)$ consists of a normal variety $X$ and a $\mathbb{Q}$-Weil divisor $\Delta$ such that $K_X + \Delta$ is $\mathbb{Q}$-Cartier. If $\Delta\geqslantslant 0$, we say $(X,\Delta)$ is a log pair. 
If $f:Y \rightarrow X$ is a birational morphism, we may write $K_Y + f^{-1}_* \Delta = f^*(K_X+ \Delta) + \sum_i a_i E_i$ with $E_i$ $f$-exceptional divisors. A log pair $(X,\Delta)$ is called log canonical (or lc) if $a_i\geqslantslant -1$ for every $i$ and for every $f$, and it's called Kawamata log terminal (or klt) if $a_i>-1$ for every $i$ and $f$, and furthermore $\lfloor\Delta \rfloor=0$. The rational numbers $a_i$ are called the discrepancies of $E_i$ with respect to $(X,\Delta)$ and do not depend on $f$. We say that a subvariety $V\subseteq X$ is a non klt center if it is the image of a divisor of discrepancy at most $-1$. A non klt center $V$ is a log canonical center if $(X,\Delta)$ is log canonical at the generic point of $V$. A non klt place (respectively log canonical place) is a valuation corresponding to a divisor of discrepancy at most (respectively equal to) $-1$. The set of all log canonical centers passing though $x\in X$ is denoted by $\operatorname{LLC}(X,\Delta,x)$, and the union of all the non klt centers is denoted by $\operatorname{Nklt}(X,\Delta,x)$. Finally, the log canonical threshold of $(X,\Delta)$ at a point $x$ is $\operatorname{lct}(X,\Delta,x)=\operatorname{sup}\{c>0| (X,c\Delta) \text{ is lc at $x$}\}$. \subsection{Log canonical centers} We recall here some standard definitions and results in birational geometry for the convenience of the reader. \begin{definition} Let $X$ be an irreducible projective variety of dimension $n$ and let $D$ be a $\mathbb{Q}$-Cartier divisor. Let $m$ be a positive integer such that $mD$ is Cartier. The volume of $D$ is: \[ \operatorname{vol}(X,D)=\limsup_{k\to\infty}\frac{n! h^0(X,kmD)}{(km)^n}. \] \end{definition} \begin{definition}\label{multdef} Let $X$ be an irreducible projective variety, let $x$ be a point of $X$ and let $D$ be a $\mathbb{Q}$-Cartier divisor on $X$. Let $m$ be a positive integer such that $mD$ is Cartier and let $f\in \mathcal{O}_{X,x}$ be a defining equation. 
Then we define the order of vanishing of $D$ at $x$ as \[ \operatorname{ord}_x D = \frac{1}{m}\operatorname{max}\{s\in \mathbb{N}| f\in \mathfrak{m}_x ^s\} \] \end{definition} \begin{lemma}\label{volumesections} Let $X$ be an irreducible projective variety of dimension $n$ and let $D$ be a $\mathbb{Q}$-Cartier divisor on $X$. Let $T$ be a finite set of points of $X$ of cardinality $r$. Then there is a $\mathbb{Q}$-divisor $D'\in |D|_{\mathbb{Q}}$ such that \[ \operatorname{ord}_x D' \geqslantslant \Big( \frac{\operatorname{vol}(X, D)}{r\operatorname{mult}_x X} \Big)^{1/n} \] for all $x\in T$. \end{lemma} \begin{proof} See \cite[Proposition 3.2]{helmke1} or \cite[Proposition 2.1]{kawamata1}. \end{proof} \begin{definition} Let $(X,\Delta)$ be a log pair with $X$ smooth, and let $\mu : Y\rightarrow X$ be a log resolution. We define the multiplier ideal sheaf of the pair $(X,\Delta)$ to be \[ \mathcal{I}(X,\Delta) = \mu _* \mathcal{O}_Y (K_{Y/X} - \lfloor \mu ^* \Delta \rfloor) \subseteq \mathcal{O}_X \] \end{definition} We have that $(X,\Delta)$ is klt if and only if $\mathcal{I}(X,\Delta)=\mathcal{O}_X$, and it is lc if and only if $\mathcal{I}(X, (1-\epsilon)\Delta)=\mathcal{O}_X$ for any $0<\epsilon\ll 1$. Therefore $\operatorname{Nklt}(X,\Delta)=\operatorname{Supp}(\mathcal{O}_X / \mathcal{I}(X,\Delta))$. \begin{theorem}[Nadel vanishing theorem]\label{nadel} Let $X$ be a smooth projective variety and $\Delta\geqslantslant 0$ a $\mathbb{Q}$-divisor on $X$. Let $D$ be any integral divisor such that $D-\Delta$ is big and nef. Then $H^i (X, \mathcal{O}_X (K_X + D)\otimes \mathcal{I}(X,\Delta))=0$ for $i>0$. \end{theorem} \begin{proof} See \cite[Section 9.4.B]{lazarsfeld2}. \end{proof} \begin{proposition}\label{mult} Let $X$ be an irreducible variety of dimension $n$ and let $\Delta$ be an effective $\mathbb{Q}$-divisor on $X$. 
If $\operatorname{ord}_x \Delta\geqslantslant n$ at some smooth point $x\in X$, then $\mathcal{I}(X,\Delta)_x\subseteq \mathfrak{m}_x$, where $\mathfrak{m}_x$ is the maximal ideal of $x$. \end{proposition} \begin{proof} This is \cite[Proposition 9.3.2]{lazarsfeld2}. \end{proof} \begin{lemma}\label{mlc} Let $(X,\Delta)$ be a log pair such that $\Delta$ is $\mathbb{Q}$-Cartier. Assume that $X$ is klt and $(X,\Delta)$ is lc. If $W_1$ and $W_2$ are log canonical centers of $(X,\Delta)$ and $W$ is an irreducible component of $W_1 \cap W_2$, then $W$ also is a log canonical center of $(X,\Delta)$. In particular, if $(X,\Delta)$ is not klt at $x\in X$, then there exists the unique minimal element of $\operatorname{LLC}(X,\Delta,x)$. \end{lemma} \begin{proof} See \cite[Proposition 1.5]{kawamata1}. \end{proof} We will refer to the following result as \say{tie breaking}. \begin{lemma}\label{tiebreak} Let $(X,\Delta)$ be a log pair such that $X$ is klt and $\Delta$ is $\mathbb{Q}$-Cartier. Let $S$ be a finite set of points of $X$. Suppose that there is a point $x\in S$ such that $\{x\}\in \operatorname{LLC}(X,\Delta,x)$ and that for each point $y\in S\setminus\{x\}$ there is a non klt center of $(X,\Delta)$ containing $y$ but not $x$. Let $D$ be an ample $\mathbb{Q}$-Cartier divisor. Then there exists a positive rational number $a>0$ such that for any $0<\epsilon\ll 1$ there exists a $\mathbb{Q}$-Cartier divisor $E\in |aD|_{\mathbb{Q}}$ such that \begin{enumerate} \item $(X,(1-\epsilon)\Delta + \epsilon E)$ is not klt at any point of $S$. \item $(X,(1-\epsilon)\Delta + \epsilon E)$ is lc at $x$. \item $\operatorname{LLC}(X,(1-\epsilon)\Delta + \epsilon E, x)=\{x\}$. \end{enumerate} \end{lemma} \begin{proof} This is an analogue of \cite[Proposition 6.2]{helmke1}. For each $y\in S\setminus\{x\}$ let $v_y$ be a non klt place of $(X,\Delta)$ whose center $Z_y$ contains $y$ but not $x$. 
Let $b$ be a positive rational number such that there exists $E_1\in|bD|_{\mathbb{Q}}$ with $v_y(E_1) > v_y(\Delta)$ for all $y\in S\setminus\{x\}$. After possibly taking a larger $b$ we may assume that the common support of all such $E_1$ is exactly the union of the $Z_y$. Similarly, let $v_x$ be a log canonical place of $(X,\Delta)$ with center $x$ and let $c$ be a positive rational number such that there exists $E_2\in |cD|_{\mathbb{Q}}$ with $v_x(E_2) > v_x(\Delta)$. Again, after possibly taking a larger $c$ we may assume that the common support of all such $E_2$ is exactly $x$. Set $a=b+c$. For general choices of $E_1$ and $E_2$ the pair $(X,(1-\epsilon)\Delta + \epsilon E_1 + \epsilon E_2)$ is not lc at any point of $S$ for any small $\epsilon$. Let \[ t=\operatorname{sup}\{d|(X,(1-\epsilon)\Delta + \epsilon E_1 + d\epsilon E_2) \text{ is lc at $x$}\} \] Clearly $t<1$. Finally, take $E_3\in |(1-t)c D|_{\mathbb{Q}}$ general enough. We have \[ \operatorname{LLC}(X, (1-\epsilon)\Delta + \epsilon (E_1 + tE_2 + E_3))=\{x\} \] Furthermore, $S$ is contained in $\operatorname{Nklt}(X, (1-\epsilon)\Delta + \epsilon (E_1 + tE_2 + E_3))$ and $E_1 + tE_2 + E_3\in |aD|_{\mathbb{Q}}$. Therefore, we may take $E=E_1 + tE_2 + E_3$. \end{proof} \section{The inductive method}\label{sectioninduction} In this section we describe an inductive method for cutting down the dimension of non klt centers. This is essentially due to Helmke (see in particular \cite[Proposition 6.3]{helmke1}). Since we will need Helmke's result in a slightly different form, we go over its proof and make the appropriate changes. \begin{proposition}\label{cutlc} Let $(X,\Delta)$ be a log pair, where $X$ is a smooth projective variety of dimension $n$. Let $S$ be a finite set of points contained in $\operatorname{Nklt}(X,\Delta)$ and let $r$ be the cardinality of $S$. Let $T$ be a nonempty subset of $S$ such that: \begin{enumerate} \item $(X,\Delta)$ is log canonical at all points of $T$. 
\item All points of $T$ share a common minimal log canonical center $Z$. \item Every point in $S\setminus T$ is contained in a non klt center of $(X,\Delta)$ that does not contain any point of $T$. \end{enumerate} Let $d=\operatorname{dim}Z$ and let $D$ be an ample $\mathbb{Q}$-divisor. If $d>0$, then there exists a nonempty subset $T'$ of $T$, a rational number $t$ such that \[ 0\leqslantslant t \leqslantslant b_x(X,\Delta) \Big( \frac{r\operatorname{mult}_x Z}{D^d \cdot Z}\Big) ^{1/d} \] for every point $x\in T$ and a $\mathbb{Q}$-divisor $D'\in |D|_{\mathbb{Q}}$ such that \begin{enumerate} \item $(X,\Delta+tD')$ is log canonical at all points of $T'$. \item All points of $T'$ share a common minimal log canonical center $Z'$ strictly contained in $Z$. \item Every point in $S\setminus T'$ is contained in a non klt center of $(X,\Delta+tD')$ that does not contain any point of $T'$. \end{enumerate} Furthermore \[ b_x (X,\Delta+tD')\leqslantslant b_x(X,\Delta) - t \cdot \Big( \frac{D^d\cdot Z}{r\operatorname{mult}_x Z}\Big)^{1/d} \] for all points $x\in T'$. \end{proposition} \begin{proof} Notice that $D^d \cdot Z=\operatorname{vol}(Z, D|_Z)$. Then by Lemma \ref{volumesections} there exists a $\mathbb{Q}$-divisor $D''\in |D|_Z|_{\mathbb{Q}}$ such that \[ \operatorname{ord}_x D'' \geqslantslant \Big( \frac{D^d\cdot Z}{r\operatorname{mult}_x Z}\Big)^{1/d} \] for every point $x$ in $T$. Let $D'\in|D|_{\mathbb{Q}}$ be a general lifting of $D''$ to $X$ and let \[ t=\sup\{c| (X,\Delta+cD') \text{ is log canonical at some point of $T$}\} \] By the definition of local discrepancy over $x$ and by the proof of \cite[Proposition 3.2]{helmke1}, we have \[ 0\leqslantslant t \leqslantslant b_x(X,\Delta) \Big( \frac{r\operatorname{mult}_x Z}{D^d \cdot Z}\Big) ^{1/d} \] for all points $x$ in $T$. Let $T_1$ be the set of points of $T$ where $(X,\Delta+tD')$ is log canonical. For each $x$ in $T_1$ let $Z_x$ be the minimal log canonical center of $(X,\Delta+tD')$ at $x$. 
By construction, for any $x\in T_1$ we have that $Z_x$ is strictly contained in $Z$. Choose a maximal element $Z'$ in the set $\{Z_x | x\in T_1\}$ ordered by inclusion and let $T'=\{x\in T_1| Z_x = Z'\}$. Now, if $x\in S\setminus T$ then there is a non klt center of $(X,\Delta+tD')$ containing $x$ but none of the points of $T'$ by hypothesis. If $x\in T\setminus T'$ either $(X,\Delta+tD')$ is not log canonical at $x$, or $(X,\Delta+tD')$ is log canonical at $x$ but the minimal log canonical center $Z_x$ does not contain $Z'$. In either case, if $x\in S\setminus T$ there is a non klt center which does not contain any of the points of $T'$. The final statement of the Proposition is also clear. \end{proof} \begin{remark}\label{fulldim} If $\Delta=0$ in Proposition \ref{cutlc}, then the conclusion holds without any hypothesis. \end{remark} Proposition \ref{cutlc} shows that it is crucial to have control over the singularities of log canonical centers. In this direction, we have: \begin{theorem}\label{multiplicity} Let $X$ be a smooth projective variety and $(X,\Delta)$ be log canonical at $x\in X$. Let $Z_d$ be the union of the elements of $\operatorname{LLC}(X,\Delta,x)$ of dimension $d$. Then \[ \operatorname{mult}_x Z_d \leqslantslant \binom{n-\lceil b_x(X,\Delta)\rceil}{n-d} \] \end{theorem} \begin{proof} See \cite[Theorem 4.3]{helmke1}. \end{proof} \begin{comment} We introduce now the second inductive method, which is originally due to Angehrn and Siu \cite{angehrnsiu}. We take here an algebraic approach due to Koll\'{a}r \cite{pairs}. \begin{proposition}\label{cutlc2} Let $(X,\Delta)$ be a log pair, where $X$ is a smooth projective variety of dimension $n$. Let $S$ be a finite set of points contained in $\operatorname{Nklt}(X,\Delta)$ and let $r$ be the cardinality of $S$. Let $T$ be a nonempty subset of $S$ such that: \begin{enumerate} \item $(X,\Delta)$ is log canonical at all points of $T$. 
\item All points of $T$ share a common minimal log canonical center $Z$. \item Every point in $S\setminus T$ is contained in a non klt center of $(X,\Delta)$ that does not contain any point of $T$. \end{enumerate} Let $d=\operatorname{dim}Z$ and let $D$ be an ample $\mathbb{Q}$-divisor. If $d>0$, there exists a subset $T'$ of $T$, a rational number \[ 0\leqslantslant t \leqslantslant d \Big( \frac{r}{D^d \cdot Z}\Big) ^{1/d} \] and a $\mathbb{Q}$-divisor $D'\in |D|_{\mathbb{Q}}$ such that \begin{enumerate} \item $(X,\Delta+tD')$ is log canonical at all points of $T'$. \item All points of $T'$ share a common minimal log canonical center $Z'$ strictly contained in $Z$. \item Every point in $S\setminus T'$ is contained in a non klt center of $(X,\Delta+tD')$ that does not contain any point of $T'$. \end{enumerate} \end{proposition} \begin{proof} The proof is almost the same as that of Proposition \ref{cutlc1}, with the only difference being the way we create non klt centers on $Z$. We choose points $\tilde{T}$ sufficiently close to $T$ and lying in the smooth locus of $Z$. Then by Lemma \ref{volumesections} there exists a $\mathbb{Q}$-divisor $\tilde{D}''\in |D|_Z|_{\mathbb{Q}}$ such that \[ \operatorname{mult}_x \tilde{D}'' \geqslantslant \Big( \frac{D^d\cdot Z}{r}\Big)^{1/d} \] for every point $x$ in $\tilde{T}$. In particular, $t\tilde{D}''$ is not klt at any point of $\tilde{T}$ for \[ t\geqslantslant d \Big( \frac{r}{D^d \cdot Z}\Big) ^{1/d} \] Let $D''\in|D_Z|_{\mathbb{Q}}$ be the limit of the divisors $\tilde{D}''$ for $\tilde{T}\rightarrow T$ as in the proof of \cite[Theorem 6.4]{pairs}. Now proceed exactly as in Proposition \ref{cutlc1}. \end{proof} \end{comment} \section{Optimization}\label{sectionoptimization} Let $s<n$ be nonnegative integers and let $r$ be any positive integer. 
Consider the set $R_{s,n} \subseteq \mathbb{R}^{s+2}\times \mathbb{N}^{s+2}$ consisting of elements \[ (\underline{b}, \underline{d}) = (b_0, b_1, \cdots, b_s, b_{s+1}, d_0, d_1, \cdots, d_s, d_{s+1}) \] satisfying the following conditions: \[ 0= b_{s+1} < b_s < \cdots < b_1 < b_0=n \] \[ 0= d_{s+1} < d_s < \cdots < d_1<d_0=n \] and $b_i \leqslant d_i$ for all $1\leqslant i\leqslant s$. This section is devoted to the study of the functions: \[ f(\underline{b}, \underline{d}, n, r)=\sum_{i=0} ^ s (b_i - b_{i+1}) \left[ r\binom{n-\lceil b_i\rceil}{n-d_i} \right]^{1/d_i} \] and \[ F(n,r)=\operatorname{max}\{f(\underline{b}, \underline{d}, n, r)\mid (\underline{b}, \underline{d})\in \cup_{s=0} ^{n-1} R_{s,n}\} \] In particular, we aim to prove the following upper bounds. \begin{theorem}\label{upperbound} Let $n$ and $r$ be positive integers. Then: \begin{enumerate} \item $F(n,1)<\operatorname{max}\{n+1, n(\operatorname{log}\operatorname{log}(n)+2.34)\}$ \item $F(n,r) < r+n-1 + \sqrt{r}\, n (\operatorname{log}\operatorname{log}(n)+2.34)$ \end{enumerate} \end{theorem} We start by pointing out that in order to maximize $f$, it is sufficient to consider integral values of $b_i$. \begin{lemma}\label{maxinteger} Let $(\underline{b}, \underline{d})\in R_{s,n}$. Then there is an integer $s'\leqslant s$ and an element $(\underline{b}', \underline{d}')\in R_{s', n}$ such that the vector $\underline{b}'$ consists of integers and $f(\underline{b}, \underline{d}, n, r)\leqslant f(\underline{b}', \underline{d}', n, r)$.
\end{lemma} \begin{proof} Consider the set $B_s\subseteq \mathbb R^{s+2}$ consisting of elements $$ \underline b' = (b'_0,\cdots, b'_{s+1} ) $$ satisfying the conditions \begin{gather*} 0= b'_{s+1}\leqslantslant b'_s \leqslantslant \cdots \leqslantslant b'_1 \leqslantslant b'_0=n, \\ \lceil b_i\rceil -1 \leqslantslant b'_i \leqslantslant \lceil b_i \rceil \end{gather*} Consider the linear function: \[ L(\underline{b}') = \sum_{i=0} ^ s (b'_i - b'_{i+1}) \left[ r\binom{n-\lceil b_i\rceil}{n-d_i} \right]^{1/d_i} \] Notice that if $\underline{b'}$ is in the interior of $B_s$ then we have that $f(\underline{b'}, \underline{d}, n, r)=L(\underline{b'})$. Also, $f(\underline{b}, \underline{d}, n, r)=L(\underline{b})$. The set $B_{s}$ is a convex compact subset of $\mathbb R^{s+2}$, therefore $L$ achieves its maximum value at a vertex $\underline b'\in B_s$. By construction, all the vertices of $B_s$ have integral coordinates. We have: $$ f(\underline{b}, \underline{d}, n, r) = L(\underline b)\leqslantslant L(\underline b')\leqslantslant f(\underline b', \underline d, n, r).$$ After possibly erasing the entries of $b_i'$ which are repeated and the corresponding $d_i'$, we may assume that $(\underline{b'}, \underline{d'})$ belongs to $R_{s',n}$ for some $s'\leqslantslant s$. \end{proof} Notice that if $n$ and $r$ are fixed, Lemma \ref{maxinteger} reduces the computation of $F(n,r)$ to finitely many steps. If $n$ is small enough, this computation may be carried out by a computer. We list in Table 1 the first few values of $\lfloor F(n,r) \rfloor$. \begin{table}[h!] 
\label{tablefirstvalues} \begin{center} \begin{tabular} {c|| cccccccccccccccc} \diaghead(3,-2){fjjak} {\,$r$} {$n$} & \ 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15 & 16 & 17\\ \midrule 1& \ 2 & 3 & 4 & 6 & 8 & 9 & 11&13&15 & 17 & 19 & 21 & 24 & 26 & 28 &30\\ 2&\ 3 & 4 & 6 & 8 &10&11&13&15 &18 & 20 & 22 & 24 & 26 & 28 & 30 & 33\\ \bottomrule \end{tabular} \end{center} \caption {Values of $\lfloor F(n,r) \rfloor$ for $2\leqslantslant n \leqslantslant 17$ and $r=1,2$.} \end{table} Next, by dropping the condition that $d_{j+1}$ is strictly less than $d_{j}$ for all $j$, we show that we may reduce to the case in which $\underline b$ is the sequence of natural numbers $(0,1,\cdots, n)$ ranging from 0 to $n$. \begin{lemma}\label{sumbound} Let $(\underline{b}, \underline{d})\in R_{s,n}$. Then there is a function $d:\{1,\cdots ,n\}\to \mathbb N$ with $b\leqslant d(b)\leqslant n$ such that: \[ f(\underline{b},\underline{d},n,r) \leqslantslant \sum_{b=1} ^n \left[ r\binom{n-b}{n-d(b)}\right] ^{1/d(b)} \] \end{lemma} \begin{proof} By Lemma \ref{maxinteger}, we may assume that all the $b_i$ are integers. Now if $x\leqslantslant n$ is a positive real number, we define $i(x)$ by the property $b_{i(x)+1} < x \leqslantslant b_{i(x)}$ and set $d(x)=d_{i(x)}$. Therefore, we have: \begin{equation*} \begin{split} f(\underline{b}, \underline{d}, n, r)&=\sum_{i=0} ^ {s} (b_i - b_{i+1}) \left[ r\binom{n-b_i}{n-d_i} \right]^{1/d_i}\\ &= \sum_{i=0} ^{s} \sum_{b=b_{i+1}+1} ^{b_i} \left[ r\binom{n-b_i}{n-d_i} \right]^{1/d_i}\\ &\leqslantslant\sum_{i=0} ^{s} \sum_{b=b_{i+1}+1} ^{b_i} \left[ r\binom{n-b}{n-d(b)} \right]^{1/d(b)}\\ &=\sum_{b=1} ^n \left[ r\binom{n-b}{n-d(b)}\right] ^{1/d(b)} \end{split} \end{equation*} \end{proof} We now need to measure the contribution of each term in Lemma \ref{sumbound}. We start with the following elementary estimate, which is not optimal, but already implies an upper bound on $F(n,r)$ that is quadratic in $n$ and essentially linear in $r$. 
\begin{lemma}\label{elementaryestimate} Let $b\leqslant d\leqslant n $ and $r$ be positive integers. Then $$\left[ r\binom{n-b}{n-d}\right] ^{1/d} \leqslant \sqrt[b]r + n-b $$ \end{lemma} \begin{proof} First, we have that $\binom{n-b}{n-d}\leqslant n^{d-b}$. Then, using Young's inequality $A^\lambda B^{1-\lambda} \leqslant \lambda A + (1-\lambda) B$ we get \begin{align*} r^{1/d} \binom{n-b}{n-d}^{1/d} &\leqslant (\sqrt[b] r)^{b/d} n^{1- b /d}\\ & \leqslant \frac b d \sqrt[b] r + \left(1 - \frac b d\right) n\\ & \leqslant \frac b b \sqrt[b] r + \left(1 - \frac b n\right) n\\ & = \sqrt[b]r + n-b \end{align*} \end{proof} \begin{corollary}\label{elementarycorollary} For all positive integers $n$ and $r$ we have $$F(n,r) \leqslant \frac {n(n-1)} 2 + \sum _{b=1}^n \sqrt[b]r $$ \end{corollary} \begin{proof} This follows from Lemma \ref{sumbound} and Lemma \ref{elementaryestimate}. \end{proof} \subsection{Optimization with Lambert's W function} In order to give sharper estimates on $F(n,r)$ for large values of $n$, it is convenient to introduce the Lambert function. \begin{definition}\label{definitionW} Consider the function $u:\mathbb{R}_{\geqslantslant 0}\rightarrow \mathbb{R}_{\geqslantslant 0}$ defined by $u(x)=xe^x$. We define the Lambert function $W:\mathbb{R}_{\geqslantslant 0}\rightarrow \mathbb{R}_{\geqslantslant 0}$ as the inverse of $u$. \end{definition} \begin{lemma}\label{Wbound} Let $b\leqslantslant d\leqslantslant n$ and $r$ be positive integers. Then \[ \left[ r\binom{n-b}{n-d}\right] ^{1/d}\leqslantslant \sqrt[b]{r} \operatorname{exp}\left( W\left( \frac{n}{b \sqrt[b]{r}} \right) \right) \] \end{lemma} \begin{proof} If $b=d$ there is nothing to prove, because the binomial on the left-hand side reduces to 1, while the exponential on the right-hand side is $\geqslantslant 1$. Therefore, suppose that $d>b$. 
By the basic version of Stirling's inequality $A!\geqslantslant (A/e)^A$ we obtain: \[ \binom{n-b}{n-d} = \frac{(n-b)\cdot (n-b-1) \cdots (n-d+1)}{(d-b)!}\leqslantslant \left( \frac{en}{d-b} \right) ^{d-b} \] Therefore: \[ \binom{n-b}{n-d}^{1/d} \leqslantslant \left( \frac{en}{d-b} \right) ^{\frac{d-b}{d}} \] Let $\delta=d-b$. By taking the logarithmic derivative in $\delta$, we see that the expression \[ \psi(\delta)=r^{\frac{1}{b+\delta}} \left( \frac{en}{\delta} \right) ^{\frac{\delta}{b+\delta}} \] is maximized when \[ \delta + \operatorname{log}(r) = b(\operatorname{log}(n)-\operatorname{log}(\delta)) \] Then $\delta$ may be expressed as $\delta=b w $ where $w = W\big( \frac{n}{b\sqrt[b]{r}}\big)$. Now, plugging this value of $\delta$ into $\psi$, and using the defining properties of $W$, we get: \[ r^{\frac{1}{b+\delta}} \left( \frac{en}{\delta} \right) ^{\frac{\delta}{b+\delta}} = \sqrt[b]{r}^{\frac 1 {1+w}} \left(e \sqrt [b]r e^w \right)^{\frac w {w+1}} = \sqrt [b]r e^w \] \end{proof} \begin{remark}\label{remarkWbound} By the properties of the Lambert function, the previous lemma may also be written in the following way: $$ \left[ r\binom{n-b}{n-d}\right] ^{1/d} \leqslant n/\delta (b) $$ where $\delta(b) = b W\big( n/({b\sqrt[b]{r}})\big)$. \end{remark} We are ready to prove Theorem \ref{upperbound}. We start with its first part. \begin{theorem}\label{upperbound1} Let $n$ be a positive integer. 
Then: \[ F(n,1)<\operatorname{max}\{n+1, n(\operatorname{log}\operatorname{log}(n)+2.34)\} \] \end{theorem} \begin{proof} By Lemma \ref{sumbound} and Lemma \ref{Wbound}, we get: \[ f(\underline{b}, \underline{d}, n, 1) \leqslant \sum_{b=1} ^n e^{W(n/b)}\leqslant e^{W(n)} + \int_1 ^n e^{W(n/x)}dx \] By the change of variable $t=n/b$ we get: \[ \int_1 ^n e^{W(n/x)}dx = n\int_1 ^n \frac{e^{W(t)}}{t^2} dt = n\left[\operatorname{log}W(t)- \frac{1}{W(t)}\right]_1 ^n \] Therefore \begin{equation*} \begin{split} f(\underline{b}, \underline{d}, n, 1) &\leqslant n\left (\frac 1 {W(n)}+ \operatorname{log}W(n)-\frac 1 {W(n)}-\operatorname{log}W(1)+\frac 1 {W(1)}\right)\\ &= n \ \big(\operatorname{log}W(n)-\operatorname{log}W(1)+W(1)^{-1}\big) \end{split} \end{equation*} Now, for $n\geqslant 3$ we have that $W(n)\leqslant \log n$ and so \[ \operatorname{log}W(n)-\operatorname{log}W(1)+1/W(1)< \operatorname{log}\operatorname{log}(n)+2.34 \] For $n<3$, we may use Table 1 instead. \end{proof} Similarly, we prove now the second part of Theorem \ref{upperbound}. \begin{theorem}\label{upperbound2} Let $n,r\geqslant 2$ be integers. Then: \[ F(n,r)<r+n-1+\sqrt r n(\operatorname{log}\operatorname{log}(n)+2.34) \] \end{theorem} \begin{proof} As in the case $r=1$, we start with Lemma \ref{sumbound}, which gives \[ f(\underline{b}, \underline{d}, n, r) \leqslant \sum_{b=1} ^n \left[ r\binom{n-b}{n-d}\right] ^{1/d} \] This time, however, we estimate with Lemma \ref{Wbound} only the terms of the sum with $b\geqslant 2$. For the first term, instead, we use Lemma \ref{elementaryestimate}.
We get \[ f(\underline{b}, \underline{d}, n, r) \leqslant r+ n-1 +\sum_{b=2} ^n \sqrt[b]r e^{W(n/(b\sqrt[b]r))} \] Since $W(n/(b\sqrt[b]r))\leqslant W(n/b)$ and $\sqrt[b]r\leqslant \sqrt r$ for $b\geqslant 2$, we may continue as in the proof of Theorem \ref{upperbound1}: \begin{align*} f(\underline{b}, \underline{d}, n, r) &\leqslant r+n-1 + \sqrt r\, \int_1 ^n e^{W(n/x)}dx\\ & \leqslant r+n-1 + \sqrt r \, n (\log W(n) - W(n)^{-1} + 2.34)\\ & \leqslant r+n-1 + \sqrt r\, n (\log \log n + 2.34) \end{align*} \end{proof} \section{The main result}\label{sectionmainresult} Here we apply the methods developed so far to prove Theorem \ref{intro1} and Theorem \ref{intro3}. We start with the following: \begin{theorem}\label{main} Let $X$ be a smooth projective variety of dimension $n$ and let $D$ be an ample $\mathbb{Q}$-divisor. Let $S$ be any finite set of points of $X$ of cardinality $r$. Suppose that \[ D^d \cdot Z\geqslant 1 \] for all irreducible $d$-dimensional subvarieties $Z$ containing at least one point of $S$. Fix any positive rational number $0<\epsilon\ll 1$. Then there exists a point $x\in S$ and a $\mathbb{Q}$-divisor $\Delta\in |tD|_{\mathbb{Q}}$ such that: \begin{enumerate} \item $t < F(n,r)+\epsilon$. \item $(X,\Delta)$ is log canonical but not Kawamata log terminal at $x$. \item $\operatorname{LLC}(X,\Delta,x)=\{x\}$. \item $S$ is contained in $\operatorname{Nklt}(X,\Delta)$. \end{enumerate} \end{theorem} \begin{proof} We define inductively a sequence of $\mathbb{Q}$-divisors, subvarieties, finite sets of points, positive rational numbers and positive integers $(D_i, Z_i, T_i, t_i, d_i)$ for $0\leqslant i\leqslant s+1$ as follows. Set $D_0 = 0$, $Z_0 = X$, $T_0 = S$, $t_0 = 0$ and $d_0 = n$. Now suppose that we are given $(D_i, Z_i, T_i, t_i, d_i)$. If $i>0$, suppose that: \begin{enumerate} \item $(X, D_i)$ is log canonical at all points of $T_i$. \item All points of $T_i$ share a common minimal log canonical center $Z_i$.
\item Every point in $S\setminus T_i$ is contained in a non klt center of $(X, D_i)$ that does not contain any point of $T_i$. \item $d_i = \operatorname{dim}(Z_i)$. \end{enumerate} If $d_i = 0$, we stop. If not, we construct $(D_{i+1}, Z_{i+1}, T_{i+1}, t_{i+1}, d_{i+1})$ as follows. By Proposition \ref{cutlc} (see also Remark \ref{fulldim} for the case $i=0$), there exists a nonempty subset $T'$ of $T_i$, a rational number $t$ and a $\mathbb{Q}$-divisor $D'\in |D|_{\mathbb{Q}}$ such that \begin{enumerate} \item $(X, D_i + tD')$ is log canonical at all points of $T'$. \item All points of $T'$ share a common minimal log canonical center $Z'$ strictly contained in $Z_i$. \item Every point in $S\setminus T'$ is contained in a non klt center of $(X, D_i + tD')$ that does not contain any point of $T'$. \item For all points $x\in T'$ we have \[ b_x(X,D_i+tD')\leqslantslant b_x(X,D_i) - \frac{t}{(r\operatorname{mult}_x Z_i)^{1/d_i}} \] \end{enumerate} We set $D_{i+1} = D_i + tD'$, $Z_{i+1}=Z'$, $T_{i+1}=T'$, $t_{i+1}=t$ and $d_{i+1}=\operatorname{dim}(Z')$. By construction, $Z_{s+1}$ is zero dimensional and non-empty. Let $x$ be any point contained in $Z_{s+1}$. We define a sequence of positive rational numbers and positive integers $(b_i, m_i)$ for $0\leqslantslant i\leqslantslant s+1$ as follows. We set $b_0 = n$ and $m_0=1$. For any $i>0$, we set $b_i = b_x(X, D_i)$ and $m_i =\operatorname{mult}_x Z_i$. By $(4)$ above we have that for all $0\leqslantslant i\leqslantslant s$: \[ t_{i+1} \leqslantslant (b_i-b_{i+1})\cdot (rm_i)^{1/d_i}\tag{\text{*}} \] By Theorem \ref{multiplicity} we have that \[ m_i \leqslantslant \binom{n-\lceil b_i\rceil}{n-d_i} \] Therefore by $(*)$: \[ \sum_{i=0} ^s t_{i+1}\leqslantslant f(\underline{b}, \underline{d}, n, r) \leqslantslant F(n,r) \] We may now conclude by tie breaking with Lemma \ref{tiebreak}. 
\end{proof} \begin{theorem}\label{gg} Let $X$ be a smooth projective variety of dimension $n$, let $L$ be an ample line bundle and let $r$ be a positive integer. Then $K_X + mL$ separates $r$ points for $m > F(n,r)$. \end{theorem} \begin{proof} Since $L$ is a line bundle we have that $L^d \cdot Z\geqslantslant 1$ for all irreducible $d$-dimensional subvarieties $Z$. Let $m>F(n,r)$ be any positive integer. We prove that $K_X + mL$ separates $r$ points by induction on $r$. The first step is to show that $K_X + mL$ is base point free. Let $x$ be any point of $X$ and let $t$ and $\Delta\in|L|_{\mathbb{Q}}$ be as in Theorem \ref{main} with $S=\{x\}$. Then $t < F(n,r)+\epsilon$ for a small positive rational number $\epsilon$. Consider the short exact sequence \[ 0\rightarrow \mathcal{O}_X (K_X+mL)\otimes \mathcal{I}(X,\Delta)\rightarrow \mathcal{O}_X (K_X+mL)\rightarrow \frac{\mathcal{O}_X (K_X+mL)}{\mathcal{I}(X,\Delta)\otimes \mathcal{O}_X (K_X+mL)}\rightarrow 0 \] Since $\operatorname{LLC}(X,\Delta,x)=\{x\}$, we have that $\mathcal{O}_x$ is a direct summand of \[ \frac{\mathcal{O}_X (K_X+mL)}{\mathcal{I}(X,\Delta)\otimes \mathcal{O}_X (K_X+mL)} \] Therefore, by taking the associated long exact sequence and by using Theorem \ref{nadel}, we get a surjection \[ H^0(X, \mathcal{O}_X (K_X+mL))\rightarrow k \] which is what we wanted. Now suppose that $K_X+mL$ separates all $r-1$ points. Fix any set $S$ of $r$ points of $X$. Again, let $t$ and $\Delta\in|L|_{\mathbb{Q}}$ be as in Theorem \ref{main} with this choice of $S$ and let $x$ be a point of $S$ such that $\operatorname{LLC}(X,\Delta,x)=\{x\}$. Consider once more the short exact sequence above. 
We have a splitting: \[ \frac{\mathcal{O}_X (K_X+mL)}{\mathcal{I}(X,\Delta)\otimes \mathcal{O}_X (K_X+mL)} = \mathcal{O}_x \oplus \frac{\mathcal{O}_X(K_X+mL)}{\mathcal{I}'\otimes \mathcal{O}_X(K_X+mL)} \] By lifting a section that is $0$ on the second factor and $1$ on the first factor, we get a section $s\in H^0(X, \mathcal{O}_X(K_X+mL))$ that vanishes on $S\setminus \{x\}$ and that does not vanish on $x$. Since $K_X + mL$ separates points of $S\setminus \{x\}$ by induction, we have that $K_X+mL$ separates points of $S$. Since $S$ is arbitrary, the statement follows. \end{proof} \begin{proof}(of Theorem \ref{intro1} and Theorem \ref{intro3}). Immediate from Theorem \ref{gg} and Theorem \ref{upperbound}. \end{proof} We also record here the following result. \begin{corollary} Let $X$ be a smooth projective threefold and let $L$ be an ample line bundle. Then $|K_X+5L|$ defines a birational morphism. \end{corollary} \begin{proof} This is a consequence of Theorem \ref{gg} and the fact that $F(3,2)<5$ by Table 1. \end{proof} \subsection{Sixfolds}\label{sectionsixfolds} Table 1 in Section \ref{sectionoptimization} shows that for $n\leqslantslant 4$ the values of the function $F(n,1)$ are enough to prove Fujita's basepoint freeness conjecture. For larger values of $n$, however, the geometry of the problem is not fully reflected in the combinatorics of $F$. In fact, it is possible to carry out a slightly finer study by sharpening the inequalities appearing in the proof of Theorem \ref{main} in certain geometric situations. This was for instance done in \cite{yezhu1} and \cite{yezhu2} to prove Fujita's freeness conjecture for $n=5$. This kind of study does not change the asymptotic behavior in $n$, so we only carry it out here for $n=6$ as an example. Let $s<n$ be two positive integers. 
Consider the set $U_s \subseteq \mathbb{R}^{s+2}\times \mathbb{N}^{s+2}\times \mathbb{N}^{s+2}$ consisting of elements \[ (\underline{b}, \underline{d}, \underline{m}) = (b_0, \cdots, b_{s+1}, d_0, \cdots, d_{s+1}, m_0, \cdots, m_{s+1}) \] satisfying the following conditions: \begin{enumerate} \item $0= b_{s+1} < b_s < \cdots < b_1 < b_0=n$. \item $0= d_{s+1} < d_s < \cdots < d_1<d_0=n$. \item $b_i \leqslantslant d_i$ for all $1\leqslantslant i\leqslantslant s$. \item $m_i \leqslantslant \binom{n-\lceil b_i \rceil}{n-d_i}$ for all $1\leqslantslant i\leqslantslant s$. \item $b_i\leqslantslant 2/m_i$ if $d_i=2$. \item $d_1\neq n-1$. \end{enumerate} Consider now the functions: \[ g(\underline{b}, \underline{d}, \underline{m},n)=\sum_{i=0} ^s (b_i - b_{i+1}) \sqrt[d_i]{m_i} \] and \[ G(n)=\operatorname{max}\{g(\underline{b}, \underline{d}, \underline{m}, n)| (\underline{b}, \underline{d}, \underline{m})\in \cup_{s=0} ^{n-1} U_s\} \] \begin{lemma}\label{finebound} We have that $G(6)<8$. \end{lemma} \begin{proof} Fix $\underline{d}$ and $\underline{m}$. Then $\overline{U_s} \cap \mathbb{R}^{s+2} \times \{\underline{d}\} \times \{\underline{m}\}$ is a compact convex polyhedron and therefore it's the convex hull of its vertices. By linearity of $g$ in the $b_i$ entries, $g$ is maximal at one such vertex $(\underline{b}, \underline{d}, \underline{m})$. By looking at the conditions $(1)-(6)$ defining $U_s$ we see that $\underline{b}$ consists of integral entries, unless $2\in \{d_i\}$. Suppose then that $2\in\{d_i\}$ and let $i$ be the index such that $d_i=2$. By possibly erasing all entries (if any) $(b_j, d_j)$ such that $b_j=b_i$ for $j<i$, we may assume that $i$ is the smallest index such that $b_i$ is a non-integral entry in $\underline{b}$. Furthermore, we must have $m_i\geqslantslant 3$ and $b_i = 2/m_i$. 
In this case, it's immediate to see that \[ \sum_{j=i} ^{s+1} (b_j-b_{j+1}) \binom{n-\lceil b_j \rceil}{n-d_j} ^{1/d_j} \leqslantslant \frac{2}{m_i ^{1/2}} \] Therefore $g(\underline{b}, \underline{d}, \underline{m}, n)$ is bounded above by the expression: \[ \sum_{j=0} ^{i-2} (b_j - b_{j+1}) \binom{n-\lceil b_j \rceil}{n-d_j}^{1/d_j} + (b_{i-1}-2/m_i) \binom{n-\lceil b_{i-1} \rceil}{n-d_{i-1}} ^{1/d_{i-1}} + \frac{2}{m_i ^{1/2}} \] In either case, we have once again reduced the problem to a finite number of computations and the result follows by running a computer program on all the possible combinations. \end{proof} Theorem \ref{main} may be slightly sharpened by the following. \begin{theorem} Let $X$ be a smooth projective variety of dimension $n$ and let $D$ be an ample $\mathbb{Q}$-divisor. Let $x$ be a point of $X$. Suppose that \[ D^d \cdot Z\geqslantslant 1 \] for all irreducible $d$-dimensional subvarieties $Z$ containing $x$. Fix any positive rational number $0<\epsilon\ll 1$. Then there exists a $\mathbb{Q}$-divisor $\Delta\in |tD|_{\mathbb{Q}}$ such that: \begin{enumerate} \item $t < G(n)+\epsilon$. \item $(X,\Delta)$ is log canonical but not Kawamata log terminal at $x$. \item $\operatorname{LLC}(X,\Delta,x)=\{x\}$. \end{enumerate} In particular, if $D$ is Cartier and $n=6$ then $K_X + mD$ is basepoint free for all $m\geqslantslant 8$. \end{theorem} \begin{proof} Consider the sequence $(D_i, Z_i, T_i, t_i, d_i, b_i, m_i)$ obtained by applying the proof of Theorem \ref{main}. Suppose first that $d_1\neq n-1$. Then $(\underline{b}, \underline{d}, \underline{m})$ belongs to $U_s$. In fact, conditions $(1)-(5)$ are clear and $(6)$ follows for example from \cite[Theorem B.1]{yezhu2}. Therefore, in this case we are done by relation $(*)$ of the proof of Theorem \ref{main} and by Theorem \ref{gg}. 
On the other hand, if $d_1=n-1$, then by the proof of \cite[Theorem 4.4]{helmke2} applied to $(n+\epsilon)D$, we have that \[ t_1 \leqslantslant n-b_1 \cdot \sqrt[n-1]{m_1} \] If one uses this estimate for $t_1$ and relation $(*)$ for $t_i$ with $i\geqslantslant 2$, we see that $b_1$ simplifies from the expression. Therefore, we may always assume that $(\underline{b}, \underline{d}, \underline{m})$ belongs to $U_s$. Finally, if $n=6$, then the result follows from Lemma \ref{finebound}. \end{proof} \section{Appendix}\label{appendix} In this appendix, we complement Section \ref{sectionoptimization} by showing that the asymptotic bound of Theorem \ref{upperbound} (1) is optimal. We also show some alternative ways of bounding $F(n,r)$. \subsection{Optimality} Theorem \ref{upperbound} (1) asserts that $F(n,1)=O(n\operatorname{log}\operatorname{log}(n))$. Here we show: \begin{theorem}\label{lowerbound} For $n$ large enough \[ F(n,1)\geqslantslant \frac{1}{4e} n\operatorname{log}\operatorname{log}(n) \] \end{theorem} We start with the following. \begin{lemma}\label{estimatelowerbound} Let $b\leqslant d\leqslant n$ be positive integers such that $ b \leqslant n/10$ and $$ bW(n/b) \leqslant d-b\leqslant 2bW(n/b).$$ Then $$ \binom{n-b}{n-d}^{1/d} \geqslant \frac 1 {4e}\frac n { b W(n/b)} $$ \end{lemma} \begin{proof} First, note that the function $b\mapsto bW(n/b)$ is strictly increasing. Using the fact that $W(10)<2$, we see that $$ d\leqslant b+2bW\left(\frac n b \right) \leqslant \frac n {10} (1+2W(10))<\frac n 2.$$ Therefore, we may estimate $\binom {n-b}{n-d}$ as $$ \binom {n-b}{n-d} \geqslant \left(\frac {(n/2)}{d-b}\right) ^{d-b} $$ Let $w=W(n/b)$ and note that $$ \left( \frac {en }{bw}\right)^{w/(w+1)} = \frac {n}{bw} $$ because both sides are equal to $e^w$. 
Then: \begin{align*} \binom{n-b}{n-d}^{1/d} &\geqslant \left(\frac{n}{2(d-b)}\right) ^{(d-b)/d}\\ & \geqslant \left(\frac 1 {4e}\frac {en}{bw}\right) ^{w/(w+1)}\\ & = \left(\frac 1 {4e}\right) ^{w/(w+1)} \frac {n}{bw}, \end{align*} which is at least $(4e)^{-1} n/(bw)$. \end{proof} Lemma \ref{estimatelowerbound} allows us to state sufficient conditions on $(\underline b, \underline d)$ for which $f(\underline b, \underline d, n,1)$ grows asymptotically as $n\log \log n$. \begin{lemma}\label{lemmalowerbound} Let $n\geqslant 10$ and let $$(b_{s+1},\ldots, b_1) = (0, 1, \ldots ,\lfloor n/10\rfloor )$$ be the whole reverse sequence of integers up to $n/10$. Now suppose that $(\underline b,\underline d)\in R_{s,n}$ satisfies the following condition: $$ b_j W(n/b_j) \leqslant d_j- b_j \leqslant 2 b_j W(n/b_j) $$ for all $1\leqslant j\leqslant s$. Then $$ f(\underline b, \underline d, n,1) \geqslant \frac 1 {4e} n \log \log n $$ \end{lemma} \begin{proof} By Lemma \ref{estimatelowerbound} we have \begin{align*} f(\underline b, \underline d , n, 1) & = n- \lfloor n/10\rfloor + \sum_{j=1}^{s} \binom{n-b_j}{n-d_j}^{1/d_j}\\ & \geqslant \frac 9 {10} n + \frac 1 {4e}\sum_{b=1}^{\lfloor n/10\rfloor} \frac n { b W(n/b)}\\ & \geqslant \frac 9 {10} n + \frac 1 {4e}\int_{1}^{n/10} \frac n { b W(n/b)} db \end{align*} Then by the substitution $z=n/b$ we get \begin{align*} \int_{1}^{n/10} \frac n { b W(n/b)} db & = n \int_{10}^{n}\frac 1 { z W(z)} dz\\ & = n \Big[ \log W(z) - \frac 1 {W(z)} \Big]_{10}^{n}\\ \end{align*} Note that $-1/W(n)+1/W(10)\geqslant 0$ and $\log W(10)<\log 2$. Therefore $$f(\underline b, \underline d, n,1)\geqslant \frac n {4e} \log W(n) + \frac 9 {10} n- \frac {\log 2}{4e} n$$ Since the inequality $$ W(n)\geqslant \frac 1 2 \log n $$ holds for all $n$, and since $\log 2/(4e)<0.4$, we are done. \end{proof} We are now ready to prove Theorem \ref{lowerbound}. \begin{proof}(of Theorem \ref{lowerbound}).
It suffices to show that for $n$ large enough there exist $(\underline b,\underline d)\in R_{s,n}$ as in Lemma \ref{lemmalowerbound}. To this aim, let $n\geqslant 110$, let $(b_{s+1},\ldots, b_1) = (0, 1, \ldots ,\lfloor n/10\rfloor )$ and let $$d_j = b_j + \lceil b_j W(n/b_j)\rceil.$$ Note that $d_j \leqslant b_j + 2 b_j W(n/b_j)$ for all $1\leqslant j\leqslant s$ because $$ b_j W(n/b_j) \geqslant W(10)>1.$$ It now suffices to show that $$ 0= d_{s+1} < d_s < \cdots < d_1<d_0=n. $$ The inequality $d_1<n$ follows from the following computation: \begin{align*} d_1 &\leqslant \frac n {10}+ \frac n {10} W(11) + 1 \\ & < \frac {3n} {10}+1 \leqslant n, \end{align*} where we tacitly used inside $W$ the estimate $\lfloor n/10\rfloor \geqslant n/11$, valid for all $n\geqslant 110$. Finally, note that the function $$ \delta(b) = b W(n/b) $$ has derivative given by the formula $$ \delta'(b) = \frac {W(n/b)^2 } {W(n/b)+1}. $$ If $b\leqslant n/10$ then $W(n/b)\geqslant W(10)>1.7$ and so $\delta '(b)>1$. This implies that for all $1\leqslant j\leqslant s-1$ we have $$d_j-d_{j+1} \geqslant 2$$ and so in particular $d_{j+1}<d_j$. \end{proof} \begin{remark} It is well-known that it is possible to get better estimates on Fujita's conjecture if in the inductive process the dimension of the log canonical centers decreases only by one (i.e. if $d_{j+1} = d_j-1$ for some $j$). See for example \cite{fujita2,kawamata1,helmke2,yezhu1,yezhu2} and see also condition (5) in the definition of $U_{s}$ in Section \ref{sectionsixfolds}. In the proof of Theorem \ref{lowerbound}, however, we have constructed an element $(\underline b,\underline d)$ for which $f(\underline b, \underline d,n,1)$ grows as $n\log \log n$ and such that $$ d_{j+1}\leqslant d_j - 2$$ for all $1\leqslant j\leqslant s$. In particular this shows that a study like the one carried out in Section \ref{sectionsixfolds} does not change the asymptotic behavior of $F$.
\end{remark} \subsection{Other estimates} We record the following elementary refinement of Lemma \ref{elementaryestimate} which is enough to give logarithmic bounds on the problem of separation of $r$ points. \begin{lemma}\label{newelementaryestimate} Let $b\leqslant d\leqslant n $ and $r$ be positive integers. Then $$\left[ r\binom{n-b}{n-d}\right] ^{1/d} \leqslant \sqrt[b]r + e\frac n b - e $$ \end{lemma} \begin{proof} By the basic version of Stirling's inequality $A!\geqslantslant (A/e)^A$, we have that $$\binom{n-b}{n-d}^{1/(d-b)}\leqslant \frac {e(n-b)}{d-b}.$$ Then, using Young's inequality $A^\lambda B^{1-\lambda} \leqslant \lambda A + (1-\lambda) B$ we get \begin{align*} r^{1/d} \binom{n-b}{n-d}^{1/d} & = (\sqrt[b] r)^{b/d} \cdot \left(\binom{n-b}{n-d}^{1/(d-b)}\right)^{1- b /d}\\ & \leqslant \frac b d \sqrt[b] r + \left(1 - \frac b d\right) \binom{n-b}{n-d}^{1/(d-b)}\\ & \leqslant \frac b d \sqrt[b] r + \frac e d (n-b)\\ & \leqslant \frac b b \sqrt[b] r + \frac e b (n-b)\\ & = \sqrt[b]r + e\frac n b - e. \end{align*} \end{proof} \begin{corollary}\label{newelementarycorollary} Let $n,r$ be positive integers. Then $$ F(n,r)\leqslant e n \log n + \sum _{b=1}^{n} \sqrt[b]r $$ \end{corollary} \begin{proof} Let $(\underline b, \underline d)\in R_{s,n}$. By Lemma \ref{sumbound} and Lemma \ref{newelementaryestimate} we have \begin{align*} f(\underline b, \underline d, n,r) &\leqslant en\left(-1+ \sum _{b=1}^{n} \frac 1 b\right) +\sum _{b=1}^{n} \sqrt[b]r\\ & \leqslant e n \log n + \sum _{b=1}^{n} \sqrt[b]r. \end{align*} \end{proof} We conclude by including the following simple estimates from below. \begin{proposition}\label{easylowerboundsinr} Let $n,r$ be positive integers. 
Then $$ F(n,r)\geqslant \max \left\{ \frac {r^{1/n}}{4e} n \log \log n, \sum _{b=1}^{n} \sqrt[b]r \right\} $$ \end{proposition} \begin{proof} The first lower bound follows from Theorem \ref{lowerbound} and the simple observation that $$ F(n,r)\geqslant r^{1/n} F(n,1).$$ To prove the second lower bound, let $$\underline b = \underline d = (n,n-1,\ldots,1,0)$$ Then $$ F(n,r)\geqslant f(\underline b, \underline d, n,r) = \sum _{b=1}^{n} \sqrt[b]r. $$ \end{proof} If one fixes $n$ and lets $r$ be large enough, it is possible to compute $F(n,r)$ exactly. \begin{proposition} Fix a positive integer $n$. Then for each large enough positive integer $r$ we have $$ F(n,r) = \sum _{b=1}^{n} \sqrt[b]r. $$ \end{proposition} \begin{proof} By Proposition \ref{easylowerboundsinr} it is sufficient to prove the inequality $F(n,r) \leqslant \sum _{b=1}^{n} \sqrt[b]r$. Let $(\underline b, \underline d)\in R_{s,n}$. By Lemma \ref{sumbound} we have \[ f(\underline{b},\underline{d},n,r) \leqslant \sum_{b=1} ^n \left[ r\binom{n-b}{n-d(b)}\right] ^{1/d(b)} \] for some $b\leqslant d(b)\leqslant n$. If $r$ is large enough then each term of the sum is dominated, respectively, by a $b$-th root of $r$: $$ r^{1/d(b)}\left[ \binom{n-b}{n-d(b)}\right] ^{1/d(b)} \leqslant r^{1/b} $$ \end{proof} \end{document}
\begin{document} \title{FABLE: Fast Approximate Quantum Circuits for Block-Encodings} \author{Daan Camps} \email{[email protected]} \affiliation{National Energy Research Scientific Computing Center, Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA} \author{Roel~Van~Beeumen} \email{[email protected]} \affiliation{Applied Mathematics and Computational Research Division, Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA} \date{\today} \begin{abstract} Block-encodings of matrices have become an essential element of quantum algorithms derived from the quantum singular value transformation. This includes a variety of algorithms ranging from the quantum linear systems problem to quantum walk, Hamiltonian simulation, and quantum machine learning. Many of these algorithms achieve optimal complexity in terms of black box matrix oracle queries, but so far the problem of computing quantum circuit implementations for block-encodings of matrices has been under-appreciated. In this paper we propose FABLE, a method to generate approximate quantum circuits for block-encodings of matrices in a fast manner. FABLE circuits have a simple structure and are directly formulated in terms of one- and two-qubit gates. For small and structured matrices they are feasible in the NISQ era, and the circuit parameters can be easily generated for problems up to fifteen qubits. Furthermore, we show that FABLE circuits can be compressed and sparsified. We provide a compression theorem that relates the compression threshold to the error on the block-encoding. We benchmark our method for Heisenberg and Hubbard Hamiltonians, and Laplacian operators to illustrate that they can be implemented with a reduced gate complexity without approximation error. \end{abstract} \maketitle \section{Introduction} The quantum singular value transformation (QSVT) \cite{gisu2018, gisu2019} combines and extends qubitization \cite{loch2019} and quantum signal processing \cite{loch2017}. 
The QSVT provides a unifying framework encompassing many quantum algorithms that provide a speed-up over the best known classical algorithm~\cite{Martyn2021}. All quantum algorithms derived from the quantum singular value transformation ultimately rely on the notion of a \textit{block-encoding} of some matrix $A$ that represents the problem at hand. This matrix can, for example, be the Hamiltonian of the quantum system to be simulated~\cite{Berry2015}, the discriminant matrix of the Markov chain in a quantum walk~\cite{szeg2004, quant-ph/0401053}, or the matrix to be solved for in the quantum linear systems problem~\cite{Childs2017}. These matrices are, in general, non-unitary operators and cannot be directly run on a quantum computer that only performs unitary evolution. This constraint is usually overcome by enlarging the Hilbert space and embedding the non-unitary operator in a specific state of the ancillary qubits. The most commonly used embedding is a block-encoding where the system matrix $A$ is embedded in the leading principal block of a larger unitary matrix acting on the full Hilbert space: \begin{myequation} U = \begin{bmatrix} A & *\, \\ * & *\, \end{bmatrix}, \label{eq:BE} \end{myequation} where $*$ indicate arbitrary matrix elements. We assume that $\|A\|_{2} \leq 1$ since otherwise such an embedding cannot exist. In this case, we say that $U$ block encodes $A$. Thus far, complexity results for quantum algorithms derived from the quantum singular value transformation have been formulated in terms of query complexity, i.e., how many queries to the block-encoding $U$ are required to solve the problem. The question how a quantum circuit can be generated for \cref{eq:BE} has so far been under appreciated. Encoding schemes for sparse matrices have been proposed~\cite{Childs2017,gisu2018} and ultimately rely on sparse access oracles for which a quantum circuit implementation can be challenging. 
Some explicit circuit implementations for quantum walks on highly-structured graphs are provided in~\cite{Loke2017} and are closely connected to block-encodings~\cite{gisu2018}. Similarly, \cite{yang22} shows how to generate quantum circuits for block-encoding certain specific $2^n \times 2^n$ sparse matrices in poly$(n)$ complexity. In this paper we take a more general approach and propose \emph{FABLE}, which stands for Fast Approximate BLock-Encodings. FABLE is an efficient algorithm to generate quantum circuits that block encode arbitrary matrices up to prescribed accuracy. FABLE circuits consist of a \emph{matrix query oracle}, that we implement with simple one-qubit $R_y$ and $R_z$ rotations and two-qubit CNOT gates, and some additional Hadamard and SWAP gates. The gate complexity of a FABLE circuit for general, unstructured $N \times N$ matrices is bounded by $\bigO(N^2)$ gates with a modest prefactor of 2 for real-valued matrices (4 for complex-valued matrices), and a limited polylogarithmic overhead. In this sense, FABLE circuits are a quantum circuit representation of dense matrices with an optimal asymptotic gate complexity. However, this gate complexity scales exponentially in the number of qubits for generic dense matrices as encoding an unstructured matrix of exponential dimension is a difficult task. Luckily, more relevant problems usually contain a lot of structure and, as we will show, FABLE circuits can be compressed which often leads to significantly reduced gate complexities for many problems of interest. This process can be interpreted as a \emph{sparsification} of the circuit and matrix. However, the FABLE algorithm applies a Walsh--Hadamard transformation to the matrix data and performs the sparsification in the Walsh--Hadamard domain. Sparse FABLE matrices thus correspond to matrices that contain many zeros in this domain, which does not necessarily correspond to sparsity in the original domain.
Thanks to these characteristics, FABLE circuits are well-suited for implementing quantum algorithms derived from the QSVT in the NISQ era and beyond. The remainder of this paper is organized as follows. \Cref{sec:BE} formally defines the concept of block-encodings. \Cref{sec:circ} presents a circuit structure for block-encodings in terms of a matrix query oracle and gives a naive implementation of this oracle. \Cref{sec:FABLE} introduces the improved circuit implementation for matrix query oracles that are used in FABLE circuits for real- and complex-valued matrix data. In \Cref{sec:approx}, we extend the definition of block-encodings to approximate block-encodings and we discuss how FABLE circuits can be compressed. We relate the threshold in the compression algorithm to the error on the block-encoding. \Cref{sec:ex} provides some examples that show that FABLE can be used to block encode Heisenberg and Hubbard Hamiltonians and discretized differential operators with a significantly reduced gate complexity. Implementations of the FABLE algorithm built on top of QCLAB~\cite{qclab, qclabpp} and Qiskit~\cite{Qiskit} are made publicly available on \url{https://github.com/QuantumComputingLab/fable}. Without loss of generality, we assume in the remainder of this paper that the matrix size is $N \times N$ with $N = 2^n$. \section{Block-encodings}\label{sec:BE} A \emph{block-encoding} of a non-unitary matrix is the embedding of a properly scaled version of that matrix in the leading principal block of a bigger unitary \cite{gisu2018,gisu2019}. A formal definition for a block-encoding of an $n$-qubit matrix $A$ in an $m$-qubit unitary $U$ is as follows. \begin{definition} \label{def:BE} Let $a,n, m \inN$, $m = a + n$. Then an $m$-qubit unitary $U$ is an ${(\alpha, a)}$-block-encoding of an $n$-qubit operator $A$ if \begin{myequation} \tilde A = \left(\bra{0}^{\otimes a} \otimes \eI_{n} \right) U \left(\ket{0}^{\otimes a} \otimes \eI_{n} \right),
\end{myequation} and $A = \alpha \tilde A$. \end{definition} The parameters $(\alpha, a)$ are, respectively, the \emph{subnormalization factor} necessary for encoding matrices of arbitrary norm and the number of \emph{ancilla} qubits used in the block-encoding. Since $\normtwo{U} = 1$, we have that $\normtwo{\tilde A} \leq 1$ and therefore $\normtwo{A} \leq \alpha$. Every unitary is already a trivial ${(1,0)}$-block-encoding of itself and every non-unitary matrix can be embedded in a ${(\normtwo{A},1)}$-block-encoding~\cite{QI:Alber:2001}. This does not guarantee the existence of an efficient quantum circuit implementation, but merely considers the matrix representation. For a block-encoding, we say that $\tilde A$ is the partial trace of $U$ over the zero state of the ancilla space. This naturally partitions the Hilbert space $\cH_m$ into $\cH_a \otimes \cH_n$. Given an $n$ qubit state, $\ket{\psi} \in \cH_n$, the action of $U$ on $\ket{\phi} = \ket{0}^{\otimes a} \otimes \ket{\psi}$ becomes \begin{myequation} U \ket{\phi} = \ket{0}^{\otimes a} \otimes \tilde A \ket{\psi} + \sqrt{1 - \norm{ \tilde A \ket{\psi}}^2} \ket{\sigma^{\perp}}, \end{myequation} with \begin{align} \left(\bra{0}^{\otimes a} \otimes \eI_{n}\right) \ket{\sigma^\perp} &= 0, \\ \norm[\big]{\ket{\sigma^\perp}} &= 1, \end{align} and $\ket{\sigma^\perp}$ the normalized state for which the ancilla register has a state orthogonal to $\ket{0}^{\otimes a}$. With probability $\norm{\tilde A \ket{\psi}}^2$, a partial measurement of the ancilla qubits results in $0^{\otimes a}$ and the signal qubits are in the target state $\tilde A \ket{\psi}/\norm{\tilde A \ket{\psi}}$. Using amplitude amplification~\cite{grover, gisu2019}, this process must be repeated $1/\norm{\tilde A \ket{\psi}}$ times for success on average. \begin{figure} \caption{Abstract quantum circuit for an ${(\alpha, a)} \label{fig:BE} \end{figure} \cref{fig:BE} provides the high-level structure of a quantum circuit for a block-encoding. 
We note that an encoding of a matrix can be coupled to any other state of the ancilla space besides the all-zero state and this state does not even have to be a computational basis state. This generalization is discussed in~\cite{gisu2018}. Encoding the matrix data in the all-zero state of the ancilla qubits has become the most widely used choice as this leads to an embedding of the matrix in the leading block of the unitary matrix, i.e., a block-encoding. \section{Quantum Circuits for Block-Encodings} \label{sec:circ} Quantum circuits for block-encodings of sparse matrices are often presented in terms of query oracles that provide information about the position and binary description of the matrix entries~\cite{gisu2019,Childs2017}. These oracles can be combined into a \emph{matrix query oracle}~\cite{lin2022} that provides access to the matrix data. Our approach is different: we immediately define a matrix query operation $O_A$ for a given matrix $A$ and subsequently discuss how this oracle can be directly synthesized in a quantum circuit. \begin{definition} \label{def:OA} Let $a_{ij}$ be the elements of an $N \times N$ matrix $A$ with $N = 2^n$, $|a_{ij}| \leq 1$. Then the matrix query operation $O_A$ applies \begin{equation} O_A \ket0 \ket{i} \ket{j} = \left( a_{ij} \ket0 + \sqrt{1 - |a_{ij}|^2} \ket1 \right) \ket{i} \ket{j}, \label{eq:OA} \end{equation} where $\ket{i}$ and $\ket{j}$ are $n$-qubit computational basis states. \end{definition} A high-level quantum circuit to block-encode a matrix $A$ is proposed in \cref{fig:BEcirc}, where $H^{\otimes n}$ is an $n$-qubit Hadamard transformation that creates an equal superposition over the row qubits, the matrix query unitary $O_A$ is given in \eqref{eq:OA}, and the $2n$-qubit $\swap$ gate is implemented as $\swap\ket{i}\ket{j} = \ket{j}\ket{i}$. This circuit is closely related to similar circuits in~\cite{lin2022}, but we encode all information about the matrix in a single matrix query oracle.
\begin{figure} \caption{High-level quantum circuit structure for block-encoding a matrix $A$ in terms of a matrix query oracle $O_A$. If the $n+1$ ancilla qubits are measured in the zero state, the signal register is in the desired state ${\tilde A\ket\psi} \label{fig:BEcirc} \end{figure} The following theorem ascertains that the circuit from \cref{fig:BEcirc} is indeed a block-encoding of the target matrix $A$. Our proof follows a similar reasoning to~\cite{lin2022} and is included for completeness. \begin{theorem} \label{thm:UA} The circuit $U_A$ in \cref{fig:BEcirc} is an $(1/2^n, n+1)$-block-encoding of the $n$-qubit matrix $A$ if $|a_{ij}| \leq 1$. \end{theorem} \begin{proof} The circuit $U_A$ can be written in matrix notation as: \begin{equation*} U_A = (\eye[1] \otimes H^{\otimes n} \otimes \eye[n]) (\eye[1] \otimes \swap) O_A (\eye[1] \otimes H^{\otimes n} \otimes \eye[n]). \end{equation*} For $U_A$ to be an $(1/2^n, n+1)$-block-encoding of $A$, we need to verify according to \Cref{def:BE} that \begin{equation*} \bra{0} \bra{0}^{\otimes n} \bra{i} \, U_A \, \ket{0} \ket{0}^{\otimes n} \ket{j} = \frac{1}{2^n} a_{ij}. \end{equation*} On one hand, we have \begin{align*} \ket{0} \ket{0}^{\otimes n} \ket{j} & \\ \quad \xrightarrow{H^{\otimes n}} & \ \frac{1}{\sqrt{2^n}} \sum_{k=0}^{2^n-1} \ket{0} \ket{k} \ket{j}, \\ \xrightarrow{O_A} & \ \frac{1}{\sqrt{2^n}} \sum_{k=0}^{2^n-1} \left(a_{kj} \ket{0} + \sqrt{1 - |a_{kj}|^2} \ket{1} \right) \ket{k} \ket{j}, \\ \xrightarrow{\swap} & \ \frac{1}{\sqrt{2^n}} \sum_{k=0}^{2^n-1} \left(a_{kj} \ket{0} + \sqrt{1 - |a_{kj}|^2} \ket{1} \right) \ket{j} \ket{k}, \end{align*} while on the other hand, we have \begin{equation*} \ket{0} \ket{0}^{\otimes n} \ket{i} \xrightarrow{H^{\otimes n}} \ \frac{1}{\sqrt{2^n}} \sum_{\ell=0}^{2^n-1} \ket{0} \ket{\ell} \ket{i}. 
\end{equation*} Combining both yields \begin{align*} \bra{0} \bra{0}^{\otimes n} & \bra{i} \, U_A \, \ket{0} \ket{0}^{\otimes n} \ket{j} \\ = & \ \frac{1}{2^n} \left( \sum_{\ell=0}^{2^n-1} \bra{0} \bra{\ell} \bra{i} \right) \\ & \ \quad \ \left( \sum_{k=0}^{2^n-1} \left(a_{kj} \ket{0} + \sqrt{1 - |a_{kj}|^2} \ket{1} \right) \ket{j} \ket{k} \right), \\ = & \ \frac{1}{2^n} \sum_{\ell=0}^{2^n-1} \sum_{k=0}^{2^n-1} a_{kj} \braket{\ell | j} \braket{i | k} \\ = & \ \frac{1}{2^n} a_{ij}, \end{align*} which completes the proof. \end{proof} All circuit elements in \cref{fig:BEcirc}, except for $O_A$, can be readily written in simple 1- and 2-qubit gates. The complexity of the Hadamard and SWAP gates is only poly$(n)$. We present next how the oracle $O_A$ from \Cref{def:OA} can be implemented in simple 1- and 2-qubit gates for arbitrary matrices. We first consider the case of real-valued matrices and discuss complex-valued matrices in~\Cref{sec:complex}. In case that $A$ is a real-valued matrix, we see from \eqref{eq:OA} that for given row and column indices $i$ and $j$, $O_A$ acts on the $\ket0$ state of the first qubit as an $R_y$ gate with angle \begin{equation} \theta_{ij} = \arccos(a_{ij}), \label{eq:theta} \end{equation} i.e., \begin{align} R_y(2\theta_{ij}) \ket0 &= \begin{bmatrix} \cos(\theta_{ij}) & -\sin(\theta_{ij}) \\ \sin(\theta_{ij}) & \phantom{-}\cos(\theta_{ij}) \end{bmatrix} \begin{bmatrix} 1 \\ 0 \end{bmatrix}, \nonumber \\ &= \begin{bmatrix} a_{ij} \\ \sqrt{\smash[b]{1 - a_{ij}^2}} \end{bmatrix}.
\end{align} Hence, the matrix query unitary $O_A$ is a matrix with the following structure for real-valued matrices \begin{equation} O_A = \left[\begin{smallmatrix} c_{00} & & & & -s_{00} \\ & c_{01} & & & & -s_{01} \\ & & \ddots & & & & \ddots \\ & & & c_{N-1,N-1} & & & & -s_{N-1,N-1} \\ s_{00} & & & & c_{00} \\ & s_{01} & & & & c_{01} \\ & & \ddots & & & & \ddots \\ & & & s_{N-1,N-1} & & & & c_{N-1,N-1} \end{smallmatrix}\right], \label{eq:OAmat} \end{equation} where $c_{ij} := \cos(\theta_{ij})$ and $s_{ij} := \sin(\theta_{ij})$, with $\theta_{ij}$ given by \eqref{eq:theta}. A first naive implementation of the $O_A$ oracle \eqref{eq:OA} for $A\inR[N][N]$ uses $N^2$ multi-controlled $R_y$ gates. We use the notation $C^n(R_y)$ for an $R_y$ gate with $n$ control qubits. The circuit construction for $O_A$ uses one $C^{2n}(R_y)$ gate for each matrix entry $a_{ij}$ where the control qubits encode the row and column indices $\ket{i}\ket{j}$ of the corresponding entry. This circuit is illustrated below for the encoding of a $2 \times 2$ matrix using 3 qubits and 4 $C^2(R_y)$ gates \[ \begin{myqcircuit} & \gate{R_y(2\theta_{00})} & \gate{R_y(2\theta_{01})} & \gate{R_y(2\theta_{10})} & \gate{R_y(2\theta_{11})} & \qw \\ & \ctrlo{-1} & \ctrlo{-1} & \ctrl{-1} & \ctrl{-1} & \qw \\ & \ctrlo{-1} & \ctrl{-1} & \ctrlo{-1} & \ctrl{-1} & \qw \end{myqcircuit} \] where $\theta_{00}, \theta_{01}, \theta_{10}, \theta_{11}$ are given by \eqref{eq:theta}. It is clear that this circuit implements \eqref{eq:OA} for real-valued data. The major disadvantage of this naive approach is that it requires $N^2$ $C^{2n}(R_y)$ gates to implement the $O_A$ oracle for $A \inR[N][N]$. However, every $C^{2n}(R_y)$ requires $\bigO(N^2)$ 1- and 2-qubit gates to be implemented~\cite{Barenco1995}. This brings the total gate complexity of this naive circuit implementation to $\bigO(N^4)$ which is excessive as it is quadratically worse than the classical representation cost.
We propose a quadratic reduction in gate complexity with the FABLE implementation of block-encoding circuits in the next section. \section{FABLE Circuits for Block-Encodings}\label{sec:FABLE} We continue with the case of real-valued matrices and discuss how to extend this to complex-valued matrices afterwards. \subsection{Query oracles for real-valued matrices} We illustrate the idea of the improved circuit construction for $O_A$ for a small-scale example of $A \inR[2][2]$ from the previous section. In that case, the circuit structure is given by \[ \begin{myqcircuit} & \gate{R_y(\hat \theta_0)} & \targ & \gate{R_y(\hat \theta_1)} & \targ & \gate{R_y(\hat \theta_2)} & \targ & \gate{R_y(\hat \theta_3)} & \targ & \qw \\ & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \ctrl{-1} & \qw \\ & \qw & \ctrl{-2} & \qw & \qw & \qw & \ctrl{-2} & \qw & \qw & \qw \end{myqcircuit} \] where the angles $\hat\theta_0, \hat\theta_1, \hat\theta_2, \hat\theta_3$ are computed from the data $A\inR[2][2]$ as we will explain next. The circuit structure is derived from~\cite{Mottonen2004}. 
We analyze the action of the above circuit based on the following two elementary properties of $R_y$ rotations: \begin{equation} \begin{split} R_y(\theta_0) \, R_y(\theta_1) & = R_y(\theta_0 + \theta_1), \\ X \, R_y(\theta) \, X & = R_y(-\theta). \end{split}\label{eq:rotcond} \end{equation} It follows that the state of the first qubit is rotated as \begin{align*} 00&: & \phantom{X} R_y(\hat \theta_3) \phantom{X} R_y(\hat \theta_2) \phantom{X} R_y(\hat \theta_1) \phantom{X} R_y(\hat \theta_0) = \qquad\qquad\qquad& \\ && R_y( \phantom{-}\hat\theta_3 + \hat\theta_2 + \hat\theta_1 + \hat\theta_0), \\ 01&: & \phantom{X} R_y(\hat \theta_3) X R_y(\hat \theta_2) \phantom{X} R_y(\hat \theta_1) X R_y(\hat \theta_0) = \qquad\qquad\qquad& \\ && R_y( \phantom{-}\hat\theta_3 - \hat\theta_2 - \hat\theta_1 + \hat\theta_0),\\ 10&: & X R_y(\hat \theta_3) \phantom{X} R_y(\hat \theta_2) X R_y(\hat \theta_1) \phantom{X} R_y(\hat \theta_0) = \qquad\qquad\qquad& \\ && R_y( - \hat\theta_3 - \hat\theta_2 + \hat\theta_1 + \hat\theta_0),\\ 11&: & X R_y(\hat \theta_3) X R_y(\hat \theta_2) X R_y(\hat \theta_1) X R_y(\hat \theta_0) = \qquad\qquad\qquad& \\ && R_y( - \hat\theta_3 + \hat\theta_2 - \hat\theta_1 + \hat\theta_0), \end{align*} where the rotation angle depends on the state of the last two control qubits as indicated above. To implement an $O_A$ oracle with angles $\theta_{00}$, $\theta_{01}$, $\theta_{10}$, $\theta_{11}$ as given by \eqref{eq:theta}, we \emph{vectorize} $A$ to $\vecc(A)$ in row-major order such that $\vecc(A)_{i \cdot N + j} = a_{ij}$ to obtain relabeled angles $(\theta_0, \ldots, \theta_3)$.
We see from the system of equation above that these angles have to satisfy \begin{equation} \begin{bmatrix} \theta_0 \\ \theta_1 \\ \theta_2 \\ \theta_3 \end{bmatrix} = \begin{bmatrix} 1 & \phantom{-}1 & \phantom{-}1 & \phantom{-}1 \\ 1 & -1 & -1 & \phantom{-}1 \\ 1 & \phantom{-}1 & -1 & -1 \\ 1 & -1 & \phantom{-}1 & -1 \end{bmatrix} \begin{bmatrix} \hat\theta_0 \\ \hat\theta_1 \\ \hat\theta_2 \\ \hat\theta_3 \end{bmatrix}. \end{equation} This is a structured linear system that can be written as \begin{align} \begin{bmatrix} \theta_0 \\ \theta_1 \\ \theta_2 \\ \theta_3 \end{bmatrix} & = \begin{bmatrix} 1 & \phantom{-}1 & \phantom{-}1 & \phantom{-}1 \\ 1 & -1 & \phantom{-}1 & -1 \\ 1 & \phantom{-}1 & -1 & -1 \\ 1 & -1 & -1 & \phantom{-}1 \end{bmatrix} \begin{bmatrix} 1 & & & \\ & 1 & & \\ & & 0 & 1 \\ & & 1 & 0 \end{bmatrix} \begin{bmatrix} \hat\theta_0 \\ \hat\theta_1 \\ \hat\theta_2 \\ \hat\theta_3 \end{bmatrix}, \\ & = ( \hat H \otimes \hat H ) P_G \begin{bmatrix} \hat\theta_0 \\ \hat\theta_1 \\ \hat\theta_2 \\ \hat\theta_3 \end{bmatrix}, \label{eq:sys} \end{align} where $\hat H = \left[\begin{smallmatrix} 1 & \phantom{-}1 \\ 1 & -1 \end{smallmatrix}\right]$ is a scalar multiple of the Hadamard gate and $P_G$ is the permutation matrix that transforms binary ordering to Gray code ordering. This algorithm generalizes to $O_A$ oracles for matrices $A \inR[N][N]$~\cite{Mottonen2004}. The corresponding circuit structure consists of a gate sequence of length $2^{2n}$ alternating between $R_y$ and CNOT gates. Note that the $R_y$ gates only act on the first qubit, which is also the target qubit of the CNOT gates, and the control qubit for the $\ell$th CNOT gate is determined by the bit where the $\ell$th and $(\ell + 1)$st Gray code differ. 
For an $O_A$ oracle with angles $\bftheta = (\theta_0, \ldots, \theta_{2^{2n}-1})$ given by \eqref{eq:theta}, the angles of the $R_y$ gates in the quantum circuit, $\hat \bftheta = (\hat \theta_0, \ldots, \hat \theta_{2^{2n}-1})$, are related to $\bftheta$ through the linear system \begin{align} \left( \hat H^{\otimes 2n} \, P_G \right) \hat \bftheta = \bftheta. \label{eq:ls} \end{align} This linear system can be efficiently solved by a classical algorithm in $\bigO(N^2\log N^2)$ using a fast Walsh--Hadamard transform~\cite{fwht} which is implemented in the reference implementation of FABLE. This circuit structure is known as a \emph{uniformly controlled $R_y$ rotation}~\cite{Mottonen2004} because it rotates the target qubit over a different angle for each bitstring in the control register. We use the following concise notation for uniformly controlled rotations used in the implementation of $O_A$: \[ \begin{myqcircuit*}{1}{0.75} \lstick{} & \qw & \multigate{2}{O_A} & \qw \\ \lstick{} & {/} \qw & \ghost{O_A} & \qw \\ \lstick{} & {/} \qw & \ghost{O_A} & \qw \end{myqcircuit*} \quad = \quad \begin{myqcircuit} & \qw & \gate{R_y(\bftheta)} & \qw \\ & {/} \qw & \ctrlsq{-1} & \qw \\ & {/} \qw & \ctrlsq{-1} & \qw \end{myqcircuit}\ \ . \] The gate complexity of implementing $O_A$ with this approach is $\bigO(N^2)$ for $A \inR[N][N]$, where $N^2$ CNOT and $N^2$ single-qubit $R_y$ gates are required, i.e., the prefactor in the circuit complexity is 2. This reaches the same asymptotic gate complexity as classically required to store the data and is optimal for unstructured data. \subsection{Query oracles for complex-valued matrices} \label{sec:complex} In case that $A$ is a complex-valued matrix, we encode the matrix elements as a product of $R_y$ and $R_z$ rotations.
For given row and column indices $i$ and $j$, the matrix element to be encoded is $a_{ij} = |a_{ij}| e^{\I \alpha_{ij}}$, and $O_A$ acts on the $\ket0$ state of the first qubit as a product of an $R_y$ and $R_z$ gate with angles \begin{align} \theta_{ij} &= \arccos(|a_{ij}|), \label{eq:theta2}\\ \phi_{ij} &= -\alpha_{ij}, \label{eq:thetarho} \end{align} i.e., \begin{align} R_z(2\phi_{ij}) & R_y(2\theta_{ij}) \ket0 \nonumber \\ & = \begin{bmatrix} e^{-\I \phi_{ij}} & \\ & e^{\I \phi_{ij}} \end{bmatrix} \begin{bmatrix} \cos(\theta_{ij}) & -\sin(\theta_{ij}) \\ \sin(\theta_{ij}) & \phantom{-}\cos(\theta_{ij}) \end{bmatrix} \begin{bmatrix} 1 \\ 0 \end{bmatrix}, \nonumber \\ & = \begin{bmatrix} |a_{ij}| e^{\I \alpha_{ij}} \\ \sqrt{\smash[b]{1 - |a_{ij}|^2}} e^{-\I \alpha_{ij}} \end{bmatrix}. \end{align} Our previous analysis extends to uniformly controlled $R_z$ rotations because the conditions in~\eqref{eq:rotcond} are satisfied for $R_z$ gates~\cite{Mottonen2004}. It follows that we can implement the $O_A$ oracle for complex-valued matrices as the product of uniformly controlled $R_y$ and $R_z$ rotations \[ \begin{myqcircuit*}{1}{0.75} \lstick{} & \qw & \multigate{2}{O_A} & \qw \\ \lstick{} & {/} \qw & \ghost{O_A} & \qw \\ \lstick{} & {/} \qw & \ghost{O_A} & \qw \end{myqcircuit*} \quad = \quad \begin{myqcircuit} & \qw & \gate{R_y(\bftheta)} & \gate{R_z(\bfphi)} &\qw \\ & {/} \qw & \ctrlsq{-1} & \ctrlsq{-1} & \qw \\ & {/} \qw & \ctrlsq{-1} & \ctrlsq{-1} & \qw \end{myqcircuit}\ \ , \] where the $R_y$ and $R_z$ rotations respectively set the magnitude and phase of the matrix elements. The corresponding rotation angles can be computed separately through two independent linear systems of the form~\eqref{eq:ls} by using the magnitude and phase of the matrix data, respectively. 
Hence, the gate complexity for $O_A$ oracles of complex-valued matrices is twice the cost of real-valued matrices, while the asymptotic complexity in terms of 1- and 2-qubit gates remains $\bigO(N^2)$. The complete FABLE circuits for the real and complex case with the $O_A$ oracles implemented as uniformly controlled rotations are given in \cref{fig:FABLE-detail}. \begin{figure} \caption{FABLE quantum circuit structures for real and complex matrices with $O_A$ oracles implemented as uniformly controlled rotations.} \label{fig:FABLE-detail} \end{figure} \section{Approximate Block-Encodings and Circuit Compression} \label{sec:approx} Thus far we have only considered exact implementations of $O_A$ resulting in exact block-encodings. In this section, we focus on the ``A'' in FABLE and introduce \emph{approximate} block-encodings. We show how to compress FABLE circuits and what the resulting approximation error of the block-encoding is. \subsection{Approximate block-encodings} We begin by extending \Cref{def:BE} to approximate block-encodings that only implement the target matrix up to a certain precision $\varepsilon$. \begin{definition} \label{def:ABE} Let $a,n, m \inN$, $m = a + n$, and $\varepsilon\inR^+$. Then an $m$-qubit unitary $U$ is an ${(\alpha, a, \varepsilon)}$-block-encoding of an $n$-qubit operator $A$ if \begin{myequation} \tilde A = \left(\bra{0}^{\otimes a} \otimes \eI_{n} \right) U \left(\ket{0}^{\otimes a} \otimes \eI_{n} \right), \end{myequation} and \( \normtwo[\big]{A - \alpha \tilde A} \leq \varepsilon. \) \end{definition} The parameter $\varepsilon$ is the absolute \emph{error} on the block-encoding. As before, we have that $\normtwo{\tilde A} \leq 1$ and therefore $\normtwo{A} \leq \alpha + \varepsilon$.
\subsection{FABLE circuit compression and sparsification} We illustrate the idea of the circuit compression algorithm for a uniformly controlled rotation gate with 8 angles: \[ \resizebox{\columnwidth}{!}{ $ \begin{myqcircuit} & \gate{\hat \theta_0} & \targ & \gate{\hat \theta_1} & \targ & \gate{\hat \theta_2} & \targ & \gate{\hat \theta_3} & \targ & \gate{\hat \theta_4} & \targ & \gate{\hat \theta_5} & \targ & \gate{\hat \theta_6} & \targ & \gate{\hat \theta_7} & \targ & \qw \\ & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw\\ & \qw & \qw & \qw & \ctrl{-2} & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \ctrl{-2} & \qw & \qw & \qw & \qw & \qw \\ & \qw & \ctrl{-3} & \qw & \qw & \qw & \ctrl{-3} & \qw & \qw & \qw & \ctrl{-3} & \qw & \qw & \qw & \ctrl{-3} & \qw & \qw & \qw \\ \end{myqcircuit} $} \] For conciseness, we have omitted the labels from the rotation gates and only show their parameter. The gates can be $R_y$, $R_z$, or in general $R_{\alpha}$ gates with $\alpha$ a normalized linear combination of $\sigma_y$ and $\sigma_z$ because the conditions in~\eqref{eq:rotcond} are satisfied for all $R_{\alpha}$ gates~\cite{Mottonen2004}. The compression algorithm uses a cutoff threshold $\delta_c \inR[+]$ and considers all $\hat\theta_i \leq \delta_c$ to be negligible. Assume for the example above that $\hat\theta_2, \hat\theta_3, \hat\theta_4, \hat\theta_5, \hat\theta_6 \leq \delta_c$. 
This means that the respective single-qubit rotations can be removed from the circuit, yielding \[ \begin{myqcircuit} & \gate{\hat \theta_0} & \targ & \gate{\hat \theta_1} & \targ & \targ & \targ & \targ & \targ & \targ & \gate{\hat \theta_7} & \targ & \qw \\ & \qw & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \qw & \ctrl{-1} & \qw\\ & \qw & \qw & \qw & \ctrl{-2} & \qw & \qw & \qw & \ctrl{-2} & \qw & \qw & \qw & \qw \\ & \qw & \ctrl{-3} & \qw & \qw & \ctrl{-3} & \qw & \ctrl{-3} & \qw & \ctrl{-3} & \qw & \qw & \qw \\ \end{myqcircuit}\ \ . \] The resulting circuit contains a series of consecutive CNOT gates that can be further simplified as they mutually commute and share the same target qubit. Any two CNOT gates in a series of consecutive CNOT gates that have the same control qubit cancel out. It follows that in the example circuit, 5 single qubit rotations and 4 CNOT gates can be removed, yielding the compressed circuit: \[ \begin{myqcircuit} & \gate{\hat \theta_0} & \targ & \gate{\hat \theta_1} & \targ & \targ & \gate{\hat \theta_7} & \targ & \qw \\ & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \ctrl{-1} & \qw\\ & \qw & \qw & \qw & \qw & \qw & \qw & \qw & \qw \\ & \qw & \ctrl{-3} & \qw & \qw & \ctrl{-3} & \qw & \qw & \qw \\ \end{myqcircuit}\ \ . \] The circuit compression algorithm consists of two steps: \begin{enumerate} \item[i.] Remove all rotation gates for angles $\hat\theta_i \leq \delta_c$ in $\hat\bftheta$ from the circuit; \item[ii.] Perform a parity check on the control qubits of the CNOT gates in each series of consecutive CNOT gates: keep one CNOT gate with control qubit $i$ if there are an odd number of CNOT gates with control qubit $i$ in the series, otherwise remove all CNOT gates with control qubit $i$. \end{enumerate} This procedure can be considered as data sparsification since it allows us to represent the block-encoded matrix with fewer than $N^2$ parameters. 
However, since we perform the sparsification on the $\hat\bftheta$ angles after the Walsh--Hadamard transform, a sparse representation of $\hat\bftheta$ does not typically mean that $\bftheta$ and $A$ are sparse in the usual sense of containing many zeroes. FABLE circuits are efficient for the class of matrices that are sparse in the Walsh--Hadamard domain. The following theorem relates the cutoff threshold $\delta_c$ used in the circuit compression algorithm to the first-order error on the block-encoding as defined in \Cref{def:ABE}. We consider the case of real-valued data. \begin{theorem} \label{thm:bound} For an $n$-qubit matrix $A\inR[N][N]$, $|a_{ij}| \leq 1$, the FABLE circuit with cutoff compression threshold $\delta_c \inR[+]$ gives a $(1/2^n, n+1, N^3\delta_c)$-block-encoding of $A$ up to third order in $\delta_c$. \end{theorem} \begin{proof} In order to prove that a cutoff compression threshold $\delta_c$ leads to an absolute error of at most $N^3 \delta_c + \bigO(\delta_c^3)$ on the block-encoding, we start with the linear system \eqref{eq:ls} that relates the angles of the uniformly controlled rotations $\hat\bftheta$ to the angles of the matrix query oracle $\bftheta$. After thresholding the parameters $\hat \bftheta$ with cutoff $\delta_c$, the uniformly controlled rotation is constructed with parameters $\hat\bftheta + \bfdelta\hat\bftheta$, where $|\delta\hat\theta_i| \leq \delta_c$. It follows that $\| \bfdelta\hat\bftheta \|_2 \leq N \delta_c$. This perturbs the angles in $O_A$ from $\bftheta$ to \[ \tilde\bftheta = (\hat H^{\otimes 2n} P_G) (\hat\bftheta + \bfdelta\hat\bftheta). \] By linearity, the error on $O_A$ thus becomes \[ \bfdelta\bftheta = \tilde\bftheta - \bftheta = (\hat H^{\otimes 2n} P_G) \bfdelta\hat\bftheta, \] and we get that \[ \normtwo{\bfdelta\bftheta} \leq \normtwo{\hat H^{\otimes 2n}} \normtwo{P_G} \normtwo{\bfdelta\hat\bftheta} \leq N^2 \delta_c, \] as $P_G$ is a unitary matrix and $\normtwo{\hat H^{\otimes 2n}} = N$.
This implies that the element-wise error is now only bounded by $|\delta \theta_i| = |\theta_i - \tilde\theta_i| \leq N^2 \delta_c$. This relates to the element-wise error on $a_i$ as: \begin{align*} |\delta a_i| = |a_i - \tilde a_i| & = |\cos(\theta_i) - \cos(\tilde\theta_i)| \\ & = |\cos(\theta_i) - \cos(\theta_i + \delta\theta_i)| \\ & = |2 \sin(\delta \theta_i / 2)\sin(\theta_i + (\delta\theta_i/2))|\\ & \leq 2 |\sin(\delta \theta_i / 2)| \leq |\delta\theta_i| \leq N^2 \delta_c. \end{align*} In the final step, we used the inequality $2|\sin(x/2)| \leq |x|$. We thus have that $\|\bfdelta\bfa\|_2 \leq N^3\delta_c$. As $\|A\|_2 \leq \|A\|_F = \|\vecc(A)\|_2$ we get the upper bound. \end{proof} Numerical results that verify this error bound are presented in \cref{fig:bound} which shows the result of noise-free QCLAB~\cite{qclab} simulations for compressed FABLE circuits. The FABLE circuits are generated for randomly generated matrices with entries drawn from the standard normal distribution, the matrices are 2 to 7 qubit operators such that the FABLE circuits require 5 to 15 qubits. We observe that the bound from \Cref{thm:bound} always holds but is overly pessimistic. Not shown in \cref{fig:bound} are the majority of random realizations with an error close to the $10^{-16}$ machine precision. A similar analysis can be performed for the complex-valued case where the error on the magnitude and phase of the matrix data have to be considered independently. \begin{figure} \caption{Error on simulated data and theoretical error bound from \Cref{thm:bound} \label{fig:bound} \end{figure} \section{Discussion and examples}\label{sec:ex} FABLE circuits provide a fast and convenient way of generating quantum circuits consisting of simple 1- and 2-qubit gates that block-encode arbitrary matrices. Thanks to their versatility, we expect them to become very useful to run and benchmark quantum algorithms derived from the QSVT for small to medium-scale experiments in the NISQ era.
The conditions imposed on the block-encoded matrix are minimal: a FABLE circuit exists for any matrix that satisfies $|a_{ij}| \leq 1$ as this is the only requirement for real rotation angles to exist according to \eqref{eq:theta} and \eqref{eq:theta2}. For matrices of small norm, the probability of a successful measurement can vanish. In the extreme case of the all-zero matrix, a FABLE encoding exists, but the probability of measuring the desired state will also be zero. For the remainder of this section, we show the gate complexities of FABLE circuits for three different model problems: a Heisenberg Hamiltonian, a Fermi--Hubbard Hamiltonian in 1D and 2D, and a discretized Laplacian operator in 1D and 2D. We have selected these example problems as they perform well within FABLE encodings and require relatively few gates. However, we do indicate that for more complicated model problems this is not necessarily the case, which highlights the limitations of our approach. \subsection{Heisenberg Hamiltonians} Block-encodings of Hamiltonians are of particular interest as they can be used in the QSVT for Hamiltonian simulation~\cite{gisu2018} or for preparing ground and excited states~\cite{Lin2020}. Where previous theoretical analysis showed that these methods have optimal asymptotic scaling in terms of oracle query complexity, we can now directly use FABLE to obtain an upper bound for the asymptotic gate complexity for specific Hamiltonians. We study the performance of FABLE for block-encoding localized Hamiltonians. 
Specifically, we are interested in Heisenberg-type spin chain Hamiltonians \begin{align*} H = &\sum_{i=1}^{n-1} J_x \, X\p{i}X\p{i+1} + J_y \, Y\p{i}Y\p{i+1} + J_z \, Z\p{i}Z\p{i+1} \\ & + \sum_{i=1}^{n} h_z Z\p{i}, \end{align*} where $X\p{i}$, $Y\p{i}$, and $Z\p{i}$ are the Pauli matrices, \begin{equation} X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \quad Y = \begin{bmatrix} 0 & -\I \\ \I & 0 \end{bmatrix}, \quad Z = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \end{equation} acting on the $i$th qubit. We consider a Heisenberg XXX model where $J_x = J_y = J_z$, $h_z = 0$ for systems of $2, \ldots, 7$ qubits. We set the compression threshold $\delta_c$ to $\epsilon_m$, with $\epsilon_m$ the machine precision. The CNOT and $R_y$ gate complexities are summarized in \cref{fig:HeisXXX_gates}. We observe that even with such a small compression threshold, FABLE can give an accurate encoding of a Heisenberg XXX model with just $50\%$ of the CNOT gates and $25\%$ of the $R_y$ rotations. \begin{figure} \caption{CNOT (\emph{blue circles} \label{fig:HeisXXX_gates} \end{figure} More general Heisenberg models with different values for $J_x$, $J_y$ and $J_z$ or with an external field ($h_z \neq 0$) can be encoded with FABLE, but their circuits cannot be compressed and sparsified even though the corresponding matrices are sparse. \subsection{Fermi--Hubbard model} The second example we consider is a special case of the Fermi--Hubbard Hamiltonian, \begin{equation} H = - t \sum_{ij}\sum_{\sigma\in\lbrace \uparrow,\downarrow \rbrace} c^{\dagger}_{i\sigma}c_{j\sigma} + U \sum_i c^{\dagger}_{i\uparrow}c_{i\uparrow}c^{\dagger}_{i\downarrow}c_{i\downarrow}, \end{equation} where $c^{\dagger}_{i\sigma}$ ($c_{i\sigma}$) is the creation (annihilation) operator for site $i$ and spin $\sigma$. The first term is the hopping term with strength $t$, the second term is the interaction term with strength $U$.
We generate the Hamiltonian through OpenFermion~\cite{openfermion} for the case $t=1$, $U=0$, i.e., non-interacting fermions on 1D and 2D lattices with two spin orbitals per site and non-periodic boundary conditions. The resulting Hamiltonians are mapped to qubits using the Bravyi--Kitaev transformation and block-encoded with FABLE. The compression threshold is again set to $\epsilon_m$ and the gate complexities of the FABLE circuits are listed in~\Cref{tab:hubbard}. We observe that both in 1D and 2D the operators can be encoded with a relatively small fraction of the maximum number of gates and that this fraction decreases for growing problem size. We ran the same experiment for interacting Hubbard Hamiltonians ($U \neq 0$) but for this case, the matrices do not compress well and the maximum number of gates is required to encode the Hamiltonians. \begin{table*}[t] \centering\small \begin{tabularx}{0.9\textwidth}{lcC|cC|cC} \toprule & & & \multicolumn{2}{c|}{CNOT} & \multicolumn{2}{c}{$R_y$} \\[5pt] Model & Size & $n$ qubits & Gates & Fraction[\%] & Gates & Fraction[\%] \\ \midrule \multirow{5}{*}{1D Hubbard} & 2 sites & 4 & 130 & 50.8 & 65 & 25.4 \\ & 3 sites & 6 & 1,098 & 26.8 & 513 & 12.5 \\ & 4 sites & 8 & 6,666 & 10.2 & 3,073 & 4.7 \\ & 5 sites & 10 & 35,850 & 3.4 & 16,385 & 1.6\\ & 6 sites & 12 & 180,234 & 1.1 & 81,921 & 0.5\\[5pt] \multirow{2}{*}{2D Hubbard} & $2 \times 2$ sites & 8 & 8,706 & 13.3 & 3,329 & 5.1\\ & $2 \times 3$ sites & 12 & 252,626 & 1.5 & 90,113 & 0.5\\ \bottomrule \end{tabularx} \caption{CNOT and $R_y$ gate complexities for 1D and 2D Hubbard models, both in absolute number of gates and as a fraction of the maximum number of gates ($4^n$).} \label{tab:hubbard} \end{table*} \subsection{Elliptic partial differential equations} As a third example we consider 1D and 2D discretized Laplace operators which are relevant in the solution of elliptic partial differential equations such as the Laplace equation $\Delta u = 0$ and the Poisson
equation $\Delta u = f$. We consider finite difference discretization of the second-order derivatives. For example, on a 1D equidistant grid with Dirichlet boundary conditions, we approximate the second-order derivative at point $x_j$ as: \begin{equation} u^{\prime\prime}_j \approx \frac{u_{j+1} - 2u_j + u_{j-1}}{\Delta x^2}, \end{equation} where $\Delta x$ is the step size. This three-point stencil leads to the 1D discretized Laplace operator: \begin{equation} L_{xx} = \begin{bmatrix} 2 & -1 & 0 & \cdots & * \\ -1 & 2 & -1 & \ddots & \vdots \\ 0 & \ddots & \ddots & \ddots & 0 \\ \vdots & \ddots & -1 & 2 & -1 \\ * & \cdots & 0 & -1 & 2 \end{bmatrix}, \label{eq:laplace1D} \end{equation} where the entries $*$ in the lower-left and upper-right corner are either both equal to $0$ for non-periodic boundary conditions, or both equal to $-1$ for periodic boundary conditions. In 2D, the discretized Laplace operator becomes the Kronecker sum of discretizations along the $x$- and $y$-directions: \begin{equation} L = L_{xx} \oplus L_{yy} = L_{xx} \otimes I + I \otimes L_{yy}, \label{eq:laplace2D} \end{equation} which corresponds to a five-point stencil. This allows for periodic or non-periodic boundary conditions and the number of discretization points can differ in both dimensions. Second-order derivatives are of great importance in quantum field theory~\cite{QFT} and can be useful to simulate scalar fields with quantum computers. We generate 1D Laplacian matrices~\eqref{eq:laplace1D} on $2$ to $7$ qubits and use FABLE to generate block-encoding circuits. The compression threshold is set to $\epsilon_m$ such that an accurate block-encoding is obtained. The results are summarized in the first row of \cref{fig:Laplace} for non-periodic and periodic Dirichlet boundary conditions. Similarly, the second row of \cref{fig:Laplace} shows the results of encoding 2D Laplacians on different rectangular grids requiring at most $7$ qubits.
The following pertinent observations can be made from these results. First, periodic boundary conditions lead to much reduced gate counts compared to non-periodic boundary conditions. This is natural as there exists more structure in the periodic Laplacians. Second, the 2D Laplacians can be compressed better than 1D Laplacians and require in some cases fewer than 5\% of the maximum number of gates. Third, for the 2D Laplacians, discretization grids with a smaller aspect ratio, i.e., closer to a square, require in general fewer gates than more rectangular grids for the same number of grid points. \begin{figure*} \caption{CNOT (\emph{blue circles}) and $R_y$ gate complexities of FABLE circuits for 1D (\emph{top row}) and 2D (\emph{bottom row}) discretized Laplacians.} \label{fig:Laplace} \end{figure*} \subsection{Quantum image encodings} The circuit construction in FABLE is based on the concept of uniformly controlled rotations introduced in~\cite{Mottonen2004}. This circuit construction method was recently used in the QPIXL framework that unifies quantum image representations~\cite{amankwah2021}. In all proposed quantum image representations and quantum image processing algorithms, the image is encoded in a quantum state and the circuit implementation becomes a state preparation problem. FABLE, and block-encodings in general, can be directly used to encode image data by embedding it in the unitary operator itself. This alternative to previously proposed quantum image encodings could potentially have benefits for certain quantum image processing tasks as it trivially preserves the 2D structure in the image. \section{Conclusion}\label{sec:conc} In this paper, we have introduced FABLE, an algorithm for \emph{fast} generation of quantum circuits that \emph{approximately} block-encode arbitrary target matrices. FABLE circuits obtain the optimal asymptotic gate complexity for generic dense operators and can be efficiently compressed for many problems of interest. Circuit compression and sparsification can lead to a significant reduction in gate complexity.
More specifically, matrices that are sparse in the Walsh--Hadamard domain can in general be efficiently encoded. An interesting future research direction would be to precisely characterize the class of matrices that are sparse in the Walsh--Hadamard domain as these have great potential for successful experimental realizations of quantum algorithms based on block-encodings. We analyzed the relation between the compression threshold and the approximation error in the block-encoding and provide an upper bound on this error in \Cref{thm:bound} which shows a linear relation between the compression threshold, the problem size, and the approximation error. Our numerical simulations show that this bound can be saturated for larger thresholds and problem sizes. We illustrated FABLE on example problems ranging from Heisenberg and Hubbard Hamiltonians to discretized Laplacian operators. These examples show that high compression levels are feasible for certain structured problems, but we have observed that more general Hamiltonians with more interaction terms do not compress well. This highlights the limitation of our direct approach to encoding the matrix data. Balancing direct FABLE encodings of smaller terms in the Hamiltonian expression with a \emph{Linear Combination of Unitaries} (LCU)~\cite{Berry2015c} circuit construction in order to combine the smaller terms into a large-scale block-encoding can potentially mitigate this issue. This is another future research direction. \section*{Acknowledgment} The authors would like to thank Michael Kreschkuk and David Williams-Young for their input and discussions which have improved the quality of this work. This work was supported by the Laboratory Directed Research and Development Program of Lawrence Berkeley National Laboratory and used resources of the National Energy Research Scientific Computing Center (NERSC), a U.S. 
Department of Energy Office of Science User Facility located at Lawrence Berkeley National Laboratory, operated under Contract No. DE-AC02-05CH11231. \end{document}
\begin{document} \draft \title{Propagators of the Jaynes-Cummings model in open systems} \author{T.W. Chen, C.K. Law and P.T. Leung} \address{Department of Physics, The Chinese University of Hong Kong, Shatin, Hong Kong SAR, PR China} \date{\today} \maketitle \begin{abstract} We present a propagator formalism to investigate the scattering of photons by a cavity QED system that consists of a single two-level atom dressed by a leaky optical cavity field. We establish a diagrammatic method to construct the propagator analytically. This allows us to determine the quantum state of the scattered photons for an arbitrary incident photon packet. As an application, we explicitly solve the problem of a single-photon packet scattered by an initially excited atom. \end{abstract} \pacs{PACS number(s): 42.50.-p, 03.65.Nk, 03.65.Ca} \narrowtext \section{Introduction} The engineering of novel quantum states of photons is a topic of interest fundamentally and for applications. As is well known in atomic and particle physics, exotic states could result from scattering processes. This suggests light scattering by suitable quantum systems can be an important tool for state engineering. A common and yet realistic situation in quantum optics involves the scattering of photon wave packets, comprising a few photons, from atoms and other photons situated inside an optical cavity. The atoms and the cavity field together form a cavity QED system that scatters incident photon packets (Fig. 1). The main question is: What is the quantum state of the scattered photons? As the cornerstone of a series of investigations of this topic, we establish in the present paper a propagator method to describe the scattering of photon packets from a cavity where a two-level atom, either in its ground or excited state, is placed with the companion of some quasi-mode photons. 
After the interaction, the input photons, as well as the quasi-mode photons, leak out of the cavity and become the output state of the system. The propagator method proposed here is capable of extracting the details of the output state and thus forms a powerful tool to analyze the physical content underlying the scattering process. In addition to its potential applications in the study of novel quantum states, the model considered here is in fact a generalized variant of the Jaynes-Cummings model (JCM) \cite{JC}, which has been a fundamental model in the realm of photon-atom interaction ever since its first introduction \cite{JC_review}. Not only does it provide the simplest description of an atom interacting with quantized fields in optical cavities, JCM is also an important tool for controlling quantum states \cite{Law1}. One prominent generalization of the JCM is the inclusion of the leakage effect of the cavity, which is inevitable in any real experimental setup. Indeed, recent studies in cavity QED have also emphasized the important role of dissipation effects in quantum information devices \cite{Knight}. In conventional approaches to JCM in a leaky cavity it is customary to consider the space outside the cavity --- the environment --- as a Markovian bath, followed by solving the master equation of the reduced density matrix of the cavity field \cite{Puri,Gardiner,Gea,Dae,Won}. Thus, energy inside the cavity flows unidirectionally to the environment, which plays the role of a sink of energy and the information on the exact state of the environment is reckoned unimportant and is lost during the evolution. The model discussed in this paper distinguishes itself from other dissipative JCM's in that the cavity and its environment play an equal role during the interaction. These two parts, constituting the ``universe" in consideration, communicate their information through partially transmitting walls dividing them.
Photons inside the cavity, described in terms of discrete quasi-modes, interact with the atom and eventually leak out of the cavity. On the other hand, incident photons characterized by continuous wave number enter the cavity, participate in the interactions with the atom, and leave the cavity in its final state as well. More importantly, these two kinds of photons lose their respective identities after entering (or returning to) the environment and interfere to form novel photon states. It is for this reason that we devote the current paper to a proper formalism describing this generalized JCM model. To this end, we present here a propagator method that solves directly the quantum state of the whole amalgamated system, consisting of the atom, the cavity and the environment. Rather than discrete cavity modes, the atom now couples with the continuous field modes of the whole system \cite{Loudon,Leung,Scully,Kupi}. In this sense, the artificial separation between the cavity and its environment is completely eliminated, and the effect of dissipation can be treated in a more rigorous and fundamental way. Once the propagator of the system is known, the exact details of the output photons can be obtained immediately without further ado. This knowledge is bound to be important for issues related to quantum information and quantum measurement. The pure state approach has previously been used to study resonance fluorescence \cite{Mollow} and spontaneous atomic decay in a vacuum cavity, and exact solutions have been found \cite{Leung,Lewenstein,Uji,Gar}. The major objective in the present paper is to formulate a comprehensive theory studying the dynamical response of a two-level atom under the influence of photons inside the cavity and from the environment. In a recent paper by the authors \cite{Law2}, a less general case with the input photons restricted to the cavity mode (i.e., quasi-mode) was considered and solved by method of Laplace transform. 
This restriction is now removed in this paper. By evaluating the propagator of the system, whose construction is based on a diagrammatic expansion method, one can readily handle situations with an arbitrary initial state. Under the rotating-wave-approximation, the full propagator is a block diagonal matrix with a sequence of $2 \times 2$ matrices forming its diagonal block. Each of these matrices is a propagator of the system in a subspace categorized by the total excitation number $N$ (to be defined rigorously in Sect. IV), providing a natural classification of relevant propagators. A construction method is established here to express the $N$-excitation propagators in terms of those of the lower excitations. Thus, the full propagator of the system can be obtained in a systematic way, paving the way for further investigation into the quantum state of the output photons. This paper is organized as follows: In Sec.~II, we describe the system under study, its normal modes, the atom-field interactions and obtain the Hamiltonian. An introduction on the propagator method and the related diagrammatic expansion will be given in Sec.~III. The general solution for the propagator is derived in Sec.~IV. We then apply the propagator for $N=2$ to study the scattering of a single-photon packet from a two-level atom in its excited state in Sec.~V. We draw our conclusion in Sec.~VI. \section{The Model} \subsection{The cavity and continuous modes} We consider here a two-sided Fabry-Perot cavity in one-dimensional space. The two transmitting mirrors (hereafter referred to as mirrors R and L) are placed at $x=\pm L/2$. The set of continuous modes of such a Fabry-Perot cavity is known in the literature \cite{Loudon,Kupi}. In the following, we will briefly sketch the main result for reference. The two mirrors enclosing the cavity are modeled by two thin dielectric slabs of thickness $l$ and refractive indices $n_{\alpha}$ ($\alpha=$L,R). 
In the limit $l \rightarrow 0$ and $n_{\alpha} \rightarrow \infty$ such that $n_{\alpha}^2 l \rightarrow \mu_{\alpha}$ is finite, the corresponding complex amplitude reflection $r_\alpha$ and transmission coefficients $t_\alpha$ ($\alpha={\rm L,R}$) are in turn expressed as \begin{eqnarray} \label{transmission} r_\alpha &=& \frac{i k \mu_\alpha}{2 - i k \mu_\alpha} \, , \\ t_\alpha &=& \frac{2}{2 - i k \mu_\alpha} \, , \end{eqnarray} where $k$ is the wave number. There are two independent sets of modes for the entire system (polarization is ignored in the current study), namely, the left-propagating modes $u_{\rm L} (k,x)$ and the right-propagating modes $u_{\rm R}(k,x)$, which are given by \cite{Loudon,Kupi}: \begin{equation} \label{Left_mode} u_{\rm L} (k,x) = \left\{ {\begin{array}{*{20}c} {e^{ikx} + R_{\rm L}(k) e^{ - ikx} } \\ {I_{\rm L}(k) e^{ikx} + J_{\rm L}(k) e^{ - ikx} } \\ {T_{\rm L}(k) e^{ikx} } \\ \end{array}} \right. \begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} { - \infty < x < - L/2} \\ {- L/2 < x < L/2} \\ {L/2 < x < \infty } \\ \end{array} \end{equation} \begin{equation} \label{Right_mode} u_{\rm R} (k,x) = \left\{ {\begin{array}{*{20}c} {T_{\rm R}(k) e^{-ikx}} \\ {I_{\rm R}(k)e^{-ikx} + J_{\rm R}(k) e^{ ikx} } \\ {e^{-ikx} + R_{\rm R}(k) e^{ikx}} \\ \end{array}} \right. 
\begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} {} \\ {} \\ {} \\ \end{array} \begin{array}{*{20}c} { - \infty < x < - L/2} \\ {- L/2 < x < L/2} \\ {L/2 < x < \infty } \\ \end{array} \end{equation} where \begin{eqnarray} \label{RLk} R_{\rm L}(k) &=& \{ r_{\rm L} e^{-ikL} + r_{\rm R} e^{ikL + 2 i \arg{t_{\rm L}}}\}/D(k) \, , \\ \label{ILk} I_{\rm L}(k) &=& t_{\rm L}/D(k) \, , \\ \label{JLk} J_{\rm L}(k) &=& t_{\rm L} r_{\rm R} e^{ikL}/D(k) \, , \\ \label{TLk} T_{\rm L}(k) &=& T_{\rm R}(k) = t_{\rm L} t_{\rm R} /D(k) \, , \end{eqnarray} with \begin{equation} \label{Dk} D(k) = 1 - r_{\rm L} r_{\rm R} e^{2 i k L} \, . \end{equation} Likewise, $I_{\rm R}(k)$, $J_{\rm R}(k) $ and $R_{\rm R}(k) $ can be obtained by interchanging the roles of L and R in the above equations. The left-luminating modes are shown in Fig.~\ref{L_mode_fig}. The mode functions $u_{\rm L} (k,x)$ and $u_{\rm R}(k,x)$ ($k \ge 0$) together form a complete orthonormal set in $-\infty \le x \le \infty$, satisfying the orthonormal condition: \begin{equation} \int^{\infty}_{-\infty} n(x) u_\alpha(k,x) u^{\ast}_{\beta} (k',x) dx = 2 \pi \delta_{\alpha \beta} \delta (k-k') \, , \end{equation} where $\alpha$,$\beta=$L,R. The quasi-mode frequencies, ${\bar k}_m$ ($m=1,2,3,\ldots$), defined by the zeros of $D(k)$, are given explicitly by \begin{equation} {\bar k}_m = \frac{i}{2L} {\rm ln}({r_{\rm L}r_{\rm R}}) + \frac{m \pi}{L} \, . \end{equation} For real-valued $k$ close to a quasi-mode frequency, i.e., $|k - {\bar k}_m| \ll \pi/L$, we have \begin{equation} D(k) = -2iL {\large (} \Delta k + i \kappa_{\rm c} {\large)} \, , \end{equation} where $\Delta k = k - k_{\rm c}$, with \begin{eqnarray} k_{\rm c} &=& \frac{m \pi}{L} - \frac{1}{2L} {\rm Im}[\ln({r_{\rm L} r_{\rm R}})] \, , \\ \kappa_{\rm c} &=& - \frac{1}{2L} {\rm Re}[\ln({r_{\rm L}r_{\rm R}})] \, . \end{eqnarray} Here $\kappa_{\rm c}$ is the decay rate of the cavity. 
\subsection{The two-level atom and interaction Hamiltonian} \label{TLA coupling with EM-field} Consider a system of a two-level atom placed inside the cavity described above (or other leaky cavities in general) at $x=x_0$. The ground-state energy of the atom is arbitrarily taken as zero, while the excited-state energy is $\omega _A$ in units of $\hbar =c=1$. The full Hamiltonian of the system in the rotating-wave-approximation is given by \begin{eqnarray} \label{Hamiltonian_1} H &=&\omega _A\sigma_{+}\sigma_{-}+\int_0^\infty k(a_{k{\rm L}}^{\dagger} a_{k{\rm L}}+a_{k{\rm R}}^{\dagger}a_{k{\rm R}}) dk \nonumber \\ &&+\int_0^\infty \left\{\left[ g_{{\rm L}}(k)a_{k{\rm L}}+g_{{\rm R}}(k)a_{k{\rm R}}\right]\sigma _{+}+{\rm h.c.}\right\} dk \,, \end{eqnarray} where $a_{k\alpha }$ and $a_{k\alpha }^{\dag }$ ($\alpha ={\rm L},{\rm R}$) are the annihilation and creation operators of the $k\alpha $-mode photon, $\sigma _{\pm }$ are the pseudo-spin flip operators of the atom. They satisfy the usual commutation relations: $\left[a_{k\alpha},a_{k^{\prime}\alpha^{\prime}}^{\dagger}\right] = \delta(k-k^{\prime})\delta_{\alpha \alpha^{\prime}} \, , \left[a_{k\alpha},a_{k^{\prime}\alpha^{\prime}}\right] = \left[a_{k\alpha}^{\dagger},a_{k^{\prime} \alpha^{\prime}}^{\dagger}\right] =\left[ a_{k\alpha},\sigma_{\pm}\right] = \left[ a_{k\alpha}^{\dag},\sigma_{\pm}\right] =0 \,$, and $\left\{\sigma_{-} , \sigma_{+} \right\} = 1$. The coupling constant $g_{\alpha }(k)$ of the atom with the $k\alpha $-mode photon depends on the dipole moment of the atom and is proportional to $u_\alpha(k,x_0)$. Here we are particularly interested in the case where the transition frequency of the atom is close to one of the resonance frequencies of the cavity, say, $k_{\rm c}$. Hence, only those continuous modes with frequencies near $k_{\rm c}$ have significant interactions with the atom and in this single-mode approximation, $g_{\alpha }(k) \propto (\Delta k+i\kappa _{\rm c})^{-1}\,$. 
Although the atom ostensibly couples with both the R and L modes, it is always possible to use a unitary transformation to redefine the photon modes so that the atom interacts with only one set of modes. For concreteness, we consider in the present paper a symmetric cavity with identical mirrors and the atom being situated at its center. However, it can be proved that generalizations to cases with dissimilar mirrors, arbitrary atomic position and one-sided cavity are straightforward. For this specific model, we have $u_{{\rm L}}(k,x_{0})=u_{{\rm R}}(k,x_{0})$, $g_{{\rm L}}(k)=g_{{\rm R}}(k)$. Accordingly, one can define a new basis of photons by the unitary transformation \begin{eqnarray} \label{ak+} a_{k +} &=& \frac{1}{\sqrt{2}} {\large (} a_{k {\rm L}} + a_{k {\rm R}} {\large )} \, , \\ \label{ak-} a_{k -} &=& \frac{1}{\sqrt{2}} {\large (} a_{k {\rm L}} - a_{k {\rm R}} {\large )} \, . \end{eqnarray} It is readily observed that the ``$-$" modes do not couple to the atom. In the following discussion, we will ignore the ``$-$" modes and will focus on the evolution of the ``$+$" modes, with the ``$+$" index being suppressed. Hence, the full Hamiltonian reduces to \begin{equation}\label{Hamiltonian} H =\omega _A\sigma _{+}\sigma _{-}+\int_0^\infty k a_{k}^{\dag }a_{k} dk +\int_0^\infty {\large [ } g(k)a_{k}\sigma _{+}+g^{\ast }(k)a_{k}^{\dag }\sigma _{-} {\large ]} dk \,, \end{equation} where $a_{k}$ here actually denotes $a_{k+}$ and $g(k)=\sqrt {2} g_{\rm R}(k)$. \section{Propagator and Feynman Diagrams} \label{feynman_rules} The dynamical response of a system with a Hamiltonian $H$ is governed by the retarded Green's function that satisfies the equation: \begin{equation} \left( i \frac{d}{dt} - H\right) K_{+}(t,t^{\prime})=\delta(t-t^{\prime}) \, , \end{equation} and is null for $t < t^{\prime}$. Obviously, $K_{+}$ can be given explicitly by $K_{+}(t,t^{\prime}) = e^{-iH(t-t^{\prime })}\theta(t-t^{\prime})$, where $\theta (x)$ is the Heaviside step function.
Accordingly, its Fourier transform, $G(\omega)$, defined by the relation \begin{equation} \label{inverse_fourier} G(\omega) = - i\int_{ - \infty }^\infty {K_{+}(t,0)} e^{ i\omega t} dt \, \end{equation} and termed the retarded propagator $G(\omega)$, can be expressed symbolically as \begin{equation} G(\omega) = \frac{1}{\omega - H } \, , \end{equation} where the prescription $\omega = \mathop {\lim}\limits_{\epsilon \rightarrow 0^+} (\omega + i \epsilon)$ is assumed hereafter. To establish a diagrammatic expansion for $G(\omega)$, we separate the Hamiltonian in the form: $H=H_0+V$, where \begin{equation} H_0=\omega _A\sigma _{+}\sigma _{-}+\int_0^\infty k a_{k}^{\dag }a_{k} dk \, \end{equation} is the free atom-field Hamiltonian, and \begin{equation}\label{} V=\int_0^\infty {\large [ } g(k)a_{k}\sigma _{+}+g^{\ast }(k)a_{k}^{\dag }\sigma _{-} {\large ]} dk \end{equation} represents the interaction between the atom and the field. Hence, the propagator can formally be expanded in a power series of $V$, yielding \begin{eqnarray} \label{expandH} \frac{1}{\omega-H_0 - V}&=& \frac{1}{\omega - H_0} + \frac{1}{\omega - H_0} V \frac{1}{\omega - H_0} \nonumber \\ && + \frac{1}{\omega - H_0} V \frac{1}{\omega - H_0} V \frac{1}{\omega - H_0} + \cdots \, . \end{eqnarray} The propagator so defined is an operator. In the energy-eigenstate basis, the transition amplitudes are given by the matrix elements of the propagator, also referred to as ``propagator" hereafter. The main purpose of this paper is to calculate these amplitudes by the associated Feynman diagrams as illustrated in Fig.~\ref{diag_sym}. The basic construction rules and interpretations of these diagrams are specified as follows: 1. {\it External and Internal Lines}: Photons are represented by wavy lines labelled by their momenta $k$. Atoms in the excited and the ground states are respectively represented by solid and dashed lines. 2.
{\it Vertex Factors}: Each vertex contributes a factor $g(k)$ (photon absorption) or $g^{\ast}(k)$ (photon emission) to the associated amplitude. 3. {\it Free Propagators}: Each segment in between successive vertices contributes a factor $(\omega-E)^{-1}$ to the associated amplitude, where $E$ is the energy of the free Hamiltonian (i.e., $H_0$) in this segment. 4.{\it \ Integrate Over Internal Momenta}. For each internal momentum $k$, write down a factor $dk$ and integrate. \section{Evaluation of the propagators} In this section, we evaluate the matrix elements of the propagator using the diagrammatic rules stated in Sec.~\ref{feynman_rules}. Owing to the rotating-wave-approximation, it is readily observed from Eq.~(\ref{Hamiltonian}) that the total excitation number \begin{equation} N=\sigma _{+}\sigma _{-}+\int_0^{\infty} a_{k}^{\dag }a_{k} dk \end{equation} is a constant of motion, resulting in vanishing propagators from an initial state to a final state with different excitation numbers. Therefore, the full propagator can be represented by an infinite sequence of $2\times2$ matrices, each characterized by its excitation number $N$. Hereafter we will, for convenience, define an energy eigenstate of $H_0$ with an excitation number $N$ by: \begin{equation} \label{photon_state} |p;k_{1},k_{2},\cdots k_{N-p}\rangle = \frac{1}{\sqrt{(N-p)!}} a_{k_1}^{\dag} a_{k_2}^{\dag} \cdots a_{k_{N-p}}^{\dag} \sigma_{+}^p |0;\phi\rangle \equiv |p;{\bf K}_{N-p}\rangle \,, \end{equation} where $p=0$($1$) if the atom is in its ground (excited) state, and $|\phi\rangle$ is the vacuum-field state. The factor $1/\sqrt{(N-p)!}$ is introduced here to take care of the multiple-count of the bosonic states in integrations and $|p;k_{1},k_{2},\cdots k_{N-p}\rangle$ is not necessarily normalized to unity. 
In terms of this notation the four propagators with excitation number $N$ are \begin{equation} G_{pq}^{(N)}(\omega ;{\bf K}_{N-p},{\bf K^{\prime }}_{N-q})\equiv \left\langle p;{\bf K}_{N-p}\right| \frac{1}{\omega -H}\left| q; {\bf K^{\prime }}_{N-q}\right\rangle \,, \label{def_GN00} \end{equation} where $p,q=0,1$. As an example, the propagator of zero excitation number, governing the propagation of a ground-state atom in vacuum, is trivially given by \begin{equation} \label{vacuum_propagator} G_{00}^{(0)}(\omega ;\phi ,\phi)= \frac{1}{\omega } \, , \end{equation} for there is only one diagram, namely, the free propagation of the collective vacuum state. \subsection{Quasi-mode propagators} \label{quasi_mode_sec} Before proceeding to explicit evaluation of general propagators, we introduce here the concepts of quasi-mode photon states and quasi-mode propagators. A normalized quasi-mode single-photon state is defined by \begin{equation} |1_{\rm c} \rangle = a_{\rm c}^{\dag} | \phi \rangle \, , \end{equation} where $a_{c}^{\dag}$ is the effective creation operator for the quasi-mode: \begin{equation} \label{def_ac} a_{\rm c}^{\dag} = \frac{1}{\sqrt{\lambda}} \int_{0}^{+\infty} dk \, g^{\ast}(k) \, a_{k}^{\dag} \, . \end{equation} The quantity $\lambda$ in the normalization constant is the coupling strength defined by \begin{equation} \lambda = \int_{0}^{+\infty} g(k) g^{\ast}(k) dk \, . \end{equation} Physically speaking, the state $| 1_{\rm c} \rangle$ is, in a perturbative sense and also in the single-mode-approximation, the cavity field set up by the atom during de-excitation process. Similarly, the atom-field state with $N$ excitations, where there are $N-p$ quasi-mode photons and $p$ atomic excitation, is defined by \begin{equation} |p;(N-p)_{\rm c}\rangle \equiv \frac{1}{\sqrt{(N-p)!}}(a_{\rm c}^{\dag })^{(N-p)}\sigma _{+}^{p}|0;\phi\rangle \,. 
\end{equation} We therefore accordingly define the $N$-excitation quasi-mode propagator by \begin{equation} \label{def_GcN00} \Phi^{(N)}_{pq}(\omega) = \left\langle p;(N-p)_{\rm c} \right| \frac{1}{\omega - H} \left| q;(N-q)_{\rm c} \right\rangle \, . \end{equation} From the Hamiltonian given in Eq.~(\ref{Hamiltonian}), the importance of the quasi-mode propagator is readily clear. Aside from the input and output photons, those present in the intermediate states are all quasi-mode photons. Thus, these {\em quasi-mode propagators form the backbone of our theory from which other propagators can be derived}. From definition~(\ref{def_GcN00}), it is obvious that the vacuum propagator in Eq.~(\ref{vacuum_propagator}) is the simplest quasi-mode propagator: $\Phi _{00}^{(0)}(\omega ) = G_{00}^{(0)}(\omega ;\phi ,\phi)=\omega^{-1}$. We begin with the quasi-mode propagator of single excitation \begin{equation} \Phi_{11}^{(1)}(\omega)=\langle 1;\phi|\frac{1}{\omega -H} |1;\phi\rangle \, . \end{equation} All relevant diagrams are shown in Fig.~\ref{Gee_fig} and the physical picture indicated by the diagrams is clear. The excited-state atom may freely propagate in vacuum, possibly followed by equal numbers of emissions and absorptions of quasi-mode photons, and exits in its excited state. In Fig.~\ref{Gee_fig}, bold-wavy lines are used to represent intermediate quasi-mode photon state, to distinguish it from the input and output normal-mode states. According to the Feynman rules, $\Phi_{11}^{(1)}(\omega)$ is given by an infinite series: \begin{equation} \Phi_{11}^{(1)}(\omega)=\frac{1}{{\omega -\omega _A}} \left( {1+\frac{\zeta }{\omega -\omega _A}+\frac{\zeta ^{2}} {(\omega -\omega _A)^{2}}+\frac{\zeta ^{3}} {(\omega -\omega _A)^{3}}+\cdots }\right) \label{x_series} \\ \,, \end{equation} where \begin{equation} \zeta(\omega) =\int_{0}^{+\infty} \frac{g(k)g^{\ast }(k)}{\omega -k} dk \,. 
\end{equation} Adopting the single-mode approximation and, as usual, extending the lower limit of all the $k$-integrations from $0$ to $-\infty$, we find \begin{equation} \zeta (\omega)=\frac{\lambda }{\omega -k_{\rm c}+i\kappa _{\rm c}} \,. \end{equation} Therefore, Eq.~(\ref{x_series}) can be expressed in the closed form \begin{equation} \label{1st_quasi} \Phi_{11}^{(1)}(\omega)=\frac{A_{+}^{(1)}}{\omega -\Omega _{+}^{(1)}}+\frac{A_{-}^{(1)}}{\omega -\Omega _{-}^{(1)}}\,, \label{Ge0e0} \end{equation} where \begin{eqnarray} \label{Omega_eN} A_{\pm }^{(N)} &=& \frac{1}{2}\left[ 1\pm \frac{(\omega _A -k_{\rm c}+i\kappa_{\rm c})/2}{\sqrt{(\omega _A-k_{\rm c}+i\kappa _{\rm c})^{2}/4+N\lambda }}\right] \, , \\ \Omega _{\pm }^{(N)} &=& \frac{\omega _A}{2}+\left( N-\frac{1}{2} \right)\left( k_{\rm c}-i\kappa _{\rm c}\right) \pm \sqrt{\left( \frac{\omega_A-k_{\rm c} +i\kappa _{\rm c}}{2}\right) ^{2}+N\lambda} \, , \end{eqnarray} for $N=1,2,3,\ldots$. It is then obvious that $\sqrt{N \lambda}$ essentially plays the role of the $N$-photon Rabi frequency. We have derived in the Appendix the quasi-mode propagator for an arbitrary excitation number $N$. They are given by: \begin{eqnarray} \label{phi10=phi01} {\Phi }_{11}^{(N)}(\omega ) &=& \frac{A_{+}^{(N)}} {\omega -\Omega _{+}^{(N)}}+\frac{A_{-}^{(N)}} {\omega -\Omega _{-}^{(N)}} \, , \\ {\Phi}^{(N)}_{00} (\omega) &=& \frac{1-A^{(N)}_{+}} {\omega - \Omega^{(N)}_{+}} + \frac{1-A^{(N)}_{-}} {\omega - \Omega^{(N)}_{-}} \, , \\ {\Phi}^{(N)}_{01}(\omega) &=& \frac{\sqrt{N\lambda}} {\omega-\omega_A -(N-1)(k_{\rm c} - i \kappa_{\rm c})} {\Phi}^{(N)}_{00}(\omega) \, , \\ {\Phi }_{10}^{(N)}(\omega ) &=& {\Phi }_{01}^{(N)}(\omega ) \, . \end{eqnarray} \subsection{Propagators of single excitation ($N=1$)} \label{first_order} With the help of the quasi-mode propagators, we can derive the simplest propagators for the case $N=1$ to manifest the techniques in calculations aided by the diagrammatic scheme. 
We begin with the propagator $G^{(1)}_{11}(\omega;\phi,\phi)=\langle 1; \phi| {(\omega - H)}^{-1} | 1; \phi \rangle$, which coincides with the first-order quasi-mode propagator given in Eq.~(\ref{1st_quasi}), i.e., \begin{equation} G^{(1)}_{11}(\omega;\phi,\phi) = \Phi^{(1)}_{11}(\omega) \, . \end{equation} One can, of course, follow the same route in the derivation of $ G_{11}^{(1)}(\omega ;\phi ,\phi )$ to obtain the other three propagators of single excitation. However, it is clear that once a particular $N$-excitation propagator is derived, other members in the same class can be easily obtained in an alternative way by relating them to the one already obtained, as shown in Fig.~\ref{first-order}. For example, from Fig.~\ref{first-order}~(a), the propagator $G_{01}^{(1)}(\omega ;k,\phi )=\langle 0;k|{(\omega - H)}^{-1}|1;\phi\rangle$ can be related to $G_{11}^{(1)}(\omega ;\phi ,\phi )$ by: \begin{equation} G_{01}^{(1)}(\omega ;k,\phi )=\frac{g^{\ast }(k)}{\omega -k} G_{11}^{(1)}(\omega ;\phi ,\phi )\,. \label{G_g1k_e0} \end{equation} Likewise, we can show from Fig.~\ref{first-order}~(b) that the propagator \begin{equation} G_{10}^{(1)}(\omega ;\phi ,k)=\langle 1;\phi|\frac{1}{\omega -H}|0;k\rangle =\frac{g(k)}{\omega -k}G_{11}^{(1)}(\omega ;\phi ,\phi )\,. \label{G_e0_g1k} \end{equation} Eqs.~(\ref{G_g1k_e0}) and (\ref{G_e0_g1k}) also reveal a useful relation of the propagators, namely, the propagator $G_{pq}^{(N)}(\omega ;{\bf K}_{N-p},{\bf K^{\prime }}_{N-q})$ can be obtained from $G_{qp}^{(N)}(\omega ;{\bf K^{\prime }}_{N-q},{\bf K}_{N-p})$ by simply replacing each $g(k)$ with $g^{\ast }(k)$, and vice versa, which is obvious from the diagrammatic scheme and the Feynman rules. Finally, for the scattering of a photon from the $k^{\prime }$-th mode to the $k$-th mode by the ground-state atom, the corresponding propagator is $G_{00}^{(1)}(\omega ;k,k^{\prime })= \langle 0;k|{(\omega - H)}^{-1}|0;k^{\prime }\rangle$. 
Similarly, from Fig.~\ref{first-order}~(c), we have \begin{equation} G_{00}^{(1)}(\omega ;k,k^{\prime })=\frac{\delta (k-k^{\prime })}{\omega -k^{\prime}}+\frac{g(k)g^{\ast }(k^{\prime })} {(\omega-k)(\omega -k^{\prime })}G_{11}^{(1)}(\omega ;\phi ,\phi )\,. \label{G_g1k'_g1k} \end{equation} \subsection{Construction of propagators of general excitation number $N$} In general, an $N$-excitation propagator can be calculated from those of lower excitation numbers. We will develop here a systematic approach to evaluate the propagators with $N \ge 2$. First of all, in some diagrams there may exist ``spectator photons'' that do not interact with the atom in the whole evolution. These diagrams are said to be factorizable (or unlinked), and can be straightforwardly related to propagators of lower excitation numbers in a way to be stated explicitly in the following discussion. We therefore focus mainly on the {\em linked} diagrams, in which all the input and output photons take part in the interactions. These absorptions and emissions can take place in any order, resulting in different physical processes. For each linked diagram we label it with a set of momenta, ${\bf S} \equiv \{{\tilde{k}}_{1},{\tilde{k}}_{2}, \cdots, {\tilde{k}}_{2N-p-q}\}$, comprising the time-ordered momenta of the photons created or annihilated in the process. Here $p$ and $q$ are integers defined in Eq.~(\ref{def_GN00}). The elements of ${\bf S}$ are taken from $N-p$ output photons in the set ${\bf K}_{N-p}$ and $N-q$ input photons in the set ${\bf K^{\prime }}_{N-q}$, with no repetitions. In other words, a particular ${\bf S}$ represents a particular sequence of the $N-q$ absorptions of the input photons and $N-p$ emissions of the output photons. For a given ${\bf S}$, we introduce a quantity ${\cal L}_{pq}^{(N)}(\omega ;{\bf S})$ to denote the corresponding contributions to the propagator. The symbol ``${\cal L}$'' is used here to refer to ``linked'' diagrams. 
We note here that this convention should be accompanied by a final symmetrization of the propagator with respect to ${\bf K}_{N-p}$ and ${\bf K^{\prime }}_{N-q}$. There are at most only three kinds of photons present in any segment of the Feynman diagrams, namely, the input and output photons in continuous modes, and the quasi-mode photons. Between the $2N-p-q$ vertices associated with the sequence defined by ${\bf S}$, the atom only interacts with the quasi-mode photons. Therefore the evolution is governed by the quasi-mode propagators derived. This explains the importance of the quasi-mode propagators discussed in Sec.~\ref{quasi_mode_sec}. It is found that ${\cal L}_{pq}^{(N)}(\omega;{\bf S})$ can be written in the following compact form \begin{equation} \label{barG=prodX} {\cal L}_{pq}^{(N)}(\omega ;{\bf S}) =\left( \prod_{i=1}^{N-q} g({k^{\prime }}_{i})\right) \left( \prod_{i=1}^{N-p} g^{\ast}(k_{i})\right) \left( \prod_{i=0}^{2N-p-q} \Phi_{p_{i}q_{i}}^{(N_{i})}(\omega -E_{i})\right) \,. \end{equation} The physical picture of the above equation is clear. The $N-q$ absorptions of the input photons and the $N-p$ emissions of the output photons must take place at some time, with amplitude given by the first part of the equation. These events split the whole process into $2N-p-q+1$ segments. In a particular segment, the atom interacts only with the quasi-mode photons. This means that the propagator of this segment is governed by the quasi-mode propagators derived in the last section. The presence of continuous mode spectator photons in the $i$-th segment has the sole effect of shifting the frequency $\omega $ by $E_{i}$, which is the total energy of the continuous-mode photons present in the $i$-th segment \footnote{For example, in Fig.~\ref{Gekek}, consider the first stage of the second term in the summation: the excited atom interacts with a quasi-mode photon while the input normal-mode photon $k^{\prime}_1$ acts as a spectator photon. 
The evolution of the atom plus quasi-mode would be governed by the single-excitation quasi-mode propagator $\Phi^{(1)}_{11}(\omega)$ in the absence of $k^{\prime}_1$. However, when $k^{\prime}_1$ is present, the propagator of the whole system in this segment becomes $\Phi^{(1)}_{11}(\omega - k^{\prime}_1)$. }. While not explicitly shown, the ${\cal L}_{pq}^{(N)}(\omega;{\bf S})$ in Eq.~(\ref{barG=prodX}) depends on ${\bf S}$ through the terms $N_{i}$, $p_{i}$, $q_{i}$ and $E_{i}$. Summing all diagrams corresponding to all possible ${\bf S}$, we have \begin{equation} \label{Lambda} {\Lambda}_{pq}^{(N)}(\omega ;{\bf K}_{N-p},{\bf K^{\prime }} _{N-q}) \equiv \sum_{{\bf S}}{\cal L}_{pq}^{(N)}(\omega ;{\bf S})\,, \end{equation} which is the propagator that includes all linked diagrams only. To include the unlinked diagrams, we go back to the case that some of the input and output photons act as spectators in the whole process. There may be one spectator, which, without loss of generality, can be assumed to be $k_{N-p}$ (${k^{\prime}}_{N-q}$). The propagation of the remaining system is governed by ${\Lambda}^{(N-1)}_{pq}$. Similarly we can have two photons acting as spectators, which are assumed to be $k_{N-p}$ (${k^{\prime}}_{N-q}$) and $k_{N-p-1}$ (${k^{\prime}}_{N-q-1}$), and so on. The maximum possible number of spectator photons is $M=\min \{N-p,N-q\}$. Hence we have \begin{eqnarray} \label{solution} G_{pq}^{(N)}(\omega ;{\bf K}_{N-p},{\bf K^{\prime }}_{N-q}) &=& \sum_{{\rm sym}}\sum_{j=0}^{M} \left[ \left( \prod_{l=0}^{j-1} \delta (k_{N-p-l}-{k^{\prime}}_{N-q-l})\right) \right. \nonumber \\ && \times \left. {\Lambda}_{pq}^{(N-j)}(\omega -\sum_{l=0}^{j-1}k_{N-p-l}; {\bf K}_{N-j-p},{\bf K^{\prime }}_{N-j-q}) \right] \,. \end{eqnarray} In the above equation, $\mathop \sum \limits_{{\rm sym}}$ denotes the symmetrization of the expression with respect to the input and output photons, and ${\Lambda}$ is given by Eq.~(\ref{Lambda}). 
\subsection{Example: Propagators with excitation number $N=2$} The general expression of the propagators is simple if the excitation number $N$ is not large. In this section we will derive the four propagators of excitation number $N=2$. Consider first the propagator $G_{11}^{(2)}(\omega ;k_{1},k_{1}^{\prime }) \equiv \langle 1;k_{1}|( \omega -H)^{-1}|1;{k^{\prime }}_{1}\rangle$. According to Eq.~(\ref{solution}), we have \begin{equation} G_{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1})={\Lambda} _{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1}) +\delta (k_{1}-{k^{\prime }}_{1}) {\Lambda}_{11}^{(1)}(\omega -k_{1};\phi ,\phi )\,, \end{equation} where \begin{equation} {\Lambda}_{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1})=\sum_{{\bf S}} {\cal L}_{11}^{(2)}(\omega ;{\bf S})\,, \label{G2bar} \end{equation} with ${\bf S}=\{k_{1},{k_{1}}^{\prime }\}$ or $\{{k_{1}}^{\prime },k_{1}\}$. One can readily show that (see Fig.~\ref{Gekek}) \begin{equation} {\cal L}_{11}^{(2)}(\omega ;{\bf S}=\{k_{1},k_{1}^{\prime }\}) =g({k^{\prime }}_{1})g^{\ast }(k_{1}) {\Phi }_{10}^{(1)}(\omega -k_{1}){\Phi }_{11}^{(2)}(\omega ) {\Phi }_{01}^{(1)}(\omega -{k^{\prime}}_{1}) \, \end{equation} and \begin{equation} {\cal L}_{11}^{(2)}(\omega ;{\bf S} = \{k_{1}^{\prime },k_{1}\}) = g({k^{\prime}}_{1})g^{\ast }(k_{1}) {\Phi }_{11}^{(1)}(\omega -k_{1}) {\ \Phi}_{00}^{(0)}(\omega -k_{1}-{k^{\prime }}_{1}) {\Phi}_{11}^{(1)}(\omega- {k^{\prime }}_{1}) \, . \end{equation} The total propagator is hence \begin{eqnarray} \nonumber G_{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1}) &=& \Phi_{11}^{(1)}(\omega-k_{1})\left\{ \frac{{}}{{}} \delta (k_{1}- {k_{1}}^{\prime }) +g({k^{\prime }}_{1})g^{\ast }(k_{1}) \Phi_{11}^{(1)}(\omega -{\ k^{\prime}}_{1})\right. \\ \nonumber && \times \left. \left[ \frac{1}{\omega -k_{1}-{k_{1}}^{\prime }}+\frac{\lambda }{ (\omega -k_{1}-k_{\rm c}+i\kappa _{\rm c})(\omega -{k_{1}}^{\prime }-k_{\rm c}+i\kappa _{\rm c})}{\Phi }_{11}^{(2)}(\omega )\right] \right\} \, . 
\\ \label{G211} \end{eqnarray} The other three propagators of the same excitation number can be evaluated similarly. However, as mentioned in Sec.~\ref{first_order}, these propagators can be obtained immediately from their relations with $G_{11}^{(2)}$. For example, the propagator $G_{01}^{(2)}(\omega ;k_{1}k_{2},{k^{\prime }}_{1})= \langle 0;k_{1},k_{2}|{(\omega - H)}^{-1}|1;{k^{\prime }}_{1}\rangle$ is given by (see Fig.~\ref{second-order}~(a)) \begin{equation} \label{G201_211} G_{01}^{(2)}(\omega ;k_{1}k_{2},{k^{\prime }}_{1}) = \frac{1}{\omega -k_{1}-k_{2}} {\large [} g^{\ast }(k_{1})G_{11}^{(2)}(\omega ;k_{2},{k^{\prime }}_{1})+g^{\ast }(k_{2})G_{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1}) {\large ]} \,. \end{equation} Likewise, we can obtain the remaining two propagators with $N=2$ from Figs.~\ref{second-order}~(b) and (c), and the results are stated below: \begin{eqnarray} \nonumber G_{10}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1}{k^{\prime}}_{2}) &=&\langle 1;k_{1}|\frac{1}{\omega -H} |0;{k^{\prime}}_{1},{k^{\prime }}_{2}\rangle\\ &=&\frac{1}{\omega -{k^{\prime }}_{1}-{k^{\prime }}_{2}} {\large [} g({k^{\prime }}_{1})G_{11}^{(2)}(\omega ;k_{1},{k^{\prime}}_{2}) +g({k^{\prime }}_{2})G_{11}^{(2)}(\omega ;k_{1},{k^{\prime }}_{1}) {\large ]} \, . \\ \nonumber G_{00}^{(2)}(\omega ;k_{1}k_{2},{k^{\prime}}_{1}{k^{\prime }}_{2}) &=&\langle 0;k_{1},k_{2}|\frac{1}{\omega -H}|0;{k^{\prime }}_{1},{k^{\prime }} _{2}\rangle \\ \nonumber &=&\frac{1}{\omega -{k^{\prime }}_{1}-{k^{\prime }}_{2}} {\large [} \delta({k^{\prime }}_{1}-k_{1}) \delta ({k^{\prime }}_{2}-k_{2})+\delta ({k^{\prime }}_{1}-k_{2}) \delta ({k^{\prime }}_{2}-k_{1}) {\large ]} \\ \nonumber &&+\frac{1}{\omega -{k^{\prime }}_{1}-{k^{\prime }}_{2}}{\large [} g({k^{\prime }}_{1})G_{01}^{(2)}(\omega ;k_{1}k_{2},{k^{\prime }}_{2}) +g({k^{\prime }}_{2})G_{01}^{(2)}(\omega ;k_{1}k_{2}, {k^{\prime }}_{1}){\large]} \,. 
\\ \end{eqnarray} \section{Application: Single-atom Single-photon Scattering} As an application of our method, we study the scattering of a single-photon wave packet by an excited two-level atom inside the cavity. In particular, we investigate how the spectral width of the incident photon affects the outcome of stimulated emission process. To begin, we consider the incident photon initially prepared in the ``+" modes as defined in Eq.~(\ref{ak+}). Hence the initial state is given by \begin{equation} | \psi(t=0) \rangle = \int dk' C(k')a_{k'} ^{\dag} |1;\phi \rangle \, , \end{equation} where $C(k')$ is the photon amplitude. At a later time $t$, the state becomes \begin{equation} | \psi(t) \rangle = \int dk B(k,t) a_k ^{\dag} | 1; \phi \rangle + {1 \over \sqrt{2}} \int\!\!\!\int dk_1 \, dk_2 C(k_1,k_2,t) a_{k_1} ^{\dag} a_{k_2} ^{\dag}| 0; \phi \rangle \, , \end{equation} where the two-photon amplitude $C(k_1,k_2)=C(k_2,k_1)$ satisfies the normalization condition, \begin{equation} \int \!\!\! \int \left| C(k_1,k_2) \right|^2 dk_1 \, dk_2 = 1 \, . \end{equation} For simplicity, we will only consider the resonance case: $k_{\rm c}=\omega_{A}$. The long time state $| \psi(t \rightarrow \infty)\rangle$ is determined by the asymptotic behavior of $C(k_1,k_2,t)$. Utilizing the propagator obtained in Eq.~(\ref{G201_211}), we have \begin{equation} \lim_{t\rightarrow \infty} C(k_1,k_2,t) = \lim_{t \rightarrow \infty} \frac{i}{2 \pi} \int d\omega e^{-i \omega t} \int dk^{\prime} G^{(2)}_{10}(\omega;k_1 k_2, k^{\prime}) C(k^{\prime}) \, . 
\end{equation} The explicit form is given by \begin{eqnarray} \nonumber C(k_1,k_2,t \rightarrow \infty) &\rightarrow& \frac{1}{\sqrt{2}} e^{-i(k_1 + k_2)t} \bigg\{ g^{\ast}(k_1) \Phi^{(1)}_{11}(k_1) C(k_2) +g^{\ast}(k_1)g^{\ast}(k_2) \\ \nonumber && \times \bigg[ \Phi^{(1)}_{11}(k_1) I_1(k_1,k_2)+\frac{\lambda}{k_1-k_{\rm c}+ i \kappa_{\rm c}} \Phi^{(1)}_{11}(k_1) \Phi^{(2)}_{11}(k_1+k_2) I_2(k_1,k_2) \bigg] \bigg\} \\ \label{Ck1k2} && + \{ k_1 \leftrightarrow k_2 \} \, , \end{eqnarray} where \begin{eqnarray} \label{I1} I_1(k_1,k_2) &=& \lim_{\delta \rightarrow 0^+} \int_{-\infty}^{\infty} \frac{g(k') \Phi^{(1)}_{11}(k_1 + k_2 -k')}{k_1 - k'+i \delta} C(k')dk' \, , \\ \label{I2} I_2(k_1,k_2) &=& \int_{-\infty}^{\infty} \frac{g(k')}{k_1+k_2-k'-k_{\rm c}+i\kappa_{\rm c}}\Phi^{(1)}_{11}(k_1+k_2-k')C(k')dk' \, , \end{eqnarray} and $\{ k_1 \leftrightarrow k_2 \}$ denotes the previous expression with $k_1$ and $k_2$ interchanged. It should be noted that the contour of the $I_1$-integration should be closed in the lower half-plane. The main advantage of our formalism is that it can handle any state of the incident photon. As an example, we consider that $C(k')$ is a lorentzian with a peak frequency equal to the cavity resonance frequency, i.e., \begin{equation} \label{input_no_gk} C(k')=\sqrt{\frac{\kappa_{\rm in}}{\pi}} \frac{1}{k'-k_{\rm c}+i\kappa_{\rm in}} \, . \end{equation} Here the width $\kappa_{\rm in}$ is the spectral width of the incident photon. The pole is located in the lower-half-plane in order to ensure that the atom can only ``feel" the photon for $t \ge 0$. In Fig.~\ref{joint-count-widths} we show the contour-plot of $|C(k_1,k_2)|^2$ for $\lambda=0.1\kappa_{\rm c}^2$, with various spectral widths of incident photons. When $\kappa_{\rm in} \gg \gamma_{\rm sp} $ (Fig.~\ref{joint-count-widths}~(a)), the input photon has a very short pulse duration compared with the decay time of the atom. 
Therefore the incident photon is incapable of having sufficient interactions with the atom. As a result, the output state is approximately a direct product state of the input photon and the spontaneously-decayed photon of the atom, which corresponds to a ``cross'' shape in $|C(k_1,k_2)|^2$. A similar effect can be seen if the incident photon has a narrow width such that $\kappa_{\rm in} \ll\gamma_{\rm sp}$ (Fig.~\ref{joint-count-widths}~(d)). In this case, the input photon has a very long duration and so it participates in the interaction mainly after the atom has reached the ground state, causing no interference with the photon emitted from spontaneous decay. However, interesting features show up when $\kappa_{\rm in}$ is of the same order as $\gamma_{\rm sp}$ (Figs.~\ref{joint-count-widths}~(b) and (c)). We see that the final two-photon amplitude is drastically different. For example, in Fig.~\ref{joint-count-widths}~(b), there is an unexpected dip at the center and peaks at $\Delta k\approx \gamma_{\rm sp}$. In other words, although the frequencies of the input photon and the photon emitted in spontaneous decay both peak at $k_{\rm c}$, it is very unlikely to have two photons with frequencies around $k_{\rm c}$ in the output. Instead, the peak frequency has been shifted to $\Delta k_1, \Delta k_2 \approx \lambda/\kappa_{\rm c}$. To understand the interference effects shown in Figs.~\ref{joint-count-widths}~(b) and (c), we identify the contributions of relevant diagrams associated with the propagator. Fig.~\ref{joint-count-paths} (a) shows the contributions solely from the unlinked diagram in which the incident photon does not participate in the interactions at all. Obviously, the corresponding two-photon amplitude disagrees with the exact one. However, a much better agreement can be achieved if we include just the lowest order linked diagram given in Fig.~\ref{joint-count-paths} (b). 
The interference between the linked diagram and unlinked diagram produces a two-photon amplitude that is almost the same as the exact one (Fig.~\ref{joint-count-paths}(c)). \section{Conclusion} In this paper, we have developed a diagrammatic scheme to construct the propagator governing the interaction between an atom and photons in a leaky cavity. Under the assumption that the frequency dependence of atom-field coupling is a Lorentzian, we found that the perturbation series is summable and so an exact solution can be obtained for any excitation number $N$. The propagator provides an analytical tool to investigate the cavity QED effect for the photon scattering problem. In particular, the quantum state of output photons in continuous modes can be determined explicitly. Our results are illustrated by the derivation of propagators with excitation number $N=2$, which are then applied to study the scattering of a single-photon wave packet from an excited atom. We found that the spectral width of the incident photon can significantly modify the final two-photon amplitudes. This occurs when $\kappa_{\rm in}$ matches the cavity-modified atom decay rate. Our calculations show that the two output photons are entangled in the sense that their spectrum displays nontrivial correlation. With the aid of Feynman diagrams, we have identified the essential processes causing the interference. However, more detailed investigations are needed for a thorough understanding of the rich features in cavity-QED-assisted photon scattering problems. \acknowledgments Our work is supported in part by the Hong Kong Research Grants Council (grant No: CUHK4282/00P) and a direct grant (Project ID: 2060150) from the Chinese University of Hong Kong. 
\appendix \section{Details of the derivation of quasi-mode propagators} The main idea of the evaluation of the propagators is to move all the annihilation and creation operators to the right and left sides of the perturbation series, employing the commutation relations, followed by noticing the fact that $a_{k}|\phi\rangle = \langle \phi | a_{k}^{\dag}=0$ and $\sigma _{-}^{2}=\sigma _{+}^{2}=0$. The commutation relations between $a_{\rm c}$, $a_{\rm c}^{\dag }$ with $H_0$ and $V$ are: \begin{eqnarray} \label{ac_dagger_V} \left[a_{\rm c}^{\dag },V\right] &=&-\sqrt{\lambda }\sigma _{+}\,, \\ \label{ac_V} \left[ a_{\rm c},V\right] &=&\sqrt{\lambda }\sigma _{-}\,, \\ \label{ac_dagger_H0} \frac{1}{\omega -H_{0}}a_{\rm c}^{\dag } &=&\int_{-\infty }^{+\infty}dk\,g^{\ast }(k)\,a_{k}^{\dag }\frac{1}{\omega -k-H_{0}}\,, \\ \label{ac_H0} a_{\rm c}\frac{1}{\omega -H_{0}} &=&\int_{-\infty }^{+\infty }dk\, g(k)\frac{1}{\omega -k-H_{0}}a_{k} \, . \end{eqnarray} \subsubsection{${\Phi}^{(N)}_{00} (\omega)$} Consider the quasi-mode propagator \begin{equation} \Phi _{00}^{(N)}(\omega )=\langle 0;N_{\rm c}|\frac{1}{\omega -H}|0;N_{\rm c}\rangle\,. \end{equation} The atom is initially in its ground state with $N$ photons in the quasi-mode. The propagator gives the amplitude for the system to finally remain in the same state. The interaction-free part can be evaluated by considering Eq.~(\ref{ac_dagger_H0}), which gives \begin{eqnarray} \nonumber \langle 0;N_{\rm c}|\frac{1}{\omega -H_{0}}|0;N_{\rm c}\rangle &=&\frac{1}{N!\lambda ^{N}}{\Huge \langle }0;\phi{\Huge |} \prod_{i=1}^{N}\int g({k^{\prime }}_{i})\, d{k^{\prime }}_{i}\,a_{{k^{\prime}}_{i}}^{\dag } \int g^{\ast }(k_{i})\,dk_{i}\,a_{k_{i}} {\Huge |}0;\phi{\Huge \rangle}\frac{1}{\omega -\sum_{i=1}^{N}k_{i}} \\ &=&\frac{1}{\omega -N(k_{\rm c}-i\kappa _{\rm c})}\,. \end{eqnarray} Paths with an odd number of interactions have zero contributions. 
And from the commutation relations, it is readily shown that \begin{eqnarray} \nonumber V\frac{1}{\omega -H_{0}}|0;N_{\rm c}\rangle &=&\frac{1}{\lambda ^{N/2}\sqrt{N!}}\prod_{j\neq i}\int dk_{j}\,g^{\ast }(k_{j})\,a_{k_{j}}^{\dag }\sigma _{+}|0;\phi\rangle \sum_{i}\frac{\lambda } {\omega -k_{\rm c}+i\kappa _{\rm c}-\sum_{j\neq i}k_{j}}\,, \\ \end{eqnarray} and similarly \begin{eqnarray} \left( V\frac{1}{\omega -H_{0}}\right) ^{2}|0;N_{\rm c}\rangle &=&\frac{1}{\lambda ^{N/2}\sqrt{N!}}\sum_{i}\left( \prod_{j\neq i}\int dk_{j}\,g^{\ast }(k_{j})a_{k_{j}}^{\dag }\right) \int dk\,g^{\ast }(k)a_{k}^{\dag }|0;\phi\rangle \nonumber \\ &&\times \frac{\lambda } {(\omega -\sum_{j\neq i}k_{j}-k_{\rm c}+i\kappa_{\rm c}) (\omega-\sum_{j\neq i}k_{j}-\omega _A)} \nonumber \\ &=&\frac{N\lambda }{\lambda ^{N/2}\sqrt{N!}}\left( \prod_{j=1}^{N}\int dk_{j}\,g^{\ast }(k_{j})a_{k_{j}}^{\dag }|0;\phi\rangle \right) \nonumber \\ && \times \frac{1}{(\omega -k_{\rm c}+i\kappa _{\rm c}-\sum_{j=1}^{N-1}k_{j})(\omega -\omega _A-\sum_{j=1}^{N-1}k_{j})}\,. \end{eqnarray} Hence \begin{eqnarray} \nonumber && \langle 0;N_{\rm c}| \frac{1}{\omega-H_0}V\frac{1}{\omega-H_0}V \frac{1}{\omega-H_0}|0;N_{\rm c}\rangle \\ &=& \frac{N \lambda}{\left[\omega-N (k_{\rm c} - i \kappa_{\rm c})\right]^2\left[\omega - \omega_A-(N-1)(k_{\rm c} - i \kappa_{\rm c})\right]} \, . \end{eqnarray} It can be shown that the contributions of different paths with an even number $i$ of interactions are \begin{eqnarray} \nonumber && \langle 0; N_{\rm c}| \frac{1}{\omega-H_0} \left( \frac{V}{\omega-H_0}\right)^{i} | 0;N_{\rm c} \rangle \\ &=& \frac{1}{\left[\omega - N (k_{\rm c} - i \kappa_{\rm c})\right]^{i/2+1}} \left[\frac{N \lambda} {\omega - \omega_A - (N-1)(k_{\rm c} - i \kappa_{\rm c})}\right]^{i/2} \, . 
\end{eqnarray} Hence \begin{eqnarray} {\Phi}^{(N)}_{00}(\omega) &=& \frac{1}{\omega - N (k_{\rm c} -i \kappa_{\rm c})} \left( 1+\zeta+\zeta^2 + \cdots \right) \nonumber \\ &=& \frac{1}{\left[ \omega - N (k_{\rm c} -i \kappa_{\rm c})\right](1 -\zeta)} \, , \end{eqnarray} where \begin{equation} \zeta=\frac{N\lambda}{\left[\omega - N (k_{\rm c} - i \kappa_{\rm c})\right] \left[ \omega - \omega_A - (N-1) (k_{\rm c} - i \kappa_{\rm c})\right] } \, . \end{equation} Similar to Eq.~(\ref{Ge0e0}), the propagator can be written as a sum of two Lorentzians, \begin{eqnarray} \label{GgNgN_2} {\Phi }_{00}^{(N)}(\omega ) &=&\frac{\omega -\omega _A-(N-1)(k_{\rm c}-i\kappa _{\rm c})}{\left[\omega -N(k_{\rm c}-i\kappa _{\rm c})\right] \left[\omega -\omega _A-(N-1)(k_{\rm c}-i\kappa _{\rm c})\right]-N\lambda } \nonumber \\ &=&\frac{1-A_{+}^{(N)}}{\omega -\Omega _{+}^{(N)}}+\frac{1-A_{-}^{(N)}}{ \omega -\Omega _{-}^{(N)}}\,, \end{eqnarray} where \begin{eqnarray} A_{\pm }^{(N)} &=&\frac{1}{2}\left[ 1\pm \frac{(\omega _A-k_{\rm c}+i\kappa _{\rm c})/2}{\sqrt{(\omega _A-k_{\rm c}+i\kappa _{\rm c})^{2}/4+N\lambda }}\right] \, , \label{Omega_gN} \\ \Omega _{\pm }^{(N)} &=&\frac{\omega _A}{2}+\left( N-\frac{1}{2}\right) \left( k_{\rm c}-i\kappa _{\rm c}\right) \pm \sqrt{\left( \frac{\omega _A-k_{\rm c}+i\kappa _{\rm c}}{2}\right) ^{2}+N\lambda }\,. \nonumber \\ && \end{eqnarray} By substituting $N=0$, we have $A^{(0)}_{+} = 1$, $A^{(0)}_{-} = 0$, $\Omega^{(0)}_{+} = \omega_A - k_{\rm c} + i \kappa_{\rm c}$, $\Omega^{(0)}_{-} = 0$, and the propagator reduces to \begin{equation} {\Phi}^{(0)}_{00} (\omega)= \frac{1}{\omega} \, . 
\end{equation} \subsubsection{${\Phi}^{(N)}_{01} (\omega)$} The propagator \begin{equation} {\Phi}^{(N)}_{01} (\omega) = \langle 0;N_{\rm c} | \frac{1}{\omega - H} | 1;(N-1)_{\rm c}\rangle \end{equation} can be evaluated by the usual commutation relations, and noticing that \begin{eqnarray} \nonumber && V\frac{1}{\omega - H_0}\frac{1}{\sqrt{(N-1)!}} (a_{\rm c}^{\dag})^{N-1}\sigma_+ |0;\phi \rangle \\ &=& \frac{1}{\sqrt{N!}\lambda^{N/2}} \left(\prod_{i=1}^{N}\int dk_i g^{\ast}(k_i) a_{k_i}^{\dag}| 0;\phi \rangle \right) \frac{\sqrt{N\lambda}}{\omega - \omega_A - \sum_{j=1}^{N-1} k_j} \, . \end{eqnarray} It can be proved that \begin{eqnarray} {\Phi}^{(N)}_{01}(\omega) &=& \frac{\sqrt{N\lambda}} {\omega -\omega_A -(N-1) (k_{\rm c} - i \kappa_{\rm c})} {\Phi}^{(N)}_{00}(\omega) \nonumber \\ &=& \frac{\sqrt{N\lambda}}{\Omega^{(N)}_{+}-\Omega^{(N)}_{-}} \left[ \frac{1}{\omega - \Omega^{(N)}_{+}} -\frac{1}{\omega - \Omega^{(N)}_{-}} \right] \, . \end{eqnarray} \subsubsection{${\Phi}^{(N)}_{10}(\omega)$} The calculation of \begin{equation} {\Phi}^{(N)}_{10}(\omega) = \langle 1;(N-1)_{\rm c}| \frac{1}{\omega - H} | 0;N_{\rm c} \rangle \end{equation} is similar to that of $\Phi^{(N)}_{01} (\omega)$. In fact, the symmetries in the commutation relations result in the relation \begin{equation} \label{Geg_Gge} {\Phi}^{(N)}_{10}(\omega)={\Phi}^{(N)}_{01} (\omega) \, . \end{equation} The above equation can also be obtained immediately by replacing $g(k)$ with $g^{\ast}(k)$, which does not change the expression in this case since the quasi-mode propagators do not contain any free $g(k)$ or $g^{\ast}(k)$ terms. 
\subsubsection{${\Phi}^{(N)}_{11} (\omega)$} The propagator \begin{equation} {\Phi}^{(N)}_{11} (\omega) = \langle 1;(N-1)_{\rm c}| \frac{1}{\omega - H} | 1;(N-1)_{\rm c}\rangle \end{equation} can be evaluated by similar derivations, yielding \begin{equation} \langle 1;(N-1)_{\rm c} | \frac{1}{\omega - H_0} | 1;(N-1)_{\rm c} \rangle = \frac{1}{\omega - \omega_A - (N-1) (k_{\rm c} -i \kappa_{\rm c})} \, , \end{equation} and for an even integer $i$, \begin{eqnarray} \nonumber && \langle 1; (N-1)_{\rm c}| \frac{1}{\omega-H_0} \left(\frac{V}{\omega -H_0} \right)^{i} | 1; (N-1)_{\rm c} \rangle \\ &=& \frac{N\lambda} {\left[\omega - \omega_A - (N-1) (k_{\rm c} - i \kappa_{\rm c})\right]^2} \langle 0;N_{\rm c}| \frac{1}{\omega - H_0} \left( \frac{V}{\omega - H_0} \right)^{i-2} |0;N_{\rm c} \rangle \, . \end{eqnarray} Similarly, all paths with an odd number of interactions have null contributions to the propagator. Hence, we have \begin{eqnarray} \nonumber {\Phi}^{(N)}_{11} (\omega) \nonumber &=& \frac{1}{\omega - \omega_A - (N-1) (k_{\rm c} - i \kappa_{\rm c})} \\ \nonumber && \times \left\{ 1 + \frac{N\lambda} {\left[\omega - N (k_{\rm c} - i\kappa_{\rm c})\right] \left[\omega -\omega_A - (N-1) (k_{\rm c} - i \kappa_{\rm c})\right] - N\lambda}\right\} \\ \nonumber &=& \frac{\omega - N(k_{\rm c} - i \kappa_{\rm c})} {(\omega-\Omega^{(N)}_{+}) (\omega - \Omega^{(N)}_{-})} \\ \label{GeNeN_2} &=& \frac{A^{(N)}_{+}}{\omega - \Omega^{(N)}_{+}} + \frac{A^{(N)}_{-}}{\omega - \Omega^{(N)}_{-}} \, . \end{eqnarray} A very simple relation exists between the $e$(excited-state)$\rightarrow e$ and $g$(ground-state)$\rightarrow g$ propagators, \begin{equation} \label{Ggg_Gee} {\Phi}^{(N)}_{11} (\omega) = \frac{\omega - N (k_{\rm c} - i \kappa_{\rm c})}{\omega - \omega_A - (N-1) (k_{\rm c} - i \kappa_{\rm c})} {\Phi}^{(N)}_{00}(\omega) \, . \end{equation} It can be verified that ${\Phi}^{(N)}_{11}(\omega)$ reduces to that of Eq.~(\ref{Ge0e0}) if we take $N=1$. 
\begin{references} \bibitem{JC} E. T. Jaynes, and F. W. Cummings, Proc. IEEE {\bf 51}, 89 (1963). \bibitem{JC_review} For reviews, see for example, H. I. Yoo, and J. H. Eberly, Phys. Rep. {\bf 118}, 239 (1985); B. W. Shore, and P. L. Knight, J. Mod. Opt. {\bf 40}, 1195 (1993). \bibitem{Law1} C. K. Law, and J. H. Eberly, Phys. Rev. Lett. {\bf 76}, 1055 (1996). \bibitem{Knight} A. Beige, D. Braun, B. Tregenna, and P. L. Knight, Phys. Rev. Lett. {\bf 85} 1762 (2000). \bibitem{Puri} G. S. Agarwal and R. R. Puri, Phys. Rev. A {\bf 33}, 1757 (1986); R. R. Puri and G. S. Agarwal, {\it ibid}. {\bf 33}, 3610 (1986). \bibitem{Gardiner} C. W. Gardiner, {\it Quantum Noise} (Springer-Verlag, Berlin, 1991). \bibitem{Gea} J. Gea-Banacloche, Phys. Rev. A {\bf 47}, 2221 (1993). \bibitem{Dae} B. Daeubler, H. Risken, and L. Schoendoff, Phys. Rev. A {\bf 48}, 3955 (1993). \bibitem{Won} A. J. van Wonderen, Phys. Rev. A {\bf 56}, 3116 (1997). \bibitem{Loudon} M. Ley and R. Loudon, J. Mod. Opt. {\bf 34}, 227 (1987). \bibitem{Leung} H. M. Lai, P. T. Leung, and K. Young, Phys. Rev. A {\bf 37}, 1597 (1988). \bibitem{Scully} J. Gea-Banacloche, N. Lu, L. M. Pedrotti, S. Prasad, M. O. Scully, and K. W{\' o}dkiewicz, Phys. Rev. A {\bf 41}, 369 (1990). \bibitem{Kupi} D. Kupiszewska and J. Mostowski, Phys. Rev. A {\bf 41}, 4636 (1990). \bibitem{Mollow} B. R. Mollow, Phys. Rev. A {\bf 12}, 1919 (1975). \bibitem{Lewenstein} M. Lewenstein, J. Zakrzewski, and T. W. Mossberg, Phys. Rev. A {\bf 38}, 808 (1988). \bibitem{Uji} X. P. Feng, and K. Ujihara, Phys. Rev. A {\bf 41}, 2668 (1990). \bibitem{Gar} B. M. Garraway, Phys. Rev. A {\bf 55}, 2290 (1997); {\bf 55}, 4636 (1997); B. J. Dalton, S. M. Barnett, and B. M. Garraway, Phys. Rev. A {\bf 64}, 053813 (2001). \bibitem{Law2} C. K. Law, T. W. Chen and P. T. Leung, Phys. Rev. A {\bf 61}, 023808 (2000). 
\end{references} \begin{figure} \caption{A sketch of the system: A two-sided Fabry-Perot cavity with a two-level atom inside and partially reflecting mirrors at both ends. We only consider identical mirrors and the atom being located at the center.} \label{sketch} \end{figure} \begin{figure} \caption{The left-illuminating modes, with illumination from the left, and transmitted waves only at the right. This set of modes is labeled by the subscript L and the positive wave-number $k$. Together with the right-illuminating modes, these continuous field modes form a complete and orthogonal set of the system.} \label{L_mode_fig} \end{figure} \begin{figure} \caption{Basic components of the Feynman diagrams: (a) The total propagator from initial state $\psi_i$ to final state $\psi_f$. (b) Free propagation of a ground-state atom. (c) Free propagation of an excited-state atom. (d) Free propagation of a $k$-th mode photon. (e) An excited-state atom decays into the ground state and emits a $k$-th mode photon. (f) A ground-state atom is excited by a $k$-th mode photon and jumps to the excited state.} \label{diag_sym} \end{figure} \begin{figure} \caption{Feynman diagrams for the quasi-mode propagator $\Phi^{(1)}_{11}(\omega)$.} \label{Gee_fig} \end{figure} \begin{figure} \caption{Relationship between the propagators of single excitation. The $G$'s appearing in different components are different because they are attached with different input and output legs.} \label{first-order} \end{figure} \begin{figure} \caption{Feynman diagrams for $G^{(2)}_{11}(\omega;k_1,{k^{\prime}}_1)$.} \label{Gekek} \end{figure} \begin{figure} \caption{Relationship between the propagators with two excitations $N=2$.} \label{second-order} \end{figure} \begin{figure} \caption{Contour-plot of $\left| C(k_1,k_2) \right|^2$ for $\lambda=0.1 \kappa_{\rm c}^2$, with various spectral widths of incident photons.} \label{joint-count-widths} \end{figure} \begin{figure} \caption{The contributions of different Feynman diagrams for the case $\kappa_{\rm in} \sim \gamma_{\rm sp}$.} \label{joint-count-paths} \end{figure} \end{document}
\begin{document} \title{Quantum Support Vector Machine without Iteration} \author{Rui~Zhang, Jian~Wang*, Nan~Jiang**, and~Zichen~Wang, \IEEEcompsocitemizethanks{\IEEEcompsocthanksitem Rui Zhang and Jian Wang are with the Beijing Key Laboratory of Security and Privacy in Intelligent Transportation, Beijing Jiaotong University, Beijing 100044, China. \protect\\ E-mail: [email protected] \IEEEcompsocthanksitem Nan Jiang and Zichen Wang are with the Faculty of Information Technology, Beijing University of Technology, Beijing 100124, China. \protect\\ E-mail: [email protected].} \thanks{Manuscript received April , ; revised August , .}} \markboth{Journal of \LaTeX\ Class Files,~Vol.~, No.~, ~} {Shell \MakeLowercase{\textit{et al.}}: Quantum Support Vector Machine without Iteration} \IEEEtitleabstractindextext{ \begin{abstract} Quantum algorithms can enhance machine learning in different aspects. In 2014, Rebentrost \emph{et~al.} constructed a least squares quantum support vector machine (LS-QSVM), in which the Swap Test plays a crucial role in realizing the classification. However, as the output states of a previous test cannot be reused for a new test in the Swap Test, the quantum algorithm LS-QSVM has to be repeated in preparing qubits, manipulating operations, and carrying out the measurement. This paper proposes a QSVM based on the generalized quantum amplitude estimation (AE-QSVM) which gets rid of the constraint of repetitive processes and saves the quantum resources. At first, AE-QSVM is trained by using the quantum singular value decomposition. Then, a query sample is classified by using the generalized quantum amplitude estimation in which high accuracy can be achieved by adding auxiliary qubits instead of repeating the algorithm. 
The complexity of AE-QSVM is reduced to $O(\kappa^{3}\varepsilon^{-3}(log(mn)+1))$ with an accuracy $\varepsilon$, where $m$ is the number of training vectors, $n$ is the dimension of the feature space, and $\kappa$ is the condition number. Experiments demonstrate that AE-QSVM is advantageous in terms of the training matrix, the number of iterations, space complexity, and time complexity. \end{abstract} \begin{IEEEkeywords} Quantum support vector machine, quantum amplitude estimation, quantum singular value decomposition, quantum inner estimation, quantum computing. \end{IEEEkeywords}} \maketitle \IEEEdisplaynontitleabstractindextext \IEEEpeerreviewmaketitle \IEEEraisesectionheading{\section{Introduction}\label{sec:introduction}} \IEEEPARstart{I}{n} recent years, machine learning techniques have become powerful tools for finding patterns in data, for instance image recognition \cite{Yeung2021Image}, automated driving cars \cite{Zhang2021DDE}, network security \cite{He2021Research}, \emph{etc}. The development of machine learning has rapidly increased the demand for computing power in hardware. As the spacing of transistors approaches the physical limit of process manufacturing, improving the operating capability of the classical computer has become a bottleneck for researchers. More powerful ways of processing information are needed as the amount of data generated in our society is growing. Quantum computation is a promising new paradigm for performing fast computations, with the experimental demonstrations of quantum supremacy marked as the latest milestone \cite{Nielsen2000Quantum, Arute2019Supplementary, Zhong2021Quantum, Xin2020IEEEVPQC}. The study of quantum computation originated in the 1980s. In 1982, Feynman \cite{Feynman1982Simulating} was the first to propose the concept of quantum computation.
After that the integer factoring problem \cite{Shor1994Algorithms} and the database search algorithm \cite{Grover1996fast} were essential evidences supporting the power of quantum computation. During the first decade of the 21st century, quantum computation appeared in various areas of computer science such as cryptography \cite{Gisin2001Quantum,Salim2020Enhancing,Kulik2022Experimental}, signal processing \cite{Zhou2015Quantum, Jiang2016Quantum, Zhang2022Boundary,Li2018Quantum}, and information theory \cite{Bennett2008Quantum}. Quantum machine learning (QML) is an emerging research area that attempts to harness the power of quantum information processing to obtain speedups for classical machine learning tasks. Despite the fact that quantum machine learning is a recently surging field, it already encompasses a rich set of quantum techniques and approaches, for example linear regression \cite{Schuld2016Prediction,Wang2017Quantum,Yu2021An}, clustering analysis \cite{Esma2013Quantum}, dimensionality reduction \cite{Romero2017Quantum,Yu2019Quantum}, data classification \cite{Cong2016Quantum,Schuld2017Implementing,2018Quantum,Dang2018Image}, and neural networks \cite{Li2020Quantumneural,Zhao2019Building,Joshi2021Entanglement}. Besides, quantum machine learning algorithms have been applied to channel discrimination \cite{Xin2020IEEEVPQC}, vehicle classification \cite{Yu2008Quantum}, and image classification \cite{Cavallaro2020Approaching}. Support vector machine (SVM) is a supervised machine learning technique for solving classification. In recent years, there have been many studies about quantum SVM (QSVM) \cite{Allcock2020A, Kerenidis2021Quantum, Li2015Experimental, Havenstein2018Comparisons, Ye2020Quantum, Willsch2020Support}, it provides quadratic speedup and was first proposed by Anguita $et~al.$ \cite{Anguita2003Quantum}. 
Moreover, the least squares QSVM (LS-QSVM) \cite{Rebentrost2014Quantum} given by Rebentrost $et~al.$ provides an exponential speedup compared with the classical algorithm. In LS-QSVM and its descendants, the classification was realized by using the Swap Test \cite{Buhrman2001Quantum,Garcia-Escartin2013swap}. The output state is obtained by measuring the ancillary qubit in the Swap Test. As shown in Fig. \ref{fig1}, $|0\rangle$, $|\rho_{1}\rangle$, and $|\rho_{2}\rangle$ are the input states. For the outcome $|0\rangle$, we have an entangled state $\frac{|\rho_{1}\rangle|\rho_{2}\rangle+|\rho_{2}\rangle|\rho_{1}\rangle}{\sqrt{2}}$ and for the outcome $|1\rangle$, $\frac{|\rho_{1}\rangle|\rho_{2}\rangle-|\rho_{2}\rangle|\rho_{1}\rangle}{\sqrt{2}}$. In both cases, it is impossible to completely separate the input states for a second time. That is to say, the Swap Test is destructive \cite{Garcia-Escartin2013swap}. In LS-QSVM, however, the success probability $P$ can be obtained to an accuracy $\varepsilon$ by iterating $O(P(1-P)/\varepsilon^{2})$ times of the Swap Test \cite{Rebentrost2014Quantum}. Hence, to achieve the accuracy $\varepsilon$, the complete algorithm must be carried out repeatedly, thus resulting in a high consumption of qubits and time. \begin{figure} \caption{The quantum circuit of the Swap Test and its evolution} \label{fig1} \end{figure} To be specific, as shown in Fig. \ref{fig2}, LS-QSVM inherits the pattern of the classical least squares SVM (LS-SVM), i.e., the sample set was initially trained to obtain the parameter in LS-QSVM and the new sample was classified by using this parameter. In cases where classical sampling algorithms require polynomial time, an exponential speedup is obtained in LS-QSVM \cite{Rebentrost2014Quantum, Bishwas2018An, Bishwas2016Big, Windridge2018Quantum, Feng2019Quantum}. However, the speedup was achieved only once in training and classification.
As analyzed above, the quantum state collapses due to the use of the Swap Test. Therefore, we have to repeatedly train the sample and carry out a classification to get the classification label with a high accuracy. \begin{figure} \caption{The comparison of LS-SVM and LS-QSVM} \label{fig2} \end{figure} In this work, we make progress on the challenge described above and propose a quantum support vector machine based on amplitude estimation (AE-QSVM). At first, the training process of AE-QSVM is realized by using the quantum singular value decomposition. Then, we present a method to calculate the inner product based on quantum amplitude estimation to classify the new sample. Compared with LS-QSVM, whose complexity is $O(\frac{\kappa^{3}\varepsilon^{-3}log(mn)+log(n)}{12\varepsilon^{2}}) $, the complexity of AE-QSVM is reduced to $O(\kappa^{3}\varepsilon^{-3}(log(mn)+1))$ with an accuracy $\varepsilon$, where $m$ is the number of training vectors, $n$ is the dimension of the feature space, and $\kappa$ is the condition number. The remainder of the paper is organized as follows: The related works are given in Section \ref{S2}. We present a general algorithm for quantum amplitude estimation in Section \ref{S3}. Section \ref{S4} presents AE-QSVM. Simulations are presented in Section \ref{S5}. In Section \ref{S6}, we summarize this paper and discuss further research for AE-QSVM. The Appendix reviews the basic concepts of quantum computation.
In LS-SVM, the problem after applying the optimization of the Lagrangian can be formulated as a linear equation: \begin{equation}\label{1} F\begin{pmatrix}b\\ \boldsymbol \alpha\end{pmatrix}= \begin{pmatrix}0&\boldsymbol 1^{\mathrm{ T }}\\ \boldsymbol 1& K+\gamma^{-1} I_{m}\end{pmatrix} \begin{pmatrix}b\\ \boldsymbol \alpha\end{pmatrix}=\begin{pmatrix}0\\ \boldsymbol y \end{pmatrix}, \end{equation} where $\boldsymbol 1=(1,\ldots,1)^{\mathrm{ T }}$, $I_{m}$ represents the $m\times m$ identity matrix, $\gamma$ is a hyperparameter describing the ratio of the Lagrangian's components, $\boldsymbol \alpha=(\alpha_{1},\ldots,\alpha_{m})$ is the vector of Lagrange multipliers, $\boldsymbol y=(y_{1},\ldots,y_{m})$ is the label of the training set, $b$ is the offset of the hyperplane, and $K$ is an $m\times m$ kernel matrix. According to Eq. (\ref{1}), for query data $\boldsymbol x\in \mathds{R}^{n}$, the classifier can be determined by the following function: \begin{equation}\label{2} f(x)=sign(\sum_{k=1}^{m}\alpha_{k}\boldsymbol x_{k}^{\mathrm{ T }}\boldsymbol x+b). \end{equation} In the quantum case, Eq. (\ref{1}) can be compactly rewritten as $F|b, \boldsymbol \alpha\rangle=|0, \boldsymbol y\rangle$. When the matrix $F$ is well conditioned and has sparsity polylogarithmic in the dimension, the quantum state $|b,\boldsymbol \alpha\rangle=F^{-1}|0, \boldsymbol y\rangle$ is produced by using the HHL algorithm \cite{Aram2009Quantum}. The desired LS-QSVM parameters can be represented as follows: \begin{equation}\label{3} |b,\boldsymbol \alpha\rangle=\frac{1}{\sqrt{C}}(b|0\rangle+\sum_{k=1}^{M}\alpha_{k}|k\rangle), \end{equation} where $C=b^{2}+\sum_{k=1}^{M}\alpha_{k}^{2}$.
For a classification task, the training data oracle is constructed as follows: \begin{equation}\label{11} |\tilde{\boldsymbol\mu}\rangle=\frac{1}{\sqrt{N_{\tilde{\boldsymbol\mu}}}}(b|0\rangle|0\rangle+ \sum_{k=1}^{m}\alpha_{k}|\boldsymbol x_{k}||k\rangle|\boldsymbol x_{k}\rangle), \end{equation} with $N_{\tilde{\mu}}=b^{2}+\sum_{k=1}^{m}\alpha_{k}^{2}|\boldsymbol x_{k}|^{2}$. In addition, the query state is constructed as follows: \begin{equation}\label{12} |\tilde{\boldsymbol z}\rangle=\frac{1}{\sqrt{N_{\tilde{\boldsymbol z}}}}(|0\rangle|0\rangle+\sum_{k=1}^{m}|\boldsymbol x||k\rangle|\boldsymbol x\rangle|0\rangle), \end{equation} with $N_{\tilde{\boldsymbol z}}=m|\boldsymbol x|^{2}+1$. For the physical implementation of the inner product, Swap Test can be utilized to project the ancillary qubit in state $1/\sqrt{2}(|0\rangle|\tilde{\boldsymbol\mu}\rangle+|1\rangle|\tilde{\boldsymbol z}\rangle)$ to state $1/\sqrt{2}(|0\rangle-|1\rangle)$. Then the success probability is $P=1/2(1-\langle\tilde{\boldsymbol\mu}|\tilde{\boldsymbol z}\rangle)$, which can be used to determine the label of $|\boldsymbol x\rangle$ according to the sign of $1/2-P$. However, LS-QSVM only supports the binary classification. Bishwas $et~al.$ handled the quantum multiclass classification problem by using all-pair technique \cite{Bishwas2018An} and one-against-all approach \cite{Bishwas2016Big}, respectively. Hou $et~al.$ \cite{Hou2020Quantum} presented semi-supervised SVM, which exhibited a quadratic speed-up over classical algorithm. Feng $et~al.$ \cite{Feng2019Quantum} present an improved QSVM model, exponentially improving the dependence on precision while keeping essentially the same dependence on other parameters. Besides, the SVM with the quantum kernel algorithm was also proposed to solve classification problems \cite{Bishwas2020Gaussian,Schuld2018Quantum}. 
\subsection{Quantum amplitude estimation} Quantum amplitude estimation (QAE) algorithm \cite{Brassard2000Quantum} is a fundamental quantum algorithm that allows a quantum computer to estimate the amplitude $U|0\rangle$ for a quantum circuit $U$. QAE \cite{Brassard2000Quantum, Vazquez2021Efficient, Grinko2021Iterative} has attracted much attention as a fundamental subroutine of a wide range of application-oriented quantum algorithms, such as the Monte Carlo integration \cite{Montanaro2015Quantum, Rebentrost2018Quantum, Miyamoto2020Reduction} and machine learning tasks \cite{Miyahara2019Quantum, Kerenidis2019q}. QAE was first introduced by Brassard $et~al.$ \cite{Brassard2000Quantum}, in which a unitary operator $\mathcal{A}_{1}$ acts on an initial state $|0\rangle^{\otimes p}$ and an ancillary $|0\rangle$ such that: \begin{equation}\label{13} \mathcal{A}_{1}|0\rangle^{\otimes p}|0\rangle=\sqrt{a_{1}}|\psi_{1}\rangle|1\rangle+\sqrt{1-a_{1}}|\psi_{0}\rangle|0\rangle. \end{equation} Let $|\varphi_{1}\rangle=\sqrt{a_{1}}|\psi_{1}\rangle|1\rangle$, $|\varphi_{0}\rangle=\sqrt{1-a_{1}}|\psi_{0}\rangle|0\rangle$, and $|\Phi\rangle=|0\rangle^{\otimes p}|0\rangle$. Then \begin{equation}\label{14} |\varphi\rangle=\mathcal{A}_{1}|\Phi\rangle=|\varphi_{1}\rangle+|\varphi_{0}\rangle. \end{equation} Following \cite{Brassard2000Quantum}, $|\varphi_{1}\rangle$ and $|\varphi_{0}\rangle$ are respectively the $p$-qubit normalized good and bad states. QAE is to estimate the probability that the measuring $|\varphi\rangle$ yields a good state, i.e., $a_{1}=\langle\varphi_{1}|\varphi_{1}\rangle$. 
The probability of measuring the good state can be amplified by applying the following unitary operator: \begin{equation}\label{15} \mathcal{Q}_{1}=-\mathcal{A}_{1}\mathcal{S}_{0}\mathcal{A}_{1}^{-1}\mathcal{S}_{\varphi_{1}}, \end{equation} where $\mathcal{A}_{1}^{-1}$ is the inverse of $\mathcal{A}_{1}$, $\mathcal{S}_{0}=I-2|0\rangle\langle0|$, and $\mathcal{S}_{\varphi_{1}}=I-2|\varphi_{1}\rangle\langle\varphi_{1}|$. QAE uses $l$ ancillary qubits initialized in equal superposition to represent the final result. Then it applies the operator $\mathcal{Q}_{1}$ controlled by the ancillary qubits. Eventually, it performs an inverse quantum Fourier transformation ($FT^{+}$) on the ancillary qubits before they are measured. Subsequently, the measured integer $y\in\{0,\ldots, L-1\}$ ($L=2^{l}$) is mapped to an angle $\tilde{\theta}_{a_{1}}=y\pi/L$. Thereafter, the resulting estimate of $a_{1}$ is defined as $\tilde{a}_{1}=sin^{2}\tilde{\theta}_{a_{1}}$. \section{The generalized quantum amplitude estimation}\label{S3} In QAE \cite{Brassard2000Quantum}, it is assumed that the initial state of the problem of interest is $|0\rangle^{\otimes p}$. In this section, we describe another method for the case where the initial state is an arbitrary state $|\Phi\rangle$. Our method is a generalization of quantum amplitude estimation (GQAE). Let $\mathcal{A}$ be a quantum algorithm that acts on the initial state $|\Phi_{1}\rangle$ and an ancillary qubit $|0\rangle$ such that: \begin{equation}\label{15.9} \mathcal{A}|\Phi_{1}\rangle|0\rangle=|\Psi_{1}\rangle+|\Psi_{0}\rangle. \end{equation} Let $|\Phi\rangle=|\Phi_{1}\rangle|0\rangle$, we have \begin{equation}\label{16} |\Psi\rangle=\mathcal{A}|\Phi\rangle=|\Psi_{1}\rangle+|\Psi_{0}\rangle. \end{equation} We will present how to estimate the probability $a$ when measuring $|\Psi\rangle$ produces a good state, where $a=\langle\Psi_{1}|\Psi_{1}\rangle$. The amplitude amplification operation Eq.
(\ref{17}) is hereby important for estimation. \begin{equation}\label{17} \mathcal{Q}=-\mathcal{A}\mathcal{S}_{\Phi}\mathcal{A}^{-1}\mathcal{S}_{\chi}, \end{equation} where $\mathcal{S}_{\chi}=I-2|\Psi_{1}\rangle\langle\Psi_{1}|$ conditionally changes the sign of the amplitudes of the good states, \begin{equation}\label{18} |x\rangle\longmapsto \begin{cases} -|x\rangle& \text{ if } x\text{~is~good~state}, \\ |x\rangle& \text{ if } x \text{~is~bad~state}, \end{cases} \end{equation} and the operator $\mathcal{S}_{\Phi}=I-2|\Phi\rangle\langle\Phi|$ changes the sign of the amplitude if and only if the state is the initial state $|\Phi\rangle$. In particular, it is the QAE proposed in \cite{Brassard2000Quantum}, if $|\Phi\rangle=|0\rangle^{\otimes s}|0\rangle$, where $s$ is the number of qubits used to represent $|\Phi_{1}\rangle$. We will show that the quantum state $|\Psi\rangle$ can still be written as a linear combination of $|\Psi_{1}\rangle$ and $|\Psi_{0}\rangle$ after performing $j$ times of operator $\mathcal{Q}$. \begin{lemma}\label{L3.1} We have that \begin{eqnarray}\label{19} \mathcal{Q}|\Psi_{1}\rangle &=& (1-2a)|\Psi_{1}\rangle-2a|\Psi_{0}\rangle, \\ \mathcal{Q}|\Psi_{0}\rangle &=& 2(1-a)|\Psi_{1}\rangle+(1-2a)|\Psi_{0}\rangle, \end{eqnarray} where $a=\langle\Psi_{1}|\Psi_{1}\rangle$. 
\end{lemma} \begin{proof} First consider the action of the operator $\mathcal{Q}$ on the vector $|\Psi_{1}\rangle $: \begin{eqnarray}\label{20} \nonumber &&\mathcal{Q}|\Psi_{1}\rangle\\ \nonumber &=& -\mathcal{A}\mathcal{S}_{\Phi}\mathcal{A}^{-1}\mathcal{S}_{\chi} |\Psi_{1}\rangle \\ \nonumber &=& \mathcal{A}\mathcal{S}_{\Phi}\mathcal{A}^{-1}|\Psi_{1}\rangle \\ \nonumber &=&\mathcal{A}(I-2|\Phi\rangle\langle\Phi|)(|\Phi\rangle-\mathcal{A}^{-1}|\Psi_{0}\rangle)\\ \nonumber &=&-\mathcal{A}|\Phi\rangle-\mathcal{A}\mathcal{A}^{-1}|\Psi_{0}\rangle+2\mathcal{A}|\Phi\rangle\langle\Phi|\mathcal{A}^{-1}|\Psi_{0}\rangle\\ \nonumber&=&-(|\Psi_{1}\rangle+|\Psi_{0}\rangle)-|\Psi_{0}\rangle+2 (|\Psi_{1}\rangle+|\Psi_{0}\rangle) [(\langle\Psi_{1}|\\ \nonumber &&+\langle\Psi_{0}|) |\Psi_{0}\rangle]\\ \nonumber&=&-|\Psi_{1}\rangle-2|\Psi_{0}\rangle+2(1-a) |\Psi_{1}\rangle+2(1-a)|\Psi_{0}\rangle\\ &=&(1-2a)|\Psi_{1}\rangle-2a|\Psi_{0}\rangle, \end{eqnarray} where $a=\langle\Psi_{1}|\Psi_{1}\rangle$. Next consider the action of the operator $\mathcal{Q}$ on the vector $|\Psi_{0}\rangle $: \begin{eqnarray}\label{21} \nonumber &&\mathcal{Q}|\Psi_{0}\rangle\\ \nonumber &=&-\mathcal{A}\mathcal{S}_{\Phi}\mathcal{A}^{-1}\mathcal{S}_{\chi}|\Psi_{0}\rangle \\ \nonumber &=&-\mathcal{A}\mathcal{S}_{\Phi}\mathcal{A}^{-1}|\Psi_{0}\rangle \\ \nonumber&=&-\mathcal{A}(I-2|\Phi\rangle\langle\Phi|)(|\Phi\rangle-\mathcal{A}^{-1}|\Psi_{1}\rangle)\\ \nonumber&=&\mathcal{A}|\Phi\rangle+\mathcal{A}\mathcal{A}^{-1}|\Psi_{1}\rangle-2\mathcal{A}|\Phi\rangle\langle\Phi|\mathcal{A}^{-1}|\Psi_{1}\rangle\\ &=&2(1-a)|\Psi_{1}\rangle+(1-2a)|\Psi_{0}\rangle. \end{eqnarray} \end{proof} Following the previous study \cite{Brassard2000Quantum}, the subspace $\mathcal{H}_{\Psi}$ has an orthonormal basis consisting of two eigenvectors of $\mathcal{Q}$ according to Theorem \ref{The1}. 
\begin{theorem} \label{The1} For any initial quantum state $|\Phi\rangle$, $|\Psi\rangle=\mathcal{A}|\Phi\rangle$ can be expressed as follows: \begin{equation}\label{22} |\Psi\rangle=\frac{-i}{\sqrt{2}}(e^{i\theta_{a}}|\Psi_{+}\rangle- e^{-i\theta_{a}}|\Psi_{-}\rangle), \end{equation} where $|\Psi_{\pm}\rangle=\frac{1}{\sqrt{2}}(\frac{1}{\sqrt{a}} |\Psi_{1}\rangle\pm\frac{i}{\sqrt{1-a}}|\Psi_{0}\rangle)$ represent two eigenvectors of $\mathcal{Q}$. The corresponding eigenvalues are $\lambda_{\pm}=e^{\pm i2\theta_{a}}$, where $i=\sqrt{-1}$ denotes the principal square root of $-1$, and the angle $\theta_{a}$ is defined so that $sin^{2}(\theta_{a})=a$, and $0\leq\theta_{a}\leq\pi/2$. \end{theorem} \begin{proof} The angle $\theta_{a}$ is defined so that $sin^{2}(\theta_{a})=a$. At first, we prove that $|\Psi\rangle$ can be represented as the linear combination of $|\Psi_{+}\rangle$ and $|\Psi_{-}\rangle$: \begin{eqnarray} \nonumber && |\Psi\rangle \\ \nonumber&=& |\Psi_{1}\rangle+|\Psi_{0}\rangle\\ \nonumber &=& \frac{2\sqrt{a}}{2\sqrt{a}}|\Psi_{1}\rangle+\frac{2\sqrt{1-a}}{2\sqrt{1-a}}|\Psi_{0}\rangle\\ \nonumber &=& \frac{-i}{2\sqrt{a}}(2isin\theta_{a})|\Psi_{1}\rangle+ \frac{1}{2\sqrt{1-a}}(2cos\theta_{a})|\Psi_{0}\rangle\\ \nonumber &=& \frac{-i}{2\sqrt{a}}(cos\theta_{a}+isin\theta_{a}-cos\theta_{a}+isin\theta_{a}) |\Psi_{1}\rangle\\ \nonumber&&+\frac{1}{2\sqrt{1-a}}(cos\theta_{a}+isin\theta_{a}+cos\theta_{a}-isin\theta_{a})|\Psi_{0}\rangle\\ \nonumber&=&\frac{-i}{\sqrt{2}}[(cos\theta_{a}+isin\theta_{a})(\frac{1}{\sqrt{2a}}|\Psi_{1}\rangle+ \frac{i}{\sqrt{2(1-a)}}|\Psi_{0}\rangle)\\ \nonumber&&-(cos\theta_{a}-isin\theta_{a})(\frac{1}{\sqrt{2a}}|\Psi_{1}\rangle- \frac{i}{\sqrt{2(1-a)}}|\Psi_{0}\rangle)]. \end{eqnarray} Let $|\Psi_{\pm}\rangle=\frac{1}{\sqrt{2}}(\frac{1}{\sqrt{a}} |\Psi_{1}\rangle\pm\frac{i}{\sqrt{1-a}}|\Psi_{0}\rangle)$, we have \begin{equation} |\Psi\rangle =\frac{-i}{\sqrt{2}}(e^{i\theta_{a}}|\Psi_{+}\rangle- e^{-i\theta_{a}}|\Psi_{-}\rangle).
\end{equation} Next, we prove that $|\Psi_{+}\rangle$ is an eigenvector of $\mathcal{Q}$. The corresponding eigenvalue is $\lambda_{+}=e^{i2\theta_{a}}$. \begin{eqnarray}\label{23} \nonumber &&\mathcal{Q}|\Psi_{+}\rangle\\ \nonumber&=&\mathcal{Q} (\frac{1}{\sqrt{2a}}|\Psi_{1}\rangle+ \frac{i}{\sqrt{2(1-a)}}|\Psi_{0}\rangle)\\ \nonumber &=& \frac{1}{\sqrt{2a}}\mathcal{Q} |\Psi_{1}\rangle+\frac{i}{\sqrt{2(1-a)}}\mathcal{Q} |\Psi_{0}\rangle\\ \nonumber &=&[\frac{1-2a}{\sqrt{2a}}+\frac{2i(1-a)}{\sqrt{2(1-a)}}] |\Psi_{1}\rangle+[\frac{-2a}{\sqrt{2a}}+\frac{i(1-2a)}{\sqrt{2(1-a)}}] |\Psi_{0}\rangle\\ \nonumber&=&(1-2a+2i\sqrt{a}\sqrt{1-a})(\frac{1}{\sqrt{2a}}|\Psi_{1}\rangle+\frac{i}{\sqrt{2(1-a)}}|\Psi_{0}\rangle)\\ &=&e^{i2\theta_{a}}|\Psi_{+}\rangle, \end{eqnarray} where the third equal sign is based on Lemma \ref{L3.1}. The same method can be used to prove that $\mathcal{Q}|\Psi_{-}\rangle=e^{-i2\theta_{a}}|\Psi_{-}\rangle$. The result is confirmed. \end{proof} After $j$ applications of operator $\mathcal{Q}$, the state is \begin{equation}\label{24} \mathcal{Q}^{j}|\Psi\rangle=\frac{-i}{\sqrt{2}}(e^{(2j+1)i\theta_{a}}|\Psi_{+}\rangle- e^{-(2j+1)i\theta_{a}}|\Psi_{-}\rangle). \end{equation} We can use the same method as introduced in \cite{Brassard2000Quantum} to estimate $a$ (Algorithm 1 in Table \ref{tab1}). The resulting estimate of $a$ is defined as $\tilde{a}$. The quantum circuit to implement this algorithm is depicted in Fig. \ref{fig3}. \begin{table} \caption{The generalized quantum amplitude estimation} \setlength{\tabcolsep}{0.1pt} \begin{tabular}{p{240pt}} \hline $\mathbf{Algorithm~1:}$ The generalized quantum amplitude estimation\\ \hline $\mathbf{1}$. Prepare the quantum state $|0\rangle^{\otimes h}|\Phi_{1}\rangle|0\rangle$.\\ $\mathbf{2}$. Apply Hadamard to the first register $|0\rangle^{\otimes h}$, where $h$ is an integer that relates to the precision.\\ $\mathbf{3}$.
Apply $\mathcal{A}$ to the second register $|\Phi_{1}\rangle|0\rangle$.\\ $\mathbf{4}$. Apply $\mathcal{Q}^{2^{j}}$ to the second register controlled by the first register.\\ $\mathbf{5}$. Apply the inverse Fourier transform to the first register.\\ $\mathbf{6}$. Measure the first register and denote the outcome $|y\rangle$.\\ $\mathbf{7}$. Output $\tilde{a}=sin^{2}(\pi\frac{y}{2^{h}})$. \\ \hline \end{tabular} \label{tab1} \end{table} \begin{figure} \caption{The quantum circuit of the generalized quantum amplitude estimation} \label{fig3} \end{figure} \section{The quantum support vector machine based on GQAE}\label{S4} After introducing this tool, we turn to the QSVM based on GQAE (AE-QSVM), striving to address the problems arising in training and classification. At first, the training process of the QSVM is realized based on quantum singular value decomposition. Then a query state is classified by using GQAE to calculate the inner product. \subsection{Quantum support vector machine training}\label{S4.1} In LS-QSVM, the input matrix $F$ is not expected to be sparse and well structured, thus restricting the applications of HHL algorithm. An important method for solving linear equations is singular value decomposition. This paper uses the quantum singular value decomposition to train AE-QSVM \cite{Kerenidis2020Quantum}. The singular values are equal to the eigenvalues for the square matrix $F$. Therefore, the singular value decomposition of matrix $F$ is written as $F=\sum_{i}\lambda_{i}\nu_{i}\nu_{i}^{\mathrm{ T }}$, where $\lambda_{i}\geq0$ are the eigenvalues and $\nu_{i}$ are the corresponding eigenvectors. If the matrix $F$ is singular, the solution $F^{-1}|0, \boldsymbol y\rangle$ can be obtained via the Moore-Penrose pseudoinverse, that is, only the nonzero eigenvalues are inverted.
As in Algorithm $2$ (Table \ref{tab2}), the quantum linear system $F|b, \boldsymbol \alpha\rangle=|0, \boldsymbol y\rangle$ is solved by using the singular value decomposition algorithm \cite{Kerenidis2020Quantum}. We obtain the parameters of AE-QSVM: \begin{equation}\label{24.9} |b,\boldsymbol\alpha\rangle=\sum_{i}\beta_{i}\frac{1}{\bar{\lambda}_{i}}|\nu_{i}\rangle. \end{equation} In the basis of training set labels, the expansion coefficients of the ultimate state are the desired support vector machine parameters: \begin{equation}\label{25} |b,\boldsymbol\alpha\rangle=\frac{1}{\sqrt{C}}(b|0\rangle+\sum_{k=1}^{M}\alpha_{k}|k\rangle), \end{equation} where $C=b^{2}+\sum_{k=1}^{M}\alpha_{k}^{2}$. \begin{table} \caption{Solve the quantum linear systems by using the improved singular value decomposition algorithm} \label{table} \setlength{\tabcolsep}{3pt} \begin{tabular}{p{240pt}} \hline $\mathbf{Algorithm~2:}$ Solve the quantum linear systems using the improved singular value decomposition algorithm \cite{Kerenidis2020Quantum} \\ \hline Require: Matrix $F$ stored in the data structure, such that eigenvalues of $F$ lie in $[1/\kappa,1]$. Input state $|0,y\rangle =\sum_{i}\beta_{i}|\nu_{i}\rangle$ .\\ $\mathbf{1}$. Perform singular value decomposition with precision $\varepsilon_{1}$ for $F$ on $|0,y\rangle$ to obtain $\sum_{i}\beta_{i}|\nu_{i}\rangle|\bar{\lambda}_{i}\rangle$ . \\ $\mathbf{2}$. Perform a conditional rotation and uncompute the SVE register to obtain the state: $\sum_{i}\beta_{i}|\nu_{i}\rangle (\frac{1}{\bar{\lambda}_{i}}|0\rangle+\gamma|1\rangle)$.\\ $\mathbf{3}$. Perform the amplitude amplification to obtain $|b,\alpha\rangle=\sum_{i}\beta_{i}\frac{1}{\bar{\lambda}_{i}}|\nu_{i}\rangle$. \\ \hline \end{tabular} \label{tab2} \end{table} \subsection{Classification based on generalized quantum amplitude estimation} We have implemented the training procedure of the quantum support vector machine and would like to classify a query state $|\boldsymbol x\rangle$.
Following Ref. \cite{Rebentrost2014Quantum}, the training data oracle is constructed as follows by adding a register to the state $|b,\boldsymbol\alpha\rangle$: \begin{equation}\label{26} |\tilde{\boldsymbol\mu}\rangle=\frac{1}{\sqrt{N_{\tilde{\boldsymbol\mu}}}}( b|0\rangle|0\rangle+\sum_{k=1}^{M}\alpha_{k}|\boldsymbol x_{k}||k\rangle|\boldsymbol x_{k}\rangle), \end{equation} with $N_{\tilde{\mu}}=b^{2}+\sum_{k=1}^{M}\alpha_{k}^{2}|\boldsymbol x_{k}|^{2}$. The query state is constructed: \begin{equation}\label{27} |\tilde{\boldsymbol x}\rangle=\frac{1}{\sqrt{N_{\tilde{\boldsymbol z}}}}(|0\rangle|0\rangle+\sum_{k=1}^{M}|\boldsymbol x||k\rangle|\boldsymbol x\rangle), \end{equation} with $N_{\tilde{\boldsymbol z}}=M|\boldsymbol x|^{2}+1$. For the classification, we perform a quantum inner-product estimation for $|\tilde{\boldsymbol\mu}\rangle$ and $|\tilde{\boldsymbol x}\rangle$ based on GQAE for the following reason: \begin{equation}\label{28} \langle\tilde{\boldsymbol\mu}|\tilde{\boldsymbol x}\rangle=\frac{1}{\sqrt{N_{\tilde{\boldsymbol\mu}}}\sqrt{N_{\tilde{\boldsymbol z}}}}(b+\sum_{k=1}^{M}\alpha_{k}|\boldsymbol x_{k}||\boldsymbol x|\langle\boldsymbol x_{k}|\boldsymbol x\rangle). \end{equation} Following the previous studies \cite{Lin2020Quantum,Li2019Quantum,Hou2020Quantum}, two ancillary qubits can be used to construct the entangled state \begin{equation}\label{29} |\phi_{0}\rangle=\frac{1}{\sqrt{2}}(|0\rangle|\tilde{\boldsymbol\mu}\rangle-|1\rangle|\tilde{\boldsymbol x}\rangle)|0\rangle.
\end{equation} First, a Hadamard gate acts on the first register (denoted as $H_{1}$) to obtain the following state: \begin{eqnarray}\label{30} \nonumber &&|\phi_{1}\rangle\\ \nonumber &=& H_{1}|\phi_{0}\rangle\\ \nonumber&=& \frac{1}{\sqrt{2}}(\frac{|0\rangle+|1\rangle}{\sqrt{2}}|\tilde{\boldsymbol\mu}\rangle- \frac{|0\rangle-|1\rangle}{\sqrt{2}}|\tilde{\boldsymbol x}\rangle)|0\rangle \\ &=& \frac{1}{2}(|0\rangle|\tilde{\boldsymbol\mu}\rangle|0\rangle+|1\rangle|\tilde{\boldsymbol\mu}\rangle|0\rangle -|0\rangle|\tilde{\boldsymbol x}\rangle|0\rangle+|1\rangle|\tilde{\boldsymbol x}\rangle|0\rangle). \end{eqnarray} Then, the Pauli-$X$ gate is applied to the third register controlled by the first register with $|0\rangle$ (denoted as $C\-/X$). That is, $|0\rangle|i\rangle|j\rangle\rightarrow|0\rangle|i\rangle|j\oplus1\rangle$ and $|1\rangle|i\rangle|j\rangle\rightarrow|1\rangle|i\rangle|j\rangle$, where $\oplus$ is modulo $2$ addition. \begin{eqnarray}\label{31} \nonumber &&|\phi_{2}\rangle\\ \nonumber &=&(C\-/X)|\phi_{1}\rangle\\ \nonumber &=&\frac{1}{2}(|0\rangle|\tilde{\boldsymbol\mu}\rangle|1\rangle+|1\rangle|\tilde{\boldsymbol\mu}\rangle|0\rangle -|0\rangle|\tilde{\boldsymbol x}\rangle|1\rangle+|1\rangle|\tilde{\boldsymbol x}\rangle|0\rangle)\\ &=& \frac{1}{2}(|0\rangle|\tilde{\boldsymbol\mu}\rangle-|0\rangle|\tilde{\boldsymbol x}\rangle)|1\rangle+ \frac{1}{2}(|1\rangle|\tilde{\boldsymbol\mu}\rangle+|1\rangle|\tilde{\boldsymbol x}\rangle)|0\rangle. \end{eqnarray} Let $\mathcal{A}_{1}=(C\-/X)(H_{1})$, $|\psi_{1}\rangle=\frac{1}{2}(|0\rangle|\tilde{\boldsymbol\mu}\rangle-|0\rangle|\tilde{\boldsymbol x}\rangle)|1\rangle$, and $|\psi_{0}\rangle=\frac{1}{2}(|1\rangle|\tilde{\boldsymbol\mu}\rangle+|1\rangle|\tilde{\boldsymbol x}\rangle)|0\rangle$. The problem of interest is given by an operator $\mathcal{A}_{1}$ acting on $|\phi_{0}\rangle$ such that \begin{equation}\label{32} \mathcal{A}_{1} : |\phi_{0}\rangle\longrightarrow |\psi_{1}\rangle+|\psi_{0}\rangle.
\end{equation} Then we can use GQAE to calculate $\langle\psi_{1}|\psi_{1}\rangle$. Therefore, the inner product of $|\tilde{\boldsymbol\mu}\rangle$ and $|\tilde{\boldsymbol x}\rangle$ can be obtained from the following equation: \begin{equation}\label{33} \langle\psi_{1}|\psi_{1}\rangle=\frac{1}{2}(1-\langle \tilde{\boldsymbol\mu}|\tilde{\boldsymbol x}\rangle). \end{equation} When the estimate of $\langle\psi_{1}|\psi_{1}\rangle$ obtained through GQAE is less than $\frac{1}{2}$, i.e., $\langle\tilde{\boldsymbol\mu}|\tilde{\boldsymbol x}\rangle>0$, $|\boldsymbol x\rangle$ belongs to the class $+1$; otherwise, $-1$. Obviously, AE-QSVM is executed one time to obtain the classification result with an accuracy $\varepsilon$ when using GQAE to classify new samples, where $\varepsilon$ depends on the number of auxiliary qubits in the GQAE algorithm. The following will compare the algorithms LS-QSVM and AE-QSVM in terms of qubit consumption and complexity. \subsection{Space complexity analysis of AE-QSVM } The space complexity of AE-QSVM is analyzed in this subsection. That is, we want to analyze the qubit consumption. For AE-QSVM, the qubit consumption in the training process is $3+\lceil log(m+1)\rceil+k+\lceil log(2+\frac{1}{2\varepsilon})\rceil+1$, where $k$ indicates that the eigenvalues of $F$ are approximated to an accuracy of $2^{-k}$ \cite{Nielsen2000Quantum, Kerenidis2020Quantum}. To estimate the inner product in AE-QSVM, $h$ ancillary qubits are needed to have an accuracy of $\varepsilon$ \cite{Brassard2000Quantum}. \begin{equation}\label{33.2} \varepsilon=\frac{2\pi\sqrt{a(1-a)}}{H}+\frac{\pi^{2}}{H^{2}}, \end{equation} where $H=2^{h}$. Solving Eq. (\ref{33.2}) and considering $H>0$, we have \begin{equation}\label{33.3} H=\frac{\pi(\sqrt{a(1-a)}+\sqrt{a(1-a)+\varepsilon})}{\varepsilon}. \end{equation} Therefore, \begin{equation}\label{33.3b} h\leq\lceil log\frac{\pi+\sqrt{3}\pi}{\varepsilon}\rceil.
\end{equation} Consequently, the qubit consumption of AE-QSVM is \begin{equation}\label{33.4} 3+\lceil log(m+1)\rceil+\lceil log\varepsilon\rceil+\lceil log(2+\frac{1}{2\varepsilon})\rceil+1+\lceil log\frac{\pi+\sqrt{3}\pi}{\varepsilon}\rceil. \end{equation} Besides, for LS-QSVM, the qubit consumption in the training process is also $3+\lceil log(m+1)\rceil+k+\lceil log(2+\frac{1}{2\varepsilon})\rceil+1$. We set the accuracy all to $\varepsilon$ in this paper; thus the qubit consumption of LS-QSVM is \begin{equation}\label{33.1} T\times(3+\lceil log(m+1)\rceil+\lceil log\varepsilon\rceil+\lceil log(2+\frac{1}{2\varepsilon})\rceil+1), \end{equation} where $T$ represents the number of the iterations. Let $Q=3+\lceil log(m+1)\rceil+\lceil log\varepsilon\rceil+\lceil log(2+\frac{1}{2\varepsilon})\rceil+1$; then the space complexities of AE-QSVM and LS-QSVM are $Q+\lceil log\frac{\pi+\sqrt{3}\pi}{\varepsilon}\rceil$ and $TQ$, respectively. That is to say, LS-QSVM has a higher space complexity when $T>1+\frac{\lceil log\frac{\pi+\sqrt{3}\pi}{\varepsilon}\rceil}{Q}$ compared with AE-QSVM. \subsection{Time complexity analysis of AE-QSVM } We now analyze the time complexity for building AE-QSVM. In the stage of the training process, the time is dominated by the quantum singular value decomposition \cite{Kerenidis2020Quantum}. We set $\kappa$ as the condition number (the largest eigenvalue divided by the smallest eigenvalue), and only normalized eigenvalues $\lambda_{j}$ in the interval $1/\kappa\leq|\lambda_{j}|\leq1$ are taken into account. The kernel matrix $F^{0}$ is prepared in time $O(log(mn))$ \cite{Rebentrost2014Quantum}. The running time of computing $e^{iF^{0}t_{0}}$ is $O(t_{0}^{2}\varepsilon^{-1})$, where $t_{0}$ is evolution time and $\varepsilon$ is the accuracy. Therefore, the phase estimation costs time $O(t^{2}_{0}\varepsilon^{-1}log(mn))$, which dominates the time complexity.
The probability of getting $\lambda_{j}^{-1}$ determines the number of iterations of the quantum singular value decomposition, and $O(\kappa^{2})$ iterations are needed to ensure a high success probability. However, only $O(\kappa)$ repetitions are performed to obtain the same success probability by amplitude amplification; thus the time complexity is $O(\kappa t_{0}^{2}\varepsilon^{-1}log(mn))$. For $t_{0}=O(\kappa\varepsilon^{-1})$, the complexity can be written as $O(\kappa^{3}\varepsilon^{-3}log(mn))$. In the stage of classifying a new sample, the complexity is dominated by quantum amplitude estimation. The complexity of performing the unitary operator $\mathcal{Q}$ is $O(1)$ \cite{Nielsen2000Quantum}; thus the complexity of classification is $O(\kappa^{3}\varepsilon^{-3})$. Putting all the times together, the time complexity of AE-QSVM is $O(\kappa^{3}\varepsilon^{-3}(log(mn)+1))$. \section{Simulation}\label{S5} The main work of this paper is to solve the restriction problem of the parameter matrix and the large resource consumption caused by quantum-state collapse when implementing the Swap Test. Therefore, our experiment is divided into two parts. \subsection{The simulation about the parameter matrix} In this subsection, we give an example to illustrate the problem of the matrix consisting of the input samples. In LS-QSVM, the parameter matrix $F$ is a positive definite matrix or a positive semidefinite matrix. When $F$ is a positive semidefinite (but not positive definite) matrix, its inverse does not exist. To illustrate the validity of the algorithm used in subsection \ref{S4.1}, consider the following matrix: \begin{equation}\label{34} F_{1}= \begin{pmatrix}5&-1&3\\ -1&5&-3\\3&-3&3\end{pmatrix}.
\end{equation} The eigenvalues of the matrix $F_{1}$ are $\lambda=0, 4, 9$, and their corresponding eigenvectors are \begin{eqnarray} \nonumber \boldsymbol\nu_{1}&=&\begin{pmatrix} 881/2158& -881/2158&-881/1079\end{pmatrix}^{\mathrm{ T }},\\ \nonumber\boldsymbol\nu_{2}&=&\begin{pmatrix} 985/1393& 985/1393&0\end{pmatrix}^{\mathrm{ T }},\\ \nonumber\boldsymbol\nu_{3}&=&\begin{pmatrix} -780/1351& 780/1351&-780/1351\end{pmatrix}^{\mathrm{ T }}. \end{eqnarray} The matrix $F_{1}$ is singular since the first eigenvalue $\lambda=0$, i.e., the eigenvalue decomposition cannot be used to solve the quantum linear system $F_{1}x=b$. According to the analysis in subsection \ref{S4.1}, we invert only the nonzero eigenvalues when the matrix $F_{1}$ is singular. \subsection{The simulation about the quantum resource consumption} Experiments are performed for the complete algorithm in this subsection. In theory, the high complexity in terms of space and time of LS-QSVM is caused by the Swap Test. Therefore, we first conduct experiments on the IBM platform for the Swap Test. Then LS-QSVM and AE-QSVM are compared in both space and time. First, we conduct the experiment on the IBM Qiskit platform. The quantum circuit of the Swap Test for classification is shown in Fig. \ref{fig1}. The probability of getting $|1\rangle$ is $P$ when we measure the ancillary qubit. $P$ can be obtained to an accuracy $\varepsilon$ by iterating $O(P(1-P)/\varepsilon^{2})$ times, as shown in Ref. \cite{Rebentrost2014Quantum}. The number of iterations is identical for probability $P$ and $1-P$ when achieving the same accuracy since $0<P<1$. Therefore, we let $P=0.1$, $P=0.3$, and $P=0.5$. It can be seen from Fig. \ref{fig4} that the number of iterations increases as the accuracy increases. Specifically, the number of iterations will sharply increase as the error decreases when $0<\varepsilon<0.1$.
\begin{figure} \caption{The number of iterations versus error for the Swap Test} \label{fig4} \end{figure} Then, the qubit consumption is presented based on the above result. Fig. \ref{fig4} shows that a large number of iterations are required when using LS-QSVM for classification. Besides, the quantum state of each iteration cannot be used for the next iteration. Therefore, a large number of qubits need to be consumed. AE-QSVM does not need iteration, and its qubit consumption is shown in Eq. (\ref{33.4}). It can be seen from Fig. \ref{fig4} that the number of iterations required is different for the different values of $P$. In this section, we take the average number of iterations over the three values of $P$ for the same accuracy. Table \ref{tab3} shows the number of qubits consumed by AE-QSVM and LS-QSVM for different sample sizes ($m$). It can be seen from Table \ref{tab3} that the number of qubits to be consumed by both algorithms increases as $m$ increases. However, more qubits are consumed by algorithm LS-QSVM than by algorithm AE-QSVM. Especially, as the accuracy increases, the total amount of qubits consumed by algorithm LS-QSVM rapidly increases, while the total number of qubits consumed by algorithm AE-QSVM remains unchanged.
\begin{table*} \centering \caption{LS-QSVM compared with AE-QSVM about the number of qubit consumption} \setlength{\tabcolsep}{5pt} \begin{tabular}{|c|c| c|c|c|c|c|c|c|} \hline \diagbox{Algorithm}{Qubits}{Accuracy} & &0.70 &0.80 &0.90 &0.93 &0.95 &0.97 &0.99\\ \hline ~~~~~~AE-QSVM &\multirow{2}* {$m$=10}&14& 14 & 14 & 14 &14 & 14 &14 \\ ~~~~~~LS-QSVM & ~ &84& 300& 1000&1934&4667 &15334 &30000 \\ \hline ~~~~~~AE-QSVM &\multirow{2}* {$m$=100}&17 & 17 & 17 & 17 &17 & 17 &17 \\ ~~~~~~LS-QSVM & ~ &109& 390& 1300&2513&6067 &19933 &39000 \\ \hline ~~~~~~AE-QSVM &\multirow{2}* {$m$=1000}&20 & 20 & 20 & 20 &20 & 20 &20 \\ ~~~~~~LS-QSVM & ~ &134& 480& 1600&3034&7467 &34534 &48000 \\ \hline ~~~~~~AE-QSVM & \multirow{2}* {$m$=10000}&24 & 24 & 24 & 24 &24 & 24 &24 \\ ~~~~~~LS-QSVM & ~ &167& 600& 2000&3867&9334 &30667 &60000 \\ \hline \end{tabular} \label{tab3} \end{table*} Finally, the complexity of AE-QSVM and LS-QSVM are compared. It can be seen from the above experiment that extensive iterations are needed to achieve a high degree of accuracy for LS-QSVM. Although the complexity of inner estimation is higher than that of the Swap Test, the measurement is not needed in AE-QSVM. Therefore, we will compare the complexity of the two algorithms. In order to compare LS-QSVM and AE-QSVM, we use expectation in mathematics to describe the complexity with an accuracy $\varepsilon$, i.e.,: \begin{eqnarray} \nonumber &&O(\int_{0}^{1}(\kappa^{3}\varepsilon^{-3}log(mn)+log(n))\frac{P(1-P)} {\varepsilon^{2}}PdP) \\ &=&O(\frac{\kappa^{3}\varepsilon^{-3}log(mn)+log(n)}{12\varepsilon^{2}}). \end{eqnarray} The complexity of AE-QSVM is \begin{equation}\label{38} O(\kappa^{3}\varepsilon^{-3}(log(mn)+1)) \end{equation} as analyzed in the above section. 
Experiments are designed based on the six datasets: \emph{Tic~tac~toe} ($m=958$, $n=9$), \emph{Haberman} ($m=306$, $n=3$), \emph{Ionosphere} ($m=351,n=34$), \emph{Heart statlog} ($m=270$, $n=13$), \emph{Liver disorders} ($m=345$, $n=6$), and \emph{Bupa} ($m=345,n=6$). We first calculated the condition numbers of the six datasets and then calculated the complexity of the two methods according to Eq. (\ref{13}) and Eq. (\ref{14}), respectively. It can be seen from Table \ref{tab4} that the complexity of AE-QSVM is significantly lower than that of LS-QSVM when the error is below $0.2$, and the complexity of the two algorithms differs little when the error is $0.3$. However, the algorithm is meaningless if the error is too large. Therefore, our algorithm is superior to the LS-QSVM algorithm when the error is in the acceptable range. \begin{sidewaystable} \centering \caption{LS-QSVM compared with AE-QSVM on six datasets} \label{tab4} \setlength{\tabcolsep}{5pt} \begin{tabular}{|c|c| c|c|c|c|c|c|c|} \hline \diagbox{Algorithm}{Complexity}{ Accuracy} & Dataset &0.99 &0.97 &0.95 &0.93 &0.90 &0.80 &0.70\\ \hline AE-QSVM &\multirow{2}* {\emph{Tic~tac~toe}} & $1.32\times10^{24}$ & $4.89\times10^{22}$ & $1.06\times10^{22}$ & $3.85\times10^{21}$ & $1.32\times10^{21}$ & $1.65\times10^{20}$ & $4.89\times10^{19}$ \\ LS-QSVM & & $9.91\times10^{28}$ & $1.36\times10^{26}$ & $6.34\times10^{24}$ & $8.43\times10^{23}$ & $9.91\times10^{22}$ & $1.55\times10^{21}$ & $1.36\times10^{20}$ \\ \hline AE-QSVM &\multirow{2}* {\emph{Haberman}} & $3.65\times10^{27}$ & $1.35\times10^{26}$& $2.92\times10^{25}$ & $1.06\times10^{25}$ & $3.65\times10^{24}$ & $4.56\times10^{23}$ & $1.35\times10^{23}$ \\ LS-QSVM & & $2.67\times10^{32}$ & $3.66\times10^{29}$ & $1.71\times10^{28}$ & $2.67\times10^{26}$ & $6.51\times10^{28}$ & $4.17\times10^{24}$ & $3.66\times10^{23}$ \\ \hline AE-QSVM &\multirow{2}* {\emph{Ionosphere}} & $1.46\times10^{20}$ & $5.40\times10^{18}$ & $1.17\times10^{18}$ &
$4.25\times10^{17}$ & $1.46\times10^{17}$ & $1.82\times10^{16}$ & $5.40\times10^{15}$ \\ LS-QSVM & & $1.10\times10^{25}$ & $1.51\times10^{22}$ & $7.03\times10^{20}$ & $9.34\times10^{19}$ & $1.10\times10^{19}$ & $1.72\times10^{17}$ & $1.51\times10^{16}$ \\ \hline AE-QSVM & \multirow{2}* {\emph{Heart statlog}}& $6.39\times10^{31}$ & $2.37\times10^{30}$ & $5.11\times10^{29}$ & $1.86\times10^{29}$ & $6.38\times10^{28}$ & $7.97\times10^{27}$ & $2.36\times10^{27}$ \\ LS-QSVM & & $4.75\times10^{36}$ & $6.51\times10^{33}$ & $3.03\times10^{32}$ & $4.29\times10^{31}$ & $4.74\times10^{30}$ & $7.41\times10^{28}$ & $6.50\times10^{27}$ \\ \hline AE-QSVM & \multirow{2}* {\emph{Liver disorders}} & $1.45\times10^{32}$ & $5.39\times10^{30}$ & $1.16\times10^{30}$ & $4.24\times10^{29}$ & $1.45\times10^{29}$ & $1.82\times10^{28}$ & $5.39\times10^{27}$ \\ LS-QSVM & & $1.07\times10^{37}$ & $1.47\times10^{34}$& $6.88\times10^{32}$ & $9.13\times10^{31}$ & $1.07\times10^{31}$ & $1.68\times10^{29}$ & $1.47\times10^{28}$ \\ \hline AE-QSVM & \multirow{2}* {\emph{Bupa}} & $2.14\times10^{29}$ & $7.92\times10^{27}$ & $1.71\times10^{27}$ & $6.23\times10^{26}$ & $2.14\times10^{26}$ & $2.67\times10^{25}$ & $7.92\times10^{24}$ \\ LS-QSVM & & $1.58\times10^{34}$ & $2.17\times10^{31}$ & $1.01\times10^{30}$ & $1.34\times10^{29}$ & $1.58\times10^{28}$ & $2.47\times10^{26}$ & $2.17\times10^{25}$ \\ \hline \end{tabular} \end{sidewaystable} \section{Discussion and conclusion}\label{S6} The quantum support vector machine based on quantum amplitude estimation is introduced in this paper. At first, the quantum singular value decomposition was used to train AE-QSVM which excludes the constraints of sparsity and is well structured for input matrix. Then, we used the quantum amplitude estimation instead of Swap Test to realize the classification so that the support vector machine was not needed to be repetitively performed. 
In AE-QSVM, auxiliary qubits are used to transform the probability amplitude into the basis state; thus high accuracy can be achieved by increasing the number of auxiliary qubits. Therefore, the algorithm AE-QSVM essentially reflects the superiority of quantum algorithms. The results from this study can be extended towards the following directions in future work: (1) AE-QSVM can be extended to the nonlinear classifier. One of the most powerful uses of support vector machines is to perform the nonlinear classification \cite{Cortes1995Suppor}. One can use our proposed algorithm AE-QSVM to find a nonlinear classifier based on the Gaussian kernel \cite{Smola2000Generalized}: \begin{equation}\label{12.1} (K(A,B))_{ij}=e^{-\delta\|A_{i}^{'}-B_{\cdot j}\|^{2}},~~~~~~~~~ i=1,\ldots, m, j=1,\ldots, l \end{equation} where $A\in R^{m\times n}$ and $B\in R^{n\times l}$, $\delta$ is a positive constant, and the kernel $K(A,B)$ maps $R^{m\times n} \times R^{n\times l}$ into $R^{m\times l}$. A quantum version of the Gaussian kernel was proposed by Bishwas et~al. \cite{Bishwas2020Gaussian}. The running time complexity of the quantum Gaussian kernel is significantly shorter than that of its classical version. (2) The inner estimation based on amplitude estimation has low complexity compared with the Swap Test. Therefore, this generalized amplitude estimation can be used in other machine learning methods, for instance, neural networks and clustering. \ifCLASSOPTIONcompsoc \section*{Acknowledgments} \else \section*{Acknowledgment} \fi This work was supported by the National Key Research and Development Program of China under Grant 2020YFB2103800 and the National Natural Science Foundation of China under Grants 61502016, 61672092. \ifCLASSOPTIONcaptionsoff \fi \begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{Zhang2.PNG}}]{Rui Zhang} was born in Minquan County, Henan Province, China in 1994. She received the M.S.
degree in School of Mathematics and Statistics, Henan University, China, in 2020. She is currently pursuing the Ph.D. degree in the School of Computer and Information Technology, Beijing Jiaotong University, China. Her research interest includes quantum computing and machine learning. \end{IEEEbiography} \begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{Wangj2.PNG}}]{Jian Wang} received the Ph.D. degree in computer science from Beijing University of Posts and Telecommunications, Beijing, China, in 2008. Since 2008, he has been an associate professor with School of Computer and Information Technology, Beijing, China. His research interest includes quantum computing and quantum machine learning, data privacy. \end{IEEEbiography} \begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{Jiang3.PNG}}]{Nan Jiang} received the Ph.D. degree in computer science from the Beijing University of Posts and Telecommunications, China, in 2006. From 2015 to 2016, she was a Visiting Scholar with the College of Science, Purdue University, USA. She is currently an Professor of computer science with the Beijing University of Technology, and the Beijing Key Laboratory of Trusted Computing. Her research interests include quantum image processing, quantum machine learning and information hiding. \end{IEEEbiography} \begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{Wangz5.PNG}}]{Zichen Wang} was born in Xuzhou of Jiangsu Province. He graduated from Xuzhou University of Technology. Now studying in Beijing University of technology for a master's degree. The current research direction is quantum machine learning. \end{IEEEbiography} \end{document}
\begin{document} \title{Tunable Gyromagnetic Augmentation of Nuclear Spins in Diamond} \author{R. M. Goldblatt} \author{A. M. Martin} \author{A. A. Wood} \email{[email protected]} \affiliation{School of Physics, University of Melbourne, Parkville Victoria 3010, Australia} \date{\today} \begin{abstract} Nuclear spins in solids exhibit long coherence times due to the small nuclear gyromagnetic ratio. This weak environmental coupling comes at the expense of slow quantum gate operations, which should be as fast as possible for many applications in quantum information processing and sensing. In this work, we use nitrogen-vacancy (NV) centers in diamond to probe the nuclear spins within dark paramagnetic nitrogen defects (P1 centers) in the diamond lattice. The gyromagnetic ratio of the P1 nuclear spin is augmented by hyperfine coupling to the electron spin, resulting in greatly enhanced coupling to radiofrequency control fields. We then demonstrate that this effect can be tuned by variation of an external magnetic field. Our work identifies regimes in which we are able to implement fast quantum control of dark nuclear spins, and lays the foundations for further inquiry into rapid control of long-lived spin qubits at room temperature. \end{abstract} \maketitle Nuclear spins in solid-state substrates have attracted considerable interest for applications in quantum information processing \cite{fuchs_quantum_2011, pla_coherent_2014, hensen_silicon_2020, muhonen_storing_2014, degen_quantum_2017} and quantum sensing \cite{maclaurin_measurable_2012, ledbetter_gyroscopes_2012, ajoy_stable_2012, liu_nanoscale_2019, soshenko_nuclear_2021, jarmola_demonstration_2021}, as they exhibit long coherence times at room temperature \cite{kane_silicon-based_1998, dutt_quantum_2007, ladd_quantum_2010, maurer_room-temperature_2012, zhong_optically_2015}. 
However, reduced coupling to magnetic noise comes with concomitantly weaker coupling to control fields, meaning gate operations are slow and error-prone. When coupled to optically active electron spins, such as the NV center in diamond \cite{doherty_nitrogen-vacancy_2013, schirhagl_nitrogen-vacancy_2014, wu_diamond_2016}, nuclear spins can be dynamically polarized~\cite{jacques_dynamic_2009}, controlled and measured, even down to the single-spin level ~\cite{neumann_single-shot_2010}, via hyperfine coupling to the electron spin. Nuclear spins coupled to an electron spin via a spin-mixing hyperfine interaction exhibit an enhanced coupling to resonant driving fields \cite{smeltzer_robust_2009, sangtawesin_hyperfine-enhanced_2016, degen_entanglement_2021, chen_measurement_2015, sangtawesin_quantum_2016, goldman_optical_2020}. This gyromagnetic augmentation has been demonstrated for nearby $^{13}$C nuclei ~\cite{childress_coherent_2006} and the intrinsic $^{14}$N nuclear spin hosted within the NV defect, enabling fast manipulation of nuclear spins \cite{chen_measurement_2015, sangtawesin_hyperfine-enhanced_2016} while still preserving the longer coherence times relative to the electron spin \cite{jarmola_robust_2020, wood_quantum_2021}. Gyromagnetic augmentation offers a means of tuning a qubit between regimes of strong and weak interaction, while still retaining long coherence times. More recently, substitutional nitrogen atoms in diamond (P1 centers), composed of a nitrogen nucleus coupled to an electron spin, have been identified as a promising platform for quantum information processing \cite{laraoui_nitrogen-vacancy-assisted_2012, belthangady_dressed-state_2013, knowles_demonstration_2016}. The ability to control and entangle individual electron and nuclear spins of P1 centers has been demonstrated at cryogenic temperatures \cite{degen_entanglement_2021}. 
In NV quantum sensing, magnetic noise from the P1 spin bath limits the coherence of the NV itself, particularly in diamonds with high nitrogen density ~\cite{de_lange_controlling_2012, knowles_observing_2014, bauch_decoherence_2020}. Driving the P1 bath using resonant radiofrequency (rf) fields~\cite{de_lange_universal_2010, de_lange_controlling_2012, knowles_observing_2014, hansom_environment-assisted_2014, bauch_ultralong_2018} has been demonstrated to mitigate P1 dephasing and preserve NV coherence. Such continuous dynamical decoupling also requires detailed knowledge of the spin bath frequency spectrum, which changes significantly with magnetic field. In this work, we use an ensemble of NV centers in diamond to experimentally characterize the electronic augmentation of optically-dark P1 nuclear spins in diamond, and demonstrate rapid control of the nuclear spin state at room-temperature. We show that the augmentation of the nuclear gyromagnetic ratio is dependent on the external magnetic field strength, enabling fast quantum gate operations on the P1 nuclear spins at low magnetic field strength where spin mixing is significant, and explaining the vanishing of P1 nuclear spin effects at higher field. Detailed characterization of the P1 bath has implications for schemes using P1 qubits in quantum information processing and harnessing continuous dynamical decoupling to eliminate P1-induced NV dephasing. A schematic of our experiment is depicted in Figure \ref{exp1} and is described in greater detail in refs. \cite{wood_anisotropic_2021} and \footnote{See Supplementary Information.}. A type 1b, $\langle 111 \rangle$-cut diamond sample with a $1.1\%$ natural abundance of $^{13}$C and 1\,ppm N concentration is mounted on the spindle of an electric motor, which acts in this case as a precision rotation stage. Current-carrying coils are used to generate a variable magnetic field of up to $100$ G aligned along the $\hat{z}$-axis. 
Microwaves for NV driving and rf for P1 control are produced by wires with diameters $20\,\upmu$m and $50\,\upmu$m, respectively, arranged in a cross as shown in Fig. \ref{exp1}(a). The required rf pulses are produced by an I/Q modulated vector signal generator \footnote{See Supplementary information}. We use a sample where coherence times are dominated by $^{13}$C spins, instead of a nitrogen-dominated sample, so that the coherence time of the NV ensemble is long enough to allow observation of slow (several microsecond) Rabi oscillations. The P1 center features an electron spin ($S$ = 1/2) associated with an unpaired electron, coupled to an $I$ = 1 $^{14}$N nucleus (99.6\% nat. abundance). The P1 center exhibits a static Jahn-Teller (JT) distortion \cite{davies_dynamic_1979, davies_jahn-teller_1981,ammerlaan_reorientation_1981}, which results in four possible crystallographic orientations for the P1 quantization axis due to an elongation of one of the N-C bonds, as shown in Fig. \ref{exp1}(c). The interaction Hamiltonian for a single P1 center is given by: \begin{align} \begin{split} \label{HP1} H_{P1} ={}& -\gamma_e \vec{\boldsymbol{B}} \cdot \vec{\boldsymbol{S}} \, - \, \gamma_N \vec{\boldsymbol{B}} \cdot \vec{\boldsymbol{I}} + \, A_{\parallel} S_z I_z \\ & + A_{\perp} (S_x I_x + S_y I_y) \, + \, Q I_z^2 \, , \end{split} \end{align} where $\gamma_e / 2\pi = -2.8$ MHz/G and $\gamma_N / 2\pi = 307.7$ Hz/G are the gyromagnetic ratios of the electron and of the $^{14}$N nuclear spin, respectively. In Eq. \ref{HP1}, the hyperfine interaction between the electron spin and the $^{14}$N nucleus has been separated into an axial coupling term, $A_{\parallel} / 2\pi = 114$ MHz, and a transverse component, $A_{\perp} / 2\pi = 81.34$ MHz. The nuclear quadrupole coupling term, with $Q / 2\pi = -4.2$ MHz, defines the zero-field splitting between the $m_I = \pm 1$ and $m_I = 0$ states.
Also, $\vec{\boldsymbol{S}} = (S_x, S_y, S_z)$ and $\vec{\boldsymbol{I}} = (I_x, I_y, I_z)$ are the electron and nuclear spin operators of the P1 center, and $\vec{\boldsymbol{B}}$ is an applied magnetic field. We assume the magnetic field is aligned approximately along a particular NV crystallographic orientation class (and, therefore, one of the P1 orientation axes), which we define as the $\hat{z}$-axis. The magnetic field has therefore a polar angle $\theta$ to the $\hat{z}$-axis, such that $\theta = 0^{\circ}$ for the P1 orientation class parallel to the magnetic field, which will be referred to as `on-axis' and $\theta = 109.5^{\circ}$, for the other three degenerate P1 orientations, denoted `off-axis'. The energies for the six P1 spin states are calculated from the Hamiltonian in Eq. \ref{HP1} and plotted as a function of magnetic field strength in Fig. \ref{deers2}(a) for both the on-axis and off-axis orientation classes. In the high magnetic field limit, $m_S = -1/2, +1/2$ and $m_I = -1, 0, +1$ are good quantum numbers, and we label the eigenstates of $H_{P1}$ as $\ket{m_S,m_I}$. At lower magnetic fields, the eigenstates of the Hamiltonian are superpositions of electronic and nuclear spin states. To account for this state-mixing at low fields, we identify the asymptotic eigenstates in the high field limit and label the states $\ket{a} - \ket{f}$ in descending order as shown in Fig. \ref{deers2}(a). \begin{figure} \caption{\label{exp1} \label{exp1} \end{figure} We use double electron-electron resonance (DEER) spectroscopy \cite{larsen_double_1993} to characterize the dark spin bath surrounding the NV centers, as depicted in Fig. \ref{deers2} (b). A spin-echo pulse sequence decouples the NV ensemble from its quasistatic environment, with the free evolution time of the sequence fixed at a $^{13}$C-induced revival time in order to ensure signal visibility~\cite{childress_coherent_2006}. 
An rf $\pi$-pulse recouples resonant spins in the environment to the NV centers as we sweep the frequency of the rf field. The results from a DEER experiment at $B =$100\,G are shown at the bottom of Fig. \ref{deers2} (b). The expected frequencies for all possible P1 spin transitions, calculated from Eq. (\ref{HP1}), are also shown. We observe six spectral features, which are consistent with the electron spin transitions in the theoretical spectrum (i.e. $\Delta m_S = \pm 1$ and $\Delta m_I = 0$). For both theory and experiment, the deeper amplitudes of the resonant peaks correspond to the off-axis P1s, due to increased contrast resulting from the three degenerate orientations. The DEER measurement was then repeated at a magnetic field of 35\,G, as shown at the top of Fig. \ref{deers2} (b), in which we see the same electron spin transitions. We also see additional spectral features, with comparable amplitudes, corresponding to nuclear spin transitions in which the nuclear spin projection changes ($\Delta m_I = \pm 1$) while the electron spin is conserved ($\Delta m_S = 0$). At a magnetic field of 100\,G, the nuclear spin transition features are indistinguishable from the background of the spectrum. Nuclear spectral features were also absent from previous DEER measurements at comparable magnetic fields \cite{de_lange_controlling_2012, knowles_demonstration_2016, bauch_ultralong_2018, pagliero_multispin-assisted_2018}, which suggests that the coupling of the P1 nuclear spin to the rf field depends on the magnitude of the external magnetic field. \begin{figure} \caption{\label{deers2} \label{deers2} \end{figure} To better understand the variation in rf coupling, we consider how the nuclear spin is coupled to the P1 electron spin. 
The augmentation factor, $\alpha \equiv \gamma_{\text{N,eff}} / \gamma_{\text{N,bare}}$, has been previously measured for a $^{14}$N nuclear spin coupled to the NV electron spin in the presence of a magnetic field that is parallel to the N-V axis \cite{chen_measurement_2015, sangtawesin_quantum_2016} and recently for off-axis field in the same system~\cite{wood_quantum_2021}. Here, the derivation of the augmentation factor is extended to the coupled electron and nuclear spin of the P1 center, and includes the case in which the magnetic field is not aligned with the electron spin quantization axis in order to account for the different orientation classes of the P1 ensemble. The contribution of the transverse hyperfine coupling, parametrized by $A_{\perp}$, dominates the P1 Hamiltonian at low magnetic fields. Hence, the effective nuclear gyromagnetic ratio is expected to increase towards lower field, where state mixing is most apparent. The amplitude of the DEER signal is indicative of the strength of the dipolar coupling between the NV and the P1 spin. As the effective nuclear gyromagnetic ratio reduces with increasing field, so does the amplitude of the DEER signal, as we observe in the difference between DEER measurements at 35\,G and 100\,G, presented in Fig. \ref{deers2}. Following Ref. \cite{chen_measurement_2015}, the Hamiltonian in Eq. \ref{HP1} can be diagonalized using $E = P^{-1} \hat{H} P$, where $E$ is a diagonal matrix of energy eigenvalues and $P$ is the matrix of the eigenvectors of the Hamiltonian. We compute the matrix elements describing the coupling between states in the basis where $\hat{H}$ is diagonal by transforming the rf Hamiltonian, $\hat{H}_\text{rf} = B_\text{rf}(t) (\gamma_E S_{x} + \gamma_{N} I_{x})$, into the diagonal basis. The time-dependence of $\hat{H}_\text{rf}$ is eliminated using the rotating-wave approximation. 
We can then compute the augmentation factor for a given nuclear spin transition, that is: \begin{equation} \label{alpha} \alpha_{a,b} = \frac{1}{\gamma_N B_\text{rf}} \bra{a} \tilde{H}_\text{rf} \ket{b} \end{equation} for transition $\ket{a} \rightarrow \ket{b}$, where $B_\text{rf}$ is the amplitude of the oscillating field. The $\tilde{H}_\text{rf}$ term denotes the interaction Hamiltonian in the diagonal basis defined by $P$, which implicitly contains within it the field-dependence of this expression. \begin{figure} \caption{\label{aug3}} \end{figure} We then characterized the coupling between the P1 nuclear spin and the external rf field by measuring the P1 nuclear spin Rabi frequency as a function of magnetic field strength. Varying the length of the resonant rf pulse reveals Rabi oscillations of the nuclear spin (Fig. \ref{aug3}(a)). The spin-echo time was fixed at $^{13}$C revivals occurring at 45--65\,$\upmu$s to maintain consistent sensitivity between measurements as the magnetic field strength is changed. Using the labeling convention from Fig. \ref{deers2}(a), we identify two nuclear spin transitions, $\ket{a} \leftrightarrow \ket{b}$ and $\ket{d} \leftrightarrow \ket{e}$, which we denote as $ab$ and $de$. These particular transitions are well isolated from other resonant frequencies in the system across a range of magnetic fields and can therefore be individually addressed. Figure \ref{aug3} (b) shows the measured Rabi oscillations for the $ab$ and $de$ transitions for magnetic field strengths between 10\,G and 100\,G. The contrast obtained for the off-axis measurements is approximately three times greater than for on-axis, due to the greater population of off-axis classes. For both the off-axis and on-axis orientations, the Rabi frequency and amplitude of the oscillations can be seen to decrease with increasing field strength.
At each magnetic field, the resonant frequency to drive each transition is extracted from precise frequency-domain DEER measurements using low rf power, ensuring that the observed changes in the amplitude and frequency are not caused by off-resonant driving. The orientation of the rf field with respect to the P1 axes has only a minor effect on the gyromagnetic augmentation~\footnote{See Supplemental information}. We fit a damped sinusoidal function of the form $S(t) = S(0) e^{(-t/T_D)^n} \cos{(\Omega t /2)}^2$ to the nuclear Rabi oscillation data, where $T_D$ is the effective decay time, to extract the Rabi frequency and oscillation amplitude for a given transition and magnetic field. The Rabi frequency is $\Omega = \alpha \,\gamma_N B_\text{rf}$, so for a constant rf field, the Rabi frequency scales with the effective gyromagnetic ratio of the nuclear spin as a function of the external magnetic field. The amplitude of the oscillation is determined by the coupling between the NV ensemble and the varying magnetic field produced by the nuclear spins. The magnetic moment of the nuclear spins is proportional to the effective gyromagnetic ratio and, therefore, the oscillation amplitude also scales with the augmentation factor. The fitted frequencies and amplitudes of the Rabi oscillations for transition $de$ are presented in Fig. \ref{aug3}(c,e), together with the normalized augmentation factor calculated from Eq. \ref{alpha}. In order to effectively compare the results across different P1 transitions, both the Rabi frequencies and amplitudes were normalized with respect to the maximum values, with $\Omega_\text{max} = 1.82$ MHz and $S(0)_\text{max} = 0.012$ observed at 20\,G. The off-axis and on-axis data were then scaled according to the fitted frequencies and amplitudes measured for each transition at 35\,G. There is excellent agreement between the measured Rabi frequencies and the theoretical values for the augmentation factor of the nuclear spin gyromagnetic ratio. 
This agreement demonstrates that the augmented gyromagnetic ratio of the $^{14}$N nuclear spins is well explained by the hyperfine mixing with the P1 electron spin, which decreases with increasing magnetic field. The analysis of the nuclear Rabi oscillations for transition $ab$ (Fig. \ref{aug3}(d,f)) also shows an overall decrease in the frequency and amplitude with increasing magnetic field, which is well modeled by the augmentation curve for this transition. Given the low amplitude for the off-axis measurements for transition $ab$, it was not possible to obtain statistically significant measurements for the on-axis orientation for this particular transition. Furthermore, certain magnetic field values were excluded from the analysis for each transition where the resonant frequency at that field strength was too close to other transition frequencies to individually resolve. The results from our previous DEER measurements (Fig. \ref{deers2}) indicated that the nuclear spin features disappear from the observed signal as the magnetic field is increased. This observation is confirmed by the measured Rabi oscillation amplitude, which decreases markedly as the magnetic field is increased from 10\,G to 100\,G. We have shown that the gyromagnetic ratio of the P1 nuclear spin is augmented by hyperfine coupling to the electron spin, and that the augmentation of the nuclear gyromagnetic ratio can be tuned by the external magnetic field. At low magnetic fields, that is, $<100$\,G, the augmentation factor increases sharply and we are able to perform rapid quantum control of the nuclear spin state, at Rabi frequencies comparable to the P1 or NV electron spin. A remaining open question, the subject of further work, concerns the consequences of gyromagnetic augmentation on the coherence of the P1 nuclear spins.
As more electron spin character is mixed into the nuclear spin, the coupling increases not only to rf fields (as studied in this work) but also to magnetic field noise, which suppresses spin coherence. However, there is reason to expect the augmented nuclear spins to still possess coherence times significantly exceeding that of the P1 or NV electron spin, if not the bare nuclear spin coherence time. The NV nuclear spin augmentation factor is around $\alpha = 20$ at 500\,G, but the response to static magnetic fields is still set by the bare value of $\gamma_N$. As recently reported in Ref.~\cite{degen_entanglement_2021}, augmented nuclear spin transitions at low field possess coherence times a factor of 4 greater than that of the P1 electron spin. Furthermore, rapid state control and long spin storage can potentially be combined with high-speed magnetic shuttling to dynamically tune the nuclear spin interaction with control fields. In this work we examined a thermally distributed ensemble of P1 spins, but with augmented driving, rapid hyperpolarisation of the P1 nuclear spins may allow substantial enhancement, which is a possible avenue of future study. Our work also shows explicitly that the NV center is sensitive to the P1 nuclear spin state at weak magnetic fields, adding further channels for decoherence in NV quantum sensing. The detection of nuclear Rabi oscillations in the NV spin-echo signal implies that the P1 nuclear spins are a significant component in the interacting environment of the NV and will contribute to the dephasing of NV spins at low magnetic fields. Suppression of NV dephasing through spin-bath control and dynamical decoupling schemes \cite{de_lange_controlling_2012, bauch_ultralong_2018} could, therefore, be enhanced at lower magnetic fields by addressing the nuclear spins in addition to the P1 electron spins.
At higher densities, the dipolar coupling between the NV and P1 electrons may be used to indirectly couple the associated nuclear spins, suggesting an interesting platform for quantum information processing~\cite{childress_diamond_2013}. In conclusion, we have characterized the magnetic-field induced augmentation of the gyromagnetic ratio for the P1 nuclear spin. We have found that at lower magnetic field magnitudes, we are able to enact fast quantum control of the nuclear spins within the defect. Tunable magnetic augmentation establishes a potential path forward for using the P1 nuclear spin as a qubit in quantum applications, by addressing the challenge of accessing and controlling nuclear spins that are typically well isolated from external fields. \begin{thebibliography}{49} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{https://doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} 
\providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Fuchs}\ \emph {et~al.}(2011)\citenamefont {Fuchs}, \citenamefont {Burkard}, \citenamefont {Klimov},\ and\ \citenamefont {Awschalom}}]{fuchs_quantum_2011} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~D.}\ \bibnamefont {Fuchs}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Burkard}}, \bibinfo {author} {\bibfnamefont {P.~V.}\ \bibnamefont {Klimov}},\ and\ \bibinfo {author} {\bibfnamefont {D.~D.}\ \bibnamefont {Awschalom}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {A quantum memory intrinsic to single nitrogen–vacancy centres in diamond}},\ }\bibfield {journal} {\bibinfo {journal} {Nature Phys}\ }\textbf {\bibinfo {volume} {7}},\ \href {https://doi.org/10.1038/nphys2026} {10.1038/nphys2026} (\bibinfo {year} {2011})\BibitemShut {NoStop} \bibitem [{\citenamefont {Pla}\ \emph {et~al.}(2014)\citenamefont {Pla}, \citenamefont {Mohiyaddin}, \citenamefont {Tan}, \citenamefont {Dehollain}, \citenamefont {Rahman}, \citenamefont {Klimeck}, \citenamefont {Jamieson}, \citenamefont {Dzurak},\ and\ \citenamefont {Morello}}]{pla_coherent_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~J.}\ \bibnamefont {Pla}}, \bibinfo {author} {\bibfnamefont {F.~A.}\ \bibnamefont {Mohiyaddin}}, \bibinfo {author} {\bibfnamefont {K.~Y.}\ \bibnamefont {Tan}}, \bibinfo {author} {\bibfnamefont {J.~P.}\ \bibnamefont {Dehollain}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Rahman}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Klimeck}}, \bibinfo {author} {\bibfnamefont {D.~N.}\ \bibnamefont {Jamieson}}, \bibinfo {author} {\bibfnamefont {A.~S.}\ 
\bibnamefont {Dzurak}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Morello}},\ }\bibfield {title} {\bibinfo {title} {Coherent {Control} of a {Single} {$^{29}\mathrm{Si}$} {Nuclear} {Spin} {Qubit}},\ }\href {https://doi.org/10.1103/PhysRevLett.113.246801} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {113}},\ \bibinfo {pages} {246801} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hensen}\ \emph {et~al.}(2020)\citenamefont {Hensen}, \citenamefont {Wei~Huang}, \citenamefont {Yang}, \citenamefont {Wai~Chan}, \citenamefont {Yoneda}, \citenamefont {Tanttu}, \citenamefont {Hudson}, \citenamefont {Laucht}, \citenamefont {Itoh}, \citenamefont {Ladd}, \citenamefont {Morello},\ and\ \citenamefont {Dzurak}}]{hensen_silicon_2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Hensen}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Wei~Huang}}, \bibinfo {author} {\bibfnamefont {C.-H.}\ \bibnamefont {Yang}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Wai~Chan}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Yoneda}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Tanttu}}, \bibinfo {author} {\bibfnamefont {F.~E.}\ \bibnamefont {Hudson}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Laucht}}, \bibinfo {author} {\bibfnamefont {K.~M.}\ \bibnamefont {Itoh}}, \bibinfo {author} {\bibfnamefont {T.~D.}\ \bibnamefont {Ladd}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Morello}},\ and\ \bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont {Dzurak}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {A silicon quantum-dot-coupled nuclear spin qubit}},\ }\href {https://doi.org/10.1038/s41565-019-0587-7} {\bibfield {journal} {\bibinfo {journal} {Nat. 
Nanotechnol.}\ }\textbf {\bibinfo {volume} {15}},\ \bibinfo {pages} {13} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Muhonen}\ \emph {et~al.}(2014)\citenamefont {Muhonen}, \citenamefont {Dehollain}, \citenamefont {Laucht}, \citenamefont {Hudson}, \citenamefont {Kalra}, \citenamefont {Sekiguchi}, \citenamefont {Itoh}, \citenamefont {Jamieson}, \citenamefont {McCallum}, \citenamefont {Dzurak},\ and\ \citenamefont {Morello}}]{muhonen_storing_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~T.}\ \bibnamefont {Muhonen}}, \bibinfo {author} {\bibfnamefont {J.~P.}\ \bibnamefont {Dehollain}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Laucht}}, \bibinfo {author} {\bibfnamefont {F.~E.}\ \bibnamefont {Hudson}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Kalra}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Sekiguchi}}, \bibinfo {author} {\bibfnamefont {K.~M.}\ \bibnamefont {Itoh}}, \bibinfo {author} {\bibfnamefont {D.~N.}\ \bibnamefont {Jamieson}}, \bibinfo {author} {\bibfnamefont {J.~C.}\ \bibnamefont {McCallum}}, \bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont {Dzurak}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Morello}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Storing quantum information for 30 seconds in a nanoelectronic device}},\ }\href {https://doi.org/10.1038/nnano.2014.211} {\bibfield {journal} {\bibinfo {journal} {Nature Nanotech}\ }\textbf {\bibinfo {volume} {9}},\ \bibinfo {pages} {986} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Degen}\ \emph {et~al.}(2017)\citenamefont {Degen}, \citenamefont {Reinhard},\ and\ \citenamefont {Cappellaro}}]{degen_quantum_2017} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Degen}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Reinhard}},\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\ }\bibfield {title} 
{\bibinfo {title} {Quantum sensing},\ }\href {https://doi.org/10.1103/RevModPhys.89.035002} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {89}},\ \bibinfo {pages} {035002} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Maclaurin}\ \emph {et~al.}(2012)\citenamefont {Maclaurin}, \citenamefont {Doherty}, \citenamefont {Hollenberg},\ and\ \citenamefont {Martin}}]{maclaurin_measurable_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Maclaurin}}, \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Doherty}}, \bibinfo {author} {\bibfnamefont {L.~C.~L.}\ \bibnamefont {Hollenberg}},\ and\ \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Martin}},\ }\bibfield {title} {\bibinfo {title} {Measurable {Quantum} {Geometric} {Phase} from a {Rotating} {Single} {Spin}},\ }\href {https://doi.org/10.1103/PhysRevLett.108.240403} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {108}},\ \bibinfo {pages} {240403} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ledbetter}\ \emph {et~al.}(2012)\citenamefont {Ledbetter}, \citenamefont {Jensen}, \citenamefont {Fischer}, \citenamefont {Jarmola},\ and\ \citenamefont {Budker}}]{ledbetter_gyroscopes_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~P.}\ \bibnamefont {Ledbetter}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Jensen}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Fischer}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Jarmola}},\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Budker}},\ }\bibfield {title} {\bibinfo {title} {Gyroscopes based on nitrogen-vacancy centers in diamond},\ }\href {https://doi.org/10.1103/PhysRevA.86.052116} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages} {052116} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ajoy}\ and\ \citenamefont {Cappellaro}(2012)}]{ajoy_stable_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Ajoy}}\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\ }\bibfield {title} {\bibinfo {title} {Stable three-axis nuclear-spin gyroscope in diamond},\ }\href {https://doi.org/10.1103/PhysRevA.86.062104} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages} {062104} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Liu}\ \emph {et~al.}(2019)\citenamefont {Liu}, \citenamefont {Ajoy},\ and\ \citenamefont {Cappellaro}}]{liu_nanoscale_2019} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.-X.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Ajoy}},\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\ }\bibfield {title} {\bibinfo {title} {Nanoscale {Vector} dc {Magnetometry} via {Ancilla}-{Assisted} {Frequency} {Up}-{Conversion}},\ }\href {https://doi.org/10.1103/PhysRevLett.122.100501} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {122}},\ \bibinfo {pages} {100501} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Soshenko}\ \emph {et~al.}(2021)\citenamefont {Soshenko}, \citenamefont {Bolshedvorskii}, \citenamefont {Rubinas}, \citenamefont {Sorokin}, \citenamefont {Smolyaninov}, \citenamefont {Vorobyov},\ and\ \citenamefont {Akimov}}]{soshenko_nuclear_2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.~V.}\ \bibnamefont {Soshenko}}, \bibinfo {author} {\bibfnamefont {S.~V.}\ \bibnamefont {Bolshedvorskii}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Rubinas}}, \bibinfo {author} {\bibfnamefont {V.~N.}\ \bibnamefont {Sorokin}}, \bibinfo {author} {\bibfnamefont {A.~N.}\ \bibnamefont {Smolyaninov}}, \bibinfo {author} {\bibfnamefont {V.~V.}\ \bibnamefont {Vorobyov}},\ and\ \bibinfo {author} {\bibfnamefont {A.~V.}\ \bibnamefont {Akimov}},\ }\bibfield {title} {\bibinfo {title} {Nuclear {Spin} {Gyroscope} based on the {Nitrogen} {Vacancy} {Center} in {Diamond}},\ }\href {https://doi.org/10.1103/PhysRevLett.126.197702} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {126}},\ \bibinfo {pages} {197702} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jarmola}\ \emph {et~al.}(2021)\citenamefont {Jarmola}, \citenamefont {Lourette}, \citenamefont {Acosta}, \citenamefont {Birdwell}, \citenamefont {Blümler}, \citenamefont {Budker}, \citenamefont {Ivanov},\ and\ \citenamefont {Malinovsky}}]{jarmola_demonstration_2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Jarmola}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Lourette}}, \bibinfo {author} {\bibfnamefont {V.~M.}\ \bibnamefont {Acosta}}, \bibinfo {author} {\bibfnamefont {A.~G.}\ \bibnamefont {Birdwell}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Blümler}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Budker}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Ivanov}},\ and\ \bibinfo {author} {\bibfnamefont {V.~S.}\ \bibnamefont {Malinovsky}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Demonstration of diamond nuclear spin gyroscope}},\ }\href {https://arxiv.org/abs/2107.04257v1} {\ (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kane}(1998)}]{kane_silicon-based_1998} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~E.}\ \bibnamefont {Kane}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {A silicon-based nuclear spin quantum computer}},\ }\href {https://doi.org/10.1038/30156} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {393}},\ \bibinfo {pages} {133} (\bibinfo {year} {1998})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Dutt}\ \emph {et~al.}(2007)\citenamefont {Dutt}, \citenamefont {Childress}, \citenamefont {Jiang}, \citenamefont {Togan}, \citenamefont {Maze}, \citenamefont {Jelezko}, \citenamefont {Zibrov}, \citenamefont {Hemmer},\ and\ \citenamefont {Lukin}}]{dutt_quantum_2007} \BibitemOpen \bibfield {author} {\bibinfo {author} 
{\bibfnamefont {M.~V.~G.}\ \bibnamefont {Dutt}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Childress}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Togan}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Maze}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}}, \bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont {Zibrov}}, \bibinfo {author} {\bibfnamefont {P.~R.}\ \bibnamefont {Hemmer}},\ and\ \bibinfo {author} {\bibfnamefont {M.~D.}\ \bibnamefont {Lukin}},\ }\bibfield {title} {\bibinfo {title} {Quantum {Register} {Based} on {Individual} {Electronic} and {Nuclear} {Spin} {Qubits} in {Diamond}},\ }\href {https://doi.org/10.1126/science.1139831} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {316}},\ \bibinfo {pages} {1312} (\bibinfo {year} {2007})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ladd}\ \emph {et~al.}(2010)\citenamefont {Ladd}, \citenamefont {Jelezko}, \citenamefont {Laflamme}, \citenamefont {Nakamura}, \citenamefont {Monroe},\ and\ \citenamefont {O’Brien}}]{ladd_quantum_2010} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.~D.}\ \bibnamefont {Ladd}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Laflamme}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Nakamura}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Monroe}},\ and\ \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {O’Brien}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Quantum computers}},\ }\href {https://doi.org/10.1038/nature08812} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {464}},\ \bibinfo {pages} {45} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Maurer}\ \emph {et~al.}(2012)\citenamefont {Maurer}, \citenamefont {Kucsko}, \citenamefont {Latta}, \citenamefont {Jiang}, 
\citenamefont {Yao}, \citenamefont {Bennett}, \citenamefont {Pastawski}, \citenamefont {Hunger}, \citenamefont {Chisholm}, \citenamefont {Markham}, \citenamefont {Twitchen}, \citenamefont {Cirac},\ and\ \citenamefont {Lukin}}]{maurer_room-temperature_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.~C.}\ \bibnamefont {Maurer}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Kucsko}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Latta}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont {N.~Y.}\ \bibnamefont {Yao}}, \bibinfo {author} {\bibfnamefont {S.~D.}\ \bibnamefont {Bennett}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Pastawski}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Hunger}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Chisholm}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Markham}}, \bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont {Twitchen}}, \bibinfo {author} {\bibfnamefont {J.~I.}\ \bibnamefont {Cirac}},\ and\ \bibinfo {author} {\bibfnamefont {M.~D.}\ \bibnamefont {Lukin}},\ }\bibfield {title} {\bibinfo {title} {Room-{Temperature} {Quantum} {Bit} {Memory} {Exceeding} {One} {Second}},\ }\href {https://doi.org/10.1126/science.1220513} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {336}},\ \bibinfo {pages} {1283} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Zhong}\ \emph {et~al.}(2015)\citenamefont {Zhong}, \citenamefont {Hedges}, \citenamefont {Ahlefeldt}, \citenamefont {Bartholomew}, \citenamefont {Beavan}, \citenamefont {Wittig}, \citenamefont {Longdell},\ and\ \citenamefont {Sellars}}]{zhong_optically_2015} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Zhong}}, \bibinfo {author} {\bibfnamefont {M.~P.}\ \bibnamefont {Hedges}}, \bibinfo {author} {\bibfnamefont {R.~L.}\ \bibnamefont {Ahlefeldt}}, \bibinfo {author} {\bibfnamefont 
{J.~G.}\ \bibnamefont {Bartholomew}}, \bibinfo {author} {\bibfnamefont {S.~E.}\ \bibnamefont {Beavan}}, \bibinfo {author} {\bibfnamefont {S.~M.}\ \bibnamefont {Wittig}}, \bibinfo {author} {\bibfnamefont {J.~J.}\ \bibnamefont {Longdell}},\ and\ \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Sellars}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Optically addressable nuclear spins in a solid with a six-hour coherence time}},\ }\href {https://doi.org/10.1038/nature14025} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {517}},\ \bibinfo {pages} {177} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Doherty}\ \emph {et~al.}(2013)\citenamefont {Doherty}, \citenamefont {Manson}, \citenamefont {Delaney}, \citenamefont {Jelezko}, \citenamefont {Wrachtrup},\ and\ \citenamefont {Hollenberg}}]{doherty_nitrogen-vacancy_2013} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Doherty}}, \bibinfo {author} {\bibfnamefont {N.~B.}\ \bibnamefont {Manson}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Delaney}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wrachtrup}},\ and\ \bibinfo {author} {\bibfnamefont {L.~C.~L.}\ \bibnamefont {Hollenberg}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {The nitrogen-vacancy colour centre in diamond}},\ }\href {https://doi.org/10.1016/j.physrep.2013.02.001} {\bibfield {journal} {\bibinfo {journal} {Physics Reports}\ }\bibinfo {series} {The nitrogen-vacancy colour centre in diamond},\ \textbf {\bibinfo {volume} {528}},\ \bibinfo {pages} {1} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Schirhagl}\ \emph {et~al.}(2014)\citenamefont {Schirhagl}, \citenamefont {Chang}, \citenamefont {Loretz},\ and\ \citenamefont {Degen}}]{schirhagl_nitrogen-vacancy_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont 
{R.}~\bibnamefont {Schirhagl}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Chang}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Loretz}},\ and\ \bibinfo {author} {\bibfnamefont {C.~L.}\ \bibnamefont {Degen}},\ }\bibfield {title} {\bibinfo {title} {Nitrogen-{Vacancy} {Centers} in {Diamond}: {Nanoscale} {Sensors} for {Physics} and {Biology}},\ }\href {https://doi.org/10.1146/annurev-physchem-040513-103659} {\bibfield {journal} {\bibinfo {journal} {Annual Review of Physical Chemistry}\ }\textbf {\bibinfo {volume} {65}},\ \bibinfo {pages} {83} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wu}\ \emph {et~al.}(2016)\citenamefont {Wu}, \citenamefont {Jelezko}, \citenamefont {Plenio},\ and\ \citenamefont {Weil}}]{wu_diamond_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}}, \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\ and\ \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Weil}},\ }\bibfield {title} {{\selectlanguage {eng}\bibinfo {title} {Diamond {Quantum} {Devices} in {Biology}}},\ }\href {https://doi.org/10.1002/anie.201506556} {\bibfield {journal} {\bibinfo {journal} {Angew Chem Int Ed Engl}\ }\textbf {\bibinfo {volume} {55}},\ \bibinfo {pages} {6586} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jacques}\ \emph {et~al.}(2009)\citenamefont {Jacques}, \citenamefont {Neumann}, \citenamefont {Beck}, \citenamefont {Markham}, \citenamefont {Twitchen}, \citenamefont {Meijer}, \citenamefont {Kaiser}, \citenamefont {Balasubramanian}, \citenamefont {Jelezko},\ and\ \citenamefont {Wrachtrup}}]{jacques_dynamic_2009} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Jacques}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Neumann}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Beck}}, \bibinfo {author} {\bibfnamefont 
{M.}~\bibnamefont {Markham}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Twitchen}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Meijer}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Kaiser}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Balasubramanian}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wrachtrup}},\ }\bibfield {title} {\bibinfo {title} {Dynamic {Polarization} of {Single} {Nuclear} {Spins} by {Optical} {Pumping} of {Nitrogen}-{Vacancy} {Color} {Centers} in {Diamond} at {Room} {Temperature}},\ }\href {https://doi.org/10.1103/PhysRevLett.102.057403} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {102}},\ \bibinfo {pages} {057403} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Neumann}\ \emph {et~al.}(2010)\citenamefont {Neumann}, \citenamefont {Beck}, \citenamefont {Steiner}, \citenamefont {Rempp}, \citenamefont {Fedder}, \citenamefont {Hemmer}, \citenamefont {Wrachtrup},\ and\ \citenamefont {Jelezko}}]{neumann_single-shot_2010} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Neumann}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Beck}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Steiner}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Rempp}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Fedder}}, \bibinfo {author} {\bibfnamefont {P.~R.}\ \bibnamefont {Hemmer}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wrachtrup}},\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}},\ }\bibfield {title} {\bibinfo {title} {Single-{Shot} {Readout} of a {Single} {Nuclear} {Spin}},\ }\href {https://doi.org/10.1126/science.1189075} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {329}},\ \bibinfo {pages} {542} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem 
[{\citenamefont {Smeltzer}\ \emph {et~al.}(2009)\citenamefont {Smeltzer}, \citenamefont {McIntyre},\ and\ \citenamefont {Childress}}]{smeltzer_robust_2009} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Smeltzer}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {McIntyre}},\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Childress}},\ }\bibfield {title} {\bibinfo {title} {Robust control of individual nuclear spins in diamond},\ }\href {https://doi.org/10.1103/PhysRevA.80.050302} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {80}},\ \bibinfo {pages} {050302} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sangtawesin}\ \emph {et~al.}(2016)\citenamefont {Sangtawesin}, \citenamefont {McLellan}, \citenamefont {Myers}, \citenamefont {Jayich}, \citenamefont {Awschalom},\ and\ \citenamefont {Petta}}]{sangtawesin_hyperfine-enhanced_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Sangtawesin}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {McLellan}}, \bibinfo {author} {\bibfnamefont {B.~A.}\ \bibnamefont {Myers}}, \bibinfo {author} {\bibfnamefont {A.~C.~B.}\ \bibnamefont {Jayich}}, \bibinfo {author} {\bibfnamefont {D.~D.}\ \bibnamefont {Awschalom}},\ and\ \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {Petta}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Hyperfine-enhanced gyromagnetic ratio of a nuclear spin in diamond}},\ }\href {https://doi.org/10.1088/1367-2630/18/8/083016} {\bibfield {journal} {\bibinfo {journal} {New J. 
Phys.}\ }\textbf {\bibinfo {volume} {18}},\ \bibinfo {pages} {083016} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Degen}\ \emph {et~al.}(2021)\citenamefont {Degen}, \citenamefont {Loenen}, \citenamefont {Bartling}, \citenamefont {Bradley}, \citenamefont {Meinsma}, \citenamefont {Markham}, \citenamefont {Twitchen},\ and\ \citenamefont {Taminiau}}]{degen_entanglement_2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Degen}}, \bibinfo {author} {\bibfnamefont {S.~J.~H.}\ \bibnamefont {Loenen}}, \bibinfo {author} {\bibfnamefont {H.~P.}\ \bibnamefont {Bartling}}, \bibinfo {author} {\bibfnamefont {C.~E.}\ \bibnamefont {Bradley}}, \bibinfo {author} {\bibfnamefont {A.~L.}\ \bibnamefont {Meinsma}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Markham}}, \bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont {Twitchen}},\ and\ \bibinfo {author} {\bibfnamefont {T.~H.}\ \bibnamefont {Taminiau}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Entanglement of dark electron-nuclear spin defects in diamond}},\ }\href {https://doi.org/10.1038/s41467-021-23454-9} {\bibfield {journal} {\bibinfo {journal} {Nat Commun}\ }\textbf {\bibinfo {volume} {12}},\ \bibinfo {pages} {3470} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Chen}\ \emph {et~al.}(2015)\citenamefont {Chen}, \citenamefont {Hirose},\ and\ \citenamefont {Cappellaro}}]{chen_measurement_2015} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Hirose}},\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\ }\bibfield {title} {\bibinfo {title} {Measurement of transverse hyperfine interaction by forbidden transitions},\ }\href {https://doi.org/10.1103/PhysRevB.92.020101} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
B}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo {pages} {020101} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sangtawesin}(2016)}]{sangtawesin_quantum_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Sangtawesin}},\ }\emph {\bibinfo {title} {Quantum {Control} of {Nuclear} {Spins} {Coupled} to {Nitrogen}-{Vacancy} {Centers} in {Diamond}}},\ \href {https://ui.adsabs.harvard.edu/abs/2016PhDT........97S} {Ph.D. thesis} (\bibinfo {year} {2016})\BibitemShut {NoStop} \bibitem [{\citenamefont {Goldman}\ \emph {et~al.}(2020)\citenamefont {Goldman}, \citenamefont {Patti}, \citenamefont {Levonian}, \citenamefont {Yelin},\ and\ \citenamefont {Lukin}}]{goldman_optical_2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Goldman}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Patti}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Levonian}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Yelin}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Lukin}},\ }\bibfield {title} {\bibinfo {title} {Optical {Control} of a {Single} {Nuclear} {Spin} in the {Solid} {State}},\ }\href {https://doi.org/10.1103/PhysRevLett.124.153203} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {124}},\ \bibinfo {pages} {153203} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Childress}\ \emph {et~al.}(2006)\citenamefont {Childress}, \citenamefont {Gurudev~Dutt}, \citenamefont {Taylor}, \citenamefont {Zibrov}, \citenamefont {Jelezko}, \citenamefont {Wrachtrup}, \citenamefont {Hemmer},\ and\ \citenamefont {Lukin}}]{childress_coherent_2006} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Childress}}, \bibinfo {author} {\bibfnamefont {M.~V.}\ \bibnamefont {Gurudev~Dutt}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Taylor}}, \bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont {Zibrov}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Jelezko}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wrachtrup}}, \bibinfo {author} {\bibfnamefont {P.~R.}\ \bibnamefont {Hemmer}},\ and\ \bibinfo {author} {\bibfnamefont {M.~D.}\ \bibnamefont {Lukin}},\ }\bibfield {title} {\bibinfo {title} {Coherent {Dynamics} of {Coupled} {Electron} and {Nuclear} {Spin} {Qubits} in {Diamond}},\ }\href {https://doi.org/10.1126/science.1131871} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {314}},\ \bibinfo {pages} {281} (\bibinfo {year} {2006})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jarmola}\ \emph {et~al.}(2020)\citenamefont {Jarmola}, \citenamefont {Fescenko}, \citenamefont {Acosta}, \citenamefont {Doherty}, \citenamefont {Fatemi}, \citenamefont {Ivanov}, \citenamefont {Budker},\ and\ \citenamefont {Malinovsky}}]{jarmola_robust_2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Jarmola}}, \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Fescenko}}, \bibinfo {author} {\bibfnamefont {V.~M.}\ \bibnamefont {Acosta}}, \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Doherty}}, \bibinfo {author} {\bibfnamefont {F.~K.}\ \bibnamefont {Fatemi}}, \bibinfo {author} {\bibfnamefont 
{T.}~\bibnamefont {Ivanov}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Budker}},\ and\ \bibinfo {author} {\bibfnamefont {V.~S.}\ \bibnamefont {Malinovsky}},\ }\bibfield {title} {\bibinfo {title} {Robust optical readout and characterization of nuclear spin transitions in nitrogen-vacancy ensembles in diamond},\ }\href {https://doi.org/10.1103/PhysRevResearch.2.023094} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Research}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages} {023094} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wood}\ \emph {et~al.}(2021{\natexlab{a}})\citenamefont {Wood}, \citenamefont {Goldblatt}, \citenamefont {Scholten},\ and\ \citenamefont {Martin}}]{wood_quantum_2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Wood}}, \bibinfo {author} {\bibfnamefont {R.~M.}\ \bibnamefont {Goldblatt}}, \bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont {Scholten}},\ and\ \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Martin}},\ }\bibfield {title} {\bibinfo {title} {Quantum control of nuclear spin qubits in a rapidly rotating diamond},\ }\href {http://arxiv.org/abs/2107.12577} {\bibfield {journal} {\bibinfo {journal} {arXiv:2107.12577 [quant-ph]}\ } (\bibinfo {year} {2021}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Laraoui}\ \emph {et~al.}(2012)\citenamefont {Laraoui}, \citenamefont {Hodges},\ and\ \citenamefont {Meriles}}]{laraoui_nitrogen-vacancy-assisted_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Laraoui}}, \bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Hodges}},\ and\ \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Meriles}},\ }\bibfield {title} {\bibinfo {title} {Nitrogen-{Vacancy}-{Assisted} {Magnetometry} of {Paramagnetic} {Centers} in an {Individual} {Diamond} {Nanocrystal}},\ }\href {https://doi.org/10.1021/nl300964g} {\bibfield {journal} {\bibinfo {journal} {Nano 
Lett.}\ }\textbf {\bibinfo {volume} {12}},\ \bibinfo {pages} {3477} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Belthangady}\ \emph {et~al.}(2013)\citenamefont {Belthangady}, \citenamefont {Bar-Gill}, \citenamefont {Pham}, \citenamefont {Arai}, \citenamefont {Le~Sage}, \citenamefont {Cappellaro},\ and\ \citenamefont {Walsworth}}]{belthangady_dressed-state_2013} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Belthangady}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Bar-Gill}}, \bibinfo {author} {\bibfnamefont {L.~M.}\ \bibnamefont {Pham}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Arai}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Le~Sage}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\ and\ \bibinfo {author} {\bibfnamefont {R.~L.}\ \bibnamefont {Walsworth}},\ }\bibfield {title} {\bibinfo {title} {Dressed-{State} {Resonant} {Coupling} between {Bright} and {Dark} {Spins} in {Diamond}},\ }\href {https://doi.org/10.1103/PhysRevLett.110.157601} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {110}},\ \bibinfo {pages} {157601} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Knowles}\ \emph {et~al.}(2016)\citenamefont {Knowles}, \citenamefont {Kara},\ and\ \citenamefont {Atatüre}}]{knowles_demonstration_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~S.}\ \bibnamefont {Knowles}}, \bibinfo {author} {\bibfnamefont {D.~M.}\ \bibnamefont {Kara}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Atatüre}},\ }\bibfield {title} {\bibinfo {title} {Demonstration of a {Coherent} {Electronic} {Spin} {Cluster} in {Diamond}},\ }\href {https://doi.org/10.1103/PhysRevLett.117.100802} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {117}},\ \bibinfo {pages} {100802} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {de~Lange}\ \emph {et~al.}(2012)\citenamefont {de~Lange}, \citenamefont {van~der Sar}, \citenamefont {Blok}, \citenamefont {Wang}, \citenamefont {Dobrovitski},\ and\ \citenamefont {Hanson}}]{de_lange_controlling_2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {de~Lange}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {van~der Sar}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Blok}}, \bibinfo {author} {\bibfnamefont {Z.-H.}\ \bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Dobrovitski}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Hanson}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Controlling the quantum dynamics of a mesoscopic spin bath in diamond}},\ }\href {https://doi.org/10.1038/srep00382} {\bibfield {journal} {\bibinfo {journal} {Sci Rep}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages} {382} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Knowles}\ \emph {et~al.}(2014)\citenamefont {Knowles}, \citenamefont {Kara},\ and\ \citenamefont {Atatüre}}]{knowles_observing_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~S.}\ \bibnamefont {Knowles}}, \bibinfo {author} {\bibfnamefont {D.~M.}\ \bibnamefont {Kara}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Atatüre}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Observing bulk diamond spin coherence in high-purity nanodiamonds}},\ }\href {https://doi.org/10.1038/nmat3805} {\bibfield {journal} {\bibinfo {journal} {Nature Mater}\ }\textbf {\bibinfo {volume} {13}},\ \bibinfo {pages} {21} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bauch}\ \emph {et~al.}(2020)\citenamefont {Bauch}, \citenamefont {Singh}, \citenamefont {Lee}, \citenamefont {Hart}, 
\citenamefont {Schloss}, \citenamefont {Turner}, \citenamefont {Barry}, \citenamefont {Pham}, \citenamefont {Bar-Gill}, \citenamefont {Yelin},\ and\ \citenamefont {Walsworth}}]{bauch_decoherence_2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Bauch}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Singh}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Lee}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Hart}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Schloss}}, \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Turner}}, \bibinfo {author} {\bibfnamefont {J.~F.}\ \bibnamefont {Barry}}, \bibinfo {author} {\bibfnamefont {L.~M.}\ \bibnamefont {Pham}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Bar-Gill}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Yelin}},\ and\ \bibinfo {author} {\bibfnamefont {R.~L.}\ \bibnamefont {Walsworth}},\ }\bibfield {title} {\bibinfo {title} {Decoherence of ensembles of nitrogen-vacancy centers in diamond},\ }\href {https://doi.org/10.1103/PhysRevB.102.134210} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
B}\ }\textbf {\bibinfo {volume} {102}},\ \bibinfo {pages} {134210} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {de~Lange}\ \emph {et~al.}(2010)\citenamefont {de~Lange}, \citenamefont {Wang}, \citenamefont {Ristè}, \citenamefont {Dobrovitski},\ and\ \citenamefont {Hanson}}]{de_lange_universal_2010} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {de~Lange}}, \bibinfo {author} {\bibfnamefont {Z.~H.}\ \bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Ristè}}, \bibinfo {author} {\bibfnamefont {V.~V.}\ \bibnamefont {Dobrovitski}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Hanson}},\ }\bibfield {title} {\bibinfo {title} {Universal {Dynamical} {Decoupling} of a {Single} {Solid}-{State} {Spin} from a {Spin} {Bath}},\ }\href {https://doi.org/10.1126/science.1192739} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {330}},\ \bibinfo {pages} {60} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hansom}\ \emph {et~al.}(2014)\citenamefont {Hansom}, \citenamefont {Schulte}, \citenamefont {Le~Gall}, \citenamefont {Matthiesen}, \citenamefont {Clarke}, \citenamefont {Hugues}, \citenamefont {Taylor},\ and\ \citenamefont {Atatüre}}]{hansom_environment-assisted_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Hansom}}, \bibinfo {author} {\bibfnamefont {C.~H.~H.}\ \bibnamefont {Schulte}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Le~Gall}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Matthiesen}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Clarke}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Hugues}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Taylor}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Atatüre}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Environment-assisted quantum control of a solid-state spin 
via coherent dark states}},\ }\href {https://doi.org/10.1038/nphys3077} {\bibfield {journal} {\bibinfo {journal} {Nature Phys}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo {pages} {725} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bauch}\ \emph {et~al.}(2018)\citenamefont {Bauch}, \citenamefont {Hart}, \citenamefont {Schloss}, \citenamefont {Turner}, \citenamefont {Barry}, \citenamefont {Kehayias}, \citenamefont {Singh},\ and\ \citenamefont {Walsworth}}]{bauch_ultralong_2018} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Bauch}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Hart}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Schloss}}, \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Turner}}, \bibinfo {author} {\bibfnamefont {J.~F.}\ \bibnamefont {Barry}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Kehayias}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Singh}},\ and\ \bibinfo {author} {\bibfnamefont {R.~L.}\ \bibnamefont {Walsworth}},\ }\bibfield {title} {\bibinfo {title} {Ultralong {Dephasing} {Times} in {Solid}-{State} {Spin} {Ensembles} via {Quantum} {Control}},\ }\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
X}\ }\textbf {\bibinfo {volume} {8}},\ \href {https://doi.org/10.1103/PhysRevX.8.031025} {10.1103/PhysRevX.8.031025} (\bibinfo {year} {2018})\BibitemShut {NoStop} \bibitem [{\citenamefont {Wood}\ \emph {et~al.}(2021{\natexlab{b}})\citenamefont {Wood}, \citenamefont {Goldblatt}, \citenamefont {Anderson}, \citenamefont {Hollenberg}, \citenamefont {Scholten},\ and\ \citenamefont {Martin}}]{wood_anisotropic_2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Wood}}, \bibinfo {author} {\bibfnamefont {R.~M.}\ \bibnamefont {Goldblatt}}, \bibinfo {author} {\bibfnamefont {R.~P.}\ \bibnamefont {Anderson}}, \bibinfo {author} {\bibfnamefont {L.~C.~L.}\ \bibnamefont {Hollenberg}}, \bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont {Scholten}},\ and\ \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Martin}},\ }\bibfield {title} {\bibinfo {title} {Anisotropic electron-nuclear interactions in a rotating quantum spin bath},\ }\href {https://doi.org/10.1103/PhysRevB.104.085419} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. B}\ }\textbf {\bibinfo {volume} {104}},\ \bibinfo {pages} {085419} (\bibinfo {year} {2021}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{Note1()}]{Note1} \BibitemOpen \bibinfo {note} {See Supplementary Information.}\BibitemShut {Stop} \bibitem [{Note2()}]{Note2} \BibitemOpen \bibinfo {note} {See Supplementary information}\BibitemShut {NoStop} \bibitem [{\citenamefont {Davies}(1979)}]{davies_dynamic_1979} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Davies}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Dynamic {Jahn}-{Teller} distortions at trigonal optical centres in diamond}},\ }\href {https://doi.org/10.1088/0022-3719/12/13/019} {\bibfield {journal} {\bibinfo {journal} {J. Phys. 
C: Solid State Phys.}\ }\textbf {\bibinfo {volume} {12}},\ \bibinfo {pages} {2551} (\bibinfo {year} {1979})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Davies}(1981)}]{davies_jahn-teller_1981} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Davies}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {The {Jahn}-{Teller} effect and vibronic coupling at deep levels in diamond}},\ }\href {https://doi.org/10.1088/0034-4885/44/7/003} {\bibfield {journal} {\bibinfo {journal} {Rep. Prog. Phys.}\ }\textbf {\bibinfo {volume} {44}},\ \bibinfo {pages} {787} (\bibinfo {year} {1981})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ammerlaan}\ and\ \citenamefont {Burgemeister}(1981)}]{ammerlaan_reorientation_1981} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~A.~J.}\ \bibnamefont {Ammerlaan}}\ and\ \bibinfo {author} {\bibfnamefont {E.~A.}\ \bibnamefont {Burgemeister}},\ }\bibfield {title} {\bibinfo {title} {Reorientation of {Nitrogen} in {Type}-{$\mathrm{I}b$} {Diamond} by {Thermal} {Excitation} and {Tunneling}},\ }\href {https://doi.org/10.1103/PhysRevLett.47.954} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {47}},\ \bibinfo {pages} {954} (\bibinfo {year} {1981})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Larsen}\ and\ \citenamefont {Singel}(1993)}]{larsen_double_1993} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~G.}\ \bibnamefont {Larsen}}\ and\ \bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont {Singel}},\ }\bibfield {title} {\bibinfo {title} {Double electron–electron resonance spin–echo modulation: {Spectroscopic} measurement of electron spin pair separations in orientationally disordered solids},\ }\href {https://doi.org/10.1063/1.464916} {\bibfield {journal} {\bibinfo {journal} {J. Chem. 
Phys.}\ }\textbf {\bibinfo {volume} {98}},\ \bibinfo {pages} {5134} (\bibinfo {year} {1993})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pagliero}\ \emph {et~al.}(2018)\citenamefont {Pagliero}, \citenamefont {Rao}, \citenamefont {Zangara}, \citenamefont {Dhomkar}, \citenamefont {Wong}, \citenamefont {Abril}, \citenamefont {Aslam}, \citenamefont {Parker}, \citenamefont {King}, \citenamefont {Avalos}, \citenamefont {Ajoy}, \citenamefont {Wrachtrup}, \citenamefont {Pines},\ and\ \citenamefont {Meriles}}]{pagliero_multispin-assisted_2018} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Pagliero}}, \bibinfo {author} {\bibfnamefont {K.~R.~K.}\ \bibnamefont {Rao}}, \bibinfo {author} {\bibfnamefont {P.~R.}\ \bibnamefont {Zangara}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Dhomkar}}, \bibinfo {author} {\bibfnamefont {H.~H.}\ \bibnamefont {Wong}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Abril}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Aslam}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Parker}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {King}}, \bibinfo {author} {\bibfnamefont {C.~E.}\ \bibnamefont {Avalos}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Ajoy}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wrachtrup}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Pines}},\ and\ \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Meriles}},\ }\bibfield {title} {\bibinfo {title} {Multispin-assisted optical pumping of bulk {$^{13}\mathrm{C}$} nuclear spin polarization in diamond},\ }\href {https://doi.org/10.1103/PhysRevB.97.024422} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
B}\ }\textbf {\bibinfo {volume} {97}},\ \bibinfo {pages} {024422} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{Note3()}]{Note3} \BibitemOpen \bibinfo {note} {See Supplemental information}\BibitemShut {NoStop} \bibitem [{\citenamefont {Childress}\ and\ \citenamefont {Hanson}(2013)}]{childress_diamond_2013} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Childress}}\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Hanson}},\ }\bibfield {title} {{\selectlanguage {en}\bibinfo {title} {Diamond {NV} centers for quantum computing and quantum networks}},\ }\href {https://doi.org/10.1557/mrs.2013.20} {\bibfield {journal} {\bibinfo {journal} {MRS Bulletin}\ }\textbf {\bibinfo {volume} {38}},\ \bibinfo {pages} {134} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \end{thebibliography} \end{document}
\begin{document} \maketitle \begin{abstract} Weyl modules were originally defined for affine Lie algebras by Chari and Pressley in \cite{CP}. In this paper we extend the notion of Weyl modules for a Lie algebra ${\mathfrak{g}} \otimes A$, where ${\mathfrak{g}}$ is any Kac-Moody algebra and $A$ is any finitely generated commutative associative algebra with unit over ${\mathbb C}$, and prove a tensor product decomposition theorem generalizing \cite{CP}. \end{abstract} \section{Introduction} Let ${\mathfrak{g}}$ be a Kac-Moody Lie algebra and let ${\mathfrak{h}}$ be a Cartan subalgebra of ${\mathfrak{g}}$. Set ${\mathfrak{g}}' = [{\mathfrak{g}},{\mathfrak{g}}]$ and ${\mathfrak{h}}' = {\mathfrak{g}}' \cap {\mathfrak{h}}$. Let ${\mathfrak{h}}''$ be a vector subspace of ${\mathfrak{h}}$ such that ${\mathfrak{h}}' \oplus {\mathfrak{h}}'' = {\mathfrak{h}}$. Let $A$ be a finitely generated commutative associative algebra with unit over ${\mathbb C}$. Denote $\stackrel{\sim}{{\mathfrak{g}}} \,= {\mathfrak{g}}' \otimes A \oplus {\mathfrak{h}}''$ and let ${\mathfrak{g}} = N^{-} \oplus {\mathfrak{h}} \oplus N^{+}$ be a standard triangular decomposition into positive and negative root subspaces and a Cartan subalgebra. Let $\stackrel{\sim}{N}^- = N^{-} \otimes A, \, \stackrel{\sim}{N}^+ = {N}^+ \otimes A$ and $\stackrel{\sim}{{\mathfrak{h}}} = {\mathfrak{h}}' \otimes A \oplus {\mathfrak{h}}''$. Consider a linear map $\psi : \stackrel{\sim}{{\mathfrak{h}}} \rightarrow {\mathbb C}$. In \cite{CP} Chari and Pressley defined the Weyl modules for the loop algebras, which are nothing but the maximal integrable highest weight modules. Feigin and Loktev \cite{FL} generalized the notion of Weyl module by replacing the Laurent polynomial ring by any commutative associative algebra with unit and generalized the tensor decomposition theorem of \cite{CP}. Chari and Thang \cite{CTH} studied Weyl modules for the double affine Lie algebra.
In \cite{CFK}, a functorial approach was used to study Weyl modules associated with the Lie algebra $\fma \otimes A$, where $\fma$ is a finite dimensional simple Lie algebra and $A$ is a commutative algebra with unit over ${\mathbb C}$. Using this approach they \cite{CFK} defined a Weyl functor from the category of modules of the commutative associative algebra to the category of modules of the simple Lie algebra, and studied tensor product properties of this functor. Neher and Savage \cite{ENSA}, using generalized evaluation representations, discussed a more general case by replacing the finite dimensional simple Lie algebra with an infinite dimensional Lie algebra. Let $\tau = \fma \otimes A_n \oplus \Omega_{A_n}/dA_n$ be a toroidal algebra, where $\fma$ is a finite dimensional simple Lie algebra and $A_n = {\mathbb C}[t_1^{\pm},\cdots, t_n^{\pm}]$ is a Laurent polynomial ring in $n$ commuting variables (see \cite{E}). It is proved in \cite{ESTL} that any irreducible module with finite dimensional weight spaces of $\tau$ is in fact a module for ${\mathfrak{g}} \otimes A_{n-1}$, where ${\mathfrak{g}}$ is the affinization of $\fma$. Thus it is important to study ${\mathfrak{g}} \otimes A_{n-1}$-modules. Rao and Futorny \cite{EV} initiated the study of ${\mathfrak{g}} \otimes A_{n-1}$-modules in their recent work. In our paper we consider ${\mathfrak{g}} \otimes A$-modules, where ${\mathfrak{g}}$ is any Kac-Moody Lie algebra and $A$ is any finitely generated commutative associative algebra with unit over ${\mathbb C}$. Our work is a generalization of the tensor product results in \cite{CFK, FL, CP}. For a cofinite ideal $I$ of $A$ we define a module $M(\psi,I)$, and a Weyl module $W(\psi, I)$ of $\stackrel{\sim}{{\mathfrak{g}}}$ (Section \ref{s2}). The main result of the paper is the tensor product decomposition of $W(\psi, I)$, where $I$ is a finite intersection of maximal ideals. The paper is organised as follows.
We begin with preliminaries by stating some basic facts about Kac-Moody algebras and Weyl modules. In Section \ref{s1} we define the modules $M(\psi,I)$ over $\stackrel{\sim}{{\mathfrak{g}}}$ and show that they have finite dimensional weight spaces and prove a tensor decomposition theorem for them. Section \ref{s2} is devoted to the tensor decomposition theorem for the Weyl module $W(\psi,I)$ over $\stackrel{\sim}{{\mathfrak{g}}}$. \section{ PRELIMINARIES} Let $\fma$ be a finite dimensional simple Lie algebra of rank $r$ with a Cartan subalgebra $\fmh$. Let $ {\overset{\circ}{\Delta}}$ denote a root system of $\fma$ with respect to $\fmh$. Let ${\overset{\circ}{\Delta^+}}$ and ${\overset{\circ}{\Delta^-}}$ be the sets of positive and negative roots of $\fma$ respectively. Denote by $\alpha_1, \cdots , \alpha_r$ and $\alpha_1^{\vee},\cdots,\alpha_r^{\vee}$ the sets of simple roots and simple coroots of $\fma$. Let $\fma = {\mathfrak{n}}^{+} \oplus \fmh \oplus {\mathfrak{n}}^{-}$ be a triangular decomposition of $\fma$. Let $e_i$ and $f_i$ be the Chevalley generators of $\fma$. Let $\overset{\circ}{Q} = \oplus \, {\mathbb Z} \, \alpha_i$ and ${\overset{\circ}{P}} = \{\lambda \in \fmh^{\ast} : \lambda (\alpha_{i}^{\vee})\in {\mathbb Z} \}$ be the root lattice and the weight lattice of $\fma$ respectively. Set ${\overset{\circ}{P_+}} = \{\lambda \in \fmh^{\ast} : \lambda(\alpha_{i}^{\vee})\geq 0 \}$, the set of dominant integral weights of $\fma$. Recall that a $\fma$-module $V$ is said to be integrable if it is $\fmh$-diagonalisable and all the Chevalley generators $e_i$ and $f_i$, $1 \leq i \leq r$, act locally nilpotently on $V$. For a commutative associative algebra $A$ with unit, consider the Lie algebra $\fma \otimes A$. We recall the definition of a local Weyl module for $\fma \otimes A$ \cite{FL,CFK}.
\begin{dfn}\rm Let $\psi : \fmh \otimes A \rightarrow {\mathbb C}$ be a linear map such that $\psi \mid_{\fmh} = \lambda$, and let $I$ be a cofinite ideal of $A$. Then $W(\psi , I)$ is called a local Weyl module for $\fma \otimes A$ if there exists a nonzero $v \in W(\psi, I)$ such that\\ $U(\fma \otimes A)v = W(\psi, I), ({\mathfrak{n}}^{+} \otimes A)v = 0, (h \otimes 1)v = \lambda(h)v$\\ $\psi\mid_{\fmh \otimes I} = 0,{ (f_i \otimes 1) ^{\lambda(\alpha_{i}^{\vee})+1}}v = 0$, for $i = 1, \cdots, r .$ \end{dfn} It is shown in \cite{FL} (Proposition 4) that local Weyl modules exist and can be obtained as quotients of the global Weyl module. Then $\stackrel{\sim}{{\mathfrak{g}}}\, = \,{\mathfrak{g}}' \otimes A \oplus {\mathfrak{h}}'' $ is a Lie algebra with the following bracket operations: \begin{align*} [X \otimes a, Y\otimes b] &= [X,Y] \otimes ab ,\\ [h, X \otimes a] &= [h,X] \otimes a ,\\ [h,h'] &= 0 , \end{align*} where $X, Y \in {\mathfrak{g}}'$, $h, h' \in {\mathfrak{h}}''$ and $a, b \in A$. Let $\stackrel{\sim}{{\mathfrak{h}}} := {\mathfrak{h}}' \otimes A \oplus {\mathfrak{h}}''$ and let $\stackrel{\sim}{{\mathfrak{g}}}\, = \,\stackrel{\sim}{N}^+ \oplus \stackrel{\sim}{{\mathfrak{h}}} \oplus \stackrel{\sim}{N}^-$ be a triangular decomposition of $\stackrel{\sim}{{\mathfrak{g}}}$, where $\stackrel{\sim}{N}^+ = {N}^+ \otimes A$ and $\stackrel{\sim}{N}^- = {N}^- \otimes A$. Let $\psi : \stackrel{\sim}{{\mathfrak{h}}} \rightarrow {\mathbb C}$ be a linear map. \begin{dfn}\rm A module $V$ of $\stackrel{\sim}{{\mathfrak{g}}}$ is called a highest weight module (of highest weight $\psi$) if $V$ is generated by a highest weight vector ${v}$ such that \noindent (1) $\stackrel{\sim}{N}^+ v=0.$\\ (2) $h \ {v} =\psi (h) {v}$ for $h \in \ \stackrel{\sim}{{\mathfrak{h}}}, \psi \in \stackrel{\sim}{{\mathfrak{h}}}^{\ast}$.
\end{dfn} Let ${\mathbb C}$ be the one dimensional representation of $\stackrel{\sim}{N}^+ \oplus \stackrel{\sim}{{\mathfrak{h}}}$ where $\stackrel{\sim}{N}^+ $ acts trivially and $\stackrel{\sim}{{\mathfrak{h}}}$ acts via $h.1 = \psi(h) 1$ for all $h \in \stackrel{\sim}{{\mathfrak{h}}}$. Define the induced module $$\displaystyle{ M(\psi) = U(\stackrel{\sim}{{\mathfrak{g}}}) \displaystyle{\bigotimes_{U(\stackrel{\sim}{N}^+ \oplus \stackrel{\sim}{{\mathfrak{h}}})}} {\mathbb C}} \,.$$ Then $M(\psi)$ is a highest weight module and has a unique irreducible quotient denoted by $V(\psi)$. \section{The modules $M(\psi, I)$ and their tensor decomposition} \label{s1} Let $\alpha_1, \ldots, \alpha_l$ be a set of simple roots of ${\mathfrak{g}}$ and $\Delta^+$ a set of corresponding positive roots. Let $Q = \displaystyle{\bigoplus_{i= 1}^{l}}{{\mathbb Z} \alpha_i}$ be the root lattice of ${\mathfrak{g}}$ and $Q_{+} = \displaystyle{\bigoplus_{i= 1}^{l}}{{\mathbb Z}_{\geq 0} \alpha_i}.$ Let $\lambda \in {\mathfrak{h}}^{\ast} $ be a dominant integral weight of ${\mathfrak{g}}$. Consider $\alpha \in \Delta^+$ and assume $\alpha =\sum n_i \alpha_i$. Define the usual ordering on $\Delta^+$ by $\alpha \leq \beta$ for $\alpha, \beta \in \Delta^+$ if $\beta - \alpha \in Q_{+}$. Let $I$ be a cofinite ideal of $A$. Let $\{ I_\alpha, \alpha \in \Delta^+\}$ be a sequence of cofinite ideals of $A$ such that $I_{\alpha} \subseteq I$ and \\ (1) $\alpha \leq \beta \Rightarrow I_\beta \subseteq I_\alpha$.\\ (2) $I_\alpha I_\beta \subseteq I_{\alpha+\beta} $ if $\alpha +\beta \in \Delta^+$. For $\beta \in \Delta^+$ let $X_{-\beta}$ be a root vector corresponding to the root $-\beta$. For a cofinite ideal $I$ of $A$ set $X_{-\beta} I = X_{-\beta} \otimes I$.
Let $\psi :\stackrel{\sim}{{\mathfrak{h}}} \rightarrow {\mathbb C}$ be a linear map such that $\psi\mid_{{\mathfrak{h}}' \otimes I} =0 $, $\psi\mid_{{\mathfrak{h}}}=\lambda \in {\mathfrak{h}}^*$ and $\lambda$ is dominant integral. \begin{dfn}\rm \label{df1} We will denote by $M(\psi, \{I_\alpha, \alpha \in \Delta^+\})$ the highest weight $\stackrel{\sim}{{\mathfrak{g}}}$-module with highest weight $\psi$ and highest weight vector $v$ such that $(X_{-\beta} I_\beta)v=0$ for all $\beta \in \Delta^+$. \end{dfn} We will show now the existence of the modules $M(\psi, \{I_\alpha, \alpha \in \Delta^+\})$. Let $M(\psi)$ be the Verma module with a highest weight $\psi$ and a highest weight vector $v$. We will prove that the module generated by $X_{-\alpha}I_{\alpha}v$ is a proper submodule of $M(\psi)$ for all $\alpha \in \Delta^{+}$. We use induction on the height of $\alpha$. First recall that there is a cofinite ideal $I$ such that $\psi \mid_{{\mathfrak{h}}^{\prime} \otimes I} = 0$ and by definition $I_{\alpha} \subseteq I$ and $I_{\alpha} \subseteq I_{\beta}$ if $\beta \leq \alpha$. Let us consider $X_{-\alpha_i} I_{\alpha_{i}}v$ for a simple root $\alpha_i$. We will prove that $X_{-\alpha_i} a v$ generates a proper submodule of $M(\psi)$ where $a \in I_{\alpha_{i}}$. Indeed we have $X_{\alpha} b X_{-\alpha_i} a v = 0$ for any simple root $\alpha \neq \alpha_i$ and $b \in A$. Let $N_i$ be the ${\mathfrak{g}}$-submodule generated by $X_{-\alpha_i}I_{\alpha_i}v$. Then $X_{\alpha_i}b X_{-\alpha_i} a v = h_{i} ba v = 0$ as $I_{\alpha_{i}} \subseteq I$ and $\psi \mid_{{\mathfrak{h}}^{\prime} \otimes I} = 0$. So $M(\psi)/N_i \neq 0$ and the induction starts. Let $\beta \in \Delta^{+}$ and $\mathrm{ht}(\beta) = n$. Let $N $ be the submodule generated by $\sum_{\nu \in \Delta^{+}}{X_{-\nu}I_{\nu}}v$ where $\mathrm{ht}(\nu) < n$.
Then by induction, $N$ is a proper submodule of $M(\psi)$. Now consider $X_{\alpha_{i}}b X_{-\beta} a v$ where $\alpha_i$ is a simple root, $b \in A$ and $a \in I_{\beta}$. But $X_{\alpha_{i}}b X_{-\beta} a v = X_{-\beta + \alpha_i} ba v$. Since $\mathrm{ht}(\beta - \alpha_i) < n$ and $I_{\beta}$ is an ideal of $A$, we have $ba \in I_{\beta} \subseteq I_{\beta - \alpha_i}$. Hence, we see that $X_{-\beta + \alpha_i} ba v \in N$. Therefore, $ X_{-\beta} a v$ is a highest weight vector of $M(\psi)/N$, and hence generates a proper submodule. \begin{lmma} \label{l1} Let $\gamma_1, \ldots, \gamma_n, \beta \in \Delta^+$. Then $ B= X_{-\beta} I^{n+1}_\beta X_{-\gamma_1} a_1\ldots X_{-\gamma_n} a_n v=0$, for $ a_1, \ldots, a_n \in A$ and each $\gamma_i \leq \beta$. \end{lmma} \begin{proof} We prove the statement by induction on $n$. For $ n=0$ the lemma follows from the definition of the module. We have $$B=X_{-\gamma_1} a_1 X_{-\beta} I^{n+1}_\beta X_{-\gamma_2} a_2 \ldots X_{-\gamma_n} a_n {v} + [X_{-\beta}, X_{-\gamma_1}] I^{n+1}_\beta X_{-\gamma_2}a_2 \ldots X_{-\gamma_n} a_n {v}\,.$$ The first term is zero by induction on $n$. Repeating the same argument $n$ times for the second term we end up with: \\ $B= [\ldots [X_{-\beta}, X_{-\gamma_{1}}], X_{-\gamma_2}], \ldots, X_{-\gamma_n}]I_\beta^{n+1} {v}$. Assume $$ [\ldots [X_{-\beta}, X_{-\gamma_{1}}], X_{-\gamma_2}], \ldots, X_{-\gamma_n}]\neq 0.$$ Then it is a nonzero multiple of $ X_{-(\beta +\sum\gamma_i)}$ and $ \beta +\sum\gamma_i$ is a root. As each $\gamma_i \leq \beta$ we have $I_\beta \subseteq I_{\gamma_i}$. Thus $$ I^{n+1}_\beta \subseteq I_\beta I_{\gamma_1} I_{\gamma_2}\ldots I_{\gamma_n} \subseteq I_{\beta +\sum \gamma_i}. $$ Since $$ X_{-(\beta +\sum\gamma_i)} I_{\beta +\sum \gamma_i} v=0, $$ it completes the proof of the lemma. \end{proof} \begin{ppsn} $M(\psi, \{I_\alpha, \alpha \in \Delta^+\})$ has finite dimensional weight spaces with respect to ${\mathfrak{h}}$.
\end{ppsn} \begin{proof} Follows from Lemma \ref{l1}. \end{proof} We now construct a special sequence of cofinite ideals $I_\alpha, \alpha \in \Delta^+$. Let $I$ be any cofinite ideal of $A$. Let $\psi: \stackrel{\sim}{{\mathfrak{h}}} \, \rightarrow {\mathbb C}$ be a linear map such that $\psi \mid_{{\mathfrak{h}}' \otimes I}=0$ and $\psi \mid_{{\mathfrak{h}}} =\lambda$, a dominant integral weight. Recall that for $\alpha \in \Delta^{+}$ with $\alpha = \sum_{i = 1}^{l} {m_i \alpha_i}$, we define $N_{\lambda, \alpha} = \sum_{i = 1}^{l}{m_i \lambda(\alpha_{i}^{\vee})}.$ Let $I_\alpha =I^{N_{\lambda, \alpha}}$. Now if $\alpha\leq\beta$, then $N_{\lambda, \alpha} \leq N_{\lambda, \beta}$ and hence $I_\beta \subseteq I_\alpha$. Suppose $\alpha, \beta \in \Delta^+$ are such that $\alpha +\beta \in \Delta^+$. Then clearly $I_\alpha I_\beta =I_{\alpha+\beta}$. For this special sequence of ideals $I_{\alpha}$, define $M(\psi, I):= M(\psi, \{I_\alpha, \alpha \in \Delta^+\})$. Let $I$ and $J$ be coprime cofinite ideals of $A$. Consider linear maps $\psi_1, \psi_2:\stackrel{\sim}{{\mathfrak{h}}} \rightarrow {\mathbb C}$ such that $\psi_1 \mid_{{\mathfrak{h}}'\otimes I} =0, \ \psi_2\mid_{{\mathfrak{h}}'\otimes J} = 0$, $\psi_1 \mid_{{\mathfrak{h}}} =\lambda$ and $\psi_2 \mid_{{\mathfrak{h}}}=\mu$. Further assume that $\lambda$ and $\mu$ are dominant integral weights. Let $M(\psi_1, I)$ and $M(\psi_2, J)$ be the corresponding highest weight modules. Now define the following new sequence of cofinite ideals of $A$. Let $K_\alpha =I^{N_{\lambda, \alpha}} \cap J^{N_{\mu, \alpha}} \subseteq I \cap J$.\\ It is easy to check that:\\ (1) If $\alpha\leq \beta, \ \alpha, \beta \in \Delta^+$ then $K_\beta \subseteq K_\alpha$.\\ (2) $K_\alpha \ K_\beta \subseteq K_{\alpha+\beta}$ if $\alpha, \beta, \alpha +\beta \in \Delta^+$.
Let $\psi =\psi_1 +\psi_2$, so that $\psi\mid_{{\mathfrak{h}}' \otimes (I \cap J)} =0, \ \psi\mid_{{\mathfrak{h}}} =\lambda +\mu$. Then we have \begin{thm} \label{T1} As a $\widetilde{{\mathfrak{g}}}$-module $$ M(\psi_1 +\psi_2, \{K_\alpha, \alpha \in \Delta^+\}) \cong M(\psi_1, I) \otimes M(\psi_2, J) . $$ \end{thm} The following is standard but we include the proof for convenience of the reader. \begin{lmma} \label{l2} Let $I$ and $J$ be coprime cofinite ideals of $A$. Then\\ a) $A=I^n +J^m$, for all $n,m \in {\mathbb Z}_{\geq 1}$.\\ b) $A/(I^n \cap J^m) \cong$ $A/I^{n} \oplus A/J^{m} .$ \end{lmma} \begin{proof} $a)$ As $I$ and $J$ are coprime there exist $f \in I$ and $g \in J$ such that $f + g = 1$. Considering the expression $(f+g)^{m+n+1} = 1$, we see that the left hand side is the sum of an element of $I^{n}$ and an element of $J^{m}$. $b)$ is clear from $a)$ and the Chinese remainder theorem. \end{proof} Assume $$ \begin{array}{lll} \dim A/I^{N_{\lambda,\alpha}} &=& m_\alpha\\ \dim A/J^{N_{\mu,\alpha}} &=&n_\alpha\\ \dim A/I^{N_{\lambda,\alpha}} \cap J^{N_{\mu,\alpha}} &=&k_\alpha \end{array} $$ then $$ m_\alpha +n_\alpha =k_\alpha $$ by the above lemma. \noindent \textbf{Proof of Theorem \ref{T1}.} Let $a_{1,\alpha}, \ldots, a_{m_\alpha,\alpha}$ be a ${\mathbb C}$-basis of $A/I^{N_{\lambda,\alpha}}$. Let $a^1_\alpha, a^2_{\alpha}, \ldots,$ be a ${\mathbb C}$-basis of $I^{N_{\lambda,\alpha}}$. Then clearly $\stackrel{\sim}{N}^{-}$ has the following ${\mathbb C}$-basis: $$ \begin{array}{l} \{X_{-\alpha} a_{i,\alpha}, 1 \leq i \leq m_\alpha, \alpha \in \Delta^+\} \cup\\ \{ X_{-\alpha} a^i_\alpha, i \in {\mathbb N}, \ \alpha \in \Delta^+\}. \end{array} $$ Let $U_\lambda, \ U^\lambda$ be the subspaces of $U(\stackrel{\sim}{N}^-)$ spanned by the ordered products of the first set and the second set respectively.
Then by the PBW theorem we have $ U(\stackrel{\sim}{N}^-) = U_\lambda U^\lambda. $ Let $$ \begin{array}{lll} M &= &M(\psi_1 +\psi_2, \ \{K_\alpha, \alpha \in \Delta^+\}),\\ M_1 &=& M(\psi_1, I),\\ M_2&=& M(\psi_2, J).\\ \end{array} $$ It is easy to see that $M_1=U_\lambda U^\lambda v =U_\lambda v $ as $U^\lambda v = {\mathbb C} \,v$. Since $M_1$ has finite dimensional weight subspaces we can define the character of $M_1$ as follows: $$ \mathrm{Ch}\,M_1 =\sum_{\eta\in Q^+} \dim M_{1, \ \lambda-\eta}\, e^{-(\lambda-\eta)}. $$ Let $l_\alpha$ denote the multiplicity of the root $\alpha$.\\ It is standard that \\ $\dim M_{1,\lambda-\eta} =K^1_\eta$, where $K^1_\eta$ is given by $$ \prod_{\alpha \in \Delta^+}(1-e^{-\alpha})^{-m_\alpha l_\alpha} =\sum_{\eta \in Q^+} K^1_\eta e^{-\eta}. $$ Also we have that $\dim M_{2,\mu-\eta} =K^2_\eta$, where $$ \prod_{\alpha\in \Delta^+} (1-e^{-\alpha})^{-n_\alpha l_\alpha} =\sum_{\eta\in Q^+} K^2_\eta e^{-\eta}, $$ and $\dim M_{\lambda +\mu-\eta} =K_\eta$, where $K_\eta$ is given by $$ \prod_{\alpha \in \Delta^+}(1-e^{-\alpha})^{-k_\alpha \ l_\alpha} =\sum_{\eta\in Q^+} K_\eta e^{-\eta} $$ (recall that $k_\alpha =m_\alpha +n_\alpha$). From the above calculations we see that $$ \dim \ M_{\lambda+\mu-\eta} =\dim(M_1\otimes M_2)_{\lambda +\mu-\eta}. $$ Thus to prove the theorem it is sufficient to show that there is a surjective $\widetilde{{\mathfrak{g}}}$-homomorphism from $M$ to $M_1 \otimes M_2$. Let $v_1$ and $v_2$ be the highest weight vectors of $M_1$ and $M_2$ respectively. Let $U$ be the $\widetilde{{\mathfrak{g}}}$-submodule of $M_1 \otimes M_2$ generated by $v_1\otimes v_2$. It is easy to check that $(\psi_1 +\psi_2) ({\mathfrak{h}}' \otimes (I \cap J))=0$. Recall that $K_\alpha =I^{N_{\lambda,\alpha}} \cap J^{N_{\mu,\alpha}}$. We have $X_{-\alpha} K_\alpha (v_1\otimes v_2)=0$ which immediately implies that $U$ is a quotient of $M$.
Hence to complete the proof of the theorem it is sufficient to prove that $ U \simeq M_1 \otimes M_2$. Clearly, $M_1 \otimes M_2$ is the linear span of vectors of the form $w_1 \otimes w_2$ where $$ \begin{array}{l} w_1 = X_{-\lambda_1} a_1\ldots X_{-\lambda_n} a_n v_1,\\ w_2 =X_{-\beta_1} b_1\ldots X_{-\beta_m}b_m v_2 \,. \end{array} $$ Let $\beta\in \Delta^+$. By the definition of $I_{\alpha}$ and the argument given in the proof of Lemma \ref{l1} it is easy to see that there exists $N \gg 0$ such that $$ X_{-\beta} I^N w_1=0, \ X_{-\beta} J^N w_2 =0 .\leqno{(a)} $$ Now recall that $A=I^N +J^N$. Let $1= f+g, \ f\in I^N, \ g\in J^N$. For any $h \in A$ write $ h= fh +gh$. We will use induction on $m+n$. Consider $$ X_{-\beta} fh (w_1 \otimes w_2) =w_1 \otimes X_{-\beta} fh \ w_2 \quad \text{(by (a))} $$ $$ \begin{array}{l} =w_1 \otimes X_{-\beta} (h-gh)w_2\\ =w_1 \otimes X_{-\beta} h \ w_2 \quad \text{(by (a))}. \end{array} $$ As $X_{-\beta} \ fh(w_1\otimes w_2) \in U$ (by induction $w_1 \otimes w_2 \in U$), we conclude that $w_1\otimes X_{-\beta} h \ w_2 \in U$. Similarly we have $X_{-\beta} hw_1\otimes \ w_2 \in U$. It easily follows now that $U\simeq M_1\otimes M_2$. This completes the proof of the theorem. \section{Weyl modules for loop Kac-Moody algebras and their tensor decomposition}\label{s2} In this section we define maximal integrable highest weight modules for $\widetilde{{\mathfrak{g}}}$ and prove a tensor product theorem for them. Recall that $M(\psi, I)$ is a highest weight module with a highest weight $\psi$ and a highest weight vector $v$. Further $\psi\mid_{{\mathfrak{h}}' \otimes I}=0$ and $\psi\mid_{{\mathfrak{h}}} =\lambda$ a dominant integral weight. Let $\alpha_1,\ldots, \alpha_l$ be the simple roots and $\alpha^{\vee}_1, \ldots, \alpha^{\vee}_l$ be the simple coroots. Let $\{ X_{\alpha_i}, \alpha^{\vee}_{i}, X_{-\alpha_i}\}$ be an $\mathfrak{sl}_2$ copy corresponding to the simple root $\alpha_i$.
\begin{dfn}\rm \label{dw} Let $W$ be a highest weight $\widetilde{{\mathfrak{g}}}$-module with a highest weight $\psi$ and a highest weight vector $v$ such that \\ (1) $\psi\mid_{{\mathfrak{h}}' \otimes I}=0$,\\ (2) $\psi\mid_{{\mathfrak{h}}} =\lambda$, \\ (3) $X_{-\alpha_i}^{\lambda(\alpha_i^{\vee})+1} v=0$ for $i=1,2, \ldots, l$. \end{dfn} It follows immediately that $W$ is an integrable ${\mathfrak{g}}$-module (see \cite{K}). We will prove below that such a module exists and has finite dimensional weight spaces. By the result of \cite{FL} (see the proof of Propositions 6 and 16 of \cite{FL}) it follows that $$ X_{-\alpha_i} I^{\lambda(\alpha_i^{\vee})} \, v=0. $$ Let $\alpha \in \Delta^+$ and $X_{-\alpha} =[X_{-\alpha_{i_1}}, [\ldots [X_{-\alpha_{i_{n-1}}}, X_{-\alpha_{i_n}}]]],$ where $\sum \alpha_{i_j} =\alpha$. It is easy to check that $X_{-\alpha} I^{N_{\lambda,\alpha}} v=0$. Recall that $N_{\lambda, \alpha} =\sum n_i \lambda (\alpha_i^{\vee})$ if $\alpha =\sum n_i \alpha_i$. Thus $W$ is an integrable quotient of $M(\psi, I)$. Denote by $W(\psi, I)$ the maximal such quotient of $M(\psi, I)$ in the sense that any integrable quotient of $M(\psi, I)$ is a quotient of $W(\psi, I)$. In particular, $W(\psi, I)$ has finite dimensional weight spaces. We will prove at the end that for a cofinite ideal $I$ which is a finite intersection of maximal ideals, $W(\psi, I)$ is non-zero by explicitly constructing its irreducible quotient. We will call $W(\psi, I)$ the \emph{Weyl module} associated with $\psi$ and $I$. Let $I$ and $J$ be coprime cofinite ideals of $A$. \begin{thm} \label{tw} $W(\psi_1 +\psi_2, \ I \cap J) \cong W(\psi_1, I) \otimes W(\psi_2, J)$ as $\widetilde{{\mathfrak{g}}}$-modules. \end{thm} To prove the above theorem we need the following lemma. \begin{lmma} $W(\psi_1, I) \otimes W(\psi_2, J)$ is a quotient of $W(\psi_1 +\psi_2, \ I \cap J)$.
\end{lmma} \begin{proof} Let $v_1, v_2$ be highest weight vectors of $W(\psi_1, I)$ and $W(\psi_2, J)$ respectively. As in the earlier argument we can prove that $W(\psi_1, I) \otimes W(\psi_2, J)$ is a cyclic module generated by $v_1 \otimes v_2$. Recall that $W(\psi_1 +\psi_2, \ I \cap J)$ is a maximal integrable quotient of $M(\psi_1 + \psi_2, I \cap J)$. But $W(\psi_1, I) \otimes W(\psi_2, J)$ is an integrable quotient of $M(\psi_1+ \psi_2, I \cap J)$. Hence $W(\psi_1, I) \otimes W( \psi_2, J)$ is a quotient of $W(\psi_1 + \psi_2, I \cap J)$. \end{proof} \emph{Proof of Theorem \ref{tw}}: In view of the above lemma, it is sufficient to prove that $W(\psi_1 + \psi_2, I \cap J)$ is a quotient of $W(\psi_1, I) \otimes W(\psi_2, J)$. Let $K_1$ and $K_2$ be the kernels of the maps $M(\psi_1, I) \to W(\psi_1, I)$ and $M(\psi_2, J) \to W(\psi_2, J)$ respectively. Then it is a standard fact that $\tilde{K} = K_1 \otimes M(\psi_2, J) + M(\psi_1, I) \otimes K_2$ is the kernel of the map $$M(\psi_1, I) \otimes M (\psi_2, J) \to W(\psi_1, I) \otimes W(\psi_2, J).$$ Let $V$ be any integrable quotient of $M(\psi_1, I) \otimes M(\psi_2, J)$ and $K$ be the kernel of the map $M(\psi_1, I) \otimes M(\psi_2, J) \to V$.\\ {\bf{Claim}} : $\tilde{K} \ \subseteq K$. This claim proves that $V$ is a quotient of $W( \psi_1, I) \otimes W(\psi_2, J)$. In particular, $W(\psi_1 + \psi_2, I \cap J)$ is a quotient of $ W(\psi_1, I) \otimes W(\psi_2, J)$ which completes the proof of the theorem.\\ {\bf{Proof of the claim}} : Since $V$ is ${\mathfrak{g}}$-integrable, it follows that the set of weights of $V$ is $W$-invariant and is contained in $\lambda + \mu - Q^+$. Here $Q^+$ is the monoid generated by the simple roots and $W$ is the Weyl group corresponding to ${\mathfrak{g}}$. Let $m = (\lambda + \mu )(\alpha_i^{\vee}) + 1$. Then $(X_{-\alpha_i} f)^m (v_1 \otimes v_2) = 0$ in $V$, $\forall \, f \in A$.
Indeed, if this element is not zero then $\lambda + \mu - m\alpha_i$ is a weight of $V$ implying that $\lambda + \mu + \alpha_i$ is also a weight by the $W$-invariance property of weights. But this is a contradiction. Let now $N = \max\{(\lambda + \mu)(\alpha_i^{\vee})+1 : 1 \leq i \leq l\}$. As $I^{N} + J^{N} = A$ by Lemma \ref{l2}, choose $f \in I^{N}$ and $g \in J^{N} $ such that $f + g = 1$. Consider \begin{align*} B &= (X_{- \alpha_i} f)^N (v_1 \otimes v_2)\\ & ={\displaystyle{\sum_{k_1 + k_2 = N}}} C_i(X_{-\alpha_i} f)^{k_1} v_1 \otimes (X_{-\alpha_i}f)^{k_2} v_2\in K, \end{align*} with some constants $C_i$. Since $(X_{-\alpha_i} f) v_1 = 0$, it follows that $B = v_1 \otimes (X_{-\alpha_i} f)^N v_2 \in K$ and $B = v_1 \otimes (X_{-\alpha_i} (1 - g))^N v_2 \in K.$ But $(X_{-\alpha_i} g) v_2 = 0$. Hence $v_1 \otimes X^N_{-\alpha_i} v_2 \in K \,.$ Let $n_0$ be the least positive integer such that $v_1 \otimes X_{-\alpha_i}^{n_0} v_2 \in K$. But then $$X_{\alpha_i}(v_1 \otimes X_{-\alpha_i}^{n_0} v_2 ) = v_1 \otimes X_{\alpha_i}X_{-\alpha_i}^{n_0} v_2 = (n_0(\gamma_i - n_0 + 1)) v_1 \otimes X_{-\alpha_i}^{n_0-1} v_2 \in K,$$ where $\gamma_i = \mu (\alpha_i^{\vee})$. By the minimality of $n_0$ it follows that $\gamma_i + 1= n_0$. Thus we have proved that $v_1 \otimes X_{-\alpha_i}^{\mu(\alpha_i^{\vee})+1} v_2 \in K , \,\,\forall \,\,i \,.$ Similarly we can prove that $X_{-\alpha_i}^{\lambda(\alpha_i^{\vee})+1} v_1 \otimes v_2 \in K.$ Now by the earlier argument we can conclude that $\tilde{K} \subseteq K$. This completes the proof of the claim. \begin{crlre} \label{max} Let $I$ be a cofinite ideal of $A$ such that $I = {\displaystyle{\bigcap_{i=1}^k}} {\mathfrak{m}}_i$, where the ${\mathfrak{m}}_i$, $1 \leq i \leq k$, are distinct maximal ideals of $A$.
Let $\psi_1, \ldots, \psi_k$ be linear maps from $\widetilde{{\mathfrak{h}}} \to {\mathbb C}$ such that $\psi_i\mid_{{\mathfrak{h}}' \otimes {\mathfrak{m}}_i} = 0$ and $\psi_i\mid_{{\mathfrak{h}}} = \lambda_i$ a dominant integral weight. Put $\psi = {\displaystyle{\sum_{i=1}^{k}}}\psi_i$ and $\lambda = \sum \lambda_i$. Then $$W(\psi, I) \cong {\displaystyle{\bigotimes_{i=1}^k}}W(\psi_i, {\mathfrak{m}}_i).$$ \end{crlre} {\bf{Remark}}: See \cite{CFK} for a similar tensor decomposition theorem for the local Weyl module for ${\mathfrak{g}} \otimes A$, where $A$ is any commutative associative algebra with unit. {\bf{Remark}}: The module $W(\psi, I)$ is $\widehat{{\mathfrak{g}}}$-integrable. In fact, $(X^{\lambda(\alpha_i^{\vee})+1}_{-\alpha_i} \otimes f) v = 0$ for $\alpha_i$ simple and $f \in A$. Indeed, suppose it is non-zero. Then $\lambda - (\lambda (\alpha_i^{\vee}) + 1)\alpha_i$ is a weight of $W(\psi, I)$. Since the weights are $W$-invariant it follows that $\lambda +\alpha_i$ is a weight, which is impossible. We now construct irreducible quotients of Weyl modules, hence proving their existence. Let $I$ be a cofinite ideal of $A$ such that $\displaystyle{I = \bigcap_{i = 1}^{p}{{\mathfrak{m}}_{i}}}$, where the ${\mathfrak{m}}_i$ are distinct maximal ideals of $A$. Now as $A$ is finitely generated over ${\mathbb C}$, $A/{\mathfrak{m}}_i \cong {\mathbb C}$ for $1 \leq i \leq p$. So by the Chinese remainder theorem, there is a surjective homomorphism from $A$ to $\displaystyle{\bigoplus_{i=1}^{p}{A/{\mathfrak{m}}_i}}$.
Hence we have a surjective homomorphism $\Phi : {\mathfrak{g}}' \otimes A \rightarrow \displaystyle{\bigoplus_{i = 1}^{p}{{\mathfrak{g}}' \otimes A/{\mathfrak{m}}_i}} \cong {\mathfrak{g}}'_p = {\mathfrak{g}}' \oplus \cdots \oplus {\mathfrak{g}}'$ ($p$-times) given by $\Phi(x \otimes a) = (a_1 x,a_2 x, \cdots,a_p x)$, where $(a_1,a_2, \cdots, a_p) \in {\mathbb C}^{p}$ is the image of $a$ under the map $A \rightarrow \bigoplus_{i=1}^{p}{A/{\mathfrak{m}}_i} \rightarrow {\mathbb C}^{p}$. Now for $1 \leq i \leq p$, let $\psi_i$ be a linear map from $\widetilde{{\mathfrak{h}}}$ to ${\mathbb C}$ such that $\psi_i\mid_{{\mathfrak{h}}} = \lambda_i$ where $\lambda_i \in P^{+}$. Then as ${\displaystyle{\bigotimes_{i=1}^p}} V(\lambda_i)$ is an irreducible integrable module for ${\mathfrak{g}}_p'$, it is also an irreducible ${\mathfrak{g}}' \otimes A$-module via $\Phi$ and hence also for $\stackrel{\sim}{{\mathfrak{g}}}$ (${\mathfrak{h}}''$ acts on the tensor product via $\psi$), and the vector $v_{\lambda_1}\otimes \cdots \otimes v_{\lambda_p}$ and $\psi = \psi_1 + \cdots + \psi_p$ satisfy the conditions of Definition \ref{dw} with $\displaystyle{I = \bigcap_{i = 1}^{p}{{\mathfrak{m}}_{i}}}$. {\bf {Open Problem}} : Compute the character of $W(\psi, I)$, which is $W$-invariant. By Corollary \ref{max} it is sufficient to compute the character of $W(\psi_i, {\mathfrak{m}}_i)$, where ${\mathfrak{m}}_i$ is a maximal ideal. \nocite{*} School of Mathematics, Tata Institute of Fundamental Research, Homi Bhabha Road, Mumbai 400005, India.\\ email: [email protected] Instituto de Matem\'atica e Estat\'{\i}stica, Universidade de S\~ao Paulo, S\~ao Paulo, Brasil.\\ email: [email protected] School of Mathematics, Tata Institute of Fundamental Research, Homi Bhabha Road, Mumbai 400005, India. email: [email protected] \end{document}
\begin{document} \title{An odd categorification of $U_q(\sltwo)$} \begin{abstract} We define a 2-category that categorifies the covering Kac-Moody algebra for $\mathfrak{sl}_2$ introduced by Clark and Wang. This categorification forms the structure of a super-2-category as formulated by Kang, Kashiwara, and Oh. The super-2-category structure introduces a ${\mathbbm Z}\times{\mathbbm Z}_{2}$-grading giving its Grothendieck group the structure of a free module over the group algebra of ${\mathbbm Z}\times{\mathbbm Z}_{2}$. By specializing the ${\mathbbm Z}_{2}$-action to $+1$ or to $-1$, the construction specializes to an ``odd'' categorification of $\mathfrak{sl}_2$ and to a supercategorification of $\mathfrak{osp}_{1|2}$, respectively. \end{abstract} \setcounter{tocdepth}{1} \tableofcontents \section{Introduction} Categorical representation theory studies actions of Lie algebras and their associated quantum groups on categories, with generators acting by functors, and equations between elements lifting to isomorphisms of functors. Natural transformations between these functors exemplify the higher structure in 2-representation theory that cannot be accessed in traditional representation theory. Solving a conjecture of Igor Frenkel, the second author introduced a 2-category $U_qcev=U_qcev(\mathfrak{sl}_2)$ that categorifies the integral idempotented version $_{{\mathbbm A}c}U_qdot$ of the quantum enveloping algebra of $\mathfrak{sl}_2$~\cite{Lau1,Lau4}. This 2-category governs the higher structure in categorical actions of $U_qdot=U_qdot(\mathfrak{sl}_2)$. The categorification $U_qcev$ of $U_qdot$ is ubiquitous in 2-representation theory. In many of the known categorical representations of $U_qdot$ one can identify the higher structure needed to lift these actions to full 2-representations of the 2-category $U_qcev$.
Examples of such 2-representations include modules over cyclotomic KLR-algebras~\cite{KK,Kash,CL,Web}, blocks of parabolic category $\mathcal{O}$~\cite{CR,FKS,Sussan,HS}, foam categories~\cite{Mac,MPT,LQR}, and derived categories of coherent sheaves on cotangent bundles to Grassmannians~\cite{CKL2,CKL3,CKL4,CL}. This 2-category also leads to a categorification of quantum $\mathfrak{sl}_2$ at prime roots of unity ~\cite{KQ,EQ} using the theory of Hopfological algebra introduced by Khovanov~\cite{KhHopf} and further developed by Qi in \cite{QYHopf}. The 2-category $U_qcev$ categorifying quantum $\mathfrak{sl}_2$ also plays a fundamental role in link homology theories. One of the original motivations for categorifying quantum groups was to provide a unified representation theoretic explanation of the link homology theories that categorify various quantum link invariants. Various steps in this direction have already been achieved~\cite{Web2,Cautis,LQR}. Khovanov homology is the simplest of these link homology theories, categorifying a certain normalization of the Jones polynomial~\cite{Kh1,Kh2}. Surrounding Khovanov homology is an intricate system of related combinatorial and geometric ideas. Everything from extended 2-dimensional TQFTs~\cite{Kh1,Kh2,LP3,Cap4,CMW}, planar algebras~\cite{BN1,BN2}, category $\mathcal{O}$~\cite{Strop1,Strop2,BrSt3,BFK}, coherent sheaves on quiver varieties~\cite{CK01}, matrix factorizations~\cite{KhR,KhR2}, homological mirror symmetry~\cite{SeSm,CK01}, arc algebras~\cite{Kh2,ChK,BrSt1,BrSt2,BrSt3,BrSt4}, Springer varieties~\cite{KhSp,Strop1,SW}, and 5-dimensional gauge theories~\cite{Witten,Witten2} appear in descriptions of Khovanov homology. The categorification $U_qcev(\mathfrak{sl}_2)$ of the idempotent version $U_qdot$ of the quantum enveloping algebra of $\mathfrak{sl}_2$ demonstrates commonalities between these approaches revealed in terms of higher representation theory. 
Given the many connections between Khovanov homology and the sophisticated structures described above, it is surprising to discover that there exists a distinct categorification of the Jones polynomial. Ozsv\'{a}th, Rasmussen, and Szab\'{o} found an \emph{odd} analogue of Khovanov homology~\cite{ORS}. This odd homology theory for links agrees with the original Khovanov homology when coefficients are taken modulo 2. Both of these theories categorify the Jones polynomial, and results of Shumakovitch~\cite{Shum} show that these categorified link invariants are not equivalent. Both can distinguish knots that are indistinguishable in the other theory. Motivated by the problem of defining odd categorified quantum groups to provide a higher representation theoretic explanation for odd Khovanov homology, the authors in collaboration with Mikhail Khovanov introduced an odd analogue of the nilHecke algebra. The nilHecke algebra plays a central role in the theory of categorified quantum groups, giving rise to an integral categorification of the negative half of $U_q(\mathfrak{sl}_2)$~\cite{Lau1,KL3,Rou2}. In the categorification $U_qcev$ of the entire quantum group $U_qdot$, the nilHecke algebra describes 2-endomorphisms of 1-morphisms $\mathcal{F}^n1bb_{\lambda}$, respectively $\mathcal{E}^n1bb_{\lambda}$. This algebra is also closely connected to the geometry of flag varieties and the combinatorics of symmetric functions. The purpose of this article is to extend the categorification of the negative half of $U_q(\mathfrak{sl}_2)$ via the odd nilHecke algebra to an odd categorification of all of $U_qdot(\mathfrak{sl}_2)$.
Their quiver Hecke superalgebras become isomorphic to affine Hecke-Clifford superalgebras or affine Sergeev superalgebras after a suitable completion, and the $\mathfrak{sl}_2$ case of their construction is isomorphic to the odd nilHecke algebra. Cyclotomic quotients of quiver Hecke superalgebras supercategorify certain irreducible representations of Kac-Moody algebras~\cite{KKO,KKO2}. A closely related spin Hecke algebra associated to the affine Hecke-Clifford superalgebra appeared in earlier work of Wang~\cite{Wang} and many of the essential features of the odd nilHecke algebra including skew-polynomials appears in this and related works~\cite{Wang2,KW1,KW2,KW4}. The odd nilHecke algebra has led to a number of surprising new structures including an odd analogue of the ring of symmetric functions~\cite{EK,EKL,EOddLR} and odd analogues of the cohomology groups of Grassmannians~\cite{EKL} and of Springer varieties~\cite{LR}. These structures possess combinatorics quite similar to those of their even counterparts. When coefficients are reduced modulo two the theories become identical, but the odd analogues possess an inherent non-commutativity making them distinct from the classical theory. Clark, Hill, and Wang have supplied convincing evidence that the odd nilHecke algebra and its generalizations are closely related to super Lie theory~\cite{HillWang,ClarkWang, CHW,CHW2,CFLW}. A key insight of their construction was the introduction of a parameter $\pi$ with $\pi^2=1$. They introduce the notion of a covering Kac-Moody algebra defined over ${\mathbbm Q}(q)[\pi]/(\pi^2-1)$ for certain Kac-Moody Lie algebras. A list of those finite and affine type Kac-Moody algebras admitting a covering algebra is shown in \cite[Table1]{HillWang}. The specialization to $\pi=1$ gives the quantum enveloping algebra of a Kac-Moody algebra and the specialization to $\pi=-1$ gives a quantum enveloping algebra of a Kac-Moody superalgebra. 
This idea led Hill and Wang to a novel bar involution $\overline{q} =\pi q^{-1}$ allowing the first construction of canonical bases for positive parts of Lie superalgebras. The canonical basis for the entire quantum supergroup $U_q(\mathfrak{osp}_{1|2})$ was constructed by Clark and Wang in \cite{ClarkWang} where the rank one case was fully developed. The covering algebra $U_qpi$ can be seen as a simultaneous generalization of the quantum group $U_q(\mathfrak{sl}_2)$ and the Lie superalgebra $U_q(\mathfrak{osp}_{1|2})$. This relationship is illustrated below. \[\xy (0,10)*+{U_qpi}="t"; (-15,-5)*+{U_q(\mathfrak{sl}_2)}="bl"; (15,-5)*+{U_q(\mathfrak{osp}_{1|2})}="br"; {\ar_{\pi \rightarrow 1} "t";"bl"}; {\ar^{\pi \rightarrow -1} "t";"br"}; \endxy\] The existence of a canonical basis for the covering algebra $U_qpi$ led Clark and Wang to conjecture the existence of a categorification of this algebra. The main theorem of this paper (Theorem~\ref{thm_Groth}) confirms their conjecture. \subsubsection*{Covering link homologies} Given the interaction between categorified quantum groups and link homology, the existence of a categorification of covering Kac-Moody algebras hints at the possible existence of ``covering'' link homology theories carrying an additional ${\mathbbm Z}_2$-grading. If the conjectured connection between categorified covering Kac-Moody algebras and odd link homologies exists, this would guide the search for new odd link homology theories. In particular, the covering Kac-Moody perspective suggests that there will not be an odd analog of $\mathfrak{sl}_n$-link homologies for $n>2$. Indeed, the only covering Kac-Moody algebras that exist in finite type correspond to the Lie algebras $\mathfrak{so}_{2n+1}$. The covering Kac-Moody explanation of odd Khovanov homology would then be the result of the Lie algebra coincidence $\mathfrak{sl}_2 = \mathfrak{so}_3$.
The existence of odd link homologies associated to $\mathfrak{so}_{2n+1}$ appears to be further corroborated by the topological field theory construction of link homologies due to Witten. In a forthcoming article, Mikhaylov and Witten describe an extension of this work in which candidates for odd link homologies associated $\mathfrak{so}_{2n+1}$ naturally arise~\cite{MW}. This work utilizes the orthosymplectic supergroup suggesting a close interplay with covering Kac-Moody algebras. A covering homology was recently defined by Putyra \cite{Putyra} using a 2-category of chronological cobordisms. \subsubsection*{Diagrammatics for super-2-categories} The previously introduced graphical calculus for the odd nilHecke algebra required certain generators to skew commute. This requires one to keep careful track of the relative heights of generators in a diagram making graphical computations rather cumbersome. In Section \ref{sec-prelim} we translate Kang, Kashiwara, and Oh's theory of super-2-categories into a new diagrammatic framework. This formulation has the advantage that it allows us to recast the odd nilHecke algebra in a graphical calculus with full isotopy invariance at the cost of adding a new type of strand to the graphical calculus. From the perspective of these diagrammatics for super-2-categories, the skew commutativity of certain odd 2-morphisms is quite natural, allowing for a categorical manifestation of odd nilHecke algebras. \subsubsection*{Super-2-representation theory of quantum Kac-Moody superalgebras} As mentioned above, one of the main theorems of this paper is the construction of a super-2-category $U_qdotc$ which categorifies Clark and Wang's quantum covering $\mathfrak{sl}_2$. 
We also prove a 2-representation theoretic structure theorem analogous to a theorem of Cautis and the second author \cite{CL}: that a strong supercategorical action of $\mathfrak{sl}_2$ gives rise to a 2-representation of our super-2-category $U_qdotc$, the latter notion being \emph{a priori} stronger. This allows us to upgrade the Kang-Kashiwara-Oh action on cyclotomic quotients of odd nilHecke algebras to a 2-representation of $U_qdotc$. In \cite{Lau1}, the categorification $U_qdotcev$ of quantum $\mathfrak{sl}_2$ has generating 1-morphisms $\mathcal{E}1bbl$ and $1bbl\mathcal{F}$. These 1-morphisms are biadjoint up to grading shifts. It should come as no surprise, then, that the corresponding generating 1-morphisms of $U_qdotc$ are biadjoint up to grading \emph{and parity} shifts. Unlike shifts of the ${\mathbbm Z}$-grading, parity shifts are visible in our diagrammatics. Hence our biadjointness and cyclicity relations \eqref{eq_biadjoint1}, \eqref{eq_biadjoint2}, \eqref{eqn-dot-cyclicity}, \eqref{eqn-crossing-cyclicity} are more involved than those in \cite{Lau1}. We expect that all the constructions of this paper---the definition of strong supercategorical actions, the definition of the super-2-category, and the theorem on upgrading the former to the latter---can be extended to all Kac-Moody types for which Kang, Kashiwara, and Tsuchioka \cite{KKT} have defined quiver Hecke superalgebras. \bigskip \noindent {\bf Acknowledgments:} A.P.E was supported by the NSF Graduate Research Fellowship Program and thanks Masaki Kashiwara for a helpful discussion on super-2-categories. A.D.L was partially supported by NSF grant DMS-1255334, the Alfred P. Sloan foundation, and by the John Templeton Foundation.
A.D.L is grateful to Sabin Cautis for teaching him the methods used in Section~\ref{sec-formal} and to Edward Witten for discussions about gauge theoretic formulations of odd Khovanov homology. Both the authors would like to acknowledge partial support from Columbia University's RTG grant DMS-0739392. This paper builds directly on joint work with Mikhail Khovanov \cite{EKL}. \section{Preliminaries} \label{sec-prelim} \subsection{Conventions}\label{subsec-conventions} We will work with $({\mathbbm Z}\times{\mathbbm Z}_{2})$-graded algebras. The ${\mathbbm Z}$-degree of an element $x$ we will simply call \emph{degree} $|x|$ and the ${\mathbbm Z}_{2}$-degree we will call \emph{parity} $p(x)$. The braiding on the base category (graded $\Bbbk$-modules) is governed by parity; for instance, if $A$ is an algebra in this category, then the canonical multiplication on $A\otimes A$ is \begin{equation*} (a\otimes b)(c\otimes d)=(-1)^{p(b)p(c)}(ac)\otimes(bd) \end{equation*} for parity-homogeneous elements $a,b,c,d$. If $M$ is an $A$-module, we write $\Pi(M)$ for its parity shift with the left action twisted by the parity involution on $A$, \begin{equation}\label{eqn-parity-involution} \iota_A(a)=(-1)^{p(a)}a. \end{equation} We write $M\ds{k}$ for the module obtained from $M$ by shifting all degrees down by $k$. Hom-spaces always refer to maps which preserve both degree and parity; if we write ${\rm Hom}^k$ for the space of maps of degree $k$ (so that ${\rm Hom}^0$ is the actual hom-space), then \begin{equation*} {\rm Hom}^k(M,N)={\rm Hom}(M\ds{-k},N)={\rm Hom}(M,N\ds{k}). \end{equation*} \subsection{Covering Kac-Moody algebras}\label{subsec-covering-sl2} In this subsection, we present a modification of the constructions of \cite{CHW}.
This ``covering'' form of $U_q=U_q(\mathfrak{sl}_2)$, which we denote by $U_qpi$, is the object we will categorify as the main result of this paper. Define the $(q,\pi)$-analogues of integers, factorials, and binomial coefficients by \[ [n]=\frac{(\pi q)^n-q^{-n}}{\pi q-q^{-1}},\qquad [a]!= \prod_{i=1}^{a}[i], \qquad \left[\!\! \begin{array}{c} n \\ a \end{array} \!\!\right] = \frac{\prod_{i=1}^a[n+i-a]}{[a]!}. \] Note as in \cite{CHW} that $\left[\!\! \begin{array}{c} n \\ a \end{array} \!\!\right] = \frac{[n]!}{[a]![n-a]!}$ for $n \geq a \geq 0$ and $[-n]=-\pi^n[n]$. Let $\mathcal{A}={\mathbbm Z}[q,q^{-1}]$, $\mathcal{A}_{\pi}={\mathbbm Z}[q,q^{-1},\pi]/(\pi^2-1)$, and ${\mathbbm Q}(q)^\pi={\mathbbm Q}(q)[\pi]/(\pi^2-1)$. \begin{defn}\label{defn-covering-sl2} The \emph{idempotented form} of \emph{quantum covering $\mathfrak{sl}_2$} is the (non-unital) ${\mathbbm Q}(q)^\pi$-algebra $U_qdotpi$ generated by orthogonal idempotents $\lbrace1l:\lambda\in{\mathbbm Z}\rbrace$ and elements \begin{equation} 1ltwo E1l=E1l=1ltwo E,\qquad1l F1ltwo=F1ltwo=1l F\qquad\lambda\in{\mathbbm Z} \end{equation} subject to the \emph{covering $\mathfrak{sl}_2$ relation}, \begin{equation}\label{eqn-covering-sl2-relation} EF1l-\pi FE1l=[\lambda]1l. \end{equation} The \emph{integral idempotented form} is the $\mathcal{A}_\pi$-subalgebra $_{{\mathbbm A}c}U_qdotpi\subset U_qdotpi$ generated by the divided powers \begin{equation} E^{(a)}1l=\frac{E^a1l}{[a]!},\quad1l F^{(a)}=\frac{1l F^a}{[a]!} \end{equation} and the (idempotented) $q$-binomial coefficients \begin{equation} \left[\!\!
\begin{array}{c} n \\ a \end{array} \!\!\right]1l \end{equation} (we are making the shorthand identifications $E^21l=E1ltwo E1l$, etc.). \end{defn} \begin{rem} There is also a non-idempotented form of covering quantum $\mathfrak{sl}_2$, but we will not need it; see \cite{CHW}.\end{rem} The \emph{Clark-Hill-Wang bar involution} on $U_qdotpi$ is defined by \begin{equation*} \overline{q}=\pi q^{-1},\quad\overline{\pi}=\pi^{-1}=\pi \end{equation*} (it does nothing to $1l$, $E$, and $F$). ``Anti-linear'' will be meant with respect to the bar involution. Note that $(q,\pi)$-integers $[\lambda]$ are bar-invariant. Define a linear anti-automorphism $\rho$ of $U_qdotpi$ by \begin{equation}\label{eqn-defn-rho} \rho(q)=\pi q,\quad\rho(\pi)=\pi,\quad\rho(1l)=1l,\quad\rho(E1l)=q^{\lambda+1}1l F,\quad\rho(1l F)=\pi^{\lambda+1}q^{-\lambda-1}E1l \end{equation} and an anti-linear anti-automorphism $\tau=\overline{\rho}$ by \begin{equation}\label{eqn-defn-tau} \tau(q)=q^{-1},\quad\tau(\pi)=\pi,\quad\tau(1l)=1l,\quad\tau(E1l)=\pi^{\lambda+1}q^{-\lambda-1}1l F,\quad\tau(1l F)=q^{\lambda+1}E1l. \end{equation} Define the bilinear form $(\cdot,\cdot)$ on $U_qdotpi$ by the properties \begin{eqnarray} &\text{different idempotented parts are orthogonal}\\ &(fx,y)=f(x,y)=(x,fy)\\ &(ux,y)=(x,\rho(u)y)\\ &(x,y)=(y,x)\\ &(F^{(a)}1l,F^{(a)}1l)=\prod_{s=1}^a(1-\pi^sq^{-2s})^{-1} \end{eqnarray} and the sesquilinear form $\langle\cdot,\cdot\rangle$ by \begin{equation*} \langle x,y\rangle=\overline{(x,\overline{y})}.
$\blacktriangleleft$nd{equation*} The following properties of $\lambdaangle\cdot,\cdot\rangle$ follow: $\blacktriangleright$egin{eqnarray} &\text{different idempotented parts are orthogonal}\\ &\lambdaangle\overline{f}x,y\rangle=f\lambdaangle x,y\rangle=\lambdaangle x,fy\rangle\\ \lambdaanglebel{eqn-tau-adjoint}&\lambdaangle ux,y\rangle=\lambdaangle x,\tau(u)y\rangle\\ &\lambdaangle x,y\rangle=\lambdaangle\overline{y},\overline{x}\rangle\\ \lambdaanglebel{eqn-form-on-F}&\lambdaangle F^{(a)}1l,F^{(a)}1l\rangle=\prod_{s=1}^a(1-\pi^sq^{2s})^{-1} $\blacktriangleleft$nd{eqnarray} This agrees with the bilinear form introduced by Lusztig~\cite[26.1.1]{Lus4} when $\pi=1$. Note that $\lambdaangle1l F,1l F\rangle=(1-\pi q^2)^{-1}$. $\blacktriangleright$egin{lem} $\lambdaangle E1l,E1l\rangle=(1-\pi q^2)^{-1}$.$\blacktriangleleft$nd{lem} $\blacktriangleright$egin{proof} $\blacktriangleright$egin{equation*}$\blacktriangleright$egin{split} \lambdaangle E1l,E1l\rangle&\refequal{$\blacktriangleleft$qref{eqn-tau-adjoint}}\lambdaangle1l,\tau(E1l)E1l\rangle\\ &\refequal{$\blacktriangleleft$qref{eqn-defn-tau}}\lambdaangle1l,\pi^{\lambda+1}q^{-\lambda-1}FE1l\rangle\\ &\refequal{$\blacktriangleleft$qref{eqn-covering-sl2-relation}}\lambdaangle1l,\pi^\lambda q^{-\lambda-1}EF1l-\pi^\lambda q^{-\lambda-1}[\lambda]1l\rangle\\ &=\lambdaangle\tau^{-1}(E1_{\lambda-2})1l,\pi^\lambda q^{-\lambda-1}F1l\rangle-\pi^\lambda q^{-\lambda-1}[\lambda]\lambdaangle1l,1l\rangle\\ &\refequal{$\blacktriangleleft$qref{eqn-defn-tau}}\lambdaangle q^{-\lambda-1}1_{\lambda-2}F,\pi^\lambda q^{-\lambda-1}1_{\lambda-2}F\rangle-\pi^\lambda q^{-\lambda-1}[\lambda]\\ &\refequal{$\blacktriangleleft$qref{eqn-form-on-F}}\frac{\pi^\lambda q^{-2\lambda}}{1-\pi q^2}-\pi^\lambda q^{-\lambda-1}\frac{\pi^\lambda q^\lambda-q^{-\lambda}}{\pi q-q^{-1}}\\ &=\frac{1}{1-\pi q^2}. 
$\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation*} $\blacktriangleleft$nd{proof} $\blacktriangleright$egin{rem} There are some rescaling degrees of freedom for the automorphisms and bilinear form defined above. Our automorphisms and bilinear form differ from those of \cite{CHW}. The specific choice of scaling given would have been difficult to fix without knowing the categorification of $U_qdotpi$. In particular, our automorphism $\tau$ is the decategorification of ``take the right adjoint''.$\blacktriangleleft$nd{rem} \subsection{Odd nilHecke algebras and cyclotomic quotients} \subsubsection{Odd nilHecke algebras categorify $_{{\mathbbm A}c}U_q^+(\mathfrak{sl}_2)$} The main theorem of this paper is the construction of a categorification of the algebra $U_qdotpi$ of Definition \ref{defn-covering-sl2}. One of the main ingredients in this construction is the categorification of $U_q^+$ by the $\blacktriangleleft$mph{odd nilHecke algebras} \cite{EKL,KKT}. This subsection will review these algebras as well as their $\blacktriangleleft$mph{cyclotomic quotients}, which categorify simple modules for $U_qdot$ \cite{KKO,KKO2}. Let $\dot{\mathbb{B}}bbk$ be a commutative ring (usually we take $\dot{\mathbb{B}}bbk={\mathbbm Z}$ or a field) and let $\blacktriangleright$egin{equation*} \mathrm{SPol}_n=\dot{\mathbb{B}}bbk\lambdaanglengle x_1,\lambdadots,x_n\ranglengle/(x_ix_j+x_jx_i\text{ if }i\neq j) $\blacktriangleleft$nd{equation*} be the $({\mathbbm Z}\times{\mathbbm Z}_{2})$-graded superalgebra of $\blacktriangleleft$mph{skew polynomials} in $n$ variables. Its generators are given degrees $|x_i|=2$, $p(x_i)=1$. The symmetric group $S_n$ acts on $\mathrm{SPol}_n$ by $\blacktriangleright$egin{equation*} w(x_i)=x_{w(i)},\quad w(fg)=w(f)w(g). 
$\blacktriangleleft$nd{equation*} For $i=1,\dots,n-1$, let $s_i=(i\quad i+1)$ be the $i$-th simple transposition in $S_n$ and define the $i$-th $\blacktriangleleft$mph{odd divided difference operator} $\partial_i$ to be the map $\mathrm{SPol}_n\rightarrow\mathrm{SPol}_n$ defined by $\blacktriangleright$egin{eqnarray} &\partial_i(x_j)=$\blacktriangleright$egin{cases}1&j=i,i+1\\0&\text{otherwise,}$\blacktriangleleft$nd{cases}\\ &\partial_i(fg)=\partial_i(f)g+(-1)^{|f|}s_i(f)\partial_i(g). $\blacktriangleleft$nd{eqnarray} (It is straightforward to check this is well defined.) For example, $\blacktriangleright$egin{equation*} \partial_1(x_1^2x_2)=\partial_1(x_1)x_1x_2-x_2\partial_1(x_1)x_2+x_2^2\partial_1(x_2)=x_1x_2. $\blacktriangleleft$nd{equation*} More generally, for any $f\in \mathrm{SPol}_n$ the action of the odd divided difference operator is given by the formula \[ \partial_i f = \frac{(x_{i+1} - x_i)f - (-1)^{|f|}s_i(f) (x_{i+1} - x_i) }{x_{i+1}^2-x_i^2}, \] see \cite[equation 4.19]{KKO}. These operators play a role analogous to that of the divided difference operators of Kostant-Kumar. $\blacktriangleright$egin{defn}\lambdaanglebel{defn-osym} The sub-superalgebra $\blacktriangleright$egin{equation} \mathrm{O\Lambda}_n=$\blacktriangleright$igcap_{i=1}^{n-1}\Bbbker(\partial_i) $\blacktriangleleft$nd{equation} of $\mathrm{SPol}_n$ is called the superalgebra of $\blacktriangleleft$mph{odd symmetric polynomials} in $n$ variables. $\blacktriangleleft$nd{defn} $\blacktriangleright$egin{defn}\lambdaanglebel{defn-onh} The $\blacktriangleleft$mph{odd nilHecke algebra} $\mathrm{ONH}_n$ in $n$ variables (or ``on $n$ strands'') is the sub-superalgebra of ${\rm End}_\dot{\mathbb{B}}bbk(\mathrm{SPol}_n)$ generated by the operators $x_1,\lambdadots,x_n$ (left multiplication by $x_i$) and $\partial_1,\lambdadots,\partial_{n-1}$. $\blacktriangleleft$nd{defn} The generators above have degrees $|x_i|=2$, $|\partial_i|=-2$, $p(x_i)=p(\partial_i)=1$. 
Fix a reduced expression $w=i_1\cdots i_r$ for each $w\in S_n$ and define $\blacktriangleright$egin{equation*} \partial_w=\partial_{i_1}\cdots\partial_{i_r}. $\blacktriangleleft$nd{equation*} This is independent of choice of reduced expression up to sign only (see the following Proposition). If $\alpha=(\alpha_1,\lambdadots,\alpha_n)$ is an $n$-tuple, write $x^\alpha$ for $x_1^{\alpha_1}\cdots x_n^{\alpha_n}$. The basic properties of $\mathrm{ONH}_n$ are as follows. $\blacktriangleright$egin{prop}[\cite{EKL}] $\blacktriangleright$egin{enumerate} \item Changing the choice of reduced expression only changes $\partial_w$ by a possible factor of $-1$, and $\blacktriangleright$egin{equation*} \partial_w\partial_{w'}=$\blacktriangleright$egin{cases}\pm\partial_{ww'}&$\blacktriangleleft$ll(w)+$\blacktriangleleft$ll(w')=$\blacktriangleleft$ll(ww'),\\0&\text{otherwise.}$\blacktriangleleft$nd{cases} $\blacktriangleleft$nd{equation*} \item The algebra $\mathrm{ONH}_n$ is a free $\dot{\mathbb{B}}bbk$-module. Either of the sets $\lambdabrace\partial_wx^\alpha:w\in S_n,\alpha_i\in{\mathbbm Z}_{\mathfrak{g}eq0}\rbrace$, $\lambdabrace x^\alpha\partial_w:w\in S_n,\alpha_i\in{\mathbbm Z}_{\mathfrak{g}eq0}\rbrace$ is a basis. \item The relations $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-onh-relations}$\blacktriangleright$egin{split} &x_ix_j+x_jx_i=0\text{ if }i\neq j,\qquad \partial_i\partial_j+\partial_j\partial_i=0\text{ if }|i-j|>1,\\ &\partial_i^2=0,\qquad \partial_i\partial_{i+1}\partial_i=\partial_{i+1}\partial_i\partial_{i+1},\\ &\partial_ix_j+x_j\partial_i=0,\text{ if }j\neq i,i+1,\\ &x_i\partial_i+\partial_ix_{i+1}=\partial_ix_i+x_{i+1}\partial_i=1 $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} hold and give a presentation of $\mathrm{ONH}_n$. \item There is an isomorphism of superalgebras $\blacktriangleright$egin{equation} \mathrm{ONH}_n{\rm co}ng{\rm End}_{\mathrm{O\Lambda}_n}(\mathrm{SPol}_n). 
$\blacktriangleleft$nd{equation} $\blacktriangleleft$nd{enumerate}$\blacktriangleleft$nd{prop} In the super-2-category we will construct in Subsection \ref{subsec-defn-odd-udot}, the odd nilHecke algebra $\mathrm{ONH}_n$ will play an important role in governing the endomorphisms of $\mathcal{E}^n1bbl$ and $1bbl\mathcal{F}^n$. It is often more convenient to work in a diagrammatic notation (which first arose, in fact, from the 2-categorical setting). Elements of $\mathrm{ONH}_n$ are $\dot{\mathbb{B}}bbk$-linear combinations of diagrams involving crossings and dots on $n$ strands with fixed endpoints, considered up to isotopies rel their boundaries that do not change the $\blacktriangleleft$mph{relative} heights of dots and crossings. For example, $\blacktriangleright$egin{equation*} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick] (0,0) [out=90, in=-90] to (2,2); \draw[thick] (1,0) [out=90, in=-90] to (0,2); \draw[thick] (2,0) .. controls (2,1) and (1,1) .. (1,2) node[pos=.2] () {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \;\; -\;\;2\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick] (0,0) -- (0,2) node[pos=.25] () {$\blacktriangleright$bullet} node[pos=.75] () {$\blacktriangleright$bullet}; \draw[thick] (1,0) [out=90, in=-90] to (1,2); \draw[thick] (2,0) [out=90, in=-90] to (2,2); $\blacktriangleleft$nd{tikzpicture}} \;\; \in \;\; \mathrm{ONH}_3. $\blacktriangleleft$nd{equation*} For a more discussion of diagrammatic algebra, see any of \cite{KL1,KhDiagrammatics,Lau4}. 
In graphical notation, the relations \eqref{eqn-onh-relations} read
\begin{equation}\label{eqn-onh-relations-graphical}\begin{split}
&\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) -- (0,2) node[pos=.75] () {$\bullet$};
\node () at (1,1) {$\cdots$};
\draw[thick] (2,0) -- (2,2) node[pos=.25] () {$\bullet$};
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) -- (0,2) node[pos=.25] () {$\bullet$};
\node () at (1,1) {$\cdots$};
\draw[thick] (2,0) -- (2,2) node[pos=.75] () {$\bullet$};
\end{tikzpicture}}
\quad=\;\;0,\qquad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) -- (0,1) [out=90, in=-90] to (1,2);
\draw[thick] (1,0) -- (1,1) [out=90, in=-90] to (0,2);
\node () at (2,1) {$\cdots$};
\draw[thick] (3,0) [out=90, in=-90] to (4,1) -- (4,2);
\draw[thick] (4,0) [out=90, in=-90] to (3,1) -- (3,2);
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (3,0) -- (3,1) [out=90, in=-90] to (4,2);
\draw[thick] (4,0) -- (4,1) [out=90, in=-90] to (3,2);
\node () at (2,1) {$\cdots$};
\draw[thick] (0,0) [out=90, in=-90] to (1,1) -- (1,2);
\draw[thick] (1,0) [out=90, in=-90] to (0,1) -- (0,2);
\end{tikzpicture}}
\quad= \;\; 0,\\
&\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) [out=90, in=-90] to (1,1) [out=90, in=-90] to (0,2);
\draw[thick] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\end{tikzpicture}}
\quad=\;\; 0,\qquad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) [out=90, in=-90] to (2,2);
\draw[thick] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\draw[thick] (2,0) [out=90, in=-90] to (0,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) [out=90, in=-90] to (2,2);
\draw[thick] (1,0) [out=90, in=-90] to (2,1) [out=90, in=-90] to (1,2);
\draw[thick] (2,0) [out=90, in=-90] to (0,2);
\end{tikzpicture}}
\quad,\\
&\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) -- (0,1) [out=90, in=-90] to (1,2);
\draw[thick] (1,0) -- (1,1) [out=90, in=-90] to (0,2);
\node () at (2,1) {$\cdots$};
\draw[thick] (3,0) -- (3,2) node[pos=.25] () {$\bullet$};
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (0,0) [out=90, in=-90] to (1,1) -- (1,2);
\draw[thick] (1,0) [out=90, in=-90] to (0,1) -- (0,2);
\node () at (2,1) {$\cdots$};
\draw[thick] (3,0) -- (3,2) node[pos=.75] () {$\bullet$};
\end{tikzpicture}}
= \;\; 0,\qquad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (1,0) -- (1,2) node[pos=.75] () {$\bullet$};
\node () at (2,1) {$\cdots$};
\draw[thick] (3,0) [out=90, in=-90] to (4,1) -- (4,2);
\draw[thick] (4,0) [out=90, in=-90] to (3,1) -- (3,2);
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick] (1,0) -- (1,2) node[pos=.25] () {$\bullet$};
\node () at (2,1) {$\cdots$};
\draw[thick] (3,0) -- (3,1) [out=90, in=-90] to (4,2);
\draw[thick] (4,0) -- (4,1) [out=90, in=-90] to (3,2);
\end{tikzpicture}}
=\;\;0,\\
&\hackcenter{\begin{tikzpicture}[scale=0.75]
\draw[thick] (0,0) .. controls (0,.5) and (1,.5) .. (1,1);
\draw[thick] (1,0) .. controls (1,.5) and (0,.5) .. (0,1) node[pos=.75] () {$\bullet$};
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.75]
\draw[thick] (0,0) .. controls (0,.5) and (1,.5) .. (1,1);
\draw[thick] (1,0) .. controls (1,.5) and (0,.5) .. (0,1) node[pos=.25] () {$\bullet$};
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}[scale=0.75]
\draw[thick] (0,0) -- (0,1);
\draw[thick] (1,0) -- (1,1);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}[scale=0.75]
\draw[thick] (0,0) .. controls (0,.5) and (1,.5) .. (1,1) node[pos=.25] () {$\bullet$};
\draw[thick] (1,0) .. controls (1,.5) and (0,.5) .. (0,1);
\end{tikzpicture}}
\quad+\quad
\hackcenter{\begin{tikzpicture}[scale=0.75]
\draw[thick] (0,0) .. controls (0,.5) and (1,.5) .. (1,1) node[pos=.75] () {$\bullet$};
\draw[thick] (1,0) .. controls (1,.5) and (0,.5) .. (0,1);
\end{tikzpicture}}
\quad.
\end{split}\end{equation}
One pleasant outcome of the super-2-categorical setting we will introduce in Subsection \ref{subsec-super-2-categories} is that we will be able to use diagrams which are equivalent modulo \emph{all} isotopies rel boundary, not just those which preserve relative heights of generators. There is a map of superalgebras $\iota_{a,b}:\mathrm{ONH}_a\otimes\mathrm{ONH}_b\hookrightarrow\mathrm{ONH}_{a+b}$ given by juxtaposing diagrams horizontally, placing the diagram on the left above the one on the right. On generators,
\begin{equation}\label{eqn-iota-onh-ab}
\iota_{a,b}(x_i\otimes1)=x_i,\quad\iota_{a,b}(\partial_i\otimes1)=\partial_i,\quad\iota_{a,b}(1\otimes x_i)=x_{a+i},\quad\iota_{a,b}(1\otimes\partial_i)=\partial_{a+i}.
\end{equation}

\begin{prop}[\cite{EKL}]\label{prop-ekl-2}
\begin{enumerate}
\item Let $\delta=(n-1,n-2,\ldots,1,0)$ and $e_n=\partial_{w_0}x^\delta$. Then $e_n$ is an idempotent in $\mathrm{ONH}_n$ and the left module $P_n=\mathrm{ONH}_ne_n$ is, up to grading shifts, the unique indecomposable projective $\mathrm{ONH}_n$-module.
\item The regular representation of $\mathrm{ONH}_n$ decomposes as
\begin{equation}
\mathrm{ONH}_n\cong\bigoplus_{[n]!}(\mathrm{ONH}_ne_n)\langle\qbin{n}{2}\rangle.
\end{equation}
Here, for a Laurent polynomial $f=\sum_jf_jq^j$, the notation $\bigoplus_fM$ means the direct sum $\bigoplus_jM\langle-j\rangle^{\oplus f_j}$.
\item Induction and restriction along the map $\iota_{a,b}$ send finitely generated graded projectives to finitely generated graded projectives. In particular,
\begin{equation}
{\mathrm{Ind}}_{a,b}^{a+b}(P_a\otimes P_b)\cong\bigoplus_{\qbins{a}{b}}P_{a+b},
\end{equation}
where we are using the shorthand notations $a$ for $\mathrm{ONH}_a$ and $a,b$ for $\mathrm{ONH}_a\otimes\mathrm{ONH}_b$.
\end{enumerate}\end{prop}

For any graded algebra $A$, consider the free $\mathcal{A}$-module with basis consisting of isomorphism classes of finitely generated indecomposable projective modules. The version of Grothendieck group with which we will concern ourselves is the quotient of this free module by the relations
\begin{itemize}
\item $[B]=[A]+[C]$ if $B\cong A\oplus C$ as graded modules,
\item $[A\langle-1\rangle]=q[A]$.
\end{itemize}
If $A$ is a graded superalgebra, we use $\mathcal{A}_\pi={\mathbbm Z}[q,q^{-1},\pi]/(\pi^2-1)$ instead of $\mathcal{A}$.
The parity shift functor $\Pi$ acts on the Grothendieck group by the relation $[\Pi M]=\pi[M]$. Then $K_0(A)$ is naturally an $\mathcal{A}_\pi$-module spanned by all indecomposable projectives (considered up to isomorphism and grading shifts). Writing $K_0(\mathrm{ONH}_\bullet)=\bigoplus_{n\geq0}K_0(\mathrm{ONH}_n)$, statement (3) of Proposition \ref{prop-ekl-2} implies that $[V]\otimes[W]\mapsto[{\mathrm{Ind}}_{a,b}^{a+b}(V\otimes W)]$ determines a homomorphism of $\mathcal{A}$-modules $K_0(\mathrm{ONH}_a)\otimes_{\mathcal{A}}K_0(\mathrm{ONH}_b)\rightarrow K_0(\mathrm{ONH}_{a+b})$. By Proposition \ref{prop-ekl-2} (3), this map takes
\begin{equation*}
[P_a]\otimes[P_b]\mapsto \qbin{a}{b}[P_{a+b}].
\end{equation*}
Restriction determines a map in the other direction. This nearly immediately implies the following.

\begin{thm}[\cite{EKL}]
The map taking $E^{(a)}$ to $[P_a]$ is an isomorphism of $q$-Hopf algebras
\begin{equation}
\xymatrix{U_q^+(\mathfrak{sl}_2)\ar[r]^-\cong&K_0(\mathrm{ONH}_\bullet)\otimes_{\mathcal{A}}{\mathbbm Q}(q).}
\end{equation}
\end{thm}

With more work (the ``thick calculus'' of \cite{EKL}), the above theorem can be strengthened.

\begin{thm}[\cite{EKL}]
The map taking $E^{(a)}$ to $[P_a]$ is an isomorphism of $q$-Hopf algebras
\begin{equation}
\xymatrix{_{\mathcal{A}}U_q^+(\mathfrak{sl}_2)\ar[r]^-\cong&K_0(\mathrm{ONH}_\bullet).}
\end{equation}
\end{thm}

\subsubsection{Cyclotomic quotients categorify $_{\mathcal{A}_\pi}V^\Lambda$}

\begin{defn}
Let $\Lambda\geq0$ be a dominant integral weight for $\mathfrak{sl}_2$ (thought of as a non-negative integer).
The corresponding \emph{cyclotomic quotient} $\mathrm{ONH}_n^\Lambda$ of $\mathrm{ONH}_n$ is defined to be the superalgebra
\begin{equation}
\mathrm{ONH}_n^\Lambda=\mathrm{ONH}_n/(x_1^\Lambda).
\end{equation}
\end{defn}

While explicit calculations in cyclotomic quotients are generally difficult, there is an isomorphism with a matrix algebra over a less complicated ring,
\begin{equation*}
\mathrm{ONH}_n^\Lambda\cong\mathrm{Mat}_{[n]!}(OH_{n,\Lambda}).
\end{equation*}
The notation $\mathrm{Mat}_{[n]!}$ means the algebra of endomorphisms of $OH_{n,\Lambda}^{\oplus[n]!}$ as a $OH_{n,\Lambda}$-module. The algebra $OH_{n,\Lambda}$ is the \emph{odd Grassmannian algebra} of \cite{EKL}; it is graded local and serves as an odd analogue of the singular cohomology ring of the complex Grassmannian $\mathrm{Gr}(n,{\mathbbm C}^\Lambda)$. The maps $\iota_{a,b}$ are still well-defined on cyclotomic quotients, yielding analogous induction and restriction functors. These functors have been studied in depth by Kang-Kashiwara-Oh \cite{KKO,KKO2}. When $a=n$ and $b=1$, the resulting ``right-strand'' induction and restriction functors between $\mathrm{ONH}_n^\Lambda$ and $\mathrm{ONH}_{n+1}^\Lambda$ are related by natural isomorphisms that give a ``strong supercategorical action'' (see Subsection \ref{subsec-strong-supercat-action} or the even analogue in \cite{CL}) of $U_q(\mathfrak{sl}_2)$ on the supermodule categories for $\mathrm{ONH}_n^\Lambda$. Kang-Kashiwara-Oh prove this in both a super and a non-super setting.

\begin{thm}[\cite{KKO,KKO2}]
Let $_{\mathcal{A}}V^\Lambda$, $_{\mathcal{A}_\pi}V^\Lambda$ be the simple modules of highest weight $q^\Lambda$ for $_{\mathcal{A}}U_q$, $_{\mathcal{A}}U_q^\pi$ respectively.
\begin{enumerate}
\item There is an isomorphism of $\mathcal{A}$-modules
\begin{equation}
\xymatrix{_{\mathcal{A}}V^\Lambda\ar[r]^-\cong&K_0(\mathrm{ONH}^\Lambda_\bullet)}
\end{equation}
Under this isomorphism, right-strand induction and restriction between the algebras $\mathrm{ONH}_n^\Lambda$ decategorify to the action of $F$ and $E$, respectively.
\item Let $K_0^{\text{super}}$ be $K_0$ considered as an $\mathcal{A}_\pi$-module, where $\pi$ is the decategorification of parity shift. Then there is an isomorphism of $\mathcal{A}_\pi$-modules
\begin{equation}
\xymatrix{_{\mathcal{A}_\pi}V^\Lambda\ar[r]^-\cong&K_0^{\text{super}}(\mathrm{ONH}^\Lambda_\bullet)}
\end{equation}
Under this isomorphism, right-strand induction and restriction between the superalgebras $\mathrm{ONH}_n^\Lambda$ decategorify to the action of $F$ and $E$, respectively.
\end{enumerate}\end{thm}

A natural question raised by this theorem is whether the Kang-Kashiwara-Oh strong supercategorical action can be replaced by a genuine super-2-representation---that is, whether there exists a super-2-category $\dot{\mathcal{U}}_q$ such that:
\begin{enumerate}
\item $\dot{\mathcal{U}}_q$ categorifies the algebra $_{\mathcal{A}}\dot{U}_q^\pi(\mathfrak{sl}_2)$;
\item there is compatibility with the results of \cite{EKL}: the odd nilHecke algebra $\mathrm{ONH}_n$ naturally appears in the 2-hom-spaces between 1-morphisms lifting the elements $E^n1_\lambda$ or $F^n1_\lambda$ of $_{\mathcal{A}}\dot{U}_q^\pi(\mathfrak{sl}_2)$;
\item there is compatibility with the results of \cite{KKO,KKO2}: supermodules for $\mathrm{ONH}_n^\Lambda$ form a 2-representation of $\dot{\mathcal{U}}_q$, and the 1-morphisms lifting $E$ and $F$ act as right-strand restriction and induction, respectively;
\item the indecomposable 1-morphisms in $\dot{\mathcal{U}}_q$ categorify the Clark-Wang canonical
basis for $_{\mathcal{A}}\dot{U}_q(\mathfrak{sl}_2)$.
\end{enumerate}
The rest of this paper is devoted to answering this question in the affirmative. But first we must develop the setting of super-2-representation theory.

\subsection{Super-2-categories}\label{subsec-super-2-categories}

In \cite{Lau1}, the second author constructed a 2-category which categorifies $\dot{U}_q(\mathfrak{sl}_2)$. The proper framework for categorifying $\dot{U}_q^\pi(\mathfrak{sl}_2)$ is that of super-2-categories. Our definition of super-2-category is slightly different from but equivalent to that of \cite{KKO2}. Although the example of super-bimodules is actually a bicategory and not a (strict) 2-category, we will treat all bicategories as strict by the appropriate analogue of Mac Lane's coherence theorem.

\subsubsection{Supercategories, superfunctors, and supernatural transformations}

In order to motivate the general definition of a super-2-category, we first work out an example in detail: that of supercategories, superfunctors, and supernatural transformations.

\begin{defn}
A \emph{supercategory} is a category $\mathcal{C}$ equipped with a strong categorical action of ${\mathbbm Z}_{2}$. A \emph{superfunctor} is a morphism of strong categorical ${\mathbbm Z}_{2}$-actions.\end{defn}

Unpacking the above, the data of a supercategory consists of (see \cite{KKO,KKO2}):
\begin{itemize}
\item a category $\mathcal{C}$,
\item a functor $\Psi_\mathcal{C}:\mathcal{C}\rightarrow\mathcal{C}$,
\item and a natural isomorphism $\xi_\mathcal{C}:\Psi_\mathcal{C}^2\rightarrow\mathbbm{1}_\mathcal{C}$.
\end{itemize}
The only condition is that
\begin{itemize}
\item $\xi_\mathcal{C}\otimes\mathbbm{1}_{\Psi_\mathcal{C}}=\mathbbm{1}_{\Psi_\mathcal{C}}\otimes\xi_\mathcal{C}$ as natural isomorphisms $\Psi_\mathcal{C}^3\rightarrow\Psi_\mathcal{C}$.
\end{itemize}
Here we are using $\otimes$ for horizontal composition (sometimes we omit the $\otimes$). The data of a superfunctor $(F,\alpha_F):(\mathcal{C},\Psi_\mathcal{C})\rightarrow(\mathcal{D},\Psi_\mathcal{D})$ is:
\begin{itemize}
\item a functor $F:\mathcal{C}\rightarrow\mathcal{D}$ and
\item a natural isomorphism $\alpha_F:F\otimes\Psi_\mathcal{C}\rightarrow\Psi_\mathcal{D}\otimes F$,
\end{itemize}
and the only condition is that
\begin{itemize}
\item $\mathbbm{1}_F\otimes\xi_\mathcal{C}=(\xi_\mathcal{D}\otimes\mathbbm{1}_F)(\mathbbm{1}_{\Psi_\mathcal{D}}\otimes\alpha_F)(\alpha_F\otimes\mathbbm{1}_{\Psi_\mathcal{C}})$.
\end{itemize}
Diagrammatically, we can draw $\mathcal{C},\mathcal{D}$ as regions, $\Psi_\mathcal{C},\Psi_\mathcal{D}$ as blue dashed lines, $F$ as a solid line, and $\alpha_F$ as a dashed-solid crossing:
\begin{equation*}
\alpha_F=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) [out=90, in=-90] to (1,1);
\draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (0,1);
\end{tikzpicture}},\qquad
\alpha_F^{-1}=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick] (1,0) [out=90, in=-90] to (0,1);
\draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (1,1);
\end{tikzpicture}}.
\end{equation*}
Then the relations in the definition of a supercategory can be expressed diagrammatically as follows: We draw a dashed cap for $\xi_\mathcal{C}$ and a dashed cup for $\xi_\mathcal{C}^{-1}$.
Then the equation $\xi_\mathcal{C}\otimes\mathbbm{1}_{\Psi_\mathcal{C}}=\mathbbm{1}_{\Psi_\mathcal{C}}\otimes\xi_\mathcal{C}$ reads
\begin{equation}
\label{eqn-psi-3-psi}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,1.5);
\draw[thick, color=blue, dashed] (.5,0) .. controls (.5,.8) and (1.5,.8) .. (1.5,0);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick, color=blue, dashed] (1.5,0) -- (1.5,1.5);
\end{tikzpicture}}
\quad,
\end{equation}
the fact that $\xi_\mathcal{C}$ and $\xi_\mathcal{C}^{-1}$ are inverses is encoded by
\begin{equation}
\label{eqn-dashed-bubble}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1) .. controls (0,1.8) and (1,1.8) .. (1,1);
\draw[thick, color=blue, dashed] (0,1) .. controls (0,.2) and (1,.2) .. (1,1);
\end{tikzpicture}}
\quad=\varnothing
\end{equation}
and
\begin{equation}
\label{eqn-dashed-cup-cap}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick, color=blue, dashed] (0,2) .. controls (0,1.2) and (1,1.2) .. (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0, 2);
\draw[thick, color=blue, dashed] (1,0) -- (1,2);
\end{tikzpicture}}
\quad,
\end{equation}
and the fact that $\alpha_F$ and $\alpha_F^{-1}$ are inverses is encoded by
\begin{equation}
\label{eqn-alpha-alpha-inverse}
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) [out=90, in=-90] to (1,1) [out=90, in=-90] to (0,2);
\draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) -- (0,2);
\draw[thick, color=blue, dashed] (1,0) -- (1,2);
\end{tikzpicture}}
\qquad\text{and}\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (1,1) [out=90, in=-90] to (0,2);
\draw[thick] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,2);
\draw[thick] (1,0) -- (1,2);
\end{tikzpicture}}\quad.
\end{equation}
These relations already imply as well
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,1.5);
\draw[thick, color=blue, dashed] (.5,1.5) .. controls (.5,.7) and (1.5,.7) .. (1.5,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1.5) .. controls (0,.7) and (1,.7) .. (1,1.5);
\draw[thick, color=blue, dashed] (1.5,0) -- (1.5,1.5);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,.75) .. controls (0,1.15) and (.5,1.15) .. (.5,.75) .. controls (.5,.35) and (1,.35) .. (1,.75) -- (1,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (1,0) -- (1,.75) .. controls (1,1.15) and (.5,1.15) .. (.5,.75) .. controls (.5,.35) and (0,.35) .. (0,.75) -- (0,1.5);
\end{tikzpicture}}
\quad.
\end{equation}
In particular, $\xi,\xi^{-1}$ are a biadjoint pair. Then the relation $\mathbbm{1}_F\otimes\xi_\mathcal{C}=(\xi_\mathcal{D}\otimes\mathbbm{1}_F)(\mathbbm{1}_{\Psi_\mathcal{D}}\otimes\alpha_F)(\alpha_F\otimes\mathbbm{1}_{\Psi_\mathcal{C}})$ in the definition of a superfunctor is equivalent to:
\begin{equation}\label{eqn-defn-superfunctor}
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) -- (0,1.5);
\draw[thick, color=blue, dashed] (.5,0) .. controls (.5,.8) and (1.5,.8) .. (1.5,0);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) [out=90, in=-90] to (1.5,1.5);
\draw[thick, color=blue, dashed] (.75,0) [out=90, in=-90] to (0,1) .. controls (0,1.8) and (1,1.8) .. (1.5,.5) -- (1.5,0);
\end{tikzpicture}}\quad.
\end{equation}

\begin{lem}\label{lem-dashed-cyclicity}
Any superfunctor $(F,\alpha_F):(\mathcal{C},\Psi_\mathcal{C})\rightarrow(\mathcal{D},\Psi_\mathcal{D})$ satisfies pitchfork-cyclicity with respect to the biadjunction $\xi\dashv\xi^{-1}\dashv\xi$.\end{lem}

\begin{proof}
In equations, the statement of the lemma is that the following both hold:
\begin{equation*}\begin{split}
&(\mathbbm{1}_F\otimes\xi_\mathcal{C})(\alpha_F^{-1}\otimes\mathbbm{1}_{\Psi_\mathcal{C}})=(\xi_\mathcal{D}\otimes\mathbbm{1}_F)(\mathbbm{1}_{\Psi_\mathcal{D}}\otimes\alpha_F),\\
&(\alpha_F\otimes\mathbbm{1}_{\Psi_\mathcal{C}})(\mathbbm{1}_F\otimes\xi^{-1})=(\mathbbm{1}_{\Psi_\mathcal{D}}\otimes\alpha_F^{-1})(\xi^{-1}\otimes\mathbbm{1}_F).
\end{split}\end{equation*}
Or, diagrammatically,
\begin{equation*}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (1,1) -- (1,1.5);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1.5) .. controls (0,.7) and (1,.7) .. (1,1.5);
\draw[thick] (.5,1.5) [out=-90, in=90] to (0, .5) -- (0,0);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1.5) .. controls (0,.7) and (1,.7) .. (1,1.5);
\draw[thick] (.5,1.5) [out=-90, in=90] to (1,.5) -- (1,0);
\end{tikzpicture}}
\quad.
\end{equation*}
The first one follows from a diagrammatic calculation:
\begin{equation*}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.75) and (1,.75) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,3);
\end{tikzpicture}}
\quad\refequal{\eqref{eqn-dashed-bubble}}\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,3);
\draw[thick, color=blue, dashed] (.5,1.5) .. controls (.5,2.25) and (1.5,2.25) .. (1.5,1.5) .. controls (1.5,.75) and (.5,.75) .. (.5,1.5);
\end{tikzpicture}}
\quad\refequal{\eqref{eqn-defn-superfunctor}}\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,1) [out=90, in=-90] to (1.5,3);
\draw[thick, color=blue, dashed] (0,2.5) .. controls (0,3.3) and (1,3.3) .. (1,2.8);
\draw[thick, color=blue, dashed] (1.5,1.5) .. controls (1.5,.75) and (.5,.75) .. (.5,1.2);
\draw[thick, color=blue, dashed] (.5,1.2) -- (0,2.5);
\draw[thick, color=blue, dashed] (1.5,1.5) -- (1,2.8);
\end{tikzpicture}}
\quad\refequal{\eqref{eqn-dashed-cup-cap}}\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (.5,1.2);
\draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (1.5,1.5);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,1) [out=90, in=-90] to (1.5,3);
\draw[thick, color=blue, dashed] (0,2.5) .. controls (0,3.3) and (1,3.3) .. (1,2.8);
\draw[thick, color=blue, dashed] (.5,1.2) -- (0,2.5);
\draw[thick, color=blue, dashed] (1.5,1.5) -- (1,2.8);
\end{tikzpicture}}
\quad\refequal{\eqref{eqn-alpha-alpha-inverse}}\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (1,1) -- (1,3);
\end{tikzpicture}}
\quad.
\end{equation*}
The second is proved similarly.
\end{proof}

The added utility in explicitly drawing the superfunctor $\Psi$ is that, in later examples, $\Psi$ will act nontrivially in a way we will want to keep visual track of. See the discussion of the super-bimodule super-2-category below.

\begin{example}
Setting $\alpha_\Psi=-\mathbbm{1}_{\Psi_\mathcal{C}^2}$, the pair $(\Psi_\mathcal{C},\alpha_\Psi)$ is a superfunctor for any supercategory $(\mathcal{C},\Psi_\mathcal{C})$. We draw this as
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (1,1);
\draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (0,1);
\end{tikzpicture}}
\quad=\quad-\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,1);
\draw[thick, color=blue, dashed] (.5,0) -- (.5,1);
\end{tikzpicture}}
\quad.
\end{equation}\end{example}

\begin{defn}\label{defn-supernatural-trans}
A \emph{supernatural transformation} between the superfunctors $(F,\alpha_F),(G,\alpha_G):(\mathcal{C},\Psi_\mathcal{C})\rightarrow(\mathcal{D},\Psi_\mathcal{D})$ is a natural transformation $\varphi:F\rightarrow G$ which commutes with the natural isomorphisms $\alpha_F,\alpha_G$ in the sense that the diagram
\begin{equation}
\xymatrix{
F\otimes\Psi_\mathcal{C}\ar[rr]^-{\varphi\otimes\mathbbm{1}_\Psi}\ar[d]^-{\alpha_F}&&G\otimes\Psi_\mathcal{C}\ar[d]^-{\alpha_G}\\
\Psi_\mathcal{D}\otimes F\ar[rr]^-{\mathbbm{1}_\Psi\otimes\varphi}&&\Psi_\mathcal{D}\otimes G
}
\end{equation}
commutes.
\end{defn}

Diagrammatically, if we draw the functors $F,G$ as solid lines, this means:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) -- (0,.75) [out=90, in=-90] to (.5,1.5);
\draw[thick, color=blue, dashed] (.5,0) -- (.5,.75) [out=90, in=-90] to (0,1.5);
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,.5) {\small$\varphi$};
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) [out=90, in=-90] to (.5,.75) -- (.5,1.5);
\draw[thick, color=blue, dashed] (.5,0) [out=90, in=-90] to (0,.75) -- (0,1.5);
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (.5,1) {\small$\varphi$};
\end{tikzpicture}}
\quad.
\end{equation}
This sort of diagram occurs when $\varphi$ is an \emph{even} supernatural transformation.
If $\vartheta:F\rightarrow{\mathbbm P}si G$ is an $\blacktriangleleft$mph{odd} supernatural transformation, the diagrammatic presentation of Definition \ref{defn-supernatural-trans} is: $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) -- (0,.75) [out=90, in=-90] to (.5,1.5) -- (.5,2); \draw[thick, color=blue, dashed] (.5,0) -- (.5,.75) [out=90, in=-90] to (0,1.5) -- (0,2); \draw[thick, color=blue, dashed] (0,.8) [out=135, in=-90] to (-.5,1.5) -- (-.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,.5) {\small$\varphi$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) [out=90, in=-90] to (.5,.75) -- (.5,2); \draw[thick, color=blue, dashed] (.5,0) [out=90, in=-90] to (0,.75) -- (0,2); \draw[thick, color=blue, dashed] (.5,1.3) [out=135, in=-90] to (-.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (.5,1) {\small$\varphi$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad-\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) [out=90, in=-90] to (.5,.75) -- (.5,2); \draw[thick, color=blue, dashed] (.5,0) [out=90, in=-90] to (-.5,1) -- (-.5,2); \draw[thick, color=blue, dashed] (.5,1.3) [out=135, in=-90] to (0,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (.5,1) {\small$\varphi$}; $\blacktriangleleft$nd{tikzpicture}} \quad. 
\end{equation}
In particular, if $\vartheta:F\rightarrow\Psi F'$, $\varphi:G\rightarrow\Psi G'$ are two odd supernatural transformations, then
\begin{equation}\label{eqn-odd-commute-1}
(\vartheta\otimes\mathbbm{1}_{G'})(\mathbbm{1}_F\otimes\varphi)=(\mathbbm{1}_{F'}\otimes\varphi)(\vartheta\otimes\mathbbm{1}_G)
\end{equation}
as maps $FG\rightarrow\Psi F'\Psi G'$, but
\begin{equation}\label{eqn-odd-commute-2}\begin{split}
&(\xi\otimes\mathbbm{1}_{F'G'})(\mathbbm{1}_{\Psi}\otimes\vartheta\otimes\mathbbm{1}_{G'})(\alpha_F\otimes\mathbbm{1}_{G'})(\mathbbm{1}_F\otimes\varphi)\\
&\qquad=(\xi\otimes\mathbbm{1}_{F'G'})(\alpha_{\Psi}\otimes\mathbbm{1}_{F'G'})(\mathbbm{1}_{\Psi}\otimes\alpha_{F'}\otimes\mathbbm{1}_{G'})(\mathbbm{1}_{\Psi F'}\otimes\varphi)(\vartheta\otimes\mathbbm{1}_G)\\
&\qquad=-(\xi\otimes\mathbbm{1}_{F'G'})(\mathbbm{1}_{\Psi}\otimes\alpha_{F'}\otimes\mathbbm{1}_{G'})(\mathbbm{1}_{\Psi F'}\otimes\varphi)(\vartheta\otimes\mathbbm{1}_G)
\end{split}\end{equation}
as maps $FG\rightarrow F'G'$.
Diagrammatically, equations $\blacktriangleleft$qref{eqn-odd-commute-1} and $\blacktriangleleft$qref{eqn-odd-commute-2} are expressed as: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-s2c-commute-1} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) -- (0,2); \draw[thick, color=blue, dashed] (0,1) [out=135, in=-90] to (-.5,1.5) -- (-.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,1) {\small$\vartheta$}; \draw[thick] (1,0) -- (1,2); \draw[thick, color=blue, dashed] (1,.5) [out=135, in=-90] to (.5, 1) -- (.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,.5) {\small$\varphi$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) -- (0,2); \draw[thick, color=blue, dashed] (0,.5) [out=135, in=-90] to (-.5,1) -- (-.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,.5) {\small$\vartheta$}; \draw[thick] (1,0) -- (1,2); \draw[thick, color=blue, dashed] (1,1) [out=135, in=-90] to (.5, 1.5) -- (.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,1) {\small$\varphi$}; $\blacktriangleleft$nd{tikzpicture}}\quad, $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-s2c-commute-2} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) -- (0,2); \draw[thick, color=blue, dashed] (0,1.2) [out=135, in=-90] to (-.5,1.7); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,1.2) {\small$\vartheta$}; \draw[thick] (1,0) -- (1,2); \draw[thick, color=blue, dashed] (1,.5) [out=135, in=-90] to (-1,1.5) -- (-1,1.7); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,.5) {\small$\varphi$}; \draw[thick, color=blue, dashed] (-.5,1.7) [out=90, in=90] to (-1,1.7); $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) 
-- (0,2); \draw[thick, color=blue, dashed] (0,.5) [out=135, in=-90] to (-.5,1) -- (-.5,1.75); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,.5) {\small$\vartheta$}; \draw[thick] (1,0) -- (1,2); \draw[thick, color=blue, dashed] (1,1) [out=135, in=-90] to (-1,1.75); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,1) {\small$\varphi$}; \draw[thick, color=blue, dashed] (-.5,1.75) [out=90, in=90] to (-1,1.75); $\blacktriangleleft$nd{tikzpicture}} \quad=\quad-\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (0,0) -- (0,2); \draw[thick, color=blue, dashed] (0,.5) [out=135, in=-90] to (-1,1.5) -- (-1,1.75); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (0,.5) {\small$\vartheta$}; \draw[thick] (1,0) -- (1,2); \draw[thick, color=blue, dashed] (1,1) [out=135, in=-90] to (-.5,1.75); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,1) {\small$\varphi$}; \draw[thick, color=blue, dashed] (-.5,1.75) [out=90, in=90] to (-1,1.75); $\blacktriangleleft$nd{tikzpicture}} \quad. $\blacktriangleleft$nd{equation} \subsubsection{Super-2-categories in general}\lambdaanglebel{subsubsec-super-2-cat-general} Having treated the example of supercategories, superfunctors, and supernatural transformations in detail both algebraically and diagrammatically, we now recall the definition of a super-2-category from~\cite[Definition 7.9]{KKO2}. 
\begin{defn}\label{defn-super-2-cat-second}
A \emph{super-2-category} $\mathcal{C}$ consists of:
\begin{itemize}
\item a collection of \emph{objects} (or \emph{0-morphisms}),
\item for each pair of objects $X,Y$ a collection of \emph{1-morphisms} denoted ${\rm Hom}(X,Y)$, and
\item for each pair of 1-morphisms with the same domain and codomain $F,G:X\rightarrow Y$ a collection of \emph{2-morphisms} denoted ${\rm Hom}(F,G)$;
\item for objects $X,Y,Z$, composition maps for 1-morphisms ${\rm Hom}(Y,Z)\times{\rm Hom}(X,Y)\rightarrow{\rm Hom}(X,Z)$ which we write as $(G,F)\mapsto G\otimes F$ or simply $(G,F)\mapsto GF$,
\item for 1-morphisms $F,G,H\in{\rm Hom}(X,Y)$, a vertical composition map ${\rm Hom}(G,H)\times{\rm Hom}(F,G)\rightarrow{\rm Hom}(F,H)$ which we write as $(\vartheta,\varphi)\mapsto\vartheta\varphi$,
\item for 1-morphisms $F,F'\in{\rm Hom}(X,Y)$ and $G,G'\in{\rm Hom}(Y,Z)$, a horizontal composition map ${\rm Hom}(G\otimes F',G'\otimes F')\times{\rm Hom}(G\otimes F,G\otimes F')$ denoted $(\vartheta\otimes\mathbbm{1}_{F'},\mathbbm{1}_G\otimes\varphi)\mapsto\vartheta\otimes\varphi$,
\item for each object $X$ an \emph{identity 1-morphism} $\mathbbm{1}_X\in{\rm Hom}(X,X)$,
\item for each 1-morphism $F$ an \emph{identity 2-morphism} $\mathbbm{1}_F\in{\rm Hom}(F,F)$,
\item for each object $X$ a \emph{parity shift 1-morphism} $\Psi_X\in{\rm Hom}(X,X)$ and an invertible 2-morphism $\xi\in{\rm Hom}(\Psi_X^2,\mathbbm{1}_X)$, and
\item for each 1-morphism $F:X\rightarrow Y$ a 2-morphism $\alpha_F\in{\rm Hom}(F\Psi_X,\Psi_YF)$.
\end{itemize}
These data are subject to the following conditions:
\begin{itemize}
\item horizontal composition is associative, and $\mathbbm{1}_X$ is a unit for each object $X$;
\item vertical composition is associative, and $\mathbbm{1}_F$ is a unit for each 1-morphism $F$;
\item interchange law: if $F,F',F'',G,G',G''$ are 1-morphisms and $\alpha,\alpha',\beta,\beta'$ are 2-morphisms such that the compositions
\begin{equation*}
(\alpha'\otimes\beta')(\alpha\otimes\beta),\quad(\alpha'\alpha)\otimes(\beta'\beta)
\end{equation*}
both make sense, then these two expressions are equal;
\item for all objects $X$, the 2-morphism $\xi_X:\Psi_X^2\rightarrow\mathbbm{1}_X$ is invertible;
\item if $GF$ is the composition of the 1-morphisms $F$ and $G$, then $\alpha_{G\otimes F}=(\alpha_G\otimes\mathbbm{1}_F)(\mathbbm{1}_G\otimes\alpha_F)$;
\item for all 1-morphisms $F:X\rightarrow Y$, the 2-morphism $\alpha_F:F\otimes\Psi_X\rightarrow\Psi_Y\otimes F$ is invertible;
\item for all objects $X$, there is an equality $\xi_X\otimes\mathbbm{1}_{\Psi_X}=\mathbbm{1}_{\Psi_X}\otimes\xi_X$ of 2-morphisms $\Psi_X^3\rightarrow\Psi_X$;
\item for all 1-morphisms $F:X\rightarrow Y$,
\begin{equation}
\mathbbm{1}_F\otimes\xi_X=(\xi_Y\otimes\mathbbm{1}_F)(\mathbbm{1}_{\Psi_Y}\otimes\alpha_F)(\alpha_F\otimes\mathbbm{1}_{\Psi_X});
\end{equation}
\item for all objects $X$, $\alpha_{\Psi_X}=-\mathbbm{1}_{\Psi_X^2}$;
\item for all 2-morphisms $\vartheta:F\rightarrow G$, $\alpha_G\vartheta=\vartheta\alpha_F$.
\end{itemize}
\end{defn}

\begin{example}
There is a super-2-category $\mathcal{SC}at$ whose objects are supercategories, 1-morphisms are superfunctors, and 2-morphisms are supernatural transformations.
\end{example}

The general diagrammatics for super-2-categories are exactly as described above for the example $\mathcal{SC}at$.

\begin{example}
There is a super-2-category $\mathcal{SB}im$ whose objects are superalgebras, 1-morphisms are super-bimodules, and 2-morphisms are (even) homomorphisms of super-bimodules. The parity shift 1-morphism is denoted $\Pi$; if $M$ is an $(A,B)$-super-bimodule, then $\Pi M$ is the $(A,B)$-super-bimodule whose underlying space is $M$ with the ${\mathbbm Z}_{2}$-grading reversed and the left $A$-action twisted by the parity involution on $A$. That is,
\begin{equation*}
a\in A\text{ acts on }\Pi M\text{ by sending }m\text{ to }\iota_A(a)m=(-1)^{p(a)}am.
\end{equation*}
Horizontal composition of 1-morphisms is given by the tensor product of super-bimodules. There is also the obvious ${\mathbbm Z}$-graded variant of this super-2-category.
\end{example}

In the formalism of super-2-categories, the generator of ``the ${\mathbbm Z}_2$-action'' is the family of 1-morphisms $\lbrace\Psi_X\rbrace$. As equations \eqref{eqn-s2c-commute-1} and \eqref{eqn-s2c-commute-2} show, this leads to nontrivial signs between 2-morphisms. These signs are a more general behavior than the ordinary commutativity resulting from the interchange law for 2-categories. A basic example: for any object $X$ of a super-2-category, the commutative algebra ${\rm End}(\mathbbm{1}_X)$ is now the even part of a supercommutative superalgebra $\Pi{\rm End}(\mathbbm{1}_X):={\rm End}(\mathbbm{1}_X\oplus\Psi_X)$.
\subsection{Super-2-functors} $\blacktriangleright$egin{defn} A super-2-functor $\mathcal{G}:\mathcal{C}\rightarrow\mathcal{D}$ consists of the following data: $\blacktriangleright$egin{itemize} \item a function $\mathcal{G}:\text{Ob}(\mathcal{C})\rightarrow\text{Ob}(\mathcal{D})$, \item for each pair $X,Y\in\text{Ob}(\mathcal{C})$, a functor ${\rm Hom}(X,Y)\rightarrow{\rm Hom}(\mathcal{G}(X),\mathcal{G}(Y))$ $\blacktriangleleft$nd{itemize} such that: $\blacktriangleright$egin{itemize} \item for $X\in\text{Ob}(\mathcal{C})$, the functor ${\rm Hom}(X,X)\rightarrow{\rm Hom}(\mathcal{G}(X),\mathcal{G}(X))$ takes ${\mathbbm P}si_X$ to ${\mathbbm P}si_{\mathcal{G}(X)}$ and $\xi_X$ to $\xi_{\mathcal{G}(X)}$, \item for $X,Y\in\text{Ob}(\mathcal{C})$, the functor ${\rm Hom}(X,Y)\rightarrow{\rm Hom}(\mathcal{G}(X),\mathcal{G}(Y))$ takes $\alpha_F$ to $\alpha_{\mathcal{G}(F)}$. $\blacktriangleleft$nd{itemize} $\blacktriangleleft$nd{defn} Or, in other words, a super-2-functor maps objects to objects, 1-morphisms to 1-morphisms, and 2-morphisms, compatibly with horizontal composition, vertical composition, parity shift 1-morphisms, and ``dashed-solid'' crossings. \subsection{Super diagram conventions} \lambdaanglebel{subsec-superconventions} A consequence of the axioms $\blacktriangleright$egin{equation*}$\blacktriangleright$egin{split} &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) -- (0,1.5); \draw[thick, color=blue, dashed] (.5,0) .. controls (.5,.8) and (1.5,.8) .. (1.5,0); $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0); \draw[thick, color=blue, dashed] (1.5,0) -- (1.5,1.5); $\blacktriangleleft$nd{tikzpicture}} \quad,\qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,1) .. controls (0,1.8) and (1,1.8) .. (1,1); \draw[thick, color=blue, dashed] (0,1) .. 
controls (0,.2) and (1,.2) .. (1,1); $\blacktriangleleft$nd{tikzpicture}} \quad=\varnothing,\\ &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (1,2); \draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (0,2); $\blacktriangleleft$nd{tikzpicture}} \quad=\quad-\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) -- (0,2); \draw[thick, color=blue, dashed] (1,0) -- (1,2); $\blacktriangleleft$nd{tikzpicture}} \quad,\qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0); \draw[thick, color=blue, dashed] (0,2) .. controls (0,1.2) and (1,1.2) .. (1,2); $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) -- (0, 2); \draw[thick, color=blue, dashed] (1,0) -- (1,2); $\blacktriangleleft$nd{tikzpicture}} \quad, $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation*} is that for each object $X$ in a super-2-category $\mathcal{C}$, the space of 2-morphisms between 1-morphisms ${\mathbbm P}si^a_X$ and ${\mathbbm P}si^b_X$ with $a$ congruent to $b$ modulo two consisting only of dashed lines is one dimensional. The relations imply that any diagram pairing the $a+b$ endpoints with no intersecting strands represents the same 2-morphism in $\mathcal{C}$. Choosing any such diagram we obtain a canonical isomorphism ${\mathbbm P}si^a_X\rightarrow{\mathbbm P}si^b_X$ whenever $a$ is congruent to $b$ modulo two. In this case we introduce a shorthand to simplify our graphical calculus. 
We express this isomorphism using a thickened strand $\blacktriangleright$egin{equation*} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, double distance=1pt, dashed] (0,0) -- (0,1.2) node[above, blue](){$\scriptstyle b$}; \node[blue] at (0,-.25){$\scriptstyle a$}; $\blacktriangleleft$nd{tikzpicture}}\quad \text{representing} \quad \xy (0,-8)*+{{\mathbbm P}si_X^a}="1" ; (0,8)*+{{\mathbbm P}si_X^b}="2"; {\ar@{->} "1"; "2"}; $\blacktriangleleft$ndxy \qquad\text{ for }a$\blacktriangleleft$quiv b\text{ mod }2. $\blacktriangleleft$nd{equation*} In most cases the labelling of the source and target will be clear from the context and we will often omit these labels. It will also be convenient to allow negative labels for these thick strands with the interpretation that a thick strand with source labeled $a$ and target labelled $b$ represents the canonical isomorphism between ${\mathbbm P}si_X^{|a|}$ and ${\mathbbm P}si_X^{|b|}$. \section{Odd categorified structures and their actions} \subsection{Strong supercategorical actions}\lambdaanglebel{subsec-strong-supercat-action} We will concern ourselves with two ``strong'' notion of categorification of $U_qdotpi$, the covering version of Lusztig's idempotented form of quantum $\mathfrak{sl}_2$ at a generic parameter $q$. The first is that of a strong supercategorical action, which we define in this section. The second, which is $\blacktriangleleft$mph{a priori} stronger, is a 2-functor from a certain super-2-category $U_qdotc$. The first main result of this paper is to prove that a strong supercategorical action always extends to such a 2-functor. 
\begin{defn} \label{def_strong}
Let $\mathcal{C}$ be a graded idempotent complete $\Bbbk$-linear 2-category such that
\begin{itemize}
\item The objects of $\mathcal{C}$ are indexed by the integral weights of $\mathfrak{sl}_2$ (which we identify with ${\mathbbm Z}$); write $\mathbf{1}_\lambda$ for the identity 1-morphism of the object $\lambda$;
\item For each weight $\lambda$ there are 1-morphisms
\begin{equation*}
\mathtt{E}\mathbf{1}_\lambda:\lambda\rightarrow\lambda+2,\qquad \mathbf{1}_\lambda\mathtt{F}:\lambda+2\rightarrow\lambda,\qquad \Pi\mathbf{1}_\lambda:\lambda\rightarrow\lambda.
\end{equation*}
We also assume that a right adjoint to $\mathtt{E}\mathbf{1}_\lambda$ exists and that there are fixed adjunctions
\begin{equation}
\mathbf{1}_\lambda\mathtt{F}\ads{-\lambda-1}\dashv\mathtt{E}\mathbf{1}_\lambda,\qquad \Pi\mathbf{1}_\lambda\dashv\Pi\mathbf{1}_\lambda,
\end{equation}
and natural isomorphisms
\begin{equation}
\alpha_\mathtt{E}:\mathtt{E}\Pi\rightarrow\Pi\mathtt{E},\qquad\alpha_\mathtt{F}:\mathtt{F}\Pi\rightarrow\Pi\mathtt{F}.
\end{equation}
\end{itemize}
All 1-morphisms of $\mathcal{C}$ are generated from $\mathtt{E},\mathtt{F},\Pi$ by taking direct sums, compositions, and grading shifts. A \emph{strong supercategorical action} of $\dot{U}_{q,\pi}$ on $\mathcal{C}$ consists of the following data and conditions:
\begin{enumerate}
\item (Integrability) The object $\lambda+2r$ is isomorphic to the zero object for $r\ll0$ and for $r\gg0$.
\item \label{co:hom} (Brick condition) ${\rm Hom}_\mathcal{C}(\mathbf{1}_\lambda,\Pi^k\mathbf{1}_\lambda\ads{\ell})=0$ if $\ell<0$ and is one-dimensional if $\ell=0$ and $k=0$.
Moreover, the space of 2-morphisms between any two 1-morphisms is finite dimensional.
\item (Covering isomorphisms) We are given isomorphisms in $\mathcal{C}$:
\begin{align}
\label{eq:EF-rel} &\mathtt{F}\mathtt{E}\mathbf{1}_\lambda\cong\mathtt{E}\Pi\mathtt{F}\mathbf{1}_\lambda\oplus\bigoplus_{k=0}^{-\lambda-1}\Pi^{\lambda+1+k}\mathbf{1}_\lambda\ads{-\lambda-1-2k} &\text{if }\lambda\leq0,\\
&\mathtt{E}\mathtt{F}\mathbf{1}_\lambda\cong\mathtt{F}\Pi\mathtt{E}\mathbf{1}_\lambda\oplus\bigoplus_{k=0}^{\lambda-1}\Pi^k \mathbf{1}_\lambda\ads{\lambda-1-2k} &\text{if }\lambda\geq0.\label{eq:FE-rel}
\end{align}
\item \label{co:oddNil} (Odd nilHecke action) There are 2-morphisms $X:\mathtt{E}\rightarrow\Pi\mathtt{E}$ and $T:\mathtt{E}^2\rightarrow\Pi\mathtt{E}^2$ such that for each $n\geq1$, the 2-morphisms
\begin{equation}\begin{split}
&X_i=\alpha_\mathtt{E}^{-i+1}(\mathbbm{1}^{i-1}\otimes X\otimes\mathbbm{1}^{n-i}):\mathtt{E}^n\rightarrow\Pi\mathtt{E}^n,\\
&T_i=\alpha_\mathtt{E}^{-i+1}(\mathbbm{1}^{i-1}\otimes T\otimes\mathbbm{1}^{n-i-1}):\mathtt{E}^n\rightarrow\Pi\mathtt{E}^n
\end{split}\end{equation}
generate an action of $\mathrm{ONH}_n$ on $\Pi{\rm END}(\mathtt{E}^n)$.
\end{enumerate}
\end{defn}
This last piece of data, the odd nilHecke action, is the key ingredient that makes the action ``strong''; the idea behind this observation goes back to the pioneering work of Chuang and Rouquier \cite{CR}. The relations ensuring an action of the odd nilHecke algebra are given diagrammatically in equations \eqref{eq:oddnilquad}--\eqref{eq:onil-dot}.
\textbf{Important convention.} The integrability condition above implies that ``most'' objects are isomorphic to the zero object. If $\lambda$ is the zero object then, by definition, ${\rm Hom}_{\mathcal{C}}(\mathbf{1}_\lambda,\Pi^k\mathbf{1}_\lambda \langle l \rangle) = 0$ for all $l$. So, to be precise, condition (\ref{co:hom}) above should say that ${\rm Hom}_{\mathcal{C}}(\mathbf{1}_\lambda,\mathbf{1}_\lambda)$ is one-dimensional if $\lambda$ is non-zero. There are many other such instances later in this paper. The convention is that any statement about a certain Hom being non-zero assumes that all objects involved are non-zero (otherwise the Hom space is automatically zero).

For a 1-morphism $u$, let $u^L$ (respectively $u^R$) denote its left (respectively right) adjoint. The requirement that $\mathbf{1}_\lambda\mathtt{F} \ads{-\lambda-1} \dashv\mathtt{E}\mathbf{1}_\lambda$ implies that $(\mathtt{E} \mathbf{1}_\lambda)^L = \mathbf{1}_\lambda \mathtt{F} \langle -\lambda -1 \rangle$ and $\left(\mathtt{F}\mathbf{1}_\lambda\right)^R = \mathtt{E}\mathbf{1}_{\lambda-2} \langle -\lambda+1\rangle$. In what follows we make use of the fact that $(u^L)^R=u$ and $(v^R)^L=v$ for all 1-morphisms $u,v$ and that the adjunctions give rise to isomorphisms
\begin{alignat}{3}
{\rm Hom}(ux,y) &\; \cong \;& {\rm Hom} (x, u^R y), &\qquad {\rm Hom}(x,uy)&\; \cong \;& {\rm Hom} (u^Lx, y), \notag\\
{\rm Hom}(xv,y) &\; \cong \;& {\rm Hom}(x,yv^L), &\qquad {\rm Hom}(x,yv) &\; \cong \;& {\rm Hom}(xv^R,y).
\end{alignat}

\subsubsection{Cancellation property}
The fact that the space of maps between any two 1-morphisms in a strong supercategorical action is finite dimensional means that the Krull-Schmidt property holds. This means that any 1-morphism has a unique direct sum decomposition (see Section 2.2 of \cite{Rin}).
In particular, this means that if $A,B,C$ are morphisms and $V$ is a ${\mathbbm Z}$-graded vector space then we have the following cancellation laws (see Section 4 of \cite{CK3}):
\begin{align*}
A \oplus B \cong A \oplus C &\;\Rightarrow\; B \cong C, \\
A \otimes_\Bbbk V \cong B \otimes_\Bbbk V &\;\Rightarrow\; A \cong B.
\end{align*}
A brick in a (graded) category is an indecomposable object $A$ such that ${\rm End}^k(A)=0$ for $k<0$ and ${\rm End}(A)={\rm End}^0(A)\cong \Bbbk$. For example, by Lemma \ref{lem:E} below, $\mathtt{E} \mathbf{1}_{\mu}$ is a brick.

\subsection{Definition of the super-2-category $\dot{\mathcal{U}}_{q,\pi}$}\label{subsec-defn-odd-udot}
The whole of this subsection is a definition of the super-2-category $\dot{\mathcal{U}}_{q,\pi}(\mathfrak{sl}_2)$, or $\dot{\mathcal{U}}_{q,\pi}$ for short. This is entirely analogous to the definition in Section 5.2 of \cite{Lau1}.

\subsubsection{Objects and 1-morphisms}
The super-2-category $\dot{\mathcal{U}}_{q,\pi}$ will be the idempotent completion (Karoubi envelope) of a combinatorially defined graded $\Bbbk$-linear super-2-category $\mathcal{U}_{q,\pi}$. The objects of $\mathcal{U}_{q,\pi}$ are indexed by integral weights for $\mathfrak{sl}_2$ (as usual, identified with ${\mathbbm Z}$) and the 1-morphisms are direct sums, compositions, and degree shifts of the following generating 1-morphisms:
\begin{equation}
\mathcal{E}\mathbbm{1}_\lambda:\lambda\rightarrow\lambda+2,\qquad \mathbbm{1}_\lambda\mathcal{F}:\lambda+2\rightarrow\lambda,\qquad \Pi\mathbbm{1}_\lambda:\lambda\rightarrow\lambda,
\end{equation}
where $\Pi\mathbbm{1}_\lambda=\Psi\mathbbm{1}_\lambda$ is the parity shift 1-morphism for the object $\lambda$.
These generating 1-morphisms are expressed diagrammatically as strands: $\blacktriangleright$egin{equation}$\blacktriangleright$egin{split} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) -- (0,2) node[pos=.5, right](){\small$\lambdaanglembda$} node[pos=.5, left](){\small$\lambdaanglembda+2$}; $\blacktriangleleft$nd{tikzpicture}} \;\;=\;\; \mathcal{E}1bbl,\qquad \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) -- (0,2) node[pos=.5, right](){\small$\lambdaanglembda+2$} node[pos=.5, left](){\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \;\;=\;\;\mathcal{F}1bbltwo,\qquad \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) -- (0,2) node[pos=.5, right, black](){\small$\lambdaanglembda$} node[pos=.5, left, black](){\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \;\;=\;\;{\mathbbm P}i1bbl. $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \subsubsection{Generating 2-morphisms} We give $U_qc$ the following generating 2-morphisms: $\blacktriangleright$egin{equation} \lambdaanglebel{eq_generators} $\blacktriangleright$egin{split} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) -- (0,1.5) node[pos=.5, shape=coordinate](DOT){} node[pos=.25, left](){\small$\lambdaanglembda+2$} node[pos=.25, right](){\small$\lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (-.5,1.5); \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad x_\mathcal{E}:\mathcal{E}1bbl\ds{-2}\rightarrow{\mathbbm P}i\mathcal{E}1bbl, &\qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) -- (0,1.5) node[pos=.5, shape=coordinate](DOT){} node[pos=.25, left](){\small$\lambdaanglembda$} node[pos=.25, right](){\small$\lambdaanglembda+2$}; \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (.5,1.5); \node at (DOT) {$\blacktriangleright$bullet}; 
$\blacktriangleleft$nd{tikzpicture}} \quad=\quad x_\mathcal{F}:\mathcal{F}1bbl\ds{-2}\rightarrow\mathcal{F}{\mathbbm P}i1bbl,\\ \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, left](){\small$\lambdaanglembda+4$}; \draw[thick, color=blue, dashed] (CROSSING) [out=135, in=-90] to (-.5,1.5); \draw[thick, ->] (.5,0) .. controls (.5,.5) and (0,1) .. (0,1.5) node[pos=.25, right](){\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \partial_\mathcal{E}:\mathcal{E}^21bbl\ds{2}\rightarrow{\mathbbm P}i\mathcal{E}^21bbl, &\qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, left](){\small$\lambdaanglembda$}; \draw[thick, color=blue, dashed] (CROSSING) [out=45, in=-90] to (1,1.5); \draw[thick, <-] (.5,0) .. controls (.5,.5) and (0,1) .. (0,1.5) node[pos=.25, right](){\small$\lambdaanglembda+4$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \partial_\mathcal{F}:\mathcal{F}^21bbl\ds{2}\rightarrow\mathcal{F}^2{\mathbbm P}i1bbl,\\ \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1,0) .. controls (1,.8) and (0,.8) .. (0,0) node[pos=.25, right](){\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad$\blacktriangleleft$psilonsilon:\mathcal{F}\mathcal{E}1bbl\ds{-\lambdaanglembda-1}\rightarrow1bbl, &\qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1,1) .. controls (1,.2) and (0,.2) .. (0,1) node[pos=.25, right](){\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad\widetilde{e}a:1bbl\ds{\lambdaanglembda-1}\rightarrow\mathcal{E}\mathcal{F}1bbl. 
\end{split}\end{equation}
For each weight there are two more generating 2-morphisms
\begin{equation} \label{eq_generators_cont}
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (1,0) .. controls (1,.8) and (0,.8) .. (0,0) node[pos=.25, right](){\small$\lambda$} node[pos=.5, shape=coordinate](TOPCAP){};
\draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP) -- (.5,1) node[above, blue]{$\scriptstyle \lambda-1$};
\end{tikzpicture}}
\quad=\quad\epsilon':\mathcal{E}\mathcal{F}\mathbbm{1}_{\lambda}\ds{\lambda-1}\rightarrow\Pi^{\lambda-1}\mathbbm{1}_{\lambda},\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (1,1) .. controls (1,.2) and (0,.2) .. (0,1) node[pos=.25, right](){\small$\lambda$} node[pos=.5, shape=coordinate](BOTTOMCUP){};
\draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) -- (.5,1) node[pos=1, above, blue]{$\scriptstyle \lambda+1$};
\end{tikzpicture}}
\quad=\quad\eta':\mathbbm{1}_{\lambda}\ds{-\lambda-1}\rightarrow\mathcal{F}\Pi^{\lambda+1}\mathcal{E}\mathbbm{1}_{\lambda},
\end{equation}
where double dashed lines are defined as in Section~\ref{subsec-superconventions}.

There are other 2-morphisms which are implicitly defined by virtue of $U_qc$ being a super-2-category. A few of these are:
\begin{equation} \label{eq-dashed-solid-cross}
\begin{split}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0) node[pos=.75, right, black]{\small$\lambda$};
\end{tikzpicture}}
\quad=\quad\xi:\Pi^2\mathbbm{1}_{\lambda}\rightarrow\mathbbm{1}_{\lambda}, &\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1) .. controls (0,.2) and (1,.2) .. (1,1) node[pos=.75, right, black]{\small$\lambda$};
\end{tikzpicture}}
\quad=\quad\xi^{-1}:\mathbbm{1}_{\lambda}\rightarrow\Pi^2\mathbbm{1}_{\lambda},\\
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, right](){\small$\lambda$} node[pos=.5, left](){\small$\lambda+2$};
\draw[thick, color=blue, dashed] (.5,0) .. controls(.5,.5) and (0,1) .. (0,1.5);
\end{tikzpicture}}
\quad=\quad \alpha_\mathcal{E}:\mathcal{E}\Pi\mathbbm{1}_{\lambda}\rightarrow\Pi\mathcal{E}\mathbbm{1}_{\lambda}, &\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (.5,0) .. controls (.5,.5) and (0,1) .. (0,1.5) node[pos=.5, right](){\small$\lambda$} node[pos=.5, left](){\small$\lambda+2$};
\draw[thick, color=blue, dashed] (0,0) .. controls(0,.5) and (.5,1) .. (.5,1.5);
\end{tikzpicture}}
\quad=\quad\alpha_\mathcal{E}^{-1}:\Pi\mathcal{E}\mathbbm{1}_{\lambda}\rightarrow\mathcal{E}\Pi\mathbbm{1}_{\lambda},\\
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, right](){\small$\lambda+2$} node[pos=.5, left](){\small$\lambda$};
\draw[thick, color=blue, dashed] (.5,0) .. controls(.5,.5) and (0,1) .. (0,1.5);
\end{tikzpicture}}
\quad=\quad \alpha_\mathcal{F}:\mathbbm{1}_{\lambda}\mathcal{F}\Pi\rightarrow\mathbbm{1}_{\lambda}\Pi\mathcal{F}, &\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (.5,0) .. controls (.5,.5) and (0,1) .. (0,1.5) node[pos=.5, right](){\small$\lambda+2$} node[pos=.5, left](){\small$\lambda$};
\draw[thick, color=blue, dashed] (0,0) .. controls(0,.5) and (.5,1) .. (.5,1.5);
\end{tikzpicture}}
\quad=\quad \alpha_\mathcal{F}^{-1}:\mathbbm{1}_{\lambda}\Pi\mathcal{F}\rightarrow\mathbbm{1}_{\lambda}\mathcal{F}\Pi.\\
\end{split}\end{equation}

The following diagram constructed from $\epsilon'$ will play the role of counit in the adjunction $\mathcal{F}\Pi\dashv\mathcal{E}$:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (1,0) .. controls (1,.8) and (0,.8) .. (0,0) node[pos=.5, shape=coordinate](TOPCAP){};
\draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP) .. controls (.5,1.3) and (1.5,1.3) .. (1.5,.5) -- (1.5,0);
\node[blue] at (.4,1.2) {$\scriptstyle \lambda-1$};
\node at (2.1,.6) {\small$\lambda$};
\end{tikzpicture}}\quad.
\end{equation}

\subsubsection{Defining relations among generating 2-morphisms}
The definition of a super-2-category ensures that all diagrams enjoy invariance for all planar isotopies of the dashed strands. Thus we are free to write
\begin{equation*}
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) .. controls (0,.5) and (.5, .5) .. (.5,1) node[pos=.5](CROSSING1){};
\draw[thick, ->] (.5,1) .. controls (.5,1.5) and (0,1.5) .. (0,2) node[pos=.5](CROSSING2){};
\draw[thick, ->] (.5,0) .. controls (.5,.5) and (0, .5) .. (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,2);
\draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-.5,1) [out=90, in=180] to (CROSSING2);
\end{tikzpicture}}
\qquad\text{in place of}\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) .. controls (0,.5) and (.5, .5) .. (.5,1) node[pos=.5](CROSSING1){};
\draw[thick] (.5,1) .. controls (.5,1.5) and (0,1.5) .. (0,2) node[pos=.5](CROSSING2){};
\draw[thick, ->] (0,2) -- (0,2.5);
\draw[thick, ->] (.5,0) .. controls (.5,.5) and (0, .5) .. (0,1) ..
controls (0,1.5) and (.5,1.5) .. (.5,2) -- (.5,2.5);
\draw[thick, color=blue, dashed] (CROSSING1) [out=135, in=-90] to (-1,1.5) -- (-1,2) .. controls (-1,2.35) and (-.5,2.35) .. (-.5,2) [out=-90, in=135] to (CROSSING2);
\end{tikzpicture}}\quad.
\end{equation*}
The relations we impose on the 2-morphisms of $U_qc$ are:
\begin{itemize}
\item relations expressing the adjunctions $\mathbbm{1}_{\lambda}\mathcal{F}\dashv\mathcal{E}\mathbbm{1}_{\lambda}\dashv\mathbbm{1}_{\lambda}\mathcal{F}\Pi^{\lambda+1}$
\begin{equation} \label{eq_biadjoint1}
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick,->] (-1.5,0) .. controls (-1.5,.8) and (-.5,.8) .. (-.5,0) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, ->-=0.15] (.5,0) -- (.5,1.25);
\draw[thick, ->] (-1.5,-1.25) -- (-1.5,0);
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.2,1.5) and ++(0,1).. (Y);
\node at (.7,-1) {$\lambda$};
\end{tikzpicture} }
\quad = \qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick, ->-=0.5] (0,-1.25) -- (0,1.25);
\node at (1,0.4) {$\lambda$};
\end{tikzpicture} }
\quad,\qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick,->] (-1.5,0) .. controls (-1.5,-.8) and (-.5,-.8) .. (-.5,0) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, ->-=0.15] (.5,0) -- (.5,-1.25);
\draw[thick, ->] (-1.5,1.25) -- (-1.5,0);
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.2,1) and ++(0,1.5).. (Y);
\node at (.7,1) {$\lambda$};
\end{tikzpicture} }
\quad = \qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick, ->-=0.5] (0,1.25) -- (0,-1.25);
\node at (1,-0.4) {$\lambda$};
\end{tikzpicture} }
\end{equation}
\begin{equation}\label{eq_biadjoint2}
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0);
\draw[thick,->] (1.5,0) .. controls (1.5,.8) and (.5,.8) .. (.5,0);
\draw[thick, ->-=0.15] (-.5,0) -- (-.5,1.25);
\draw[thick, ->] (1.5,-1.25) -- (1.5,0);
\node at (1.5,1) {$\lambda$};
\end{tikzpicture} }
\quad = \qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick, ->-=0.5] (0,-1.25) -- (0,1.25);
\node at (1,0.4) {$\lambda$};
\end{tikzpicture} }
\quad,\qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick,->] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0);
\draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.25);
\draw[thick, ->] (1.5,1.25) -- (1.5,0);
\node at (1.5,-1) {$\lambda$};
\end{tikzpicture} }
\quad = \qquad
\hackcenter{ \begin{tikzpicture}[scale=0.6]
\draw[thick, ->-=0.5] (0,1.25) -- (0,-1.25);
\node at (1,-0.4) {$\lambda$};
\end{tikzpicture} }
\end{equation}
\item the odd cyclicity relations for dots and crossings:
\begin{equation}\label{eqn-dot-cyclicity}
(-1)^{\lambda} \;\;
\hackcenter{ \begin{tikzpicture}[scale=0.7]
\draw[thick] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick] (-1.5,0) .. controls (-1.5,-.8) and (-.5,-.8) ..
(-.5,0) node[pos=0.5, shape=coordinate](Y){} node[pos=1, shape=coordinate](DOT){};
\draw[thick, ->-=0.15] (.5,0) -- (.5,-1.5);
\draw[thick, ->] (-1.5,1.5) -- (-1.5,0);
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.1,1) and ++(-.2,1.5).. (Y);
\draw[color=blue, thick, dashed] (DOT) to[out=120, in=-90] (-0.75,1.5);
\draw[line width=0mm] (-1.5,0) .. controls (-1.5,-.8) and (-.5,-.8) .. (-.5,0) node[pos=1](){$\bullet$};
\node at (-.6,-1) {$\lambda-1$};
\end{tikzpicture} }
\quad=\quad
\left\{
\begin{array}{ll}
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.8]
\draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){};
\draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5);
\draw[thick, ->] (1.5,1.5) -- (1.5,0);
\draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.4,-2) and ++(.2,-2) .. (2.25,0) to (2.25,1.5);
\draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1](){$\bullet$};
\node at (.3,1.2) {$\lambda-1$};
\end{tikzpicture}
};
\endxy
\quad+\quad2\quad
\hackcenter{\begin{tikzpicture}[scale=0.7]
\draw[thick, <-] (-2,-1.5) -- (-2,1.5);
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.9) and (0.5,0.9) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.9) and (0.5,-0.9) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda$\;};
\draw[color=blue, thick, dashed] (Z) to[out=160, in=-90] (-1.25,1.5) ;
\node[blue] at (-1.25,0.8){$\scriptstyle $\;};
\draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.9) and (0.5,0.9) .. (0.5,0) node[pos=0.1]{$\bullet$};
\draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.9) and (0.5,-0.9) .. (0.5,0) node[pos=0.1]{$\bullet$};
\node at (.8,-1) {$\lambda+1$};
\end{tikzpicture}} & \lambda>0, \\
& \\
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.8]
\draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){};
\draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5);
\draw[thick, ->] (1.5,1.5) -- (1.5,0);
\draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.4,-2) and ++(.2,-2) .. (2.25,0) to (2.25,1.5);
\draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1](){$\bullet$};
\node at (.3,1.2) {$\lambda-1$};
\end{tikzpicture}
};
\endxy
\quad+\quad2\quad
\hackcenter{\begin{tikzpicture}[scale=0.7]
\draw[thick, <-] (2,-1.5) -- (2,1.5);
\draw[thick, ->] (0.5,0) .. controls (0.5,0.9) and (-0.5,0.9) .. (-0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.9) and (-0.5,-0.9) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda$\;};
\draw[color=blue, thick, dashed] (Z) .. controls ++(-1.4,.7) and ++(.1,-1) .. (2.75,1.5) ;
\node[blue] at (1.25,0.8){$\scriptstyle $\;};
\draw[line width=0mm] (0.5,0) .. controls (0.5,-0.9) and (-0.5,-0.9) .. (-0.5,0) node[pos=0.2]{$\bullet$};
\draw[line width=0mm] (0.5,0) .. controls (0.5,0.9) and (-0.5,0.9) .. (-0.5,0) node[pos=0.1]{$\bullet$};
\node at (3,-1) {$\lambda+1$};
\end{tikzpicture}
} & \lambda<0, \\
& \\
- \;\;
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.8]
\draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){};
\draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5);
\draw[thick, ->] (1.5,1.5) -- (1.5,0);
\draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.4,-2) and ++(.2,-2) .. (2.25,0) to (2.25,1.5);
\draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1](){$\bullet$};
\node at (.3,1.2) {$\lambda-1$};
\end{tikzpicture}
};
\endxy & \lambda=0.
\end{array}
\right.
\end{equation}
(Note that relations \eqref{eq_biadjoint1}, \eqref{eq_biadjoint2}, \eqref{eqn-dot-cyclicity} imply local cap and cup dot slide relations that can be found in Section~\ref{subsubsec-half_cyclic}.)
\begin{align}\label{eqn-crossing-cyclicity}
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1);
\draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0);
\draw[thick] (-0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (2.5,0);
\draw[thick, ->-=0.5] (2.5,1) -- (2.5,0);
\draw[thick, ->-=0.5] (1.5,1) -- (1.5,0);
\draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1);
\node at (1.5,-1.5) {$\lambda$};
\end{tikzpicture}}
\quad &= \quad
\hackcenter{ \begin{tikzpicture} [scale=0.8]
\draw[thick,->] (-0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) ..
(0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,0);
\draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0);
\draw[thick] (0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (-2.5,0);
\draw[thick, ->-=0.5] (-2.5,0) -- (-2.5,1);
\draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,1);
\draw[color=blue, thick, dashed] (X) .. controls ++(1.5,0) and ++(1,0) .. (0,-1.5) .. controls ++(-3.5,-.5) and ++(0,-1) .. (-3,1);
\node at (1.5,-1) {$\lambda$};
\end{tikzpicture}} \\
\hackcenter{ \begin{tikzpicture} [scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls ++(-0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1);
\draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0);
\draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (2.5,0);
\draw[thick, ->-=0.5] (2.5,-1) -- (2.5,0);
\draw[thick, ->-=0.5] (1.5,-1) -- (1.5,0);
\draw[color=blue, thick, dashed] (X) to [out=0, in=90](1,-1);
\node at (3,1) {$\lambda$};
\end{tikzpicture}}
\quad &= \qquad
\hackcenter{ \begin{tikzpicture} [scale=0.8]
\draw[thick,->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0);
\draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0);
\draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,0);
\draw[thick, ->-=0.5] (-2.5,0) -- (-2.5,-1);
\draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,-1);
\draw[color=blue, thick, dashed] (X) .. controls ++(-.4,0.2) and ++(0,0.5) .. (-1,-1);
\node at (1,1) {$\lambda$};
\end{tikzpicture} } \\
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,1) .. controls ++(-0,-0.5) and ++(0,0.5) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,0);
\draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){};
\draw[thick] (-0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (2.5,0) node[pos=0.44, shape=coordinate](L){} node[pos=0.51, shape=coordinate](R){} node[pos=0.55, shape=coordinate](outCAP){};
\draw[thick, ->-=0.5] (2.5,0) -- (2.5,1);
\draw[thick, ->-=0.5] (1.5,0) -- (1.5,1);
\draw[color=blue, thick, dashed] (X) .. controls ++(.4,-.2) and ++(0,-.5) .. (1,1);
\draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,.7) .. (1.5,-0.5) .. controls ++(0,-0.3) and ++(0,.5) .. (outCAP);
\node at (2,-1.5) {$\lambda$};
\end{tikzpicture}}
\quad &=\qquad (-1)^{\lambda} \;\;
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1);
\draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){};
\draw[thick] (0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (-2.5,0) node[pos=0.44, shape=coordinate](L){} node[pos=0.51, shape=coordinate](R){} node[pos=0.55, shape=coordinate](outCAP){};
\draw[thick, ->-=0.5] (-2.5,1) -- (-2.5,0);
\draw[thick, ->-=0.5] (-1.5,1) -- (-1.5,0);
\draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,.7) .. (-1.5,-0.5) .. controls ++(0,-0.3) and ++(0,.5) .. (outCAP);
\draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1,1);
\node at (0,-1.5) {$\lambda$};
\end{tikzpicture}} \\
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick,->] (0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0);
\draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){};
\draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){};
\draw[thick, ->-=0.5] (2.5,0) -- (2.5,-1);
\draw[thick, ->-=0.5] (1.5,0) -- (1.5,-1);
\draw[color=blue, thick, dashed] (X) .. controls ++(-1.5,0) and ++(0,-1) .. (-0.5,1.75);
\draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (0,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP);
\node at (2.3,1.5) {$\lambda$};
\end{tikzpicture}}
\quad &= \quad (-1)^{\lambda} \;\;
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick,->] (-0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1);
\draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){};
\draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){};
\draw[thick, ->-=0.5] (-2.5,-1) -- (-2.5,0);
\draw[thick, ->-=0.5] (-1.5,-1) -- (-1.5,0);
\draw[color=blue, thick, dashed] (X) .. controls ++(1.5,-.2) and ++(0,-1) .. (.5,1.75);
\draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (0,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP);
\node at (-2.3,1.5) {$\lambda$};
\end{tikzpicture}}
\end{align}
\item The odd nilHecke algebra relations:
\begin{eqnarray} \label{eq:oddnilquad}
&\hackcenter{\begin{tikzpicture}
\draw[thick] (0,0) .. controls (0,.5) and (.5, .5) ..
(.5,1) node[pos=.5](CROSSING1){};
\draw[thick, ->] (.5,1) .. controls (.5,1.5) and (0,1.5) .. (0,2) node[pos=.5](CROSSING2){};
\draw[thick, ->] (.5,0) .. controls (.5,.5) and (0, .5) .. (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,2);
\draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1,2);
\draw[thick, color=blue, dashed] (CROSSING2) [out=180, in=-90] to (-.5,2);
\end{tikzpicture}}
\quad=0,\\
&\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, shape=coordinate](CROSSING2){};
\draw[thick, ->] (1,0) .. controls (1,1) and (0,1) .. (0,2);
\draw[thick] (.5,0) .. controls (.5,.5) and (0,.5) .. (0,1) node[pos=.65, shape=coordinate](CROSSING1){};
\draw[thick, ->] (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,2) node[pos=.35, shape=coordinate](CROSSING3){};
\draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1.5,2);
\draw[thick, color=blue, dashed] (CROSSING2) [out=180, in=-90] to (-1,2);
\draw[thick, color=blue, dashed] (CROSSING3) [out=180, in=-90] to (-.5,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, shape=coordinate](CROSSING2){};
\draw[thick, ->] (1,0) .. controls (1,1) and (0,1) .. (0,2);
\draw[thick] (.5,0) .. controls (.5,.5) and (1,.5) .. (1,1) node[pos=.65, shape=coordinate](CROSSING1){};
\draw[thick, ->] (1,1) .. controls (1,1.5) and (.5,1.5) .. (.5,2) node[pos=.35, shape=coordinate](CROSSING3){};
\draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1.5,2);
\draw[thick, color=blue, dashed] (CROSSING2) [out=180, in=-90] to (-1,2);
\draw[thick, color=blue, dashed] (CROSSING3) [out=180, in=-90] to (-.5,2);
\end{tikzpicture}}
\end{eqnarray}
\begin{align} \label{eq:onil-dot}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){};
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5);
\draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node() at (DOT) {$\bullet$};
\end{tikzpicture}}
\quad-\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){};
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5);
\draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node() at (DOT) {$\bullet$};
\end{tikzpicture}}
&\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) -- (0,1.5);
\draw[thick, ->] (.5,0) -- (.5,1.5);
\draw[thick, color=blue, dashed] (-.75,1.5) .. controls (-.75,1.15) and (-.25,1.15) .. (-.25,1.5);
\end{tikzpicture}}
\quad,\\ \notag
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5);
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){};
\draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-.5,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-1,1.5);
\node() at (DOT) {$\bullet$};
\end{tikzpicture}}
\quad-\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5);
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){};
\draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-.5,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-1,1.5);
\node() at (DOT) {$\bullet$};
\end{tikzpicture}}
&\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) -- (0,1.5);
\draw[thick, ->] (.5,0) -- (.5,1.5);
\draw[thick, color=blue, dashed] (-.75,1.5) .. controls (-.75,1.15) and (-.25,1.15) .. (-.25,1.5);
\end{tikzpicture}}
\quad,
\end{align}
as well as the corresponding relations on downwards-oriented strands.
\end{itemize}
We also define left- and right-crossing diagrams by using adjoints:
\begin{equation}
\hackcenter{\begin{tikzpicture}[scale=0.7]
\draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.5);
\draw[thick, ->] (.5,1.5) .. controls ++(0,-.75) and ++(0,.75)..(-.5,0) node[pos=.5, shape=coordinate](CROSSING){};
\draw[thick, color=blue, dashed] (CROSSING) to (0,-0);
\node at (1,.75) {$\lambda$};
\end{tikzpicture} }
\quad := \quad
\hackcenter{ \begin{tikzpicture}[scale=0.7]
\draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) ..
(-0.5,1);
\draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0);
\draw[thick] (.5,2) -- (.5,1);
\draw[thick, ->-=0.5] (1.5,2) -- (1.5,0);
\draw[thick] (-0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,1);
\draw[thick] (-.5,0) -- (-.5,-1);
\draw[thick, ->-=0.5] (-1.5,1) -- (-1.5,-1);
\draw[color=blue, thick, dashed] (X) .. controls ++(-1,.4) and ++(0,1).. (-1,-1);
\node at (1.8,-0.8) {$\lambda$};
\end{tikzpicture}}
\qquad \qquad
\hackcenter{\begin{tikzpicture}[scale=0.7]
\draw[thick, ->] (-.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (.5,1.5);
\draw[thick, ->] (-.5,1.5) .. controls ++(0,-.75) and ++(0,.75)..(.5,0) node[pos=.5, shape=coordinate](CROSSING){};
\draw[thick, color=blue, dashed] (CROSSING) to (0,1.5);
\node at (1,.75) {$\lambda$};
\end{tikzpicture} }
\quad := \quad
\hackcenter{ \begin{tikzpicture}[scale=0.7]
\draw[thick, ->] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1);
\draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=.48, shape=coordinate](RD){} node[pos=.48, shape=coordinate](LD){} node[pos=.5, shape=coordinate](bCROSS){};
\draw[thick] (-.5,2) -- (-.5,1);
\draw[thick, ->-=0.5] (-1.5,2) -- (-1.5,0);
\draw[thick] (0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,1) node[pos=.5, shape=coordinate](tCROSS){};
\draw[thick] (.5,0) -- (.5,-1);
\draw[thick, ->-=0.5] (1.5,1) -- (1.5,-1);
\draw[color=blue, thick, dashed] (X) .. controls ++(-1.3,0) and ++(0,-1).. (-1,2);
\draw[color=blue, thick, double distance=1pt, dashed] (bCROSS) .. controls ++(-.1,3) and ++(0.1,.75) ..(tCROSS);
\node at (2.1,.5) {$\lambda$};
\node[blue] at (1.5,1.8) {$\scriptstyle \lambda-1$};
\end{tikzpicture}}
\end{equation}

\subsubsection{Dotted bubble relations}
\begin{itemize}
\item Bubbles of negative degree are zero:
\begin{equation} \label{eq:positivity}
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1.25);
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.6,1.1){$\scriptstyle m$\;};
\node[blue] at (.5,1.1){$\scriptstyle \lambda-1$\;};
\draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\bullet$};
\node at (1.3,0) {$\lambda$};
\end{tikzpicture} }
\;\; = 0 \quad \text{for $0 \leq m <\lambda-1$}, \qquad \qquad
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(0,.6) and ++(0,-.5) .. (-.5, 1) node[pos=0.85,left]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Y) to[out=00, in=-90] (1,1) ;
\node[blue] at (1.45,0.8){$\scriptstyle m$\;};
\node at (-1,0) {$\lambda$};
\node at (Y) {$\bullet$};
\end{tikzpicture} }
\;\; = 0 \quad \text{for $0 \leq m <-\lambda-1$}.
\end{equation}
\end{itemize}
The remaining relations in the 2-category $U_qc$ are conveniently expressed using several useful conventions for depicting dotted bubbles. The first convention is to depict dotted bubbles in a manner that emphasizes their degree. For $m \geq 0$, the degree $m$ clockwise and counter-clockwise bubbles are drawn as follows:
\begin{align}
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.5,1.2){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (1.1,0) {$\lambda$};
\end{tikzpicture} }
\qquad \qquad
\xy (0,-2)*{\begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (1.3,0.9){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (-.5,.9) {$\lambda$};
\end{tikzpicture} };
\endxy
\end{align}
where we employ the conventions
\begin{align}
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.5,1.2){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (1.1,0) {$\lambda$};
\end{tikzpicture} }
&:=
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1.25);
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-1.8,1.1){$\scriptstyle m+(\lambda-1)$\;};
\node[blue] at (.65,1.1){$\scriptstyle -\lambda+1$\;};
\draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\bullet$};
\node at (1.3,0) {$\lambda$};
\end{tikzpicture} }
&& \text{for $\lambda \leq 0$ and $-(\lambda-1) \leq m$}, \\
\hackcenter{\begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (1.3,0.9){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (-.5,.9) {$\lambda$};
\end{tikzpicture} }
& :=
\hackcenter{ \begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0);
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(0,.6) and ++(0,-.5) .. (-.5, 1) node[pos=0.85,left]{$\scriptstyle \lambda+1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Y) to[out=00, in=-90] (.9,1) ;
\node[blue] at (1.85,0.8){$\scriptstyle m-(\lambda+1)$\;};
\node at (1.4,-.2) {$\lambda$};
\node at (Y) {$\bullet$};
\end{tikzpicture} }
&& \text{for $\lambda \geq 0$ and $-\lambda-1 \leq m$}.
\end{align}
Notice that some labels for dots on the left-hand side appear to involve a negative number of dots, though when the two dots are combined their sum is positive.
The dot 2-morphism is not invertible, rather the equations above define formal symbols that will simplify the presentation of the graphical calculus. As in the graphical calculus for the 2-category $U_qcev$ it is extremely convenient to extend this notation even further by defining formal symbols
\begin{align}
\hackcenter{
\begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.5,1.2){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (1.1,0) {$\lambda$};
\end{tikzpicture}
}
&& \text{for $\lambda \leq 0$ and $0 \leq m \leq -\lambda$},
\\
\hackcenter{\begin{tikzpicture}[scale=0.8]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (1.3,0.9){$\scriptstyle m$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (-.5,.9) {$\lambda$};
\end{tikzpicture}
}
&& \text{for $\lambda \geq 0$ and $0 \leq m \leq \lambda $},
\end{align}
that are defined inductively by the following equations:
\begin{align}\label{eq:fake-bubble}
\sum_{f+g=m} (-1)^{g}
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.6,1.2){$\scriptstyle f$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\end{tikzpicture}
}
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (1.3,0.9){$\scriptstyle g$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (-.5,1.1) {$\lambda$};
\end{tikzpicture}
};
\endxy\;\;
&\;\; = \;\; \delta_{m,0}\mathbb{1}_{\mathbb{1}_{\lambda}} % NOTE(review): reconstructed from garbled "1bb_{1bbl}" -- confirm the intended identity-2-morphism macro
&\text{for $\lambda >0$ and $m \leq \lambda$, }
\\
\sum_{f+g=m} (-1)^{g}
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (1.3,1.1){$\scriptstyle f$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node at (-.25,1.1) {$\lambda$};
\end{tikzpicture}
};
\endxy
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.6,1.2){$\scriptstyle g$\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\end{tikzpicture}
}
&\;\; = \;\; \delta_{m,0}\mathbb{1}_{\mathbb{1}_{\lambda}} % NOTE(review): reconstructed, see above
& \text{for $\lambda<0$ and $m \leq -\lambda$,}
\\
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle -1$\;};
\node at (Y) {$\bullet$};
\node at (1,.75) {$\lambda$};
\end{tikzpicture}
}
\;\; = \quad
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -1$\;};
\node at (Y) {$\bullet$};
\node at (1,.65) {$\lambda$};
\end{tikzpicture}
};
\endxy
&\;\; = \;\; \mathbb{1}_{\mathbb{1}_{0}} % NOTE(review): reconstructed from garbled "1bb_{1bb_{0}}"
& \text{if $\lambda=0$}.
\end{align}
These symbols are called fake bubbles because the labels of the dots are negative, though the total degree is positive as they are defined above in terms of nonzero real bubbles. Fake bubbles are explained in greater detail in Section~\ref{subsec:fake-bubbles}.
\subsubsection{More defining relations for $\lambda >0$}
\begin{equation}\label{eqn-mixed-R2-relation-1}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.5,0) to (-0.5,2);
\draw[thick, <-] (0.5,0) to (0.5,2);
\node at (1,1.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad - \;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1);
\draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2);
\draw[color=blue, thick, dashed] (Y) -- (X);
\node at (1,1.5) {$\lambda$};
\end{tikzpicture}
}
\quad + \quad
\sum_{ \xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = \lambda-1}; \endxy} (-1)^{f_3} \;\;
\hackcenter{
\begin{tikzpicture}
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){};
\draw[thick, ->] (1.9,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.1,1) node[pos=0.05, shape=coordinate](Z){};
\draw[thick] (1.9,1) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (1.1,1) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.5,.4) and ++(.2,.8) .. (R) ;
\node[blue] at (1.25,0.8){$\scriptstyle $\;};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\draw[thick, <-] (-0.5,2.25) .. controls ++(0,-.8) and ++(0,-.8) .. (0.5,2.25) node[pos=0.2, shape=coordinate](tDOT){};
\draw[color=blue, thick, double distance=1pt, dashed] (M) .. controls ++(.4,1.4) and ++(-.5,-1) .. (-1.1,1.8) to[out=90, in=140] (tDOT);
\draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L);
\node at (tDOT){$\bullet$};
\node at (DOT){$\bullet$};
\node[blue] at (.6,1.4){$\scriptstyle f_3$};
\node[blue] at (-1.35,1.45){$\scriptstyle f_1$};
\node[blue] at (-1.0,.30){$\scriptstyle f_2$};
\node at (1,2) {$\lambda$};
\end{tikzpicture}
}
\end{equation}
\begin{equation}
-\;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1);
\draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2);
\draw[color=blue, thick, dashed] (X) -- (0,0);
\draw[color=blue, thick, dashed] (Y) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.5,2) to (-0.5,0);
\draw[thick, <-] (0.5,2) to (0.5,0);
\draw[color=blue, thick, dashed] (0,0) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\end{equation}
\begin{align} \label{eq:lgz-curl}
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1);
\draw[thick, ->] (-0.5,1) .. controls ++(0,.6) and ++(0,.6) .. (0.5,1) node[pos=0.5, shape=coordinate](Y){};
\draw[color=blue, thick, dashed] (X) -- (0,0);
\draw[color=blue, thick, double distance=1pt,dashed] (Y) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
&\quad =\quad 0
&
\hackcenter{\begin{tikzpicture}
\draw[thick] (-0.5,1) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2);
\draw[color=blue, thick, dashed] (Y) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
&\quad =\quad 0 .
\end{align}

\subsubsection{More defining relations for $\lambda <0$}
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (-0.5,0) to (-0.5,2);
\draw[thick, ->] (0.5,0) to (0.5,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad - \;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1);
\draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2);
\draw[color=blue, thick, dashed] (Y) .. controls ++(.1,.4) and ++(.1,.4) .. (-.6,1.6) .. controls ++(0,-.3) and ++(0,.3) ..(0,1) .. controls ++(0,-.3) and ++(0,.3) .. (-.6,.4) .. controls ++(.1,-.4) and ++(.1,-.4) .. (X);
\end{tikzpicture}
}
\quad + \quad
\sum_{ \xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = -\lambda-1}; \endxy} (-1)^{f_3} \;\;
\hackcenter{
\begin{tikzpicture}
\draw[thick, <-] (0.5,0) ..
controls ++(0,0.8) and ++(0,0.8) .. (-0.5,0) node[pos=0.15, shape=coordinate](DOT){};
\draw[thick, ->] (1.1,.75) .. controls ++(-0,0.6) and ++(0,0.6) .. (1.9,.75) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (1.1,.75) .. controls ++(0,-0.6) and ++(0,-0.6) .. (1.9,.75) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[thick, ->] (0.8,2.25) -- (0.8,2.5);
\draw[thick] (-0.8,2.25) -- (-0.8,2.5);
\draw[thick] (0.8,2.25) .. controls ++(0,-.8) and ++(0,-.8) .. (-0.8,2.25) node[pos=0.15, shape=coordinate](tDOT){} node[pos=0.42, shape=coordinate](RCUP){} node[pos=0.5, shape=coordinate](MCUP){} node[pos=0.58, shape=coordinate](LCUP){};
\draw[color=blue, thick, double distance=1pt, dashed] (tDOT) ..controls ++(-.3,.3) and ++(0,.4) .. (RCUP) ;
\draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls ++(-.3,.2) and ++(0,-.5) .. (-1,1) .. controls ++(0,1.7) and ++(.1,.7) .. (MCUP) ;
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.3,.4) and ++(0,-.4) .. (-.75,1.5) .. controls ++(0,.5) and ++(0,.4) .. (LCUP) ;
\node at (tDOT){$\bullet$};
\node at (DOT){$\bullet$};
\node at (Y) {$\bullet$};
\node at (Z) {$\bullet$};
\node[blue] at (.5,1.25){$\scriptstyle f_3$};
\node[blue] at (.6,2.35){$\scriptstyle f_1$};
\node[blue] at (-1.0,.40){$\scriptstyle f_2$};
\node at (1,2) {$\lambda$};
\end{tikzpicture}
}
\end{equation}
\begin{equation}
-\;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1);
\draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2);
\draw[color=blue, thick, dashed] (Y) .. controls ++(.1,-.5) and ++(-.1,-.5) .. (-.6,1.5) .. controls ++(0,.3) and ++(0,-.4) ..(0,2);
\draw[color=blue, thick, dashed] (X) .. controls ++(.1,.4) and ++(-.1,.4) .. (-.6,.5) .. controls ++(0,-.3) and ++(0,.4) ..(0,0);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.5,0) to (-0.5,2);
\draw[thick, <-] (0.5,0) to (0.5,2);
\draw[color=blue, thick, dashed] (0,0) -- (0,2);
\end{tikzpicture}}
\end{equation}
\begin{align} \label{eq:llz-curl}
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1);
\draw[thick, ->] (0.5,1) .. controls ++(0,.6) and ++(0,.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](Y){};
\draw[color=blue, thick, dashed] (X) .. controls ++(.1,.4) and ++(-.1,.4) .. (-.6,.5) .. controls ++(0,-.3) and ++(0,.4) ..(0,0);
\node at (-1,0.5) {$\lambda$};
\end{tikzpicture}}
&\quad =\quad 0
&
\hackcenter{\begin{tikzpicture}
\draw[thick] (-0.5,1) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2);
\draw[color=blue, thick, double distance=1pt,dashed] (X) .. controls ++(0,1.25) and ++(0,-.75) .. (-1,2);
\draw[color=blue, thick, dashed] (Y) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
&\quad =\quad 0 .
\end{align}

\subsubsection{More defining relations for $\lambda =0$}
\[
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.5,0) to (-0.5,2);
\draw[thick, <-] (0.5,0) to (0.5,2);
\node at (1,1.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad - \;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1);
\draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2);
\draw[color=blue, thick, dashed] (Y) -- (X);
\node at (1,1.5) {$\lambda$};
\end{tikzpicture}
}
\qquad \qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.5,2) to (-0.5,0);
\draw[thick, <-] (0.5,2) to (0.5,0);
\draw[color=blue, thick, dashed] (0,0) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\quad = \quad - \;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1);
\draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){};
\draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2);
\draw[color=blue, thick, dashed] (X) -- (0,0);
\draw[color=blue, thick, dashed] (Y) -- (0,2);
\node at (1,0.5) {$\lambda$};
\end{tikzpicture}}
\]
At this point, the definition of $U_qc$ is complete.
\subsection{Implicit relations among generating 2-morphisms}
\label{subsec:additional}

\subsubsection{Relations involving dashed strands}
The following relations are already present from the definition of a super-2-category, but we spell them out for completeness:
\begin{itemize}
\item $\xi$ and $\xi^{-1}$ are biadjoint:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,.75) .. controls (0,1.15) and (.5,1.15) .. (.5,.75) .. controls (.5,.35) and (1,.35) .. (1,.75) -- (1,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (1,0) -- (1,.75) .. controls (1,1.15) and (.5,1.15) .. (.5,.75) .. controls (.5,.35) and (0,.35) .. (0,.75) -- (0,1.5);
\end{tikzpicture}}
\end{equation}
\item the biadjunction $\xi\dashv\xi^{-1}\dashv\xi$ enjoys pitchfork-cyclicity for all 1-morphisms (by Lemma~\ref{lem-dashed-cyclicity}, which is stated for $\mathcal{SC}at$ but holds in any super-2-category):
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (0,1) -- (0,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick] (.5,0) [out=90, in=-90] to (1,1) -- (1,1.5);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1.5) .. controls (0,.7) and (1,.7) .. (1,1.5);
\draw[thick] (.5,1.5) [out=-90, in=90] to (0, .5) -- (0,0);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1.5) .. controls (0,.7) and (1,.7) .. (1,1.5);
\draw[thick] (.5,1.5) [out=-90, in=90] to (1,.5) -- (1,0);
\end{tikzpicture}}
\end{equation}
where in either equation both strands can be upwards-oriented or both strands can be downwards-oriented.
\item $\xi^{-1}$ is, in fact, the inverse of $\xi$:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) .. controls (0,.8) and (1,.8) .. (1,0);
\draw[thick, color=blue, dashed] (0,2) .. controls (0,1.2) and (1,1.2) .. (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0, 2);
\draw[thick, color=blue, dashed] (1,0) -- (1,2);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,1) .. controls (0,1.8) and (1,1.8) .. (1,1);
\draw[thick, color=blue, dashed] (0,1) .. controls (0,.2) and (1,.2) .. (1,1);
\end{tikzpicture}}
\quad=\varnothing.
\end{equation}
\item $\alpha_\mathcal{E}^{-1}$ is, in fact, the inverse of $\alpha_\mathcal{E}$:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) [out=90, in=-90] to (1,1) [out=90, in=-90] to (0,2);
\draw[thick, color=blue, dashed] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) -- (0,2);
\draw[thick, color=blue, dashed] (1,0) -- (1,2);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) [out=90, in=-90] to (1,1) [out=90, in=-90] to (0,2);
\draw[thick, ->] (1,0) [out=90, in=-90] to (0,1) [out=90, in=-90] to (1,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, color=blue, dashed] (0,0) -- (0,2);
\draw[thick, ->] (1,0) -- (1,2);
\end{tikzpicture}}\quad.
\end{equation}
And likewise for $\alpha_\mathcal{F}^{\pm1}$ (reflect the above equations about a horizontal axis).
\item Relations that involve pulling $\alpha$'s through generating 2-morphisms:
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, shape=coordinate](DOT){};
\draw[thick, color=blue, dashed] (DOT) -- (.5,2) node[pos=0](){$\bullet$};
\draw[thick, color=blue, dashed] (.5,0) .. controls (.5,.5) and (0,.5) .. (0,1) -- (0,2);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, shape=coordinate](DOT){};
\draw[thick, color=blue, dashed] (DOT) .. controls ++(-0.75,.1) and ++(0,-.5).. (.5,2) node[pos=0](){$\bullet$};
\draw[thick, color=blue, dashed] (1,0) -- (1,.65) .. controls ++(0,1) and ++(0,-.5) .. (0,2);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls ++(0,1) and ++(0,-0.5) ..(2,2);
\draw[thick, ->] (2,1) .. controls ++(0,0.5) and ++(0,-0.5) .. (1,2) node[pos=.5, shape=coordinate](CROSSING){};
\draw[thick, color=blue, dashed] (CROSSING) to[out=180, in=-90] (0.25,2);
\draw[thick] (2,1) .. controls ++(0,-0.5) and ++(0,0.5) ..(1,0);
\draw[thick, color=blue, dashed] (2,0) .. controls ++(0,.75) and ++(0,-.75) .. (-0.5,2);
\end{tikzpicture}}
\quad = \quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls ++(0,0.5) and ++(0,-1) ..(2,2);
\draw[thick, ->] (0,1) .. controls ++(0,0.5) and ++(0,-0.5) .. (1,2);
\draw[thick] (0,1) .. controls ++(0,-0.5) and ++(0,0.5) ..(1,0) node[pos=.5, shape=coordinate](CROSSING){};
\draw[thick, color=blue, dashed] (CROSSING) .. controls ++(-0.5,-.1) and ++(0,-.2) .. (-0.5,1) .. controls ++(0,.4) and ++(0,-.5) ..(0.25,2);
\draw[thick, color=blue, dashed] (2,0) .. controls ++(0,.75) and ++(0,-.75) .. (-0.5,2);
\end{tikzpicture}}
\end{equation}
the analogues of the previous two for $\mathcal{F}$ (reflect about a horizontal axis),
\begin{equation}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (.5,0) -- (.5,.5) .. controls (.5,.85) and (0,.85) .. (0,.5) -- (0,0);
\draw[thick, color=blue, dashed] (1,0) -- (1,1) .. controls (1,1.25) and (-.5,1.25) .. (-.5,1.5);
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (.5,0) -- (.5,.5) .. controls (.5,.85) and (0,.85) .. (0,.5) -- (0,0);
\draw[thick, color=blue, dashed] (1,0) .. controls (1,.5) and (-.5,.5) ..
(-.5,1) -- (-.5,1.5);
\end{tikzpicture}}
\quad,\qquad
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (.5,0) -- (.5,.5) .. controls (.5,.85) and (0,.85) .. (0,.5) node[pos=.5, shape=coordinate](TOPCAP){} -- (0,0);
\draw[thick, color=blue, dashed] (1,0) -- (1,1) .. controls (1,1.25) and (-.5,1.25) .. (-.5,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP) -- (.25,1.5) node[pos=1, above](){$\scriptstyle \lambda+1$};
\end{tikzpicture}}
\quad=\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, <-] (.5,0) -- (.5,.5) .. controls (.5,.85) and (0,.85) .. (0,.5) node[pos=.5, shape=coordinate](TOPCAP){} -- (0,0);
\draw[thick, color=blue, dashed] (1,0) .. controls (1,.5) and (-.5,.5) .. (-.5,1) -- (-.5,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP) -- (.25,1.5) node[pos=1, above](){$\scriptstyle \lambda+1$};
\end{tikzpicture}}
\quad,
\end{equation}
as well as the apparent analogues of these diagrams with $\alpha^{-1}$ in place of $\alpha$, with cups in place of caps, and both.
\end{itemize}

\subsubsection{Inductive dot slide}
We now deduce some helpful relations in $U_qc$, cf. Section 5.4 of \cite{Lau1}. We refer to the equation
\begin{align} \label{eq:inductive-dot}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){};
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node() at (DOT) {$\bullet$};
\node[blue] at (-1.3,1.35) {$\scriptstyle m$};
\end{tikzpicture}}
\quad-\;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){};
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node at (DOT) {$\bullet$};
\node[blue] at (-1.3,1.35) {$\scriptstyle m$};
\end{tikzpicture}}
&\quad =\quad
\sum_{f+g=m-1}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) -- (0,1.5) node[pos=.3, shape=coordinate](LD){};
\draw[thick, ->] (.5,0) -- (.5,1.5) node[pos=.5, shape=coordinate](RD){};
\draw[thick, color=blue, dashed] (-1.25,1.5) .. controls ++(-0,-.4) and ++(0,-.4) .. (-.25,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (LD) .. controls ++(-.5,.2) and ++(0,-1) .. (-1.5,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (RD) .. controls ++(-.5,.2) and ++(0,-.7) .. (-.75,1.5);
\node at (LD) {$\bullet$};
\node at (RD) {$\bullet$};
\node[blue] at (-1.75,1.35) {$\scriptstyle f$};
\node[blue] at (-.8,.9) {$\scriptstyle g$};
\end{tikzpicture}}
\\ \notag
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5);
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){};
\draw[thick, color=blue, double distance=1pt,dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node() at (DOT) {$\bullet$};
\node[blue] at (-1.3,1.35) {$\scriptstyle m$};
\end{tikzpicture}}
\quad-\quad
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5);
\draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){};
\draw[thick, color=blue, double distance=1pt,dashed] (DOT) [out=180, in=-90] to (-1,1.5);
\draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5);
\node() at (DOT) {$\bullet$};
\node[blue] at (-1.3,1.35) {$\scriptstyle m$};
\end{tikzpicture}}
&\quad=\quad
\sum_{f+g=m-1}
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0,0) -- (0,1.5) node[pos=.6, shape=coordinate](LD){};
\draw[thick, ->] (.5,0) -- (.5,1.5) node[pos=.25, shape=coordinate](RD){};
\draw[thick, color=blue, dashed] (-1.25,1.5) .. controls ++(-0,-.4) and ++(0,-.4) .. (-.25,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (LD) .. controls ++(-.5,.2) and ++(0,-.7) .. (-.75,1.5);
\draw[thick, color=blue, double distance=1pt, dashed] (RD) .. controls ++(-.5,.2) and ++(0,-1) .. (-1.5,1.5);
\node at (LD) {$\bullet$};
\node at (RD) {$\bullet$};
\node[blue] at (-1.75,1.35) {$\scriptstyle f$};
\node[blue] at (-.8,.9) {$\scriptstyle g$};
\end{tikzpicture}}
\end{align}
as the inductive dot slide formula. These equations follow by induction from \eqref{eq:onil-dot}.
\subsubsection{Deriving the other curl relation} Notice that relations \eqref{eq:lgz-curl} and \eqref{eq:llz-curl} above only specify the value for one orientation of the curl depending on whether $\lambda$ is positive or negative. Here we show that the other curl relation can be derived from the relations above. Note that the relations below utilize fake bubbles. \begin{prop}[Other curls] \label{prop:othercurl} The following curl relations \begin{align} \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick] (0.5,1) -- (0.5,2); \draw[thick] (0.5,-.5) -- (0.5,0); \draw[thick] (-1.5,0) -- (-1.5,1); \draw[thick] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1); \draw[thick, ->] (-0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (-1.5,1); \draw[thick, ->] (-1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.5,0) node[pos=0.5, shape=coordinate](CUP){}; \draw[color=blue, thick, dashed] (X) .. controls ++(-1.2,0) and ++(0,-.9) ..(0,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) .. controls ++(-0,1.5) and ++(0,-1) ..(-.5,2); \node at (0,-0.25) {$\lambda$}; \node[blue] at (-1,1.8) {$\scriptstyle \lambda-1$}; \end{tikzpicture}} &\;\; = \;\; \sum_{f+g=\lambda} (-1)^{g} \;\; \hackcenter{\begin{tikzpicture} \draw[thick, ->] (2.25,-1) -- (2.25,1.25) node[pos=0.5, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (1.75,1.25); \draw[thick, ->] (1.1,0) .. controls ++(0,0.6) and ++(0,0.6) .. (.3,0) node[pos=0.05, shape=coordinate](C){}; \draw[thick] (1.1,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (.3,0) node[pos=0.5, shape=coordinate](A){} node[pos=0.2, shape=coordinate](B){}; \draw[color=blue, thick, double distance=1pt, dashed] (A) ..
controls++(-.1,.5) and ++(-.2,.3) .. (B) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick,double distance=1pt, dashed] (C) .. controls ++(-.7,.2) and ++(0,-.8) .. (1.25,1.25) node[pos=0.9,left]{$\scriptstyle f$\;}; \node at (B) {$\blacktriangleright$bullet}; \node at (D) {$\blacktriangleright$bullet}; \node at (C) {$\blacktriangleright$bullet}; \node[blue] at (1.95,1) {$\scriptstyle g$}; \node at (-0,-.5) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } & \text{for $\lambda\mathfrak{g}eq 0$,} \\ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,2); \draw[thick] (-0.5,-.5) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,1) node[pos=0.5, shape=coordinate](CUP){}; \draw[thick, ->] (1.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(-1,0) and ++(0,-.9) ..(-1,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) -- (1,2); \node at (0,-0.25) {$\lambda$}; \node[blue] at (.5,1.8) {$\scriptstyle \lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} &\;\; = \;\; \sum_{f+g=\lambda} (-1)^{f} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-1.75,-1) -- (-1.75,1.25) node[pos=0.5, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (-2.25,1.25); \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[bend left] (-1,1.25); \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \node at (D) {$\bullet$}; \node[blue] at (-2.5,1) {$\scriptstyle f$}; \node[blue] at (-1.25,1) {$\scriptstyle g$}; \node at (-1.25,-.5) {$\lambda$}; \end{tikzpicture} } & \text{for $\lambda\leq 0$,} \end{align} hold in $U_qc$. \end{prop} \begin{proof} For $\lambda=0$ the above equations are just the definition of the fake bubbles in weight $\lambda=0$. For $\lambda > 0$ we can add a cup with $\lambda$ dots to the bottom of relation \eqref{eqn-mixed-R2-relation-1} to get \begin{equation} \label{eq:lcurl1} \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick, ->] (-0.5,0) to (-0.5,2); \draw[thick, <-] (0.5,0) to (0.5,2); \node at (1,1.5) {$\lambda$}; \draw[thick] (-0.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0) node[pos=0, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1,2); \node at (D) {$\bullet$}; \node[blue] at (-1.3,1.8){$\scriptstyle \lambda$}; \end{tikzpicture}} \quad = \quad -\;\; \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) -- (X); \node at (1,1.5) {$\lambda$}; \draw[thick] (-0.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) ..
(0.5,0) node[pos=0, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1,2); \node at (D) {$\blacktriangleright$bullet}; \node[blue] at (-1.3,1.8){$\scriptstyle \lambda$}; $\blacktriangleleft$nd{tikzpicture} } \quad + \quad \sum_{ \xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = \lambda-1}; $\blacktriangleleft$ndxy} (-1)^{f_3} \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \draw[thick, ->] (1.9,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.1,1) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (1.9,1) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (1.1,1) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.5,.4) and ++(.2,.8) .. (R) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \draw[thick, <-] (-0.5,2.25) .. controls ++(0,-.8) and ++(0,-.8) .. (0.5,2.25) node[pos=0.2, shape=coordinate](tDOT){}; \draw[color=blue, thick, double distance=1pt, dashed] (M) .. controls ++(.4,1.4) and ++(-.5,-1) .. (-1.1,1.8) to[out=90, in=140] (tDOT); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \node at (tDOT){$\blacktriangleright$bullet}; \node at (DOT){$\blacktriangleright$bullet}; \node[blue] at (.6,1.45){$\scriptstyle f_3$}; \node[blue] at (-1.05,1.1){$\scriptstyle f_1$}; \node[blue] at (-.85,.65){$\scriptstyle f_2$}; \node at (1,2) {$\lambda$}; \draw[thick] (-0.5,0) .. 
controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0) node[pos=0.05, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1.75,2.25); \node at (D) {$\blacktriangleright$bullet}; \node[blue] at (-2,1.8){$\scriptstyle \lambda$}; $\blacktriangleleft$nd{tikzpicture} } $\blacktriangleleft$nd{equation} Then sliding one of the dots through the second term using $\blacktriangleleft$qref{eq:rightdotslide} we get a sum of two terms \[ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) -- (X); \node at (1,1.5) {$\lambda$}; \draw[thick] (-0.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0) node[pos=0, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1,2); \node at (D) {$\blacktriangleright$bullet}; \node[blue] at (-1.3,1.8){$\scriptstyle \lambda$}; $\blacktriangleleft$nd{tikzpicture} } \; = \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=1, shape=coordinate](tD){}; \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) -- (X); \node at (1,1.5) {$\lambda$}; \draw[thick] (-0.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. 
(0.5,0) node[pos=0, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1.25,2); \draw[color=blue, thick, dashed] (tD) .. controls ++(-1.4,.3) and ++(0,-.6) .. (-1,2); \node at (D) {$\blacktriangleright$bullet}; \node at (tD) {$\blacktriangleright$bullet}; \node[blue] at (-1.8,1.7){$\scriptstyle \lambda-1$}; $\blacktriangleleft$nd{tikzpicture} } \; + \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[thick] (-0.5,1) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](CUP){}; \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,-.6) and ++(0,-.6) .. (-1,2); \draw[thick, ->] (-0.5,-.25) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.5,-.25) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (-0.5,-.25) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,-.25) node[pos=0.05, shape=coordinate](D){}; \draw[color=blue, thick, double distance=1pt, dashed] (CUP) .. controls++(0,.5) and ++(-0,.5) .. (.5,.7) .. controls ++(0,-.3) and ++(0,.3) .. (CAP) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (D) to[out=160, in=-90] (-1.25,2); \node at (1,1.5) {$\lambda$}; \node at (D) {$\blacktriangleright$bullet}; \node[blue] at (-1.7,1.8){$\scriptstyle \lambda-1$}; $\blacktriangleleft$nd{tikzpicture} } \; = \;\; (-1)^{\lambda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[thick,->] (-0.5,1) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](CUP){}; \draw[color=blue, thick, dashed] (Y) .. 
controls ++(.1,-.6) and ++(0,-.6) .. (-1,2); \draw[color=blue, thick, double distance=1pt, dashed] (CUP) .. controls++(0,.5) and ++(-0,-.9) .. (-1.25,2); \node at (1,1.5) {$\lambda$}; \node[blue] at (-1.7,1.8){$\scriptstyle \lambda-1$}; $\blacktriangleleft$nd{tikzpicture} } \] where the first diagram is zero by the dot sliding relation, the positivity of bubbles axiom~$\blacktriangleleft$qref{eq:positivity}, and the curl relation $\blacktriangleleft$qref{eq:lgz-curl}. The second can be simplified using that degree zero bubbles are multiplication by 1. The last term in $\blacktriangleleft$qref{eq:lcurl1} can be rewritten as \[ \sum_{\xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = \lambda-1}; $\blacktriangleleft$ndxy} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1.2,1.5) .. controls ++(0,-.8) and ++(0,-.8) .. (.4,1.5) node[pos=0.85, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=160,in=-90] (0,1.5); \draw[thick, ->] (-1.1,-.25) .. controls ++(-0,0.6) and ++(0,0.6) .. (-.3,-.25) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-1.1,-.25) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.3,-.25) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.5,above]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed, double distance=1pt] (Z) .. controls ++(-.9,0) and ++(0,-.7) ..(-1.5,1.5) node[pos=0.9,left]{$\scriptstyle f_2+1$\;};; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \draw[thick, ->] (1.1,0) .. controls ++(0,0.6) and ++(0,0.6) .. (.3,0) node[pos=0.05, shape=coordinate](C){}; \draw[thick] (1.1,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (.3,0) node[pos=0.5, shape=coordinate](A){} node[pos=0.2, shape=coordinate](B){}; \draw[color=blue, thick, double distance=1pt, dashed] (A) .. 
controls++(-.1,.5) and ++(-.2,.3) .. (B) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick,double distance=1pt, dashed] (C) .. controls ++(-.3,.4) and ++(-.1,-1) .. (-.75,1.5) node[pos=0.9,left]{$\scriptstyle f_3$\;}; \node[blue] at (-.25,1.4){$\scriptstyle f_1$\;}; \node at (B) {$\bullet$}; \node at (C) {$\bullet$};\node at (D) {$\bullet$}; \node at (1.75,.8) {$\lambda$}; \end{tikzpicture} } \quad = \quad - \sum_{f=0}^{\lambda-1} (-1)^{\lambda-f} \hackcenter{\begin{tikzpicture} \draw[thick, ->] (1.2,1.5) .. controls ++(0,-.8) and ++(0,-.8) .. (.4,1.5) node[pos=0.85, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=160,in=-90] (0,1.5); \draw[thick, ->] (1.1,0) .. controls ++(0,0.6) and ++(0,0.6) .. (.3,0) node[pos=0.05, shape=coordinate](C){}; \draw[thick] (1.1,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (.3,0) node[pos=0.5, shape=coordinate](A){} node[pos=0.2, shape=coordinate](B){}; \draw[color=blue, thick, double distance=1pt, dashed] (A) .. controls++(-.1,.5) and ++(-.2,.3) .. (B) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick,double distance=1pt, dashed] (C) .. controls ++(-.3,.4) and ++(-.1,-1) .. (-.75,1.5) node[pos=0.9,left]{$\scriptstyle \lambda-f$\;}; \node[blue] at (-.25,1.4){$\scriptstyle f$\;}; \node at (B) {$\bullet$}; \node at (C) {$\bullet$};\node at (D) {$\bullet$}; \node at (1.75,.8) {$\lambda$}; \end{tikzpicture} } \] using relation \eqref{eq:fake-bubble}. Combining these bubble terms with the term on the right of \eqref{eq:lcurl1} and using the adjoint structure completes the claim. The case $\lambda<0$ is proven similarly.
\end{proof} \begin{cor}[Dotted curls] \label{cor:dotcurl} For $m \geq 0$ the following dotted curl relations \begin{align} \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick] (0.5,1) -- (0.5,2); \draw[thick] (0.5,-.5) -- (0.5,0); \draw[thick] (-1.5,0) -- (-1.5,1); \draw[thick] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1); \draw[thick, ->] (-0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (-1.5,1); \draw[thick] (-1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.5,0) node[pos=0.5, shape=coordinate](CUP){} node[pos=.9, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (X) .. controls ++(-1.2,0) and ++(0,-1.2) ..(0,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) .. controls ++(-0,.5) and ++(0,-1.6) ..(-2,2); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) .. controls ++(-.3,.8) and ++(0,-.8) ..(-1.5,2); \node at (0,-0.25) {$\lambda$}; \node at (DOT) {$\bullet$}; \node[blue] at (-2.4,1.8) {$\scriptstyle \lambda-1$}; \node[blue] at (-1.2,1.8) {$\scriptstyle m$}; \end{tikzpicture}} &\;\; = \;\; \sum_{f+g=m+\lambda} (-1)^{g} \;\; \hackcenter{\begin{tikzpicture} [scale=0.9] \draw[thick, ->] (2.25,-1) -- (2.25,1.25) node[pos=0.5, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (1.75,1.25); \draw[thick, ->] (1.1,0) .. controls ++(0,0.6) and ++(0,0.6) .. (.3,0) node[pos=0.05, shape=coordinate](C){}; \draw[thick] (1.1,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (.3,0) node[pos=0.5, shape=coordinate](A){} node[pos=0.2, shape=coordinate](B){}; \draw[color=blue, thick, double distance=1pt, dashed] (A) .. controls++(-.1,.5) and ++(-.2,.3) ..
(B) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick,double distance=1pt, dashed] (C) .. controls ++(-.7,.2) and ++(0,-.8) .. (1.25,1.25) node[pos=0.9,left]{$\scriptstyle f$\;}; \node at (B) {$\blacktriangleright$bullet}; \node at (D) {$\blacktriangleright$bullet}; \node at (C) {$\blacktriangleright$bullet}; \node[blue] at (1.95,1) {$\scriptstyle g$}; \node at (-0,-.5) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } & \text{for $\lambda\mathfrak{g}eq 0$,} \\ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) .. controls ++(0,.3) and ++(0,-.3).. (0.5,2); \draw[thick] (-0.5,-.5) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=1, shape=coordinate](MD){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,1) node[pos=0.5, shape=coordinate](CUP){}; \draw[thick, ->] (1.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(-1,0) and ++(0,-.9) ..(-1.5,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) .. controls ++(0,.3) and ++(0,-.3) .. (-0.5,2); \draw[color=blue, thick, double distance=1pt,dashed] (MD) .. controls ++(-.3,.4) and ++(0,-.5) .. (-1,2); \node at (MD) {$\blacktriangleright$bullet}; \node at (0,-0.25) {$\lambda$}; \node[blue] at (1,1.8) {$\scriptstyle \lambda-1$}; \node[blue] at (-1.2,1.8) {$\scriptstyle m$}; $\blacktriangleleft$nd{tikzpicture}} &\;\; = \;\; \sum_{f+g=m-\lambda} (-1)^{f} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.9] \draw[thick, ->] (-1.75,-1) -- (-1.75,1.25) node[pos=0.3, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (-3,1.25); \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. 
(0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1.5,.2) and ++(0,-.5) .. (-2.25,1.25) node[pos=0.9, left] {$\scriptstyle g$}; \node[blue] at (-3.3,1.1) {$\scriptstyle f$}; \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \node at (D) {$\bullet$}; \node at (-1.25,-.5) {$\lambda$}; \end{tikzpicture} } & \text{for $\lambda\leq 0$,} \end{align} hold in $U_qc$. \end{cor} \begin{proof} This follows immediately from Proposition~\ref{prop:othercurl} and the inductive dot slide formula \eqref{eq:inductive-dot}. \end{proof} The utility of fake bubbles is demonstrated by the previous corollary since the summation on the right-hand side involves both real and fake bubbles. Notice the similarity between Corollary~\ref{cor:dotcurl} and the following proposition. \begin{prop}[Dotted curls] \label{prop-dotted-curl} The following dotted curl relations \begin{align} \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick] (0.5,1.5) -- (0.5,2); \draw[thick] (0.5,-.5) -- (0.5,.5); \draw[thick] (-0.5,0) -- (-0.5,.5) node[pos=.2, shape=coordinate](MD){} node[pos=1, shape=coordinate](TD){}; \draw[thick] (-1.5,0) -- (-1.5,1.5); \draw[thick] (0.5,.5) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1.5) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,.5) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1.5); \draw[thick, ->] (-0.5,1.5) .. controls ++(0,0.6) and ++(0,0.6) ..
(-1.5,1.5); \draw[thick] (-1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.5,0) node[pos=0.5, shape=coordinate](CUP){} node[pos=.8, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (X) .. controls ++(-.8,.3) and ++(-.3,.4) ..(TD); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) .. controls ++(-0,.5) and ++(-.3,.2) ..(DOT); \draw[color=blue, thick, double distance=1pt,dashed] (MD) .. controls ++(.4,0) and ++(0,-1.4) ..(-2,2); \node at (0,-0.25) {$\lambda$}; \node at (DOT) {$\blacktriangleright$bullet};\node at (MD) {$\blacktriangleright$bullet};\node at (TD) {$\blacktriangleright$bullet}; \node[blue] at (-2.5,1.8) {$\scriptstyle m+\lambda$}; $\blacktriangleleft$nd{tikzpicture}} &\;\; = \;\; \sum_{f+g=m+\lambda} (-1)^{g} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.9] \draw[thick, ->] (2.25,-1) -- (2.25,1.25) node[pos=0.5, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (1.75,1.25); \draw[thick, ->] (1.1,0) .. controls ++(0,0.6) and ++(0,0.6) .. (.3,0) node[pos=0.05, shape=coordinate](C){}; \draw[thick] (1.1,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (.3,0) node[pos=0.5, shape=coordinate](A){} node[pos=0.2, shape=coordinate](B){}; \draw[color=blue, thick, double distance=1pt, dashed] (A) .. controls++(-.1,.5) and ++(-.2,.3) .. (B) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick,double distance=1pt, dashed] (C) .. controls ++(-.7,.2) and ++(0,-.8) .. 
(1.25,1.25) node[pos=0.9,left]{$\scriptstyle f$\;}; \node at (B) {$\blacktriangleright$bullet}; \node at (D) {$\blacktriangleright$bullet}; \node at (C) {$\blacktriangleright$bullet}; \node[blue] at (1.95,1) {$\scriptstyle g$}; \node at (-0,-.5) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } & \text{for $\lambda<0$,} \\ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, ->] (-0.5,1) -- (-0.5,2); \draw[thick] (-0.5,-.5) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=1, shape=coordinate](MD){} node[pos=0.7, shape=coordinate](BD){}; \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(-.2,0.8) and ++(.2,0.8) .. (1.5,1) node[pos=0.2, shape=coordinate](LC){} node[pos=0.58, shape=coordinate](C){}; \draw[thick, ->] (1.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.5,.4) and ++(-.2,.5) ..(BD); \draw[color=blue, thick, double distance=1pt,dashed] (LC) .. controls ++(-.2,.7) and ++(.2,.7) .. (C); \draw[color=blue, thick, double distance=1pt,dashed] (MD) .. controls ++(-.3,.4) and ++(0,-.5) .. (-1,2) node[pos=0.85, left]{$\scriptstyle m-\lambda$}; \node at (0,-0.25) {$\lambda$}; \node at (LC) {$\blacktriangleright$bullet};\node at (MD) {$\blacktriangleright$bullet};\node at (BD) {$\blacktriangleright$bullet}; \node[blue] at (1.54,1.8) {$\scriptstyle \lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} &\;\; = \;\; \sum_{f+g=m-\lambda} (-1)^{f} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.9] \draw[thick, ->] (-1.75,-1) -- (-1.75,1.25) node[pos=0.3, shape=coordinate](D){}; \draw[color=blue, thick,double distance=1pt, dashed] (D) to[out=150,in=-90] (-3,1.25); \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. 
(0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1.5,.2) and ++(0,-.5) .. (-2.25,1.25) node[pos=0.9, left] {$\scriptstyle g$}; \node[blue] at (-3.3,1.1) {$\scriptstyle f$}; \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \node at (D) {$\bullet$}; \node at (-1.25,-.5) {$\lambda$}; \end{tikzpicture} } & \text{for $\lambda>0$,} \end{align} hold in $U_qc$. \end{prop} \begin{proof} These relations follow immediately from the relations in $U_qc$, those in Proposition~\ref{prop:othercurl} above, and the inductive dot slide formula \eqref{eq:inductive-dot}. \end{proof} The dotted curl relations derived above imply that the equations in \eqref{eq:fake-bubble} defining the fake bubbles switch form as $m$ grows large. More precisely, we have the following proposition. \begin{prop} The following relations \begin{align} \label{eq_biginfgrass1} \sum_{f+g=m} (-1)^{g} \xy (0,-2)*{ \begin{tikzpicture}[scale=0.9] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) ..
(Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ; \node[blue] at (1.3,1.1){$\scriptstyle f$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (-.25,1.1) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle g$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture} } &\;\; = \;\; \partialta_{m,0}1bb_{1bbl} & \text{for $\lambda>0$ and $\lambda <m $.} \\ \lambdaanglebel{eq_biginfgrass2} \sum_{f+g=m} (-1)^{g} \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle f$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture} } \xy (0,-2)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ; \node[blue] at (1.3,0.9){$\scriptstyle g$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (-.5,1.1) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy\;\; &\;\; = \;\; \partialta_{m,0}1bb_{1bbl} &\text{for $\lambda <0$ and $-\lambda < m$, } $\blacktriangleleft$nd{align} hold in $U_qc$. $\blacktriangleleft$nd{prop} $\blacktriangleright$egin{proof} Equation $\blacktriangleleft$qref{eq_biginfgrass1} follows by simplifying the diagram \[ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-1.5,0) -- (-1.5,1); \draw[thick] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=1, shape=coordinate](MD){}; \draw[thick, ->] (-0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (-1.5,1); \draw[thick] (-1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.5,0) node[pos=0.5, shape=coordinate](CUP){} node[pos=.9, shape=coordinate](DOT){}; \draw[thick, ->] (0.5,1) .. 
controls ++(0,0.5) and ++(0,0.5) .. (1.5,1) node[pos=0.5, shape=coordinate](CUP2){}; \draw[thick, ->] (1.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (0.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.2,0) and ++(0,-1.2) ..(-0.5,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP) .. controls ++(-0,.5) and ++(0,-1.6) ..(-2,2); \draw[color=blue, thick, double distance=1pt,dashed] (CUP2) .. controls ++(0,.3) and ++(0,-.3) .. (1,2); \draw[color=blue, thick, double distance=1pt,dashed] (MD) .. controls ++(-.3,.4) and ++(0,-.5) .. (0,2); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) .. controls ++(-.3,.8) and ++(0,-.8) ..(-1.5,2); \draw[thick] (1.5,0) -- (1.5,1); \node at (0,-0.25) {$\lambda$}; \node at (DOT) {$\bullet$}; \node at (MD) {$\bullet$}; \node[blue] at (-2.4,1.8) {$\scriptstyle \lambda-1$}; \node[blue] at (1.5,1.8) {$\scriptstyle \lambda-1$}; \node[blue] at (-1.1,1.8) {$\scriptstyle m_1$}; \node[blue] at (.4,1.8) {$\scriptstyle m_2$}; \end{tikzpicture}} \] in two possible ways using the dotted curl relations from Corollary~\ref{cor:dotcurl} and Proposition~\ref{prop-dotted-curl}. Equation~\eqref{eq_biginfgrass2} is proven similarly. \end{proof} \subsection{An upper bound on the size of Homs in $U_qc$} \label{subsec:upper} \subsubsection{Graded 2-hom-spaces} For each pair of 1-morphisms $x$ and $y$ of $U_qc$ there is a ${\mathbbm Z}$-graded vector space \begin{equation} {\rm HOM}_{U_qc}(x,y) := \bigoplus_{t \in {\mathbbm Z}} {\rm Hom}(x, y\langle t\rangle).
\end{equation}
Using the super-2-category structure of $U_qc$ we can form the $({\mathbbm Z} \times {\mathbbm Z}_{2})$-graded vector space
\[ \Pi{\rm HOM}_{U_qc}(x,y) := \bigoplus_{t \in {\mathbbm Z}} {\rm Hom}(x, y\langle t\rangle) \oplus \bigoplus_{t \in {\mathbbm Z}}{\rm Hom}(x, \Pi y\langle t\rangle) \]
between 1-morphisms $x$ and $y$. The graded dimension of this graded vector space is defined by
\[ \dim_{q,\pi} \left(\Pi{\rm HOM}_{U_qc}(x,y) \right) = \sum_{t \in {\mathbbm Z}} q^t\dim {\rm Hom}(x, y \langle t \rangle ) +\pi\sum_{t \in {\mathbbm Z}} q^t\dim {\rm Hom}(x, \Pi y\langle t \rangle ). \]
\subsubsection{Spanning sets for endomorphisms of $\mathbbm{1}_\lambda$}
Following the arguments in \cite[Section 8]{Lau1}, the relations from Section~\ref{subsec:additional} give rise to spanning sets for the space of homs between arbitrary 1-morphisms in $U_qc$. Let
\[\Xi := \Bbbk \langle z_1,z_2,\dots, z_a,\dots \rangle / (z_a z_b - (-1)^{a \cdot b} z_b z_a \; \text{for all $a,b$}) \]
denote the $({\mathbbm Z} \times {\mathbbm Z}_{2})$-graded superalgebra generated by symbols $z_a$ of degree $|z_a|=2a$ and parity $p(z_a)=a$. The superalgebra $\Xi$ is supercommutative. If $2\in\Bbbk^\times$ it is graded-commutative (the tensor product of a polynomial algebra in the $z_a$'s for $a$ even with an exterior algebra in the $z_a$'s for $a$ odd). If $2=0$ in $\Bbbk$ then it is a polynomial ring. Any monomial $z_{a_1} \dots z_{a_k}$ in $\Xi$ can be identified with a 2-morphism in $\Pi{\rm HOM}_{U_qc}(\mathbbm{1}_\lambda,\mathbbm{1}_\lambda)$ via the assignment sending $z_a$ to the 2-morphism
\[ \begin{array}{ccc} \hackcenter{ \begin{tikzpicture}[scale=0.75] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) ..
controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (.75,1.25) ; \node[blue] at (1.1,1.1){$\scriptstyle a$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0.0]{$\blacktriangleright$bullet}; \node at (-1,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } \quad\text{for $\lambda\lambdaeq0 $,}& \qquad \text{and}\qquad & \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle a$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (-1.2,-.2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{for $\lambda\mathfrak{g}eq 0$}. 
$\blacktriangleleft$nd{array} \] This assignment gives rise to a degree-preserving superalgebra homomorphism mapping multiplication in $\Xi$ to horizontal juxtaposition of diagrams coming from horizontal composition in ${\mathbbm P}i{\rm HOM}_{U_qc}(1bbl,1bbl)$. Arguing as in \cite[Proposition 8.2]{Lau1} gives the following: $\blacktriangleright$egin{prop} The map $\blacktriangleright$egin{equation} \lambdaanglebel{eq_ltol} \Xi \rightarrow {\mathbbm P}i{\rm HOM}_{U_qc}(1bbl,1bbl). $\blacktriangleleft$nd{equation} described above is a surjective homomorphism of graded superalgebras. $\blacktriangleleft$nd{prop} In particular, any diagram representing a 2-morphism ${\rm HOM}_{U_qc}(1bbl, {\mathbbm P}i^{s} 1bbl)$ can be reduced to a linear combination of products of non-nested dotted bubbles of the same orientation whose dashed lines never intersect. \[ $\blacktriangleright$egin{array}{ccc} \lambda\lambdaeq0 & \qquad \qquad &\lambda\mathfrak{g}eq 0 \\ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (.75,1.25) ; \node[blue] at (1.1,1.1){$\scriptstyle a_1$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. 
(-0.5,0) node[pos=0.0]{$\blacktriangleright$bullet}; \node at (-1,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (.75,1.25) ; \node[blue] at (1.1,1.1){$\scriptstyle a_2$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0.0]{$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture} } \dots \quad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (.75,1.25) ; \node[blue] at (1.1,1.1){$\scriptstyle a_k$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. 
(-0.5,0) node[pos=0.0]{$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture} } & \qquad \qquad & \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle a_1$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (-1.2,-.2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle a_2$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. 
(0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \dots \quad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.2){$\scriptstyle a_k$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{array} \] The image of the monomial basis of $\Xi$ under the surjective homomorphism $\blacktriangleleft$qref{eq_ltol} is a homogeneous spanning set of the graded $\dot{\mathbb{B}}bbk$-module ${\mathbbm P}i{\rm HOM}_{U_qc}(1bbl, 1bbl)$. We call such a monomial in the image of $\Xi$ a {$\blacktriangleleft$m bubble monomial}. Define the power series $\blacktriangleright$egin{equation} \lambdaanglebel{eq-def-xi}$\blacktriangleright$egin{split} \xi_{\mathbbm Z} &:= \prod_{a=1}^{\infty} (1-(\pi q^2)^{2a})^{-1},\\ \xi_0 &:= \prod_{a=1}^{\infty} (1-(\pi q^2)^{2a})^{-1}\cdot\prod_{a=1}^{\infty} (1+(\pi q^2)^{2a-1}),\\ \xi_2 &:= \prod_{a=1}^{\infty} (1-(\pi q^2)^a)^{-1}. 
\end{split}\end{equation}
\begin{cor} \label{cor:homonel}
If $\Bbbk={\mathbbm Z}$ (respectively $\Bbbk$ is a field of characteristic $2$, respectively $\Bbbk$ is a field of characteristic other than $2$), then
\begin{equation*} \dim_{q,\pi} \Pi{\rm HOM}_{U_qc}(\mathbbm{1}_\lambda, \mathbbm{1}_\lambda) \leq \xi \end{equation*}
termwise and $\Pi{\rm HOM}_{U_qc}(\mathbbm{1}_\lambda, \mathbbm{1}_\lambda)$ is graded local. Here $\xi=\xi_{\mathbbm Z}$ (respectively $\xi=\xi_2$, respectively $\xi=\xi_0$). In the case $\Bbbk={\mathbbm Z}$, by $\dim$ we mean free rank (and say nothing about torsion).
\end{cor}
\subsubsection{Other spanning sets} \label{subsubsec-otherspanning}
Let $\epsilon = (\epsilon_1,\dots, \epsilon_k)$ denote a {\em covering sequence} consisting of symbols $\epsilon_i$ in the alphabet $\{ -, +, \circ \}$. Let ${\rm CSeq}$ denote the set of all such covering sequences. We write ${\rm Seq}$ for those sequences $\epsilon$ with each $\epsilon_i$ in the sub-alphabet $\{ -,+\}$ and refer to such sequences as {\em signed sequences}. Covering sequences index 1-morphisms in $U_qc$ by setting $\mathcal{E}_{+} := \mathcal{E}$, $\mathcal{E}_{-} := \mathcal{F}$ and $\mathcal{E}_{\circ} := \Pi$, so that
\[ \mathcal{E}_{\epsilon}\mathbbm{1}_\lambda := \mathcal{E}_{\epsilon_1} \dots \mathcal{E}_{\epsilon_k}\mathbbm{1}_\lambda. \]
The empty sequence $\epsilon=\emptyset$ corresponds to the element $\mathcal{E}_{\emptyset}\mathbbm{1}_\lambda = \mathbbm{1}_\lambda$.
Every covering sequence $$\blacktriangleleft$psilon$ determines a signed sequence denoted $\underline{$\blacktriangleleft$psilon}$ since the super-2-category structure implies every 1-morphism of the form $\mathcal{E}_{$\blacktriangleleft$psilonsilon}1bbl$ is canonically isomorphic to a 1-morphism ${\mathbbm P}i^s\mathcal{E}_{\underline{$\blacktriangleleft$psilonsilon}}1bbl$ where $s$ is the number of $\circ$ symbols in $$\blacktriangleleft$psilonsilon$ and $\underline{$\blacktriangleleft$psilonsilon}$ is the signed sequence obtained from $$\blacktriangleleft$psilonsilon$ by removing all $\circ$ symbols. A diagram representing a 2-morphism in ${\rm HOM}_{U_qc}(\mathcal{E}_{$\blacktriangleleft$psilonsilon}1bbl, \mathcal{E}_{$\blacktriangleleft$psilonsilon'}1bbl)$ has lower boundary labelled by the covering sequence $$\blacktriangleleft$psilonsilon$ and upper boundary labelled by $$\blacktriangleleft$psilonsilon'$ (where lines labelled $+$ are oriented up, lines labelled $-$ are oriented down, and lines labelled $\circ$ are dashed). \[ \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, shape=coordinate](CROSSING2){}; \draw[thick, ->] (1,0) .. controls (1,1) and (0,1) .. (0,2); \draw[thick] (.5,0) .. controls (.5,.5) and (1,.5) .. (1,1) node[pos=.65, shape=coordinate](CROSSING1){}; \draw[thick, ->] (1,1) .. controls (1,1.5) and (.5,1.5) .. (.5,2) node[pos=.35, shape=coordinate](CROSSING3){} node[pos=.65, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1,2); \draw[thick, color=blue, dashed] (CROSSING2) [out=180, in=-90] to (-.5,2); \draw[thick, color=blue, dashed] (CROSSING3) .. controls ++(-.9,.1) and ++(-.4,.4) .. (DOT); \node() at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}}; (24,2)*{$\blacktriangleright$egin{tikzpicture}[scale=0.75] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. 
(0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) to[out=180, in=90] (-1,1.5) ; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}}; (0,-13)*{-}; (5,-13)*{+}; (11,-13)*{+}; (-10,13)*{\circ}; (-5,13)*{\circ}; (0,13)*{+}; (5,13)*{+}; (10,13)*{-}; (18,13)*{\circ}; (55,-13)*{\mathcal{E}_{-++}1bbl := \mathcal{F}\mathcal{E}\mathcal{E}1bbl}; (60,13)*{\mathcal{E}_{\circ\circ++-\circ}1bbl := {\mathbbm P}i {\mathbbm P}i\mathcal{E}\mathcal{E}\mathcal{F}{\mathbbm P}i1bbl}; $\blacktriangleleft$ndxy \] In order to define the spanning set we first recall the spanning sets from \cite[Section 3.2]{KL3} for the original 2-category $U_qcev$. Mark $m$ points $1 \times \{0\},2 \times \{0\}, \lambdadots, m \times \{0\}$ on the lower boundary ${\mathbbm R} \times \{0\}$ of the strip ${\mathbbm R} \times [0,1]$ and $k$ points $1 \times \{1\},2 \times \{1\}, \lambdadots, k \times \{1\}$ on the upper boundary ${\mathbbm R} \times \{1\}$. Assuming $m+k$ is even, choose an immersion of $\frac{m+k}{2}$ strands into ${\mathbbm R}\times [0,1]$ with these $m+k$ points as the endpoints, and such that critical values for the height function are isolated, distinct, and nondegenerate. Orient each strand. Then endpoints inherit orientations from the strands. Orientation at lower and upper endpoints define signed sequences $$\blacktriangleleft$psilonsilon, $\blacktriangleleft$psilonsilon' \in {\rm Seq}$. 
We consider immersions modulo boundary-preserving homotopies and call them pairings between signed sequences $\epsilon$ and $\epsilon'$, or simply $(\epsilon,\epsilon')$-pairings. Critical values are allowed to momentarily pass each other in homotopies. There is a bijection between $(\epsilon,\epsilon')$-pairings and complete matchings of the $m+k$ points such that the two points in each matching pair have compatible orientations.
A minimal diagram $D$ of an $(\epsilon,\epsilon')$-pairing is a generic immersion that realizes the pairing such that strands have no self-intersections and any two strands intersect at most once. We consider minimal diagrams up to boundary-preserving isotopies. For each $(\epsilon,\epsilon')$-pairing fix a choice of minimal diagram $D$, and denote by $p(\epsilon,\epsilon')$ the set of the minimal diagram representatives of $(\epsilon,\epsilon')$-pairings.
For each diagram $D$ in $p(\epsilon,\epsilon')$ choose an interval on each arc, away from the intersections. Let $B'_{\epsilon,\epsilon'}$ denote the union, over all $D$, of diagrams built from $D$ by putting an arbitrary number of dots on each of the intervals. Forgetting the dashed lines in a bubble monomial from $\Xi$ gives rise to a 2-endomorphism of the 1-morphism $\mathbbm{1}_\lambda$ in the (even) 2-category $U_qcev$.
Let $B_{\epsilon,\epsilon',\lambda}$ be the set obtained from $B'_{\epsilon,\epsilon'}$ by labelling the rightmost region by the weight $\lambda$ and placing an arbitrary bubble monomial of $U_qcev$ in the rightmost region. Proposition 3.11 of \cite{KL3} shows that the sets $B_{\epsilon,\epsilon',\lambda}$ give a basis for the space of 2-morphisms ${\rm HOM}_{U_qcev}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$ in the 2-category $U_qcev$. For each $D \in B_{\epsilon,\epsilon',\lambda}$ we refer to each crossing and dot in $D$ as an {\em internal vertex} of the diagram $D$.
For the covering 2-category $U_qc$ we need some additional structure to describe spanning sets. Given two covering sequences $\epsilon, \epsilon' \in {\rm CSeq}$ with associated signed sequences $\underline{\epsilon}, \underline{\epsilon'} \in {\rm Seq}$, let $D$ be an element of $B_{\underline{\epsilon},\underline{\epsilon'},\lambda}$. An {\em assignment of $\Pi$-data} to $D$ relative to the covering sequences $\epsilon$ and $\epsilon'$ is a pairing $\zeta$ on the union of the set of internal vertices of $D$ and the set of points labelled by $\circ$ on the boundary. Diagrammatically, pairs are joined by non-intersecting dashed arcs in generic position relative to the diagram $D$.
Dashed lines leaving an internal vertex that intersect solid strands connected to the same internal vertex are always assumed to intersect in a clock-wise fashion: \[ \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) -- (0,1.5) node[pos=.35, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(-.4,.2) and ++(0,-.3) .. (-.5,1) .. controls ++(0,.3) and ++(0,-.5) .. (.5,1.5); \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \qquad \qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) .. controls (0,.5) and (1,1) .. (1,1.5) node[pos=.5, shape=coordinate](CROSSING){}; \draw[thick, color=blue, dashed] (CROSSING).. controls ++(-1.2,.1) and ++(0,-.5) .. (.5,1.5); \draw[thick, ->] (1,0) .. controls (1,.5) and (0,1) .. (0,1.5); $\blacktriangleleft$nd{tikzpicture}} \] Such pairs $(D,\zeta)$ determine a 2-morphism, denoted $D^{\zeta}(\lambdaanglembda)$, in $U_qc$. The above conventions ensure that any choice of ${\mathbbm P}i$-data on a fixed $D$ represents the same 2-morphism in $U_qc$. $\blacktriangleright$egin{example} Several examples of ${\mathbbm P}i$-data are shown below for a diagram $D$ in $B_{-++,\circ\circ++-\circ, \lambdaanglembda}$. \[ \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (0,0) .. controls ++(0,1) and ++(0,-1) .. (1.4,3) node[pos=.5, shape=coordinate](CROSSING2){}; \draw[thick, ->] (1.4,0) .. controls ++(0,1) and ++(0,-1) .. (0,3); \draw[thick] (.7,0) .. controls ++(0,.5) and ++(0,-.5) .. (1.4,1.5) node[pos=.56, shape=coordinate](CROSSING1){}; \draw[thick, ->] (1.4,1.5) .. controls ++(0,.5) and ++(0,-.5) .. (.7,3) node[pos=.41, shape=coordinate](CROSSING3){} node[pos=.65, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1,3); \draw[thick, color=blue, dashed] (CROSSING2) [out=180, in=-90] to (-.5,3); \draw[thick, color=blue, dashed] (CROSSING3) .. 
controls ++(-1.2,.1) and ++(-.6,.4) .. (DOT); \node() at (DOT) {$\blacktriangleright$bullet}; \draw[thick, ->] (2.2,.6) .. controls ++(-0,0.6) and ++(0,0.6) .. (3,.6) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (2.2,.6) .. controls ++(0,-0.6) and ++(0,-0.6) .. (3,.6) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.1) and ++(0,-.8) .. (2.1,3) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \qquad \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (0,0) .. controls ++(0,1) and ++(0,-1) .. (1.4,3) node[pos=.5, shape=coordinate](CROSSING2){}; \draw[thick, ->] (1.4,0) .. controls ++(0,1) and ++(0,-1) .. (0,3); \draw[thick] (.7,0) .. controls ++(0,.5) and ++(0,-.5) .. (1.4,1.5) node[pos=.56, shape=coordinate](CROSSING1){}; \draw[thick, ->] (1.4,1.5) .. controls ++(0,.5) and ++(0,-.5) .. (.7,3) node[pos=.41, shape=coordinate](CROSSING3){} node[pos=.65, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (CROSSING1) [out=180, in=-90] to (-1,3); \draw[thick, color=blue, dashed] (CROSSING2) .. controls ++(-1.2,.2) and ++(-.6,.3) .. (CROSSING3); \draw[thick, color=blue, dashed] (-.5,3) .. controls ++(0,-.4) and ++(-.4,.2) .. (DOT); \node() at (DOT) {$\blacktriangleright$bullet}; \draw[thick, ->] (2.2,.6) .. controls ++(-0,0.6) and ++(0,0.6) .. (3,.6) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (2.2,.6) .. controls ++(0,-0.6) and ++(0,-0.6) .. (3,.6) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.1) and ++(0,-.8) .. (2.1,3) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \qquad \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (0,0) .. controls ++(0,1) and ++(0,-1) .. (1.4,3) node[pos=.5, shape=coordinate](CROSSING2){}; \draw[thick, ->] (1.4,0) .. controls ++(0,1) and ++(0,-1) .. (0,3); \draw[thick] (.7,0) .. controls ++(0,.5) and ++(0,-.5) .. (1.4,1.5) node[pos=.56, shape=coordinate](CROSSING1){}; \draw[thick, ->] (1.4,1.5) .. controls ++(0,.5) and ++(0,-.5) .. (.7,3) node[pos=.41, shape=coordinate](CROSSING3){} node[pos=.65, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (-1,3) .. controls ++(0,-.8) and ++(-.6,.2) .. (CROSSING3); \draw[thick, color=blue, dashed] (CROSSING2) .. controls ++(-1.5,.4) and ++(0,-1.5) ..(2.1,3); \draw[thick, color=blue, dashed] (-.5,3) .. controls ++(0,-.4) and ++(-.4,.2) .. (DOT); \node() at (DOT) {$\blacktriangleright$bullet}; \draw[thick, ->] (2.2,.6) .. controls ++(-0,0.6) and ++(0,0.6) .. (3,.6) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (2.2,.6) .. controls ++(0,-0.6) and ++(0,-0.6) .. (3,.6) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-.2,.1) and ++(0,-.4) .. (1.6,1) .. controls ++(-.4,.7) and ++(-.8,.2) .. 
(CROSSING1) ; \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \end{tikzpicture}}; \endxy \]
\end{example}
Denote by $B_{\epsilon,\epsilon',\lambda}^{\pi}$ the set obtained by fixing a choice of $\Pi$-data $\zeta$ for each diagram $D$ in $B_{\epsilon,\epsilon',\lambda}$. Arguing as in \cite[Section 3.2.3]{KL3} we have the following:
\begin{prop}
The set $B_{\epsilon,\epsilon',\lambda}^{\pi}$ is a homogeneous spanning set for the graded $\Bbbk$-module ${\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$.
\end{prop}
\begin{prop} \label{conj-spanning-2}
If $\Bbbk$ is a field of characteristic $2$, then $B_{\epsilon,\epsilon',\lambda}^{\pi}$ is a homogeneous basis for the graded $\Bbbk$-vector space ${\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$.
\end{prop}
\begin{proof}
Given a 2-morphism $D^\zeta(\lambda)$, let $\text{ev}(D^\zeta(\lambda))$ be the 2-morphism in the (non-super) 2-category $U_qcev\otimes_{\mathbbm Z}\Bbbk$ obtained by deleting all dashed lines. Since the 2-hom relations in $U_qc\otimes_{\mathbbm Z}\Bbbk$ and $U_qcev\otimes_{\mathbbm Z}\Bbbk$ are identical, it follows that their 2-hom-spaces have equal graded dimensions over $\Bbbk$.
Since graded 2-hom-space dimensions in $U_qcev\otimes_{\mathbbm Z}\Bbbk$ are in accord with \eqref{eqn-graded-dim-conj} (with the $\pi$'s removed), the claim follows.
\end{proof}
It follows that over a field of characteristic $2$, given two signed sequences $\epsilon, \epsilon' \in {\rm Seq}$, the graded dimension of $\Pi{\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$ is given by
\begin{equation}\label{eqn-graded-dim-conj}
\dim_{q,\pi}\Pi{\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda) = \sum_{D^{\zeta} \in B_{\epsilon,\epsilon',\lambda}^{\pi}} q^{\deg (D^{\zeta}(\lambda))} + \pi \sum_{D^{\zeta} \in B_{\epsilon,\circ\epsilon',\lambda}^{\pi}} q^{\deg (D^{\zeta}(\lambda))},
\end{equation}
where $\deg (D^{\zeta}(\lambda))$ denotes the $q$-degree of the 2-morphism $D^{\zeta}(\lambda)$ given by the sum of the degrees of each generator from \eqref{eq_generators} and \eqref{eq_generators_cont}.
\begin{conj} \label{conj-spanning}
Let $\widetilde{B}_{\epsilon,\epsilon',\lambda}^{\pi}\subseteq B_{\epsilon,\epsilon',\lambda}^{\pi}$ be the subset consisting of diagrams in which all odd degree bubbles have distinct degrees (i.e., there are no ``repeated'' odd bubbles).
\begin{itemize}
\item If $\Bbbk$ is a field of characteristic not equal to $2$, then $\widetilde{B}_{\epsilon,\epsilon',\lambda}^{\pi}$ is a homogeneous basis for the graded $\Bbbk$-vector space ${\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$.
\item If $\Bbbk={\mathbbm Z}$, then the graded ${\mathbbm Z}$-module ${\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)$ has free part with homogeneous basis $\widetilde{B}_{\epsilon,\epsilon',\lambda}^{\pi}$ and torsion part the ${\mathbbm Z}_2$-vector space on the homogeneous basis $B_{\epsilon,\epsilon',\lambda}^{\pi}\setminus\widetilde{B}_{\epsilon,\epsilon',\lambda}^{\pi}$.
\end{itemize}
\end{conj}
If Conjecture \ref{conj-spanning} holds, then equations analogous to \eqref{eqn-graded-dim-conj} hold in these cases. Working over ${\mathbbm Z}$, we then have
\begin{equation*} \Pi{\rm HOM}_{U_qc}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda, \mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda)\cong{\mathbbm Z}^{f(q)}\oplus({\mathbbm Z}_{2})^{g(q)} \end{equation*}
as graded ${\mathbbm Z}$-modules, where $f,g$ are Laurent polynomials satisfying
\begin{equation*} \dim_q\Pi{\rm HOM}_{U_qcev}(\text{ev}(\mathcal{E}_{\epsilon}\mathbbm{1}_\lambda), \text{ev}(\mathcal{E}_{\epsilon'}\mathbbm{1}_\lambda))=f(q)+g(q).
\end{equation*}
Every 2-hom space in $U_qc$ can be expressed as the tensor product of a part consisting of strand diagrams with no bubbles and a part consisting of several bubbles (non-nested, with the same orientation). It will follow from the results of Subsection \ref{subsec-consequences} that the strand-diagram part is a free ${\mathbbm Z}$-module. Thus Conjecture \ref{conj-spanning}, taken over ${\mathbbm Z}$, is reduced to the following weaker conjecture.
\begin{conj} \label{conj-free}
The map $\Xi\rightarrow\Pi{\rm HOM}_{U_qc}(\mathbbm{1}_\lambda,\mathbbm{1}_\lambda)$ of \eqref{eq_ltol} is an isomorphism of graded ${\mathbbm Z}$-modules.
\end{conj}
\subsection{Karoubi envelopes and 2-representations}
\label{subsec:2reps}
\subsubsection{Karoubi envelopes}
Recall that the Karoubi envelope $Kar(\mathcal{C})$ of a category $\mathcal{C}$ is an enlargement of the category $\mathcal{C}$ in which all idempotents split (see \cite[Section 9]{Lau1} and references therein). There is a fully faithful functor $\mathcal{C} \rightarrow Kar(\mathcal{C})$ that is universal with respect to functors which split idempotents in $\mathcal{C}$. This means that if $F\colon \mathcal{C} \rightarrow \mathcal{D}$ is any functor where all idempotents split in $\mathcal{D}$, then $F$ extends uniquely (up to isomorphism) to a functor $\tilde{F} \colon Kar(\mathcal{C}) \rightarrow \mathcal{D}$ (see for example \cite{Bor}, Proposition 6.5.9).
\begin{defn}
Define the additive $\Bbbk$-linear super-2-category $U_qdotc$ to have the same objects as $U_qc$ and additive $\Bbbk$-linear 1-hom-categories given by ${\rm Hom}_{U_qdotc}(\lambda,\lambda') = Kar\left({\rm Hom}_{U_qc}(\lambda,\lambda')\right)$.
The fully-faithful additive $\dot{\mathbb{B}}bbk$-linear functors ${\rm Hom}_{U_qc}(\lambdaanglembda,\lambdaanglembda') \rightarrow {\rm Hom}_{U_qdotc}(\lambdaanglembda,\lambdaanglembda')$ combine to form an additive $\dot{\mathbb{B}}bbk$-linear 2-functor $U_qc \rightarrow U_qdotc$ universal with respect to splitting idempotents in the hom categories ${\rm Hom}_{U_qdotc}(\lambdaanglembda,\lambdaanglembda')$. The composition functor ${\rm Hom}_{U_qdotc}(\lambdaanglembda,\lambdaanglembda') \times {\rm Hom}_{U_qdotc}(\lambdaanglembda',\lambdaanglembda'') \rightarrow {\rm Hom}_{U_qdotc}(\lambdaanglembda,\lambdaanglembda'')$ is induced by the universal property of the Karoubi envelope from the composition functor for $U_qc$. The 2-category $U_qdotc$ has graded 2-hom-spaces given by $\blacktriangleright$egin{equation} {\rm HOM}_{U_qdotc}(x,y) := $\blacktriangleright$igoplus_{t\in {\mathbbm Z}}{\rm Hom}_{U_qdotc}(x,y \lambdaangle t\rangle). $\blacktriangleleft$nd{equation} $\blacktriangleleft$nd{defn} \subsubsection{Divided powers} Consider the 2-morphism \[ \overline{\partial}_{+,i} \quad := \quad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->] (-1.5,0) -- (-1.5,1.5); \draw[thick, ->] (1.5,0) -- (1.5,1.5); \node at (-2.25,.75){$\cdots$}; \node at (2.25,.75){$\cdots$}; \node at (3.6,.75){$\lambda$}; \draw[thick, ->] (3,0) -- (3,1.5); \draw[thick, ->] (-3,0) -- (-3,1.5); \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](dot){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. (-0.5,1.5); \draw[color=blue, thick, dashed] (X) .. controls++(-.5,0) and ++(-.65,.3) .. (dot); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.2]{\tikz \draw[fill=black] circle (0.45ex);}; $\blacktriangleleft$nd{tikzpicture} } \] in ${\rm END}_{U_qc}(\mathcal{E}^a1bbl)$ (the dot is on the $i$-th strand). 
Let $w_0=s_{i_1} \dots s_{i_k}$ denote a reduced decomposition of the longest word in the symmetric group $S_a$. It follows from \cite[Proposition 3.6]{EKL} that the element $e_{+,a}:=\overline{\partial}_{+,w_0}= \overline{\partial}_{+,i_1}\dots \overline{\partial}_{+,i_k}$ is an idempotent 2-morphism in $U_qc$. Likewise, define 2-morphisms $\overline{\partial}_{-,i}$ in ${\rm END}_{U_qc}(1bbl\mathcal{F}^a)$ by rotating the diagram for $\overline{\partial}_{+,i}$ 180 degrees. Let $e_{-,a}:= \overline{\partial}_{-,w_0}$ denote the corresponding idempotent in ${\rm END}_{U_qc}(1bbl\mathcal{F}^a)$. Following \cite{EKL} we introduce divided powers
\begin{align}
\mathcal{E}^{(a)}1bbl &:= \left( \mathcal{E}^a1bbl \langle \qbin{a}{2}\rangle, e_{+,a} \right) \\
\mathcal{F}^{(b)}1bbl &:= \left( \mathcal{F}^b1bbl \langle \qbin{b}{2}\rangle, e_{-,b} \right)
\end{align}
in the Karoubi envelope $U_qdotc$. Adding $\Pi$-data to the diagrams in \cite[Section 4.4]{EKL} as explained in Section~\ref{subsubsec-otherspanning} proves the following proposition.

\begin{prop}
The following relations hold in $U_qdotc$.
\begin{align}
\label{eq_EaEb} \mathcal{E}^{(a)} \mathcal{E}^{(b)}1bbl &= \bigoplus_{\qbins{a+b}{a}}\mathcal{E}^{(a+b)}1bbl, &\\
\label{eq_FaFb} \mathcal{F}^{(a)}\mathcal{F}^{(b)}1bbl &= \bigoplus_{\qbins{a+b}{a}}\mathcal{F}^{(a+b)}1bbl. &
\end{align}
\end{prop}

\subsubsection{2-representations}

\begin{defn}
A 2-representation of $U_qdotpi(\mathfrak{sl}_2)$ is a graded additive $\Bbbk$-linear super-2-functor $U_qdotc\rightarrow \mathcal{C}$ for some graded, additive super-2-category $\mathcal{C}$.
\end{defn}

When all of the 1-hom-categories ${\rm Hom}_{\mathcal{C}}(x,y)$ between objects $x$ and $y$ of $\mathcal{C}$ are idempotent complete, in other words $Kar(\mathcal{C}) \cong \mathcal{C}$, any graded additive $\Bbbk$-linear super-2-functor $U_qc \rightarrow \mathcal{C}$ extends uniquely to a 2-representation of $U_qdotc$. All abelian categories are idempotent complete.

\subsection{Grothendieck groups of super-2-categories}

\subsubsection{Grothendieck groups}

The notion of Grothendieck group we will use is the split Grothendieck group of a ${\mathbbm Z}$-graded super-2-category. Let $\mathcal{C}$ be a graded additive super-2-category with translations, in the obvious sense generalizing \cite[Definition 5.1]{Lau1}; $U_qc$ is an example of such a category. Elements of $K_0(\mathcal{C})$ are spanned by the classes $[X]$ of 1-morphisms $X$ of $\mathcal{C}$. Identity 1-morphisms $1bb_{\lambda}\colon\lambda\rightarrow \lambda$ naturally become orthogonal idempotents $1_{\lambda}=[1bb_{\lambda}]$ of $K_0(\mathcal{C})$. So if $X\colon \mu \rightarrow \lambda$ and $Y\colon\mu'\rightarrow \lambda'$ are 1-morphisms in $\mathcal{C}$, then
\begin{equation*}
[X][Y]=[1bb_{\lambda}X1bb_{\mu}][1bb_{\lambda'}Y1bb_{\mu'}]=1_{\lambda}[X]1_{\mu}1_{\lambda'}[Y]1_{\mu'} =\delta_{\mu \lambda'}1_{\lambda}[X]1_{\mu}[Y]1_{\mu'}.
\end{equation*}
If $\mu=\lambda'$, that is if $X$ and $Y$ are composable, then this equals $1_{\lambda}[XY]1_{\mu'}$. In this way, $K_0(\mathcal{C})$ naturally becomes an idempotented algebra. The relations in $K_0(\mathcal{C})$, as in \cite{Lau1}, are determined by 2-isomorphisms of 1-morphisms in $\mathcal{C}$:
\begin{equation*}
[\beta]=[\alpha]+[\gamma]\text{ if }\beta\cong\alpha\oplus\gamma
\end{equation*}
in any of the 1-hom-categories of $\mathcal{C}$.
The abelian group $K_0(\mathcal{C})$ is made into a module over $\mathcal{A}_\pi={\mathbbm Z}[q,q^{-1},\pi]/(\pi^2-1)$ by having the ${\mathbbm Z}$-grading act on the classes of 1-morphisms by multiplication by $q$,
\begin{equation*}
[X\langle-1\rangle]=q[X],
\end{equation*}
and having parity shifts act by multiplication by $\pi$,
\begin{equation*}
[\Pi X]=\pi[X].
\end{equation*}
The space of homs between any two objects in ${\rm Hom}_{U_qdotc}(\lambda,\mu)$ is a finite-dimensional $\Bbbk$-vector space. In particular, the Krull-Schmidt decomposition theorem holds, and an indecomposable object of ${\rm Hom}_{U_qdotc}(\lambda,\mu)$ has the form $(\mathcal{E}_{\epsilon}1bbl\langle t\rangle,e)$ for some minimal/primitive idempotent $e$. Any presentation $1=e_1+\dots+e_k$ of the identity as a sum of minimal mutually-orthogonal idempotents gives rise to a decomposition
\begin{equation}
\mathcal{E}_{\epsilon}1bbl\langle t\rangle \cong \bigoplus_{r=1}^{k}(\mathcal{E}_{\epsilon}1bbl\langle t\rangle,e_r)
\end{equation}
into a direct sum of indecomposable objects of ${\rm Hom}_{U_qdotc}(\lambda,\mu)$. Any object of ${\rm Hom}_{U_qdotc}(\lambda,\mu)$ has a unique presentation, up to permutation of factors and isomorphisms, as a direct sum of indecomposables. Choose one representative ${b}$ for each isomorphism class of indecomposables, up to grading shifts, and denote by $\dot{\mathcal{B}}(\lambda,\mu)$ the set of these representatives. Then $\{[{b}]\}_b$ is a basis of $K_0\big({\rm Hom}_{U_qdotc}(\lambda,\mu)\big)$, viewed as a free $\mathcal{A}_\pi$-module.
Composition functors
\begin{equation}
{\rm Hom}_{U_qdotc}(\lambda,\lambda') \times {\rm Hom}_{U_qdotc}(\lambda',\lambda'') \longrightarrow {\rm Hom}_{U_qdotc}(\lambda,\lambda'')
\end{equation}
induce $\mathcal{A}_\pi$-bilinear maps
\begin{equation}
K_0\big({\rm Hom}_{U_qdotc}(\lambda,\lambda')\big) \otimes K_0\big({\rm Hom}_{U_qdotc}(\lambda',\lambda'')\big) \longrightarrow K_0\big({\rm Hom}_{U_qdotc}(\lambda,\lambda'')\big)
\end{equation}
turning $K_0(U_qdotc)$ into an $\mathcal{A}_\pi$-linear additive category with objects $\lambda\in {\mathbbm Z}$. Multiplication in this basis has structure coefficients in ${\mathbbm N}[q,q^{-1},\pi]/(\pi^2-1)$.

\subsubsection{$K_0(U_qc)$ admits a map to $U_qdotpi$}\label{subsubsec-k0-map}

\begin{prop} \label{prop_coveringrelsU}
There are explicit 2-isomorphisms
\begin{align}
\mathcal{E} \mathcal{F} 1bbl &\cong \mathcal{F}\Pi \mathcal{E} 1bbl \bigoplus_{k=0}^{\lambda-1} \Pi^k1bbl \langle \lambda-1-2k \rangle &\text{for $\lambda \geq 0$ } \\
\mathcal{E}\Pi\mathcal{F} 1bbl &\cong \mathcal{F} \mathcal{E} 1bbl\bigoplus_{k=0}^{-\lambda-1}\Pi^{\lambda+1+k} 1bbl \langle -\lambda-1-2k \rangle &\text{for $\lambda \leq 0$}
\end{align}
in the 2-category $U_qc$.
$\blacktriangleleft$nd{prop} $\blacktriangleright$egin{proof} The relations in the 2-category $U_qc$ imply the for $\lambda\mathfrak{g}eq 0$ the 2-morphism $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[semithick, <-] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) to (0,0); $\blacktriangleleft$nd{tikzpicture}}\;\; $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->-=0.15, ->] (0.5,.2) .. controls (0.6,-0.8) and (-0.6,-0.8) .. (-0.5,.2) node[pos=0.85, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.2) and ++(0,.4) .. (-1,-1) node[pos=0.75,left]{$\scriptstyle k$}; \draw[line width=0mm] (0.5,.2) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,.2) node[pos=0.85]{\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture} }:\mathcal{F}{\mathbbm P}i \mathcal{E} 1bbl $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} {\mathbbm P}i^k1bbl \lambdaangle \lambda-1-2k \rangle \rightarrow \mathcal{E} \mathcal{F} 1bbl $\blacktriangleleft$nd{equation} is an isomorphism with inverse $\blacktriangleright$egin{equation} - \; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (Y) -- (0,2); $\blacktriangleleft$nd{tikzpicture} }\;\; \;\; $\blacktriangleright$igoplus \;\; \sum_{j=0}^{\lambda-1-k} (-1)^j \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. 
(0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \draw[thick, ->] (1.9,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.1,1) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (1.9,1) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (1.1,1) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.5,.4) and ++(.2,.8) .. (R) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \draw[color=blue, thick, double distance=1pt, dashed] (M) -- (0,1.6); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \node at (DOT){$\blacktriangleright$bullet}; \node[blue] at (.6,1.4){$\scriptstyle j$}; \node[blue] at (-.3,1.5){$\scriptstyle k$}; \node[blue] at (-1.25,.20){$\scriptstyle \lambda-1 -k-j$}; \node at (-1,1.2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } {\rm co}lon \mathcal{E} \mathcal{F} 1bbl\rightarrow \mathcal{F}{\mathbbm P}i \mathcal{E} 1bbl $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} {\mathbbm P}i^k1bbl \lambdaangle \lambda-1-2k \rangle. $\blacktriangleleft$nd{equation} Likewise, if $\lambda \lambdae 0$ then the map $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,.4) and ++(.1,.4) .. (-.6,1.6) .. 
controls ++(0,-.3) and ++(0,.3) ..(0,1); $\blacktriangleleft$nd{tikzpicture} }\;\; $\blacktriangleright$igoplus_{k=0}^{-\lambda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->-=0.15, ->] (-0.7,.5) .. controls ++(-.1,-1) and ++(.1,-1) .. (0.7,.5) node[pos=0.85, shape=coordinate](Y){} node[pos=0.55, shape=coordinate](M){} node[pos=0.44, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.3) and ++(0,.5) .. (M) node[pos=0.15,above]{$\scriptstyle k$}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(0,.55) and ++(0,.55) .. (-.6,-.25) .. controls ++(0,-.3) and ++(0,.4) ..(0,-1); \node at (Y){\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture} }:\mathcal{E}{\mathbbm P}i\mathcal{F} 1bbl $\blacktriangleright$igoplus_{k=0}^{-\lambda-1}{\mathbbm P}i^{\lambda+1+k} 1bbl \lambdaangle -\lambda-1-2k \rangle \rightarrow \mathcal{F} \mathcal{E} 1bbl $\blacktriangleleft$nd{equation} is an isomorphism with inverse $\blacktriangleright$egin{equation} - \; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[color=blue, thick, dashed] (0,1) .. controls ++(0,-.3) and ++(0,.3) .. (-.6,.4) .. controls ++(.1,-.4) and ++(.1,-.4) .. (X); $\blacktriangleleft$nd{tikzpicture} }\;\; \;\; $\blacktriangleright$igoplus \;\; \sum_{j=0}^{-\lambda-1-k}(-1)^{j} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick,->-=0.8] (0.5,.25) -- (0.5,.5); \draw[thick,->-=0.55] (-0.5,.5) -- (-0.5,.25); \draw[thick] (0.5,.5) .. controls ++(.1,.8) and ++(-.1,.8) .. (-0.5,.5) node[pos=0.1, shape=coordinate](DOT){}; \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.5,.4) and ++(0,-1) .. 
(-.75,1.75); \node at (DOT){$\blacktriangleright$bullet}; \node[blue] at (0,1.60){$\scriptstyle \lambda-1 -k-j$}; \node at (-1,.7) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[bend left] (-1,1); \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node[blue] at (-.75,.9) {$\scriptstyle j$}; $\blacktriangleleft$nd{tikzpicture} } {\rm co}lon\mathcal{F} \mathcal{E} 1bbl \rightarrow\mathcal{E}{\mathbbm P}i\mathcal{F} 1bbl $\blacktriangleright$igoplus_{k=0}^{-\lambda-1}{\mathbbm P}i^{\lambda+1+k} 1bbl \lambdaangle -\lambda-1-2k \rangle. $\blacktriangleleft$nd{equation} $\blacktriangleleft$nd{proof} $\blacktriangleright$egin{prop}\lambdaanglebel{prop-gamma} The assignment $E_{$\blacktriangleleft$psilon}1_{\lambda} {\rm co}lonto [\mathcal{E}_{$\blacktriangleleft$psilon}1bbl]$ for each covering sequence $$\blacktriangleleft$psilon \in {\rm CSeq}$ defines an $\mathcal{A}_\pi$-algebra homomorphism $\blacktriangleright$egin{equation} \mathfrak{g}amma {\rm co}lon _{{\mathbbm A}c}U_qdotpi \rightarrow K_0(U_qdotc). 
\end{equation}
\end{prop}
\begin{proof}
$K_0(U_qdotc)$ is a free $\mathcal{A}_\pi$-module, so it is enough to check that the assignment above extends to a homomorphism of ${\mathbbm Q}(q)^\pi$-algebras
\begin{equation}\label{gamma-field}
\gamma_{{\mathbbm Q}(q)^\pi} \colon U_qdotpi {\longrightarrow} K_0(U_qdotc)\otimes_{\mathcal{A}_\pi}{\mathbbm Q}(q)^\pi.
\end{equation}
Proposition \ref{prop_coveringrelsU} shows that defining relations of $U_qdotpi$ lift to 2-isomorphisms of 1-morphisms in $U_qdotc$ and, therefore, descend to relations in the Grothendieck group $K_0(U_qdotc)$. Restricting $\gamma_{{\mathbbm Q}(q)^\pi}$ to $_{{\mathbbm A}c}U_qdotpi$ gives a homomorphism of $\mathcal{A}_\pi$-algebras with the image of the homomorphism lying in $K_0(U_qdotc)$.
\end{proof}

\begin{prop}
The following relations hold in $U_qdotc$.
\begin{align}
\label{eq_FaEb} \mathcal{F}^{(a)}\mathcal{E}^{(b)}1bbl&= \bigoplus_{j=0}^{\min(a,b)} \bigoplus_{\qbins{a-b-\lambda}{j}} \mathcal{E}^{(b-j)}\mathcal{F}^{(a-j)}1bbl, & \text{if $\lambda < -2a+2$}\\
\label{eq_EaFb} \mathcal{E}^{(a)}\mathcal{F}^{(b)}1bbl&= \bigoplus_{j=0}^{\min(a,b)}\bigoplus_{\qbins{a-b+\lambda}{j}}\mathcal{F}^{(b-j)}\mathcal{E}^{(a-j)}1bbl & \text{if $\lambda > 2b-2$}.
\end{align}
\end{prop}
\begin{proof}
By the Krull-Schmidt theorem, the 1-morphisms $\mathcal{F}^b\mathcal{E}^a1bbl$ and $\mathcal{E}^a\mathcal{F}^b1bbl$ have unique decompositions into indecomposables. These equations follow from the covering $\mathfrak{sl}_2$ isomorphisms applied to $\mathcal{F}^b\mathcal{E}^a1bbl$ and $\mathcal{E}^a\mathcal{F}^b1bbl$.
\end{proof}

\section{Formal structures in strong supercategorical actions}\label{sec-formal}

The previous section introduced a super-2-category $U_qdotc$ as well as the notion of a strong supercategorical action of $\mathfrak{sl}_2$. An action of $U_qdotc$ determines a strong supercategorical action. The goal of this section is to show that the converse is also true. The key idea is to use the brick condition for endomorphisms of $1bl$ in the definition of a strong supercategorical action together with the covering $\mathfrak{sl}_2$-isomorphisms to control the sizes of homs between other 1-morphisms. This control of hom spaces forces the covering $\mathfrak{sl}_2$-isomorphisms \eqref{eq:EF-rel} and \eqref{eq:FE-rel} to take a rigid form compatible with the isomorphisms of Proposition~\ref{prop_coveringrelsU} in the definition of $U_qc$.

Throughout Sections \ref{sec-formal}--\ref{sec:proofsl2}, suppose $\mathtt{E},\mathtt{F}$ are functors defining a strong supercategorical action on a 2-category $\mathcal{C}$. All string diagrams in these sections depict 2-morphisms in the 2-category $\mathcal{C}$. These are \emph{not} relations in $U_qdotc$; our goal is to prove that the defining relations of $U_qdotc$ hold.

\subsection{Consequences of $\mathfrak{sl}_2$-relations}
\label{subsec:consequences}

The structure of a strong action on $\mathcal{C}$ imposes strong conditions on the hom-spaces between various maps. Units and counits for various adjoint structures are formally determined from this data.
For $\lambda \mathfrak{g}eq 0$ equation $\blacktriangleleft$qref{eq:FE-rel} together with the adjoint structure formally fixes a choice of 2-morphism $U_qcapr {\rm co}lon \mathtt{E}\mathtt{F}1bltwo\ads{\lambda+1} \rightarrow {\mathbbm P}i^{\lambda+1}1bltwo$ via the adjoint pairing: $\blacktriangleright$egin{align*} &{\rm Hom}(\mathtt{E} \mathtt{F} 1bltwo \lambdaangle \lambdaanglembda+1 \rangle, {\mathbbm P}i^{\lambda+1} 1bltwo) \\ &\qquad {\rm co}ng {\rm Hom}(\mathtt{F} {\mathbbm P}i\mathtt{E} 1bltwo $\blacktriangleright$igoplus_{[\lambdaanglembda+2]}1bltwo\lambdaangle \lambdaanglembda+1 \rangle, {\mathbbm P}i^{\lambda+1}1bltwo) \\ &\qquad {\rm co}ng {\rm Hom}((\mathtt{E} 1bltwo)^L {\mathbbm P}i\mathtt{E} 1bltwo \ads{2 \lambdaanglembda+4},{\mathbbm P}i^{\lambda+1}1bltwo) $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda+1} {\rm Hom}({\mathbbm P}i^{k} 1bltwo \lambdaangle 2(\lambda+1)-2k \rangle, {\mathbbm P}i^{\lambda+1}1bltwo )\\ & \qquad {\rm co}ng {\rm Hom}({\mathbbm P}i\mathtt{E} 1bltwo, {\mathbbm P}i^{\lambda+1}\mathtt{E} 1bltwo \lambdaangle -2\lambdaanglembda-4 \rangle) {\rm op}lus {\rm Hom}({\mathbbm P}i^{\lambda+1}1bltwo, {\mathbbm P}i^{\lambda+1}1bltwo), $\blacktriangleleft$nd{align*} where all summands are zero in the second to last equation except when $k=\lambda+1$. We take $U_qcapr$ to be the identity map in the second summand of the last equation. 
This corresponds to the projection out of the top degree summand of $1bltwo$ in $\mathtt{E}\mathtt{F}1bltwo$ \[ \xy (35,10)*+{{\mathbbm P}i^{\lambda+1}1bltwo }="l1"; (-5,20)*+{\mathtt{F}{\mathbbm P}i\mathtt{E}1bltwo \lambdaangle \lambdaanglembda+1 \rangle }="t1"; (-5,10)*+{{\mathbbm P}i^{\lambda+1} 1bltwo\lambdaangle 0 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i^{\lambda}1bltwo\lambdaangle 2\rangle }="t3"; (-5,-20)*+{1bltwo\lambdaangle 2(\lambda +1) \rangle }="t4"; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-9)*{\vdots}; {\ar "t2";"l1"}; $\blacktriangleleft$ndxy \] Likewise, for $\lambda \mathfrak{g}eq 0$ define the map $U_qcupl: 1bltwo \lambdaangle \lambdaanglembda+1 \rangle \rightarrow \mathtt{E} \mathtt{F} 1bltwo$ as the inclusion into the lowest degree summand $1bltwo$ in $\mathtt{E} \mathtt{F} 1bltwo$. \[ \xy (-45,-20)*+{ 1bltwo \lambdaangle \lambdaanglembda+1 \rangle}="l1"; (-5,20)*+{\mathtt{F}{\mathbbm P}i\mathtt{E}1bltwo}="t1"; (-5,10)*+{{\mathbbm P}i^{\lambda+1} 1bltwo\lambdaangle -\lambda -1 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i^{\lambda}1bltwo\lambdaangle -\lambda +1\rangle }="t3"; (-5,-20)*+{1bltwo\lambdaangle \lambda +1 \rangle }="t4"; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-9)*{\vdots}; {\ar "l1"; "t4"}; $\blacktriangleleft$ndxy \] The map $U_qcapl {\rm co}lon \mathtt{F} \mathtt{E} 1bl \rightarrow 1bl\ads{\lambda+1}$ is defined by adjunction $\blacktriangleright$egin{align*} &{\rm Hom}( \mathtt{F}\mathtt{E} 1bl \lambdaangle -\lambdaanglembda-1 \rangle, 1bl) \\&\qquad {\rm co}ng {\rm Hom}( \mathtt{F} (\mathtt{F}1bltwo)^R 1bl, 1bl) {\rm co}ng {\rm Hom}( \mathtt{F} 1bltwo, 1bl \mathtt{F}1bltwo) \\ &\qquad {\rm co}ng {\rm Hom}( (\mathtt{E}1bl)^L1bltwo \lambdaangle \lambdaanglembda+1 \rangle, \mathtt{F}1bltwo) {\rm co}ng {\rm Hom}( 1bltwo \lambdaangle \lambdaanglembda+1 \rangle, \mathtt{E} \mathtt{F}1bltwo) $\blacktriangleleft$nd{align*} as the mate 
under adjunction to the map $1bltwo\lambdaangle \lambda+1\rangle \rightarrow \mathtt{E}\mathtt{F}1bltwo$ defined above. (For more on mates under adjunction see~\cite{ks1}.) The map $U_qcupr: {\mathbbm P}i^{\lambda+1}1bl \lambdaangle -\lambdaanglembda-1 \rangle \rightarrow \mathtt{F}\mathtt{E} 1bl$ is more difficult to define. Its definition requires an inductive procedure that utilizes the integrability assumption in the definition of a strong action. This map will be defined in the process of proving proposition~\ref{prop:adjoints}. Recall that $[-\lambda]=-\pi^{\lambda}[\lambda]$. For negative weight spaces $(\lambda \lambdaeq 0)$ we have $\blacktriangleright$egin{equation} \mathtt{F}\mathtt{E}1bl= \mathtt{E}{\mathbbm P}i \mathtt{F}1bl {\rm op}lus $\blacktriangleright$igoplus_{k=0}^{-\lambdaanglembda-1}{\mathbbm P}i^{\lambdaanglembda+1+k} 1bl \lambdaangle -\lambdaanglembda -1 -2k\rangle . $\blacktriangleleft$nd{equation} Thus a map $U_qcupr: {\mathbbm P}i^{\lambda+1}1bl \lambdaangle -\lambdaanglembda-1 \rangle \rightarrow \mathtt{F}\mathtt{E} 1bl$ is formally determined as the inclusion \[ \xy (-45,-20)*+{{\mathbbm P}i^{\lambda+1}1bl \lambdaangle -\lambdaanglembda-1 \rangle}="l1"; (-5,20)*+{\mathtt{E}{\mathbbm P}i\mathtt{F}1bl}="t1"; (-5,10)*+{{\mathbbm P}i^{(\lambda+1)+(-\lambda-1)}1bl\lambdaangle \lambda +1 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i^{(\lambda+1)+(-\lambda-2)}1bl\lambdaangle \lambda +3\rangle }="t3"; (-5,-20)*+{{\mathbbm P}i^{\lambda+1}1bl\lambdaangle -\lambda -1 \rangle .}="t4"; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-9)*{\vdots}; {\ar "l1"; "t4"}; $\blacktriangleleft$ndxy \] Likewise, define the map $U_qcapl: \mathtt{F}\mathtt{E} 1bl\rightarrow 1bl \lambdaangle \lambdaanglembda+1 \rangle$ as the projection: \[ \xy (45,10)*+{ 1bl \lambdaangle \lambdaanglembda+1 \rangle .}="l1"; (-5,20)*+{\mathtt{E}{\mathbbm P}i\mathtt{F}1bl}="t1"; (-5,10)*+{{\mathbbm 
P}i^{(\lambda+1)+(-\lambda-1)}1bl\lambdaangle \lambda +1 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i^{(\lambda+1)+(-\lambda-2)}1bl\lambdaangle \lambda +3\rangle }="t3"; (-5,-20)*+{{\mathbbm P}i^{\lambda+1}1bl\lambdaangle -\lambda -1 \rangle . }="t4"; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-9)*{\vdots}; {\ar "t2"; "l1"}; $\blacktriangleleft$ndxy \] The map $U_qcupl{\rm co}lon 1bltwo \ads{\lambda+1} \rightarrow \mathtt{E}\mathtt{F}1bltwo$ is then determined by adjunction. For negative weight space the final adjunction map $U_qcapr {\rm co}lon \mathtt{E}\mathtt{F}1bltwo\ads{\lambda+1} \rightarrow {\mathbbm P}i^{\lambda+1}1bltwo$ will be defined using the integrability assumption below. \subsection{Adjoint induction hypothesis and its consequences} We now make use of the integrability assumption in the definition of a strong supercategorical action. For positive weight spaces $(\lambdaanglembda \mathfrak{g}eq 0)$ we proceed by decreasing induction on $\lambdaanglembda$ starting from the highest weight. For negative weight spaces $(\lambda \lambdaeq 0)$ we perform increasing induction on $\lambda$ starting from the lowest weight. To simplify the exposition we focus on the case $\lambdaanglembda \mathfrak{g}eq 0$. Throughout this section we make the following assumption: \noindent\fbox { \parbox{\lambdainewidth}{ {$\blacktriangleright$f Adjoint induction hypothesis:} \newline Fix $\lambda \mathfrak{g}eq 0$. By induction assume that if $\mu > \lambdaanglembda$ we have $\blacktriangleright$egin{equation} \lambdaanglebel{eq:ind_hyp} (\mathtt{E} 1b_{\mu})^R {\rm co}ng 1b_{\mu} \mathtt{F} {\mathbbm P}i^{\mu+1}\lambdaangle\mu+1 \rangle \text{ and } \lambdaeft(1b_{\mu}\mathtt{F}\right)^L = {\mathbbm P}i^{\mu+1}1b_{\mu}\mathtt{E} \lambdaangle \mu+1\rangle. 
\end{equation}
}
}

\noindent
For $\mu \gg 0$ or $\mu \ll 0$ the weight $\mu$ is zero by the integrability assumption so that \eqref{eq:ind_hyp} vacuously holds since both 1-morphisms are zero. In this section we derive a number of consequences of the adjoint induction hypothesis \eqref{eq:ind_hyp} culminating in the proof that \eqref{eq:ind_hyp} holds for $\mu=\lambda$. The reader should recall the `Important Convention' from Definition~\ref{def_strong}.

\begin{lem}\label{lem:E}
Assuming the adjoint induction hypothesis of \eqref{eq:ind_hyp}, if $\mu \ge \lambda$ then ${\rm Hom}(\mathtt{E} 1b_{\mu}, \Pi^k\mathtt{E} 1b_{\mu} \langle \ell \rangle)$ is zero for all $k$ if $\ell < 0$ and one-dimensional if $\ell=k=0$. Likewise for ${\rm Hom}(1b_{\mu} \mathtt{F}\Pi^k,1b_{\mu} \mathtt{F} \langle \ell \rangle)$.
\end{lem}
\begin{proof}
We prove the result for $\mathtt{E}$ by (decreasing) induction on $\mu$ (the result for $\mathtt{F}$ follows by adjunction).
We have $\blacktriangleright$egin{align*} & {\rm Hom}(\mathtt{E} 1b_{\mu}, {\mathbbm P}i^k\mathtt{E} 1b_{\mu} \lambdaangle $\blacktriangleleft$ll \rangle) \\ &\qquad {\rm co}ng {\rm Hom}( 1b_{\mu+2}, {\mathbbm P}i^k \mathtt{E}1b_{\mu}(\mathtt{E} 1b_{\mu})^L \lambdaangle $\blacktriangleleft$ll \rangle) \\ &\qquad {\rm co}ng {\rm Hom}( 1b_{\mu+2} , {\mathbbm P}i^{k}\mathtt{E} \mathtt{F}1b_{\mu+2} \ads{$\blacktriangleleft$ll-\mu-1}) \\ &\qquad {\rm co}ng {\rm Hom}( 1b_{\mu+2}, {\mathbbm P}i^{k}\mathtt{F} {\mathbbm P}i \mathtt{E} 1b_{\mu+2} \ads{$\blacktriangleleft$ll-\mu-1} ) {\rm op}lus {\rm Hom}(1b_{\mu+2}, $\blacktriangleright$igoplus_{[\mu+2]}{\mathbbm P}i^{k} 1b_{\mu+2} \lambdaangle $\blacktriangleleft$ll -\mu-1\rangle) \\ &\qquad {\rm co}ng {\rm Hom}( 1b_{\mu+2},\mathtt{F} {\mathbbm P}i^{k+1} \mathtt{E} 1b_{\mu+2} \ads{$\blacktriangleleft$ll-\mu-1} ) {\rm op}lus {\rm Hom}(1b_{\mu+2}, $\blacktriangleright$igoplus_{[\mu+2]}{\mathbbm P}i^{k} 1b_{\mu+2} \lambdaangle $\blacktriangleleft$ll -\mu-1\rangle) \\ &\qquad {\rm co}ng {\rm Hom}((1b_{\mu+2}\mathtt{F})^L1b_{\mu+2},{\mathbbm P}i^{k+1}\mathtt{E}1b_{\mu+2} \ads{$\blacktriangleleft$ll-\mu-1}) {\rm op}lus {\rm Hom}(1b_{\mu+2}, $\blacktriangleright$igoplus_{[\mu+2]}{\mathbbm P}i^{k} 1b_{\mu+2} \lambdaangle $\blacktriangleleft$ll -\mu-1\rangle)) \\ &\qquad {\rm co}ng {\rm Hom}({\mathbbm P}i^{\mu+3}\mathtt{E}1b_{\mu+2}\ads{\mu+3}, {\mathbbm P}i^{k+1}\mathtt{E} 1b_{\mu+2} \ads{$\blacktriangleleft$ll-\mu-1}) {\rm op}lus $\blacktriangleright$igoplus_{j=0}^{\mu+1}{\rm Hom}(1b_{\mu+2}, {\mathbbm P}i^{k+j} 1b_{\mu+2} \lambdaangle $\blacktriangleleft$ll -2j\rangle) \\ &\qquad {\rm co}ng {\rm Hom}(\mathtt{E}1b_{\mu+2}, {\mathbbm P}i^{\mu+k+4}\mathtt{E} 1b_{\mu+2} \ads{$\blacktriangleleft$ll-2\mu-4}) {\rm op}lus $\blacktriangleright$igoplus_{j=0}^{\mu+1}{\rm Hom}(1b_{\mu+2}, {\mathbbm P}i^{k+j} 1b_{\mu+2} \lambdaangle $\blacktriangleleft$ll -2j\rangle), $\blacktriangleleft$nd{align*} where we used the isomorphisms 
\eqref{eq-dashed-solid-cross} in the fourth line and the adjoint induction hypothesis \eqref{eq:ind_hyp} on the fifth line. By induction the first term above is zero and, by condition (\ref{co:hom}) of Definition \ref{def_strong}, all the terms in the direct sum are zero unless $k=0$ and $\ell=0$. In that case we get ${\rm Hom}(1b_{\mu+2}, 1b_{\mu+2}) \cong \Bbbk$ and we are done.
\end{proof}

\begin{lem}\label{lem:EE}
Assuming the adjoint induction hypothesis \eqref{eq:ind_hyp}, if $\mu \ge \lambda-2$ then ${\rm Hom}(\mathtt{E} \mathtt{E} 1b_{\mu}, \Pi^k\mathtt{E} \mathtt{E} 1b_{\mu} \langle \ell \rangle)$ is zero for all $k$ and $\ell \leq -2$ unless $\ell=-2$ and $k=1$ in which case it is one-dimensional.
\end{lem}
\begin{proof}
The proof is by (decreasing) induction on $\mu$.
We have $\blacktriangleright$egin{align*} & {\rm Hom}(\mathtt{E} \mathtt{E} 1b_{\mu}, {\mathbbm P}i^k\mathtt{E} \mathtt{E} 1b_{\mu}) \\ &\quad {\rm co}ng {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^k\mathtt{E} \mathtt{E} 1b_{\mu}(\mathtt{E} 1b_{\mu})^L) \\ &\quad {\rm co}ng {\rm Hom}(\mathtt{E}1b_{\mu+2}, {\mathbbm P}i^k\mathtt{E} \mathtt{E} \mathtt{F}1b_{\mu+2}\lambdaangle -\mu-1 \rangle) \\ &\quad {\rm co}ng {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^k\mathtt{E} \mathtt{F} {\mathbbm P}i \mathtt{E} 1b_{\mu+2} \lambdaangle -\mu-1 \rangle) $\blacktriangleright$igoplus_{j=0}^{\mu+1} {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^{k}\mathtt{E}{\mathbbm P}i^j 1b_{\mu+2} \lambdaangle \mu+1-2j-\mu-1 \rangle) \\ &\quad {\rm co}ng {\rm Hom}( \mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^k\mathtt{F} {\mathbbm P}i\mathtt{E}{\mathbbm P}i\mathtt{E} 1b_{\mu+2} \lambdaangle -\mu-1 \rangle) $\blacktriangleright$igoplus_{j=0}^{\mu+3} {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^{k+j+1}\mathtt{E} 1b_{\mu+2} \lambdaangle \mu+3-2j-\mu-1 \rangle) \\ & \qquad \qquad $\blacktriangleright$igoplus_{j=0}^{\mu+1} {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^{k+j} \mathtt{E} 1b_{\mu+2} \lambdaangle -2j \rangle) \\ &\quad {\rm co}ng {\rm Hom}(\mathtt{E} \mathtt{E} 1b_{\mu+2},{\mathbbm P}i^k \mathtt{E} \mathtt{E} 1b_{\mu+2} \lambdaangle -2\mu-6 \rangle) $\blacktriangleright$igoplus_{j=0}^{\mu+3} {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^{k+j+1}\mathtt{E} 1b_{\mu+2} \lambdaangle -2j+2 \rangle) \\ & \qquad \qquad $\blacktriangleright$igoplus_{j=0}^{\mu+1} {\rm Hom}(\mathtt{E} 1b_{\mu+2}, {\mathbbm P}i^{k+j}\mathtt{E} 1b_{\mu+2} \lambdaangle -2j \rangle). $\blacktriangleleft$nd{align*} Shifting by $\lambdaangle $\blacktriangleleft$ll \rangle$ where $$\blacktriangleleft$ll < -2$ we find that the first term is zero by induction and the others are zero by Lemma \ref{lem:E}. 
If $\ell=-2$ the same vanishing holds with the exception of the term in the middle summation when $j=0$ and $k=1$ which yields ${\rm End}(\mathtt{E} \mathbbm{1}_{\mu}) \cong \Bbbk$.
\end{proof}

\begin{lem} \label{lem:FEtEF}
Assuming the adjoint induction hypothesis \eqref{eq:ind_hyp}, if $\mu \ge \lambda$ then ${\rm Hom}(\mathtt{F}\Pi\mathtt{E}\mathbbm{1}_{\lambda}, \Pi^{k}\mathtt{E}\mathtt{F} \mathbbm{1}_{\lambda} \ads{\ell})$ is zero for all $k$ if $\ell <0$ and one-dimensional if $k=0$ and $\ell=0$.
\end{lem}

\begin{proof}
Use the adjoint induction hypothesis to reduce to Lemma~\ref{lem:EE}.
\end{proof}

Lemma~\ref{lem:FEtEF} above implies that the map $\mathtt{F} \Pi\mathtt{E}\mathbbm{1}_{\lambda} \rightarrow \mathtt{E}\mathtt{F} \mathbbm{1}_{\lambda} \cong \mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda} \oplus_{[\lambda]} \mathbbm{1}_{\lambda}$ which includes $\mathtt{F} \Pi\mathtt{E}\mathbbm{1}_{\lambda}$ into the $\mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda}$ summand of $\mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda}$ is unique up to scalar multiple, so that it must induce an isomorphism onto the $\mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda}$ summand. This inclusion must also induce the zero map from $\mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda}$ to any summand $\mathbbm{1}_{\lambda} \langle \lambda-1-2k \rangle$ on the right.
\begin{lem} \label{lem:EEF}
Assuming the adjoint induction hypothesis \eqref{eq:ind_hyp}, if $\mu \ge \lambda$ then
\[
{\rm Hom}(\mathtt{E} \mathtt{E} \mathtt{F} \mathbbm{1}_{\mu}\langle \mu+1 \rangle, \mathtt{E}\Pi^{\mu}\mathbbm{1}_{\mu} ) \cong \Bbbk.
\]
\end{lem}

\begin{proof}
Moving the $\mathtt{F}$ past the $\mathtt{E}$'s using the covering ${\mathfrak{sl}}_2$ relation \eqref{eqn-covering-sl2-relation} we get
\begin{align*}
& {\rm Hom}(\mathtt{E} \mathtt{E} \mathtt{F}\mathbbm{1}_{\mu}\langle \mu+1 \rangle, \mathtt{E}\Pi^{\mu} \mathbbm{1}_{\mu} ) \\
&\qquad \cong {\rm Hom}( \mathtt{F}\Pi \mathtt{E} \Pi\mathtt{E} \mathbbm{1}_{\mu}\langle \mu+1 \rangle, \Pi^{\mu} \mathtt{E} \mathbbm{1}_{\mu} ) \bigoplus_{k=0}^{\mu+1} {\rm Hom}(\Pi^{k+1} \mathtt{E} \mathbbm{1}_{\mu}\langle 2(\mu+1)-2k\rangle, \mathtt{E} \Pi^{\mu} \mathbbm{1}_{\mu}) \\
& \qquad \qquad \bigoplus_{k=0}^{\mu-1} {\rm Hom}(\mathtt{E}\Pi^{k} \mathbbm{1}_{\mu} \langle 2\mu-2k\rangle, \mathtt{E}\Pi^{\mu} \mathbbm{1}_{\mu} ).
\end{align*}
By Lemma \ref{lem:E} all the terms in the middle and last summations are zero except in the middle summation when $k=\mu+1$, in which case we get ${\rm Hom}(\Pi^{\mu+2}\mathtt{E}\mathbbm{1}_{\mu},\mathtt{E}\Pi^{\mu}\mathbbm{1}_{\mu}) \cong {\rm End}(\Pi^{\mu}\mathtt{E} \mathbbm{1}_{\mu}) \cong \Bbbk$ spanned by the identity.
The first summand is isomorphic to
\begin{equation*}
\begin{split}
{\rm Hom}(\mathtt{F} \mathtt{E} \mathtt{E} \mathbbm{1}_{\mu}\langle \mu+1 \rangle, \Pi^{\mu+2} \mathtt{E} \mathbbm{1}_{\mu} )
&\cong {\rm Hom}(\mathtt{E}\mathtt{E} \mathbbm{1}_{\mu} \langle \mu+1 \rangle, \Pi^{\mu+2}(\mathbbm{1}_{\mu+2} \mathtt{F})^R \mathtt{E} \mathbbm{1}_{\mu} ) \\
&\cong {\rm Hom}(\mathtt{E} \mathtt{E} \mathbbm{1}_{\mu}\langle \mu +1\rangle, \Pi^{\mu+2}\mathtt{E} \mathtt{E} \mathbbm{1}_{\mu} \langle -\mu-3 \rangle) \\
&\cong {\rm Hom}(\mathtt{E} \mathtt{E} \mathbbm{1}_{\mu}, \Pi^{\mu+2}\mathtt{E} \mathtt{E} \mathbbm{1}_{\mu} \langle -2\mu -4 \rangle)
\end{split}
\end{equation*}
which vanishes by Lemma~\ref{lem:EE} since $\mu \geq \lambda \ge 0$.
\end{proof}

\subsubsection{Induced maps}

The data of a strong supercategorical action formally determines properties of various 2-morphisms. Recall \cite{EKL} that in the Karoubi envelope of the odd nilHecke algebra we define $\mathtt{E}^{(2)}\mathbbm{1}_{\lambda}:= (\mathtt{E}^2\mathbbm{1}_{\lambda} \langle 1 \rangle, e_2)$ where
\[
e_2:=
\xy
(0,0)*{
\begin{tikzpicture}[scale=0.6]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](dot){};
\draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. (-0.5,1.5);
\draw[color=blue, thick, dashed] (X) .. controls++(-.5,0) and ++(-.65,.3) .. (dot);
\draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.2]{\tikz \draw[fill=black] circle (0.45ex);};
\end{tikzpicture}
};
\endxy
\]
is idempotent.
From the definition of the thick calculus in \cite{EKL} we have the following isomorphism in the Karoubi envelope: \[ \xy (-25,0)*+{\mathtt{E}^{(2)}1bl \lambdaangle 1 \rangle}="L"; (25,0)*+{{\mathbbm P}i\mathtt{E}^{(2)}1b \lambdaangle -1 \rangle}="R"; (0,20)*+{(\mathtt{E}^21b, \mathrm{Id})}="T"; (0,-20)*+{(\mathtt{E}^21b, \mathrm{Id})}="B"; (0,0)*{$\blacktriangleright$igoplus}; {\ar^{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](dot){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. (-0.5,1.5); \draw[color=blue, thick, dashed] (X) .. controls++(-.65,.2) and ++(-.65,.3) .. (dot); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.2]{\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture}} "L";"T"}; {\ar_{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){} node[pos=0.75, shape=coordinate](dot){} node[pos=0.2, shape=coordinate](bldot){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. (-0.5,1.5); \draw[color=blue, thick, dashed] (X) .. controls++(-.5,00.1) and ++(-1.95,.35) .. (dot); \draw[color=blue, thick, dashed] (bldot) to[out=100, in=90] (-1,0); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.75]{\tikz \draw[fill=black] circle (0.4ex);} node[pos=0.2]{\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture}} "R";"T"}; {\ar_{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. 
(-0.5,1.5); \draw[color=blue, thick, dashed] (X) to [out=190,in=-90](-1,1.5) ; $\blacktriangleleft$nd{tikzpicture} } "B";"R"}; {\ar^{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](dot){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.75) and (-0.5,0.75) .. (-0.5,1.5); \draw[color=blue, thick, dashed] (X) .. controls++(-.65,.2) and ++(-.65,.3) .. (dot); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.75) and (0.5,0.75) .. (0.5,1.5) node[pos=0.2]{\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture}} "B";"L"}; $\blacktriangleleft$ndxy \] $\blacktriangleright$egin{lem} \lambdaanglebel{lem_niliso} The map $\sUupdot \sUup: \mathtt{E} \mathtt{E} 1bl \rightarrow {\mathbbm P}i\mathtt{E} \mathtt{E} 1bl \lambdaangle 2 \rangle$ induces an isomorphism $\phi$ \[ \xy (-5,20)*+{{\mathbbm P}i\mathtt{E}^{(2)} 1bl \lambdaangle -1 \rangle}="t1"; (-5,10)*+{\mathtt{E}^{(2)} 1bl \lambdaangle 1 \rangle}="t2"; (-5,15)*{{\rm op}lus}; (35,10)*+{{\mathbbm P}i^2\mathtt{E}^{(2)} 1bl \lambdaangle 1 \rangle}="r1"; (35,00)*+{{\mathbbm P}i\mathtt{E}^{(2)} 1bl \lambdaangle 3 \rangle }="r2"; (35,5)*{{\rm op}lus}; {\ar^{\phi} "t2"; "r1"}; $\blacktriangleleft$ndxy \] on the $\mathtt{E}^{(2)}1bl$ summand. $\blacktriangleleft$nd{lem} $\blacktriangleright$egin{proof} The claim follows immediately from the axioms of the odd nilHecke algebra. $\blacktriangleleft$nd{proof} The following lemma is the key technical result that allows us to define the other adjunction map. $\blacktriangleright$egin{lem}\lambdaanglebel{lemXind} If $\lambda > 1$, then the map $\sUupdot \sUdown: \mathtt{E} \mathtt{F} 1bl \rightarrow {\mathbbm P}i\mathtt{E} \mathtt{F} 1bl \lambdaangle 2 \rangle$ induces isomorphisms $\phi_k$ shown below for all $0 \lambdaeq k \lambdaeq \lambdaanglembda-2$. 
Similarly, if $\lambdaanglembda < -1$, then $\sUdown \sUupdot: \mathtt{F} \mathtt{E} 1bl \rightarrow\mathtt{F} {\mathbbm P}i\mathtt{E} 1bl \lambdaangle 2 \rangle$ induces isomorphisms $\psi_k$ shown below for all $0 \lambdaeq k \lambdaeq -\lambdaanglembda-2$. \[ \xy (-5,20)*+{\mathtt{F}{\mathbbm P}i\mathtt{E}1bl}="t1"; (-5,10)*+{{\mathbbm P}i^{\lambdaanglembda-1}1bl\lambdaangle -\lambda +1 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i^{\lambdaanglembda-2}1bl\lambdaangle -\lambda +3\rangle }="t3"; (-5,-10)*+{{\mathbbm P}i^{\lambdaanglembda-3}1bl\lambdaangle -\lambda +5 \rangle }="t4"; (-5,-30)*+{{\mathbbm P}i1bl\lambdaangle \lambda -3 \rangle }="t5"; (-5,-40)*+{1bl\lambdaangle \lambda -1 \rangle }="t6"; (-5,-35)*{{\rm op}lus}; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-25)*{{\rm op}lus}; (-5,-18)*{\vdots}; (35,10)*+{{\mathbbm P}i\mathtt{F}{\mathbbm P}i\mathtt{E}1bl\lambdaangle 2\rangle}="r1"; (35,00)*+{{\mathbbm P}i^{\lambdaanglembda}1bl\lambdaangle -\lambda +3 \rangle }="r2"; (35,-10)*+{{\mathbbm P}i^{\lambdaanglembda-1}1bl\lambdaangle -\lambda +5\rangle }="r3"; (35,-20)*+{{\mathbbm P}i^{\lambdaanglembda-3}1bl\lambdaangle -\lambda +5 \rangle }="r4"; (35,-40)*+{{\mathbbm P}i^21bl\lambdaangle \lambda -1 \rangle }="r5"; (35,-50)*+{{\mathbbm P}i1bl\lambdaangle \lambda +1 \rangle }="r6"; (35,-45)*{{\rm op}lus}; (35,5)*{{\rm op}lus}; (25,-5)*{{\rm op}lus}; (35,-15)*{{\rm op}lus}; (35,-25)*{{\rm op}lus}; (35,-35)*{{\rm op}lus}; (35,-28)*{\vdots}; {\ar^{\phi_{\lambda-2}} "t3"; "r2"}; {\ar^{\phi_{\lambda-3}} "t4"; "r3"}; {\ar^{\phi_0} "t6"; "r5"}; $\blacktriangleleft$ndxy \qquad \quad \xy (-5,20)*+{\mathtt{E}{\mathbbm P}i\mathtt{F}1bl}="t1"; (-5,10)*+{1bl\lambdaangle \lambda +1 \rangle }="t2"; (-5,0)*+{{\mathbbm P}i1bl\lambdaangle \lambda +3\rangle }="t3"; (-5,-10)*+{{\mathbbm P}i^21bl\lambdaangle \lambda +5 \rangle }="t4"; (-5,-30)*+{{\mathbbm P}i^{\lambdaanglembda-2}1bl\lambdaangle -\lambda -3 \rangle }="t5"; 
(-5,-40)*+{{\mathbbm P}i^{\lambdaanglembda+1}1bl\lambdaangle -\lambda -1 \rangle }="t6"; (-5,-35)*{{\rm op}lus}; (-5,15)*{{\rm op}lus}; (-5,5)*{{\rm op}lus}; (-5,-5)*{{\rm op}lus}; (-5,-15)*{{\rm op}lus}; (-5,-25)*{{\rm op}lus}; (-5,-18)*{\vdots}; (35,10)*+{{\mathbbm P}i\mathtt{E}{\mathbbm P}i\mathtt{F}1bl\lambdaangle 2\rangle}="r1"; (35,00)*+{{\mathbbm P}i1bl\lambdaangle \lambda +3 \rangle }="r2"; (35,-10)*+{{\mathbbm P}i^{2}1bl\lambdaangle \lambda +5\rangle }="r3"; (35,-20)*+{{\mathbbm P}i^{3}1bl\lambdaangle \lambda +5 \rangle }="r4"; (35,-40)*+{{\mathbbm P}i^{\lambdaanglembda+1}1bl\lambdaangle -\lambda -1 \rangle }="r5"; (35,-50)*+{{\mathbbm P}i^{\lambdaanglembda+2}1bl\lambdaangle -\lambda +1 \rangle }="r6"; (35,-45)*{{\rm op}lus}; (35,5)*{{\rm op}lus}; (25,-5)*{{\rm op}lus}; (35,-15)*{{\rm op}lus}; (35,-25)*{{\rm op}lus}; (35,-35)*{{\rm op}lus}; (35,-28)*{\vdots}; {\ar^{\psi_{-\lambda-2}} "t3"; "r2"}; {\ar^{\psi_{-\lambda-3}} "t4"; "r3"}; {\ar^{\psi_0} "t6"; "r5"}; $\blacktriangleleft$ndxy \] Note that if $\lambdaanglembda = -1, 0 ,1$ the statement above is vacuous (which is why we only consider $\lambdaanglembda > 1$ and $\lambdaanglembda < -1$). $\blacktriangleleft$nd{lem} $\blacktriangleright$egin{proof} It is easy to see that the morphisms $\phi_k {\rm co}lon {\mathbbm P}i^k 1bl \ads{\lambda-1-2k}\rightarrow{\mathbbm P}i^k 1bl \ads{\lambda-1-2k}$ coming from the map $\sUupdot \sUdown$ are the only possible isomorphisms on summands of $\mathtt{E}\mathtt{F}1bl$ isomorphic to ${\mathbbm P}i^k 1bl \ads{\lambda-1-2k}$. 
This follows because such summands cannot exist in $\mathtt{F}\Pi\mathtt{E}\mathbbm{1}_{\lambda}$ since the space of projections onto a summand
\begin{equation*}
\begin{split}
{\rm Hom}(\mathtt{F}\Pi\mathtt{E}\mathbbm{1}_{\lambda}, \Pi^k\mathbbm{1}_{\lambda} \ads{\lambda-1-2k})
&\cong {\rm Hom}(\Pi\mathtt{E}\mathbbm{1}_{\lambda}, (\mathbbm{1}_{\lambda}\mathtt{F})^R\Pi^{k}\mathbbm{1}_{\lambda} \ads{\lambda-1-2k})\\
&\cong {\rm Hom}(\mathtt{E}\mathbbm{1}_{\lambda}, \Pi^{k+1}\mathtt{E}\mathbbm{1}_{\lambda} \ads{-2-2k})
\end{split}
\end{equation*}
is zero dimensional by Lemma~\ref{lem:E} for all $0 \leq k \leq \lambda-2$.

Temporarily assume that $\mathtt{E}\mathbbm{1}_{\lambda-2}$ is indecomposable. Observe that
\begin{align}
\mathtt{E} \mathtt{F} \mathtt{E} \mathbbm{1}_{\lambda-2} &\cong \mathtt{F} \Pi\mathtt{E} \mathtt{E} \mathbbm{1}_{\lambda-2} \bigoplus_{k=0}^{\lambda-1} \Pi^k\mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-1-2k \rangle .
\end{align}
We argue that $\mathtt{F}\Pi\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}$ contains no summands isomorphic to $\Pi^k\mathtt{E}\mathbbm{1}_{\lambda-2}\ads{\lambda-1-2k}$ for $0 < k \leq \lambda-2$. Using the cancellation property for indecomposable 1-morphisms the number of isomorphisms $\phi_k$ above is the same as the number of isomorphisms
$$\phi'_{k} \colon \Pi^k\mathtt{E}\mathbbm{1}_{\lambda-2} \ads{\lambda-1-2k} \rightarrow \Pi^k\mathtt{E}\mathbbm{1}_{\lambda-2} \ads{\lambda-1-2k}$$
induced by the map
\begin{equation} \label{eq_EFE}
\sUupdot\sUdown\sUup\colon \mathtt{E} \mathtt{F} \mathtt{E} \mathbbm{1}_{\lambda-2} \rightarrow \Pi \mathtt{E} \mathtt{F} \mathtt{E} \mathbbm{1}_{\lambda-2} \langle 2 \rangle.
\end{equation}
If $\mathtt{F}\Pi\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}$ contained a summand isomorphic to $\Pi^k\mathtt{E}\mathbbm{1}_{\lambda-2}\ads{\lambda-1-2k}$, then there would be a projection
\begin{equation*}
\begin{split}
{\rm Hom}(\mathtt{F}\Pi\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}, \mathtt{E}\mathbbm{1}_{\lambda-2}\ads{\ell})
&\cong {\rm Hom}(\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}, (\mathbbm{1}_{\lambda}\mathtt{F})^R \mathtt{E}\mathbbm{1}_{\lambda-2}\ads{\ell})\\
&\cong {\rm Hom}(\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}, \mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}\ads{\ell-\lambda-1}),
\end{split}
\end{equation*}
which can only exist if $\ell=\lambda-1$ by Lemma~\ref{lem:EE}. By the remarks following Lemma~\ref{lem:FEtEF}, the inclusion map $\mathtt{F}\Pi\mathtt{E} \mathtt{E} \mathbbm{1}_{\lambda-2} \rightarrow \mathtt{E}\mathtt{F}\mathtt{E}\mathbbm{1}_{\lambda-2}$ induces the zero map on all summands $(\mathbbm{1}_{\lambda}\ads{\lambda-1-2k})\mathtt{E}\mathbbm{1}_{\lambda-2}$. Thus \eqref{eq_EFE} cannot induce any isomorphisms $\phi'_k$ from the $\mathtt{F}\Pi\mathtt{E}\mathtt{E}\mathbbm{1}_{\lambda-2}$ summand.

We show the maps $\phi'_k$ are isomorphisms by relating them to different maps. By Lemma~\ref{lem_niliso}, the map
\begin{equation}\label{eq_EEF}
\sUupdot \sUup \sUdown\colon \mathtt{E} \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda-2} \rightarrow \Pi\mathtt{E} \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda-2} \langle 2 \rangle
\end{equation}
induces an isomorphism on the summand of the form $\mathtt{E}^{(2)}\mathtt{F}\mathbbm{1}_{\lambda-2}\langle 1\rangle$.
But by repeatedly applying \eqref{eq:EF-rel} and using the unique decomposition property it follows that for $\lambda-2 \geq 0$ there is an isomorphism
\begin{equation}
\mathtt{E}^{(2)} \mathtt{F} \mathbbm{1}_{\lambda-2}\ads{1} \cong \mathtt{F} \mathtt{E}^{(2)} \mathbbm{1}_{\lambda-2} \ads{1} \bigoplus_{k=0}^{\lambda-2} \Pi^k \mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-1-2k \rangle
\end{equation}
so that the number of isomorphisms
$$\phi''_k \colon \Pi^k \mathtt{E} \mathbbm{1}_{\lambda-2} \ads{\lambda-1-2k} \rightarrow \Pi^k \mathtt{E} \mathbbm{1}_{\lambda-2} \ads{\lambda-1-2k} $$
induced by \eqref{eq_EEF} is at least one for each $0 \leq k \leq \lambda-2$.

On the other hand, by decomposing $\mathtt{E}\mathtt{F}\mathbbm{1}_{\lambda-2}$, the map in \eqref{eq_EEF} induces the map
\[
\sUupdot\sUdown\sUup \bigoplus_{i=0}^{\lambda-3} \sUupdot\colon \mathtt{E} \mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda-2} \bigoplus_{k=0}^{\lambda-3} \Pi^k\mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-3-2k \rangle \rightarrow \Pi\mathtt{E} \mathtt{F} \Pi\mathtt{E} \mathbbm{1}_{\lambda-2} \langle 2 \rangle \bigoplus_{k=0}^{\lambda-3} \Pi^{k+1}\mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-5-2k \rangle.
\]
For each $0 \leq k \leq \lambda-2$ the map
\[
\sUupdot\colon \Pi^k\mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-3-2k \rangle \rightarrow \Pi^{k+1}\mathtt{E} \mathbbm{1}_{\lambda-2} \langle \lambda-5-2k \rangle
\]
is clearly never an isomorphism since the parities and gradings do not match.
Hence the only contribution to the number of isomorphisms $\phi''_k$ comes from the map $\blacktriangleleft$qref{eq_EFE}, showing that $\phi''_k$ is an isomorphism if and only if $\phi'_k$ is an isomorphism. If $\mathtt{E}1b_{\lambda-2}$ is not indecomposable then suppose that $X$ is an indecomposable summand of $\mathtt{E}1b_{\lambda-2}$ with multiplicity $m$. The same argument above shows that the number of isomorphisms $\phi_k$ is equal to $m$ times the number of isomorphisms induced on summands of $X$ by $\phi'_k$. The arguments above show this number must be at least $m$. $\blacktriangleleft$nd{proof} From condition $\blacktriangleleft$qref{co:hom} in the definition of a strong supercategorical action it follows that negative degree diagrams with only dashed lines for endpoints must be equal to the zero 2-morphism. In particular, the diagrams below are negative degree dotted bubbles. \[ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1.25); \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.6,1.1){$\scriptstyle m$\;}; \node[blue] at (.5,1.1){$\scriptstyle \lambda-1$\;}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (1.3,0) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \;\; = 0 \quad \text{for $0 \lambdaeq m <\lambda-1$}, \qquad \qquad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. 
(-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0, -1.25) node[pos=0.85,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) to[out=00, in=-90] (1,1) ; \node[blue] at (1.45,0.8){$\scriptstyle m$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \node at (-1,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } \;\; = 0 \quad \text{for $0 \lambdaeq m <-\lambda-1$}. \] As explained in Section~\ref{subsec:consequences}, the $\mathfrak{sl}_2$-relations formally determine a map ${\mathbbm P}i^{\lambda+1}1bl\lambdaangle -\lambda-1\rangle \rightarrow \mathtt{F}\mathtt{E}1bl$ when $\lambda<0$. It more convenient to work with a related map $1bl\lambdaangle -\lambda-1\rangle \rightarrow \mathtt{F}{\mathbbm P}i^{\lambda+1}\mathtt{E}1bl$ defined as follows: \[ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (0.5,0) .. controls ++(0,-0.8) and ++(-0,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0, 0) node[pos=0.95,above]{$\scriptstyle \lambda+1$\;}; \node at (-1,-.6) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } \quad := \quad \xy (0,-2)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X).. controls ++(.1,-.5) and ++(0,-.5) .. (-.5, -.5) .. controls ++(0,.3) and ++(0,-.3) .. (0,0); \node at (-1,-.8) {$\lambdaanglembda$}; \node[blue] at (.5,-.9) {$\scriptstyle \lambdaanglembda+1$}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \] The next Corollary shows that degree zero dotted bubbles are non-zero multiples of the identity. 
$\blacktriangleright$egin{cor} \lambdaanglebel{cor:degz-bubbles} The maps $\blacktriangleright$egin{align} \xy (0,5)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (1,0.5) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy &{\rm co}lon 1bl \rightarrow 1bl \qquad \text{for $\lambda >0$,} & \qquad \qquad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,1) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[line width=0mm ] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (1,0.6) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }&{\rm co}lon 1bl \rightarrow 1bl \qquad \text{for $\lambda <0$,} $\blacktriangleleft$nd{align} are both equal to non-zero multiples of $1b_{1bl}$. $\blacktriangleleft$nd{cor} $\blacktriangleright$egin{proof} As usual, we prove the case $\lambda > 0$ (the case $\lambda < 0$ follows similarly). 
By construction, the map $\Ucupl\colon \mathbbm{1}_{\lambda} \rightarrow \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle -\lambda+1 \rangle$ is an isomorphism between the summand $\mathbbm{1}_{\lambda}$ on the left and the corresponding $\mathbbm{1}_{\lambda}$ on the right hand side. Then by Lemma \ref{lemXind}, applying $^{\textcolor[rgb]{0.00,0.00,1.00}{\lambda-1}}\Uupdots\Udown\colon \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle -\lambda+1 \rangle \rightarrow \Pi^{\lambda-1}\mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle \lambda-1 \rangle$ induces an isomorphism between the summands $\mathbbm{1}_{\lambda}$ on either side. Finally, again by construction, the map $\Ucapr \colon \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle \lambda-1 \rangle \rightarrow \Pi^{\lambda-1}\mathbbm{1}_{\lambda}$ is an isomorphism between the summand $\Pi^{\lambda-1}\mathbbm{1}_{\lambda}$ on the right side and the corresponding top degree summand $\Pi^{\lambda-1}\mathbbm{1}_{\lambda}$ on the left hand side. Thus the composition
\[
\mathbbm{1}_{\lambda} \xrightarrow{\ \Ucupl\ } \mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle -\lambda+1 \rangle \xrightarrow{^{\textcolor[rgb]{0.00,0.00,1.00}{\lambda-1}}\Uupdots \Udown} \Pi^{\lambda-1}\mathtt{E} \mathtt{F} \mathbbm{1}_{\lambda} \langle \lambda-1 \rangle \xrightarrow{
\begin{tikzpicture}[scale=0.5]
\draw[semithick, ->] (-0.5,0) .. controls (-0.5,0.65) and (0.5,0.65) .. (0.5,0) node[pos=0.5, shape=coordinate](X){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.1,.4) and ++(0,1).. (-1,0);
\end{tikzpicture}
} \mathbbm{1}_{\lambda}
\]
is an isomorphism and this completes the proof since ${\rm Hom}(\mathbbm{1}_{\lambda}, \mathbbm{1}_{\lambda}) \cong \Bbbk$.
\end{proof}

\subsubsection{Defining the last adjunction}

We are still assuming, for simplicity, that $\lambda \ge 0$. Recall that we defined all but one of the adjunction maps, namely the 2-morphism $\Ucupr$ in (\ref{eq:B}). This map cannot be defined formally by adjunction.
And since $\lambda \mathfrak{g}e 0$, the 1-morphism $\mathtt{F} \mathtt{E} 1bl$ is indecomposable so one cannot define it as an inclusion. To overcome this problem we construct this 2-morphism by defining an up-down crossing and composing it with the $U_qcupl$. Let $U_qcrossr$ denote the map $ \mathtt{E}\mathtt{F} 1bl {\rm co}ng\mathtt{F} {\mathbbm P}i\mathtt{E} 1bl {\rm op}lus_{[\lambda]} 1bl \rightarrow\mathtt{F} {\mathbbm P}i\mathtt{E}1bl $ which projects onto the $\mathtt{F}{\mathbbm P}i \mathtt{E} 1bl$ summand of $\mathtt{E} \mathtt{F} 1bl$. Note that this map is not unique because there exist non-zero maps ${\rm op}lus_{[\lambda]} 1bl \rightarrow\mathtt{F} \mathtt{E} 1bl$ but this ambiguity will not matter. Then we define a map $U_qcuprm {\rm co}lon 1bl \rightarrow \mathtt{F}{\mathbbm P}i^{\lambda+1}\mathtt{E}1bl\lambdaangle \lambda+1\rangle$ as the composite $\blacktriangleright$egin{equation}\lambdaanglebel{eq:C} \xy (-25,0)*+{\scriptstyle 1bl}="1"; (-5,0)*+{\scriptstyle\mathtt{E} \mathtt{F} 1bl \lambdaangle -\lambda+1 \rangle }="2"; (25,0)*+{\scriptstyle {\mathbbm P}i^{\lambda}\mathtt{E} \mathtt{F} 1bl \lambdaangle \lambda +1 \rangle}="3"; (60,0)*+{\scriptstyle {\mathbbm P}i^{\lambda}\mathtt{F} {\mathbbm P}i \mathtt{E} 1bl\lambdaangle \lambda +1\rangle}="4"; (105,0)*+{\scriptstyle \mathtt{F} {\mathbbm P}i^{\lambda+1} \mathtt{E} 1bl\lambdaangle \lambda +1\rangle.}="5"; {\ar^-{ \vcenter{\xy (0,0)*{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[color=blue, thick, double distance=1pt, dashed] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1); \draw[semithick, <-] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. 
(-0.5,1); $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy}\; \sUupbb \sUup} "4"; "5"}; {\ar^-{\xy (0,0)*{\sUupbU_qcrossr}; $\blacktriangleleft$ndxy\;} "3"; "4"}; {\ar^-{\lambdaU_qupdotsU_qdown} "2"; "3"}; {\ar^-{U_qcupl} "1"; "2"}; $\blacktriangleleft$ndxy $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{rem} In the even case, it is implicit in the ``curl-relations" \cite[Proposition 5.4]{Lau1} that given one of the adjunction maps, the other adjunction map is fixed as above. This approach to the second adjunctions later appears in \cite[Section 4.1.4]{Rou2}. This form of the second adjunction was verified in the context of cyclotomic quotients by Kashiwara~\cite{Kash}. $\blacktriangleleft$nd{rem} $\blacktriangleright$egin{lem}\lambdaanglebel{lem:A'} If $\mu \mathfrak{g}e \lambda$, then the two maps \[ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (1.5,0.8) .. (1.5,0) node[pos=0.5, shape=coordinate](L){} node[pos=0.33, shape=coordinate](I){}; \draw[color=blue, thick, double distance=1pt, dashed] (L) to [out=90, in=-90] (1.5,2); \draw[color=blue, thick, dashed] (I) .. controls++(-1.6,-.1) and ++(.15,-.5) .. (1,2); \draw[thick] (.5,0) .. controls (.5,.7) and (-.5,.5).. (-.5,1.2); \draw[thick, ->] (-.5,1.2) -- (-.5,2); \node[blue] at (1.9,1.8){$\scriptstyle \mu-1$}; \node at (1.9,0.4) {$\mu$}; $\blacktriangleleft$nd{tikzpicture} \qquad, \qquad $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (1.5,0.8) .. (1.5,0) node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.7, shape=coordinate](R){}; \draw[color=blue, thick, dashed] (R) .. controls ++(.2,.4) and ++(-.15,.4) .. (M); \draw[color=blue, thick, double distance=1pt, dashed] (L) to [out=90, in=-90] (1.5,2); \draw[thick] (.5,0) .. controls (.5,.7) and (1.5,.3).. (1.5,1); \draw[thick, ->] (1.5,1) .. controls (1.5,1.7) and (.5,1.3).. 
(.5,2); \node[blue] at (1.8,1.8){$\scriptstyle \mu$}; \node at (1.9,0.4) {$\mu$}; $\blacktriangleleft$nd{tikzpicture} \] are non-zero multiples of a 2-morphism spanning the 1-dimensional 2-hom-space $ {\rm Hom}(\mathtt{E} \mathtt{E} \mathtt{F} 1b_{\mu} \lambdaangle \mu+1 \rangle, \mathtt{E} {\mathbbm P}i^{\mu} 1b_{\mu})$ from Lemma~\ref{lem:EEF}. $\blacktriangleleft$nd{lem} $\blacktriangleright$egin{proof} We just need to show that both maps are non-zero. Suppose the map on the left is zero. Adding a dot at the top of the middle upward pointing strand and sliding it past the crossing using the odd nilHecke relation~$\blacktriangleleft$qref{eq:onil-dot} one gets two terms. \[ \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (1.5,0.8) .. (1.5,0) node[pos=0.5, shape=coordinate](L){} node[pos=0.33, shape=coordinate](I){}; \draw[color=blue, thick, double distance=1pt, dashed] (L) to [out=90, in=-90] (1.5,2); \draw[color=blue, thick, dashed] (I) .. controls++(-1.6,-.2) and ++(.15,-.5) .. (1,2); \draw[thick] (.5,0) .. controls (.5,.7) and (-.5,.5).. (-.5,1); \draw[thick, ->] (-.5,1) -- (-.5,2) node[pos=0.32, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) to [out=180, in=-90] (-1,2); \draw[, line width=0mm] (-.5,1) -- (-.5,2) node[pos=0.32](){$\blacktriangleright$bullet}; \node[blue] at (1.8,1.8){$\scriptstyle \mu-1$}; \node at (1.9,0.4) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \quad = \quad \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (1.5,0.8) .. (1.5,0) node[pos=0.5, shape=coordinate](L){} node[pos=0.33, shape=coordinate](I){}; \draw[color=blue, thick, double distance=1pt, dashed] (L) to [out=90, in=-90] (1.5,2); \draw[color=blue, thick, dashed] (I) .. controls++(-1.5,-.2) and ++(.15,-.5) .. (1,2); \draw[thick] (.5,0) .. controls (.5,.7) and (-.5,.5).. 
(-.5,1) node[pos=0.15, shape=coordinate](DOT){}; \draw[thick, ->] (-.5,1) -- (-.5,2); \draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.7,-0.2) and ++(0,-1).. (-1,2); \draw[line width=0mm] (.5,0) .. controls (.5,.7) and (-.5,.5).. (-.5,1) node[pos=0.15](){$\blacktriangleright$bullet}; \node[blue] at (1.8,1.8){$\scriptstyle \mu-1$}; \node at (1.9,0.4) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \quad + \quad \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (1.5,0.8) .. (1.5,0) node[pos=0.5, shape=coordinate](L){}; \draw[color=blue, thick, double distance=1pt, dashed] (L) to [out=90, in=-90] (1.5,2); \draw[color=blue, thick, dashed] (-1,2) to [out=-90, in=-90] (1,2); \draw[thick, ->] (-.5,0) -- (-.5,2); \node[blue] at (1.8,1.8){$\scriptstyle \mu-1$}; \node at (1.9,0.4) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}}; $\blacktriangleleft$ndxy \] One term is again zero (because it is the composition of a dot and the original map) and the other is just the composite of several isomorphisms and the adjoint map which cannot be zero because it is the projection of $\mathtt{E} 1b_{\mu}$ out of the highest degree summand inside $\mathtt{E} \mathtt{E} \mathtt{F} 1b_{\mu} \lambdaangle \mu+1 \rangle$. Thus the map is non-zero. On the other hand, the map on the right is the composition \[ \xy (40,0)*+{{\mathbbm P}i^{\mu}\mathtt{E} 1b_{\mu}}="1"; (0,0)*+{\mathtt{E} \mathtt{F}{\mathbbm P}i \mathtt{E} 1b_{\mu} \lambdaangle \mu+1 \rangle}="2"; (-50,0)*+{ \mathtt{E} \mathtt{E} \mathtt{F} 1b_{\mu} \lambdaangle \mu+1 \rangle}="3"; {\ar^-{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[semithick, ->] (-0.5,0) .. controls (-0.5,0.65) and (0.5,0.65) .. 
(0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1); \draw[semithick,->] (1.5,0) -- (1.5,1); \draw[thick, dashed, blue] (1,0) -- (1,1); $\blacktriangleleft$nd{tikzpicture}} "2";"1"}; {\ar^-{\sUupU_qcrossr} "3";"2"}; $\blacktriangleleft$ndxy \] together with several isomorphisms. The first map in the above composite is induced by the projection of $\mathtt{E} \mathtt{F} 1b_{\mu}{\rm co}ng\mathtt{F}{\mathbbm P}i\mathtt{E} 1b_{\mu} {\rm op}lus_{[\mu]} 1b_{\mu}$ into $\mathtt{F} {\mathbbm P}i\mathtt{E} 1b_{\mu} $ and the second map is induced by the is the projection of $\mathtt{E} 1b_{\mu}$ out of the top degree summand of $\mathtt{E} 1b_{\mu}$ in $(\mathtt{E} \mathtt{F})\mathtt{E} 1b_{\mu}$. Since the domain of the second projection is in the image of the first, this map is also non-zero. $\blacktriangleleft$nd{proof} $\blacktriangleright$egin{prop} \lambdaanglebel{prop:adjoints} The 2-morphisms $\blacktriangleright$egin{equation} U_qcapr: \mathtt{E} \mathtt{F} 1bltwo \lambdaangle \lambdaanglembda+1 \rangle \rightarrow {\mathbbm P}i^{\lambda+1}1bltwo, \hspace{1.0cm} U_qcupl: 1bltwo \lambdaangle \lambdaanglembda+1 \rangle \rightarrow \mathtt{E} \mathtt{F} 1bltwo, $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation} U_qcapl: \mathtt{F} \mathtt{E} 1bl \rightarrow 1bl \lambdaangle \lambdaanglembda+1 \rangle, \hspace{1.0cm} U_qcuprm: 1bl \rightarrow\mathtt{F}{\mathbbm P}i^{\lambdaanglembda+1} \mathtt{E} 1bl \lambdaangle \lambdaanglembda+1 \rangle $\blacktriangleleft$nd{equation} defined above satisfy the adjunction relations (\ref{eq_biadjoint1}) and (\ref{eq_biadjoint2}) up to non-zero multiples. $\blacktriangleleft$nd{prop} $\blacktriangleright$egin{proof} We prove one of the adjunction axioms (the second one follows formally). Since ${\rm End}(\mathtt{E}1bl) {\rm co}ng \Bbbk$ it suffices to show that the left side of (\ref{eq_biadjoint2}) is non-zero. 
Now, ${\rm Hom}( 1bltwo\lambdaangle \lambda+1 \rangle,\mathtt{E}\mathtt{F}1bltwo) {\rm co}ng {\rm Hom}(\mathtt{E} 1bl, \mathtt{E} 1bl) {\rm co}ng \Bbbk$ by Lemma \ref{lem:E}. So the map $U_qcupl: 1bltwo \lambdaangle \lambda+1 \rangle \rightarrow \mathtt{E} \mathtt{F} 1bltwo$ must be equal to the adjunction map (up to a multiple). Since $U_qcapl: \mathtt{F} \mathtt{E}1bl \rightarrow 1bl\lambdaangle \lambda+1 \rangle$ is defined to be the adjunction map their composition must be non-zero. To show that the left side of (\ref{eq_biadjoint1}) is non-zero we use the definition of the cup given in $\blacktriangleleft$qref{eq:C}. It follows from Lemma~\ref{lem:A'} that \[ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick,->] (-1.5,0) .. controls (-1.5,.8) and (-.5,.8) .. (-.5,0) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, ->-=0.15] (.5,0) -- (.5,1.25); \draw[thick, ->] (-1.5,-1.25) -- (-1.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.2,1.5) and ++(0,1).. (Y); \node at (.7,-1) {$\lambda$}; \node[blue] at (-1.4,1) {$\scriptstyle \lambda+1$}; $\blacktriangleleft$nd{tikzpicture} } \quad := \quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-1.5,1) .. controls (-1.5,1.6) and (-.5,1.6) .. (-.5,1) node[pos=0.45, shape=coordinate](LY){} node[pos=0.55, shape=coordinate](Y){}; \draw[thick ] (-0.5,0) .. controls (-0.5,-0.5) and (0.5,-0.5) .. (0.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls ++(0,.5) and ++(.1,.6).. (-.2,1) .. controls ++(0,-.6) and ++(-1,.4) .. (-0.5,0); \draw[color=blue, thick, dashed] (LY) .. controls ++(-.1,1.2) and ++(.2,1).. (.2,1) .. controls ++(0,.-.3) and ++(0,.4) .. 
(X); \node at (-0.5,0) {$\blacktriangleright$bullet}; \draw[thick, <-] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick, ->-=0.3] (.5,1) -- (.5,2); \draw[thick, ->-=0.8] (-1.5,-1) -- (-1.5,1); \node[blue] at (-1,0){$\scriptstyle \lambda$}; \node at (1.4,0.4) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \quad = \quad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick] (0.5,1) .. controls (0.5,1.6) and (1.5,1.6) .. (1.5,1) node[pos=0.45, shape=coordinate](Y){} node[pos=0.56, shape=coordinate](RY){}; \draw[thick] (0.5,0) .. controls (0.5,-.6) and (1.5,-.6) .. (1.5,0) node[pos=0.2, shape=coordinate](Z){}; \draw[thick, ->-=-0.5] (1.5,1) -- (1.5,0); \draw[thick] (-0.5,-.75) -- (-0.5,0); \draw[thick, ->-=0.5] (-0.5,1) -- (-0.5,2.5); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.4,-0.1) and ++(0,-.4) .. (0,1.5) .. controls ++(0,.5) and ++(0,.6) .. (Y); \draw[color=blue, thick, dashed,double distance=1pt,] (Z) .. controls ++(-2,0.3) and ++(0,-.5) .. (-1,1.5) .. controls ++(0,1) and ++(.1,1.3) .. (RY); \node at (Z) {$\blacktriangleright$bullet}; \node[blue] at (.25,0){$\scriptstyle \lambda$}; \node at (2,0.4) {$\lambda$}; \node at (-2,1) {$\Bbbkappa$}; $\blacktriangleleft$nd{tikzpicture} } \] for some non-zero scalar $\Bbbkappa$. Moving one of the dots through the crossing using the odd nilHecke relation~$\blacktriangleleft$qref{eq:onil-dot} gives a sum of two diagrams. One is zero since it is the composite of a dot and an endomorphism of $\mathtt{E} 1bl$ of degree $-2$. The other is a scalar multiple of the identity on $\mathtt{E}1bl$ and the non-zero degree zero bubble from Corollary \ref{cor:degz-bubbles}. 
\end{proof} Thus we get that \newline \noindent\fbox { \parbox{\linewidth}{ \[ (\mathtt{E} 1bl)^R \cong 1bl \mathtt{F}\Pi^{\lambda+1} \langle \lambda+1 \rangle \text{ and } (\mathtt{E} 1bl)^L \cong 1bl \mathtt{F} \langle -\lambda-1 \rangle, \] \[ \left(\mathtt{F}1bl\right)^L = \Pi^{\lambda-1}1bl\mathtt{E} \langle \lambda-1\rangle\text{ and } \left(\mathtt{F}1bl\right)^R = 1bl\mathtt{E} \langle -\lambda+1\rangle, \] } } \noindent which completes the induction step for the adjoint induction hypothesis~\ref{eq:ind_hyp}. We show that these adjunction maps are unique up to a multiple. \begin{cor}\label{cor:EFidhoms} Given a strong supercategorical action of $\mathfrak{sl}_2$ we have $${\rm Hom}(\mathtt{E} \mathtt{F} 1bltwo \langle \lambda+1 \rangle, \Pi^{\lambda+1}1bltwo) \cong \Bbbk, \hspace{1.0cm} {\rm Hom}(1bltwo \langle \lambda+1 \rangle, \mathtt{E} \mathtt{F} 1bltwo) \cong \Bbbk,$$ $${\rm Hom}(\mathtt{F} \mathtt{E} 1bl, 1bl \langle \lambda+1 \rangle) \cong \Bbbk, \hspace{1.0cm} {\rm Hom}(1bl, \Pi^{\lambda+1}\mathtt{F} \mathtt{E} 1bl \langle \lambda+1 \rangle) \cong \Bbbk.$$ \end{cor} \begin{proof} We calculate the first space (the other three are similar). We have \begin{eqnarray*} {\rm Hom}(\mathtt{E} \mathtt{F} 1bltwo \langle \lambda+1 \rangle, \Pi^{\lambda+1}1bltwo) &\cong& {\rm Hom}(\mathtt{F} 1bltwo \langle \lambda+1 \rangle, \Pi^{\lambda+1}(\mathtt{E} 1bl)_R 1bltwo) \\ &\cong& {\rm Hom}(\mathtt{F} 1bltwo, \Pi^{2(\lambda+1)}\mathtt{F} 1bltwo) \\ &\cong& {\rm Hom}(\mathtt{F} 1bltwo, \mathtt{F} 1bltwo) \cong \Bbbk.
\end{eqnarray*} \end{proof} We summarize the results of this subsection with the following: \begin{thm}\label{thm:lradj} Given a strong supercategorical action of $\mathfrak{sl}_2$ the left and right adjoints of $\mathtt{E}$ are isomorphic up to specified shifts and parity. More precisely, the units and counits of these adjunctions are given by the 2-morphisms below which are uniquely determined up to a scalar and induce the adjunction maps. \begin{equation}\label{eq:A} U_qcapr: \mathtt{E} \mathtt{F} 1bltwo \langle \lambda+1 \rangle \rightarrow \Pi^{\lambda+1}1bltwo \hspace{1.0cm} U_qcupl: 1bltwo \langle \lambda+1 \rangle \rightarrow \mathtt{E} \mathtt{F} 1bltwo \end{equation} \begin{equation}\label{eq:B} U_qcapl: \mathtt{F} \mathtt{E} 1bl \rightarrow 1bl \langle \lambda+1 \rangle \hspace{1.0cm} U_qcuprm: 1bl \rightarrow\mathtt{F} \Pi^{\lambda+1}\mathtt{E} 1bl \langle \lambda+1 \rangle. \end{equation} \end{thm} \subsection{A specific form for $\mathfrak{sl}_2$ isomorphisms} At this point we know that both the left and right adjoints of $\mathtt{F}$ are $\mathtt{E}$ (up to a specified shift and parity). Consequently we can prove the following. \begin{lem}\label{lem:homs} For any $n \in {\mathbbm Z}$ we have $${\rm Hom}(\mathtt{E} \Pi\mathtt{F} 1bl,\mathtt{F}\mathtt{E} 1bl) \cong \Bbbk \cong {\rm Hom}(\mathtt{F} \Pi \mathtt{E} 1bl, \mathtt{E} \mathtt{F} 1bl).$$ \end{lem} \begin{proof} We prove that ${\rm Hom}(\mathtt{E} \Pi\mathtt{F} 1bl,\mathtt{F}\mathtt{E} 1bl) \cong \Bbbk$ (the other case follows similarly).
One has \begin{eqnarray*} {\rm Hom}(\mathtt{E} \Pi\mathtt{F} 1bl,\mathtt{F}\mathtt{E} 1bl) &\cong& {\rm Hom}(\Pi\mathtt{F}(\mathtt{E} 1bl)^R 1bltwo, (\mathtt{E} 1b_{\lambda-2})^R \mathtt{F}1bltwo) \\ &\cong& {\rm Hom}(\Pi\mathtt{F}\mathtt{F}\Pi^{\lambda+1} 1bltwo \langle \lambda+1 \rangle,\mathtt{F} \Pi^{\lambda-1}\mathtt{F} 1bltwo \langle \lambda-1 \rangle) \cong \Bbbk \end{eqnarray*} where the last isomorphism follows from (the adjoint of) Lemma~\ref{lem:EE} together with the parity isomorphisms. \end{proof} Subsequently, we denote these maps as \begin{align} \label{eq:sideways} & \hackcenter{\begin{tikzpicture}[scale=0.6] \draw[semithick, <-] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) to (0,0); \end{tikzpicture}}\;\; \colon \mathtt{F} \Pi\mathtt{E} 1bl \rightarrow \mathtt{E} \mathtt{F} 1bl, & \hackcenter{\begin{tikzpicture}[scale=0.6] \draw[semithick, ->] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, <-] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.5) and ++(0,.5) .. (-.5,.5) .. controls ++(0,-.3) and ++(0,.3) .. (0,0); \end{tikzpicture}} \;\; \colon \mathtt{E} \Pi \mathtt{F} 1bl \rightarrow\mathtt{F}\mathtt{E} 1bl. \end{align} For the moment these maps are uniquely defined only up to a non-zero scalar.
$\blacktriangleright$egin{cor} \lambdaanglebel{cor:1} If $\lambda \mathfrak{g}e 0$ then the map $\blacktriangleright$egin{equation}\lambdaanglebel{eq:iso1} \zeta\;\;:=\;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[semithick, <-] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) to (0,0); $\blacktriangleleft$nd{tikzpicture}}\;\; $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->-=0.15, ->] (0.5,.2) .. controls (0.6,-0.8) and (-0.6,-0.8) .. (-0.5,.2) node[pos=0.85, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.2) and ++(0,.4) .. (-1,-1) node[pos=0.75,left]{$\scriptstyle k$}; \draw[line width=0mm] (0.5,.2) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,.2) node[pos=0.85]{\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture} }:\mathtt{F}{\mathbbm P}i \mathtt{E} 1bl $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} {\mathbbm P}i^k1bl \lambdaangle \lambda-1-2k \rangle \rightarrow \mathtt{E} \mathtt{F} 1bl $\blacktriangleleft$nd{equation} is an isomorphism. Likewise, if $\lambda \lambdae 0$ then the map $\blacktriangleright$egin{equation}\lambdaanglebel{eq:iso2} \zeta\;\;:=\;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[semithick, ->] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, <-] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.5) and ++(0,.5) .. (-.5,.5) .. controls ++(0,-.3) and ++(0,.3) .. 
(0,0); $\blacktriangleleft$nd{tikzpicture}} \;\; $\blacktriangleright$igoplus_{k=0}^{-\lambda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->-=0.15, ->] (-0.7,.5) .. controls ++(-.1,-1) and ++(.1,-1) .. (0.7,.5) node[pos=0.85, shape=coordinate](Y){} node[pos=0.55, shape=coordinate](M){} node[pos=0.44, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.3) and ++(0,.5) .. (M) node[pos=0.15,above]{$\scriptstyle k$}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(0,.55) and ++(0,.55) .. (-.6,-.25) .. controls ++(0,-.3) and ++(0,.4) ..(0,-1); \node at (Y){\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture} }:\mathtt{E}{\mathbbm P}i\mathtt{F} 1bl $\blacktriangleright$igoplus_{k=0}^{-\lambda-1}{\mathbbm P}i^{\lambda+1+k} 1bl \lambdaangle -\lambda-1-2k \rangle \rightarrow \mathtt{F} \mathtt{E} 1bl $\blacktriangleleft$nd{equation} is an isomorphism. $\blacktriangleleft$nd{cor} $\blacktriangleright$egin{proof} We prove the case $\lambda \mathfrak{g}e 0$ (the case $\lambda \lambdae 0$ is proved similarly). We know that $$\mathtt{E} \mathtt{F} 1bl {\rm co}ng\mathtt{F} {\mathbbm P}i\mathtt{E} 1bl $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1}{\mathbbm P}i^k 1bl \lambdaangle \lambda-1-2k \rangle$$ and by Lemma \ref{lem:homs} the map $U_qcrossl $ must induce an isomorphism between the $\mathtt{F} {\mathbbm P}i\mathtt{E} 1bl$ summands and must induce the zero map from the $\mathtt{F} {\mathbbm P}i\mathtt{E} 1bl$ summand on the left to any summand ${\mathbbm P}i^k1bl \lambdaangle \lambda-1-2k \rangle$ on the right. It remains to show that $$\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->-=0.15, ->] (0.5,.2) .. controls (0.6,-0.8) and (-0.6,-0.8) .. (-0.5,.2) node[pos=0.85, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. 
controls++(-.5,.2) and ++(0,.4) .. (-1,-1) node[pos=0.75,left]{$\scriptstyle k$}; \draw[line width=0mm] (0.5,.2) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,.2) node[pos=0.85]{\tikz \draw[fill=black] circle (0.4ex);}; \end{tikzpicture} }$ induces an isomorphism between the summands $\Pi^k1bl \langle \lambda-1-2k \rangle$ on either side. Since ${\rm Hom}(1bl, 1bl \langle \ell \rangle) = 0$ if $\ell < 0$ it follows that the induced map $$\bigoplus_{k=0}^{\lambda-1}\Pi^k 1bl \langle \lambda-1-2k \rangle \rightarrow \bigoplus_{k=0}^{\lambda-1}\Pi^k 1bl \langle \lambda-1-2k \rangle$$ is upper triangular (when expressed as a matrix). We show that the maps on the diagonal are isomorphisms between the summands $\Pi^k 1bl \langle \lambda-1-2k \rangle$ on either side. Now, by construction, the map $\;U_qcupl: 1bl \ads{\lambda-1} \rightarrow \mathtt{E} \mathtt{F} 1bl$ is an isomorphism onto the summand $1bl \ads{\lambda-1}$ on the right side. Consequently, by Lemma \ref{lemXind}, the composition $$1bl \langle \lambda-1-2k \rangle \xrightarrow{U_qcupl} \mathtt{E} \mathtt{F} 1bl \langle -2k \rangle \xrightarrow{\textcolor[rgb]{0.00,0.00,1.00}{k}U_qupdotsU_qdown} \Pi^k\mathtt{E} \mathtt{F} 1bl $$ must also induce an isomorphism between the summands $1bl \langle \lambda-1-2k \rangle$ on either side. This proves that all the diagonal entries are isomorphisms so we are done. \end{proof} \subsection{Endomorphisms of $\mathtt{E} 1bl$} In an arbitrary strong supercategorical action on a 2-category $\mathcal{C}$ there may be many additional 2-morphisms in $\mathcal{C}$ that are not composites of caps, cups, dots or crossings. The Lemma below is an important technical result that limits the form of 2-endomorphisms of $\mathtt{E} 1bl$.
$\blacktriangleright$egin{lem}\lambdaanglebel{lem:main} Suppose $\mu < |\lambda+2|$ (or $\mu=1$ and $\lambda=-1$) and $f \in {\rm Hom}^{2\mu}(\mathtt{E} 1bl, {\mathbbm P}i^{\mu}\mathtt{E} 1bl)$. If $\lambda \mathfrak{g}e -1$ then $f$ is of the form $\blacktriangleright$egin{equation}\lambdaanglebel{eq:main1} \sum_{i=0}^{\mu} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick, ->] (1,0) -- (1,2) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) to [out=180, in=-90] (0,2); \draw[line width=0mm] (1,0) -- (1,2) node[pos=0.5](){$\blacktriangleright$bullet}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (-1,.5) {$f_i$}; \draw[color=blue, thick, double distance=1pt, dashed] (fi) to (-1,2); \node at (1.8,1.5) {$ \lambda$}; \node[blue] at (.3,1.8) {$\scriptstyle i$}; \node[blue] at (-1.7,1.8) {$\scriptstyle\mu-i$}; $\blacktriangleleft$nd{tikzpicture} } $\blacktriangleleft$nd{equation} where $f_i \in {\rm Hom}^{2\mu-2i}(1bltwo, {\mathbbm P}i^{\mu-i}1bltwo)$. Similarly, if $\lambda \lambdae -1$ (or $\mu=2$ and $\lambda=-1$) then $f$ is of the form $\blacktriangleright$egin{equation}\lambdaanglebel{eq:main2} \sum_{i=0}^{\mu} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick, ->] (1,-.25) -- (1,2) node[pos=0.7, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) to [out=180, in=-90] (0,2); \draw[line width=0mm] (1,-.25) -- (1,2) node[pos=0.7](){$\blacktriangleright$bullet}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (2,.25) {$f_i$}; \draw[color=blue, thick, double distance=1pt, dashed] (fi) .. controls ++(0,1) and ++(0,-1.8) .. 
(-1,2); \node at (3,.5) {$\lambda$}; \node[blue] at (.3,1.8) {$\scriptstyle i$}; \node[blue] at (-1.5,1.8) {$\scriptstyle\mu-i$}; \end{tikzpicture} } \end{equation} where $f_i \in {\rm Hom}^{2\mu-2i}(1bl, \Pi^{\mu-i}1bl)$. By adjunction there are analogous results for $f \in {\rm Hom}^{2\mu}(\mathtt{F} \Pi^{\mu}1bl,\mathtt{F} 1bl)$. \end{lem} \begin{proof} We prove the case $\lambda \ge -1$ (the case $\lambda \le -1$ is proved similarly). We will deal with the special case $\mu=1$ and $\lambda=-1$ at the end. We have \begin{eqnarray*} & &{\rm Hom}^{2\mu}(\mathtt{E} 1bl, \Pi^{\mu}\mathtt{E} 1bl) \\ &\cong& {\rm Hom}^{2\mu}(1bltwo, \Pi^{\mu}\mathtt{E} (\mathtt{E} 1bl)^L 1bltwo) \\ &\cong& {\rm Hom}^{2\mu}(1bltwo, \Pi^{\mu}\mathtt{E} \mathtt{F} 1bltwo \langle -(\lambda+1) \rangle) \\ &\cong& {\rm Hom}^{2\mu}\left(1bltwo, \Pi^{\mu} \left(\bigoplus_{[\lambda+2]} 1bltwo \langle -(\lambda+1) \rangle \oplus\mathtt{F}\Pi \mathtt{E} 1bltwo \langle -(\lambda+1) \rangle\right) \right) \\ &\cong& {\rm Hom}^{2\mu-\lambda-1}(1bltwo, \Pi^{\mu} \bigoplus_{[\lambda+2]} 1bltwo) \oplus {\rm Hom}^{2\mu}((\mathtt{F} 1b_{\lambda+4})^L, \Pi^{\mu+1} \mathtt{E} 1bltwo \langle -(\lambda+1) \rangle) \\ &\cong& {\rm Hom}^{2\mu-\lambda-1}(1bltwo,\Pi^{\mu} \bigoplus_{[\lambda+2]} 1bltwo) \oplus {\rm Hom}^{2\mu}(\Pi^{\lambda+3}\mathtt{E} 1bltwo \langle \lambda+3 \rangle, \Pi^{\mu+1}\mathtt{E} 1bltwo \langle -(\lambda+1) \rangle) \\ &\cong& {\rm Hom}^{2\mu-\lambda-1}(1bltwo, \bigoplus_{[\lambda+2]}\Pi^{\mu} 1bltwo) \end{eqnarray*} where the last line follows since $\mu<(\lambda+2)$ meaning
${\rm Hom}^{2\mu}(\Pi^{\lambda+3}\mathtt{E} 1bltwo, \Pi^{\mu+1} \mathtt{E} 1bltwo \langle -2(\lambda+2) \rangle) = 0.$ Note that in the third isomorphism above we use the inverse of the isomorphism in \eqref{eq:iso1}. Keeping track of degrees, we find that \begin{equation}\label{eq:2} {\rm Hom}^{2\mu}(\mathtt{E} 1bl, \Pi^{\mu}\mathtt{E} 1bl) \cong \bigoplus_{k = 0}^{\lambda+1} {\rm Hom}(1bltwo, \Pi^{k+\mu}1bltwo \langle 2(\mu-k) \rangle). \end{equation} If $f \in {\rm Hom}^{2\mu}(\mathtt{E} 1bl, \Pi^{\mu}\mathtt{E} 1bl)$ then we denote the map induced by adjunction \[ f' \in {\rm Hom}^{2\mu}(1bltwo, \Pi^{\mu}\mathtt{E} \mathtt{F} 1bltwo \langle -(\lambda+1) \rangle) \] and the induced maps on the right side of (\ref{eq:2}) by $f_k' \in {\rm Hom}(1bltwo, \Pi^{k+\mu} 1bltwo \langle 2(\mu-k) \rangle)$. Now let us trace through the series of isomorphisms above in order to explicitly identify $f'$ with $f_k'$. The critical isomorphism is the third one where one uses the isomorphism \eqref{eq:iso1} from Corollary \ref{cor:1}. Thus we find that $f'$ corresponds to the composite \[ \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, <-] (-0.5,2) .. controls (-0.5,1) and (0.5,1) .. (0.5,2) node[pos=0.2, shape=coordinate](tDOT){}; \draw[color=blue, thick, double distance=1pt, dashed] (-1.4,.8) .. controls ++(0,1) and ++(-.5,.5) .. (tDOT); \draw[color=blue, thick, double distance=1pt, dashed] (-1.6,.8) -- (-1.6,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-1.5,0.75) {$\scriptstyle \;\; f_k' \;\;$}; \draw[line width=0mm] (-0.5,2) .. controls (-0.5,1) and (0.5,1) ..
(0.5,2) node[pos=0.2](){$\bullet$}; \node[blue] at (-1,1.45){$\scriptstyle k$}; \node[blue] at (-1.8,1.8){$\scriptstyle \mu$}; \node at (.6,0.7) {$\lambda$}; \end{tikzpicture} } \colon 1bltwo \rightarrow \Pi^{\mu}\mathtt{E} \mathtt{F} 1bltwo \langle 2\mu-(\lambda+1) \rangle. \] Setting \[ \hackcenter{ \begin{tikzpicture} \draw[color=blue, thick, double distance=1pt, dashed] (-1.5,.8) -- (-1.5,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-1.5,0.75) {$\scriptstyle f_k $}; \node[blue] at (-2,1.8){$\scriptstyle \mu-k$}; \node at (-.6,1) {$\lambda$}; \end{tikzpicture} } \quad :=\quad \hackcenter{ \begin{tikzpicture} \draw[color=blue, thick, double distance=1pt, dashed] (-1.6,.8) .. controls ++(0,.5) and ++(0,.5) .. (-1.2,.8); \draw[color=blue, thick, double distance=1pt, dashed] (-1.8,.8) -- (-1.8,2); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-1.5,0.75) {$\scriptstyle \quad f_k' \quad$}; \node[blue] at (-1.2,1.35){$\scriptstyle k$}; \node[blue] at (-1.5,1.8){$\scriptstyle \mu-k$}; \node at (-.6,1.5) {$\lambda$}; \end{tikzpicture} } \] and using the adjunction which relates $f$ and $f'$ completes the proof. If $\mu=2$ and $\lambda=-1$ then the long calculation above yields $${\rm Hom}^2(\mathtt{E} 1b_{-1}, \Pi \mathtt{E} 1b_{-1}) \cong {\rm Hom}^2(1b_1,\Pi 1b_1) \oplus {\rm Hom}^2(\mathtt{E} 1b_1, \Pi \mathtt{E} 1b_1).$$ The space of maps on the right is one-dimensional and is induced by the dot. The result follows. \end{proof} \section{Odd cyclic biadjointness}\label{sec:proofcycbiadjoint} Recall that at this point, adjunction maps are only determined up to a scalar.
In this section we rescale them so that caps and cups are adjoint to each other and so that $\blacktriangleright$egin{align} \xy (0,5)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (1,0.5) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy &= 1b_{1bl} \qquad \text{for $\lambda >0$,} & \qquad \qquad \xy (0,0)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,1) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[line width=0mm ] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (1,0.6) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy &= 1b_{1bl} \qquad \text{for $\lambda <0$.} $\blacktriangleleft$nd{align} Recall that negative degree dotted bubbles are zero while a dotted bubble of degree zero must be a non-zero multiple of the identity map by Corollary~\ref{cor:degz-bubbles} (note that for $\lambda=0$ there are no degree zero dotted bubbles). By rescaling the adjunction maps we can ensure the degree zero bubbles satisfy the conditions above. 
More precisely, we rescale in the following order: \[ $\blacktriangleright$egin{tabular}{|l|c|c|c|c|} \hline $ \lambda \mathfrak{g}eq 0$ & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-.75,0) .. controls ++(0,-1) and ++(0,-1) .. (.75,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,-1.4); \node at (1.2,-.7) {$\lambda-1$}; \node at (0,-.25) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-.75,0) .. controls ++(0,1) and ++(0,1) .. (.75,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1.4); \node at (1.2,.7) {$\lambda+1$}; \node at (0,.25) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (.75,0) .. controls ++(0,-1) and ++(0,-1) .. (-.75,0); \node at (1.2,-.7) {$\lambda+1$}; \node at (0,-.25) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (.75,0) .. controls ++(0,1) and ++(0,1) .. (-.75,0); \node at (1.2,.7) {$\lambda-1$}; \node at (0,.25) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy \\& & & &\\ \hline & \;\;\txt{ fixed arbitrarily} \;\; & \;\;\txt{ determined by \\adjunction}\;\; & \;\;\txt{ fixed by value\\ of bubble}\;\; & \;\;\txt{ determined by\\ adjunction}\;\; \\ \hline $\blacktriangleleft$nd{tabular} \] \[ $\blacktriangleright$egin{tabular}{|l|c|c|c|c|} \hline $ \lambda < 0$ & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (.75,0) .. controls ++(0,-1) and ++(0,-1) .. 
(-.75,0); \node at (1.2,-.7) {$\lambda+1$}; \node at (0,-.25) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (.75,0) .. controls ++(0,1) and ++(0,1) .. (-.75,0); \node at (1.2,.7) {$\lambda-1$}; \node at (0,.25) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-.75,0) .. controls ++(0,-1) and ++(0,-1) .. (.75,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,-1.4); \node at (1.2,-.7) {$\lambda-1$}; \node at (0,-.25) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy & \xy (0,-3)*{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (-.75,0) .. controls ++(0,1) and ++(0,1) .. (.75,0) node[pos=0.5, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) -- (0,1.4); \node at (1.2,.7) {$\lambda+1$}; \node at (0,.25) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy \\& & & &\\ \hline & \;\;\txt{ fixed arbitrarily}\;\; & \;\;\txt{ determined by \\adjunction}\;\; & \;\;\txt{ fixed by value\\ of bubble}\;\; & \;\;\txt{ determined by\\ adjunction}\;\; \\ \hline $\blacktriangleleft$nd{tabular} \] The only time this rescaling fails is when $\lambda=0$ above. In that case, the rescaling for $\lambda \mathfrak{g}eq 0$ fixes the value of the positive bubble in weight $\lambda=1$, so that $\blacktriangleright$egin{equation} \lambdaanglebel{eq_bub1} \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.6] \draw[thick, ->, , ->-=0.03] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0); \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. 
(-0.5,0); \node at (1,0.5) {$+1$}; $\blacktriangleleft$nd{tikzpicture} }= 1b_{1bl}, $\blacktriangleleft$nd{equation} where $+1$ denotes the outside region of the bubble. However, the rescaling for $\lambda=0$ fails to rescale the positive degree bubble in region $\lambda=-1$. This bubble is multiplication by some arbitrary scalar $c_{-1}$: $\blacktriangleright$egin{equation} \lambdaanglebel{eq_defcmone} \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.6] \draw[thick, ->, , ->-=0.03] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0); \node at (1,0.5) {$-1$}; $\blacktriangleleft$nd{tikzpicture} } = c_{-1} 1b_{1bl} $\blacktriangleleft$nd{equation} where $-1$ denotes the outside region of the bubble. We will show in Proposition~\ref{prop_free_param} that in fact $c_{-1} = 1$. \subsection{Odd cyclicity for dots} \subsubsection{Defining downward oriented dots} We will make repeated use of the 2-morphism $x_\mathcal{F}'$, which is defined diagrammatically by $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-x-fc-prime} \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick, <-] (0,0) -- (0,3) node[pos=.5, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) [out=-45, in=90] to (.75,0); \draw (DOT) -- (DOT) node[pos=0](){$\blacktriangleright$bullet}; \node at (-.8,2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \xy (0,-2)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5); \draw[thick, ->] (1.5,1.5) -- (1.5,0); \draw[color=blue, thick, dashed] (DOT) to[out=200, in=90] (.25,-1.5); \draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. 
(.5,0) node[pos=1](){$\bullet$}; \node at (.3,1.2) {$\lambda$}; \end{tikzpicture} }; \endxy \qquad \text{or} \qquad \hackcenter{ \begin{tikzpicture} [scale=0.7] \draw[thick, <-] (0,0) -- (0,3) node[pos=.5, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) .. controls ++(.3,-1) and ++(0,-2.5) .. (.75,3); \draw (DOT) -- (DOT) node[pos=0](){$\bullet$}; \node at (-1.2,2) {$\lambda$}; \end{tikzpicture}} \quad=\quad \xy (0,-2)*{ \begin{tikzpicture}[scale=0.8] \draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5); \draw[thick, ->] (1.5,1.5) -- (1.5,0); \draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.4,-2) and ++(.2,-2) .. (2.25,0) to (2.25,1.5); \draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1](){$\bullet$}; \node at (.3,1.2) {$\lambda$}; \end{tikzpicture} }; \endxy . \end{equation} Above, we defined a dot on a down-strand using the left-oriented adjunction. The next lemma computes the difference between this and the analogous right-oriented picture. \begin{lem}\label{lem-x-fc} \begin{equation}\label{eqn-x-fc-right} (-1)^{\lambda} \;\; \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-1.5,0) .. controls (-1.5,-.8) and (-.5,-.8) .. (-.5,0) node[pos=0.5, shape=coordinate](Y){} node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->-=0.15] (.5,0) -- (.5,-1.5); \draw[thick, ->] (-1.5,1.5) -- (-1.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) ..
controls ++(.1,1) and ++(-.2,1.5).. (Y); \draw[color=blue, thick, dashed] (DOT) to[out=120, in=-90] (-0.75,1.5); \draw[line width=0mm] (-1.5,0) .. controls (-1.5,-.8) and (-.5,-.8) .. (-.5,0) node[pos=1](){$\blacktriangleright$bullet}; \node at (-.6,-1) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture} } \quad=\quad \lambdaeft\{ $\blacktriangleright$egin{array}{ll} \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.7] \draw[thick, <-] (0,0) -- (0,3) node[pos=.5, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) .. controls ++(.3,-1) and ++(0,-2.5) .. (.75,3); \draw (DOT) -- (DOT) node[pos=0](){$\blacktriangleright$bullet}; \node at (-1.2,2) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} \quad+\quad2\quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick, <-] (-2,-1.5) -- (-2,1.5); \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.9) and (0.5,0.9) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.9) and (0.5,-0.9) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda$\;}; \draw[color=blue, thick, dashed] (Z) to[out=160, in=-90] (-1.25,1.5) ; \node[blue] at (-1.25,0.8){$\scriptstyle $\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.9) and (0.5,0.9) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.9) and (0.5,-0.9) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (.8,-1) {$\lambdaanglembda+1$}; $\blacktriangleleft$nd{tikzpicture}} & \lambda>0, \\ & \\ \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.7] \draw[thick, <-] (0,0) -- (0,3) node[pos=.5, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) .. controls ++(.3,-1) and ++(0,-2.5) .. 
(.75,3); \draw (DOT) -- (DOT) node[pos=0](){$\blacktriangleright$bullet}; \node at (-1.2,2) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} \quad+\quad2\quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick, <-] (2,-1.5) -- (2,1.5); \draw[thick, ->] (0.5,0) .. controls (0.5,0.9) and (-0.5,0.9) .. (-0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.9) and (-0.5,-0.9) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1.4,.7) and ++(.1,-1) .. (2.75,1.5) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.9) and (-0.5,-0.9) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.9) and (-0.5,0.9) .. (-0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \node at (3,-1) {$\lambdaanglembda+1$}; $\blacktriangleleft$nd{tikzpicture} } & \lambda<0, \\ & \\ - \;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.7] \draw[thick, <-] (0,0) -- (0,3) node[pos=.5, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) .. controls ++(.3,-1) and ++(0,-2.5) .. (.75,3); \draw (DOT) -- (DOT) node[pos=0](){$\blacktriangleright$bullet}; \node at (-1.2,2) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture}}& \lambda=0. $\blacktriangleleft$nd{array} \right. $\blacktriangleleft$nd{equation} $\blacktriangleleft$nd{lem} $\blacktriangleright$egin{proof} By Lemma~\ref{lem:main}, there is an equation of the form $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-x-fc-right-gamma} \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.7] \draw[thick] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-1.5,0) .. 
controls (-1.5,-.8) and (-.5,-.8) .. (-.5,0) node[pos=0.5, shape=coordinate](Y){} node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->-=0.15] (.5,0) -- (.5,-1.5); \draw[thick, ->] (-1.5,1.5) -- (-1.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(.1,1) and ++(-.2,1.5).. (Y); \draw[color=blue, thick, dashed] (DOT) to[out=120, in=-90] (-0.75,1.5); \node at (DOT){$\bullet$}; \node at (-.6,-1) {$\mu$}; \end{tikzpicture} } \quad=\quad\gamma_0\quad \xy (0,-2)*{ \begin{tikzpicture}[scale=0.7] \draw[thick] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0); \draw[thick] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->-=0.15] (-.5,0) -- (-.5,-1.5); \draw[thick, ->] (1.5,1.5) -- (1.5,0); \draw[color=blue, thick, dashed] (DOT) .. controls ++(-1.4,-2) and ++(.2,-2) .. (2.25,0) to (2.25,1.5); \draw[line width=0mm] (1.5,0) .. controls (1.5,-.8) and (.5,-.8) .. (.5,0) node[pos=1](){$\bullet$}; \node at (.5,1.2) {$\mu$}; \end{tikzpicture} }; \endxy \quad+\quad \hackcenter{\begin{tikzpicture} [scale=0.7] \draw[color=blue, thick, dashed] (.75,1.6) -- (.75,3); \draw[thick, <-] (0,0) -- (0,3); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (.75,1.5) {\small$\gamma_1$}; \node at (1.3,0.5) {$\mu+2$}; \end{tikzpicture}} \end{equation} for all $\mu \geq -1$. Given any 2-morphism $\beta:\mathbbm{1}_{\mu}\mathtt{F}\rightarrow\mathbbm{1}_{\mu}\mathtt{F}\Pi\langle 2\rangle$, we can form the diagram \begin{equation*} \begin{tikzpicture} \draw[thick] (1,.7) .. controls (1,0) and (0,0) .. (0,.7) to (0,1); \draw[thick, ->-=0.1] (0,1) to (0,2) .. controls (0,2.7) and (1,2.7) ..
(1,2) [out=-90, in=90] to (1,1.3) node[pos=0, shape=coordinate](MDOTS){} node[pos=0, below, left](){$\scriptstyle m$} node[pos=.5, shape=coordinate](CAPTOP){}; \draw[color=blue, thick, double distance=1pt, dashed] (CAPTOP) -- (.5,3) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \draw[color=blue, thick, dashed] (1.3,1.3) -- (1.3,3); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1,1) {\small$\quad$\blacktriangleright$eta\quad$}; \draw[blue, thick, double distance=1pt, dashed] (MDOTS) [out=135, in=-90] -- (-.5,3) node[blue, above, pos=1](){$\scriptstyle m$}; \draw (MDOTS) -- (MDOTS) node[pos=0](){$\blacktriangleright$bullet}; \node at (2,0.5) {$\mu+2$}; $\blacktriangleleft$nd{tikzpicture} $\blacktriangleleft$nd{equation*} by closing the 2-morphism off with $m \mathfrak{g}eq 0$ dots. Applied to equation $\blacktriangleleft$qref{eqn-x-fc-right-gamma}, this yields $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-bumpy-bubbles} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[color=blue, thick, double distance=1pt, dashed] (.25,.7) .. controls (.25,1.8) and (.75,1.8) .. (.75,1.4); \draw[thick, <-] (1,0) [out=90, in=-90] to (1,1) .. controls (1,1.5) and (.5,1.5) .. (.5,1) .. controls (.5,.5) and (0,.5) .. (0,1) [out=90, in=-90] to (0,2) node[pos=0, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) [out=135, in=-90] to (.3, 3); \draw[thick] (0,2) .. controls (0,2.5) and (-.5,2.5) .. (-.5,2) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick] (-.5,2) -- (-.5,0) node[pos=.25, shape=coordinate](MDOTS){}; \draw[thick] (-.5,0) .. controls (-.5,-.75) and (1,-.75) .. 
(1,0); \draw[color=blue, thick, double distance=1pt, dashed] (MDOTS) [out=135, in=-90] to (-1,3); \draw (-1,3) -- (-1,3) node[pos=1, blue, above](){$\scriptstyle m$}; \draw[color=blue, thick, double distance=1pt, dashed] (TOPCAP) -- (-.25,3) node[pos=1, blue, above](){$\scriptstyle \mu+1$}; \node at (DOT) {$\blacktriangleright$bullet};\node at (MDOTS) {$\blacktriangleright$bullet}; \node at (.25,-0.1) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad\mathfrak{g}amma_0 \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-=0.2] (0,0) [out=90, in=-90] to (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,1) .. controls (.5,.5) and (1,.5) .. (1,1) [out=90, in=-90] to (1,2) node[pos=.1, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) [out=-135, in=90] to (.25, .5) .. controls (.25,0) and (1.25,0) .. (1.25,1) [out=80, in=-90] to (1.5,3); \draw[thick] (1,2) .. controls (1,2.75) and (-.5,2.75) .. (-.5,2) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick] (-.5,2) -- (-.5,0) node[pos=.25, shape=coordinate](MDOTS){}; \draw[thick] (-.5,0) .. controls (-.5,-.25) and (0,-.25) .. (0,0); \draw[color=blue, thick, double distance=1pt, dashed] (TOPCAP) -- (.25,3) node[pos=1, blue, above](){$\scriptstyle \mu+1$}; \draw[color=blue, thick, double distance=1pt, dashed] (MDOTS) [out=135, in=-90] to (-1,3); \draw (-1,3) -- (-1,3) node[pos=1, blue, above](){$\scriptstyle m$}; \node at (DOT) {$\blacktriangleright$bullet};\node at (MDOTS) {$\blacktriangleright$bullet}; \node at (.25,2) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}} \quad+\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) -- (0,2); \draw[thick] (0,0) .. controls (0,-.5) and (-1,-.5) .. (-1,0); \draw[thick] (-1,0) -- (-1,2) node[pos=.75, shape=coordinate](MDOTS){}; \draw[thick] (-1,2) .. controls (-1,2.5) and (0,2.5) .. 
(0,2) node[pos=.5, shape=coordinate](TOPCAP){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (.5,1) {\small$\gamma_1$}; \draw[color=blue, thick, dashed] (.5,1.3) -- (.5,3); \draw[color=blue, thick, double distance=1pt, dashed] (MDOTS) [out=135, in=-90] to (-1.5,3); \draw (-1.5,3) -- (-1.5,3) node[blue, above](){$\scriptstyle m$}; \draw[color=blue, thick, double distance=1pt, dashed] (TOPCAP) -- (-.5,3) node[blue, above](){$\scriptstyle \mu+1$}; \node at (MDOTS) {$\bullet$}; \node at (.75,-0.1) {$\mu+2$}; \end{tikzpicture}} \quad. \end{equation} On the left-hand side, resolve the $\mu+1$ crossings (accruing a factor of $(-1)^{\mu+1}$), move the caps to the left $\mu+1$ strands, and use the right-oriented adjunction to get rid of the left cap and the cup in the middle; what remains is a bubble with $m+1$ dots on the left. The first term on the right-hand side is more interesting: \begin{equation*} \hackcenter{\begin{tikzpicture} \draw[thick, <-=0.2] (0,0) [out=90, in=-90] to (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,1) .. controls (.5,.5) and (1,.5) .. (1,1) [out=90, in=-90] to (1,2) node[pos=.1, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) [out=-135, in=90] to (.25, .5) .. controls (.25,0) and (1.25,0) .. (1.25,1) [out=80, in=-90] to (1.5,3); \draw[thick] (1,2) .. controls (1,2.75) and (-.5,2.75) .. (-.5,2) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick] (-.5,2) -- (-.5,0) node[pos=.25, shape=coordinate](MDOTS){}; \draw[thick] (-.5,0) .. controls (-.5,-.25) and (0,-.25) ..
(0,0); \draw[color=blue, thick, double distance=1pt, dashed] (TOPCAP) -- (.25,3) node[pos=1, blue, above](){$\scriptstyle \mu+1$}; \draw[color=blue, thick, double distance=1pt, dashed] (MDOTS) [out=135, in=-90] to (-1,3); \draw (-1,3) -- (-1,3) node[pos=1, blue, above](){$\scriptstyle m$}; \draw (DOT) -- (DOT) node[pos=0](){$\blacktriangleright$bullet}; \node at (MDOTS) {$\blacktriangleright$bullet}; \node at (.25,2) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (1,0) .. controls (1,-.7) and (0,-.7) .. (0,0) node[pos=.8, shape=coordinate](DOT){}; \draw[thick, ->] (0,0) .. controls (0,.7) and (1,.7) .. (1,0) node[pos=.2, shape=coordinate](DOTS){} node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, double distance=1pt, dashed] (DOTS) [out=135, in=-90] to (-.5,1); \draw (-.5,1) -- (-.5,1) node[blue, above](){$\scriptstyle m$}; \draw[color=blue, double distance=1pt, dashed, thick] (TOPCAP) -- (.5,1) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \draw[color=blue, dashed, thick] (DOT) [out=-135, in=90] to (-.3,-.5); \draw[color=blue, dashed, thick] (-.3,-.5) [out=-90, in=180] to (.5,-.75); \draw[color=blue, dashed, thick] (.5,-.75) [out=0, in=-90] to (1.5, 0) [in=-90, out=90] to (1.5,1); \node at (DOT) {$\blacktriangleright$bullet};\node at (DOTS) {$\blacktriangleright$bullet}; \node at (.75,-1.1) {$\mu+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (1,0) .. controls (1,-.7) and (0,-.7) .. (0,0) node[pos=.8, shape=coordinate](DOT){}; \draw[thick, ->] (0,0) .. controls (0,.7) and (1,.7) .. 
(1,0) node[pos=.2, shape=coordinate](DOTS){} node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, double distance=1pt, dashed] (DOTS) [out=135, in=-90] to (-.5,1); \draw (-.5,1) -- (-.5,1) node[blue, above](){$\scriptstyle m$}; \draw[color=blue, double distance=1pt, dashed, thick] (TOPCAP) -- (.5,1) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \draw[color=blue, dashed, thick] (DOT) [out=-135, in=90] to (-.5,-1); \draw[color=blue, dashed, thick] (-.5,-1) .. controls (-.5,-1.25) and (0,-1.25) .. (0,-1); \draw[color=blue, dashed, thick] (0,-1) [out=90, in=-90] to (-1,0) [out=90, in=-135] to (1,1); \node at (DOT) {$\blacktriangleright$bullet};\node at (DOTS) {$\blacktriangleright$bullet}; \node at (.75,-1.1) {$\mu+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad(-1)^{m+\mu} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (1,0) .. controls (1,-.7) and (0,-.7) .. (0,0) node[pos=1, shape=coordinate](DOTS){}; \draw[thick, ->] (0,0) .. controls (0,.7) and (1,.7) .. (1,0) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, double distance=1pt, dashed] (DOTS) [out=135, in=-90] to (-.5,1); \draw (-.5,1) -- (-.5,1) node[blue, above](){$\scriptstyle m+1$}; \draw[color=blue, double distance=1pt, dashed, thick] (TOPCAP) -- (.5,1) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \node at (DOTS) {$\blacktriangleright$bullet}; \node at (.75,-1.1) {$\mu+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad. $\blacktriangleleft$nd{equation*} Plugging this back into $\blacktriangleleft$qref{eqn-bumpy-bubbles}, $\blacktriangleright$egin{equation} \lambdaanglebel{eq:bubble} -(-1)^\mu(1+\mathfrak{g}amma_0(-1)^m) \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (1,0) .. controls (1,-.7) and (0,-.7) .. (0,0) node[pos=1, shape=coordinate](DOTS){}; \draw[thick, ->] (0,0) .. controls (0,.7) and (1,.7) .. 
(1,0) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, double distance=1pt, dashed] (DOTS) [out=135, in=-90] to (-.5,1); \draw (-.5,1) -- (-.5,1) node[blue, above](){$\scriptstyle m+1$}; \draw[color=blue, double distance=1pt, dashed, thick] (TOPCAP) -- (.5,1) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \node at (DOTS) {$\bullet$}; \node at (.75,-1) {$\mu+2$}; \end{tikzpicture}} \quad=\quad \hackcenter{\begin{tikzpicture} \draw[thick] (1,0) .. controls (1,-.7) and (0,-.7) .. (0,0) node[pos=1, shape=coordinate](DOTS){}; \draw[thick, ->] (0,0) .. controls (0,.7) and (1,.7) .. (1,0) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, double distance=1pt, dashed] (DOTS) [out=135, in=-90] to (-.5,1); \draw (-.5,1) -- (-.5,1) node[blue, above](){$\scriptstyle m$}; \draw[color=blue, double distance=1pt, dashed, thick] (TOPCAP) -- (.5,1) node[blue, above, pos=1](){$\scriptstyle \mu+1$}; \node at (DOTS) {$\bullet$}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1.5,0) {\small$\gamma_1$}; \draw[color=blue, thick, dashed] (1.5,.3) -- (1.5,1); \node at (.75,-1) {$\mu+2$}; \end{tikzpicture}} \quad. \end{equation} Taking $m=\mu\geq 0$, the bubble on the right-hand side is zero. Thus $\gamma_0=(-1)^{\mu+1}$. Taking $m=\mu+1$, the bubble on the right-hand side equals $1$, so \begin{equation} \hackcenter{\begin{tikzpicture} \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1.5,0) {\small$\gamma_1$}; \draw[color=blue, thick, dashed] (1.5,.3) -- (1.5,1.5); \end{tikzpicture}} \quad=\quad2(-1)^{\mu+1} \hackcenter{\begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) ..
(0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \mu+1$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node[blue] at (-1.25,0.8){$\scriptstyle $\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\bullet$}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1]{$\bullet$}; \node at (1,-0.8) {$\mu+2$}; \end{tikzpicture}}, \end{equation} completing the proof of \eqref{eqn-x-fc-right} for $\lambda>0$. The proof for $\lambda<0$ is similar. The case when $\lambda=0$ requires more care. Inserting equation~\eqref{eqn-x-fc-right-gamma} for $\mu=-1$ into $\beta$ in the diagram \[ \begin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,1.5); \draw[thick] (-0.5,-0.5) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.5,1); \draw[thick, ->] (1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) ..
(0.5,0); \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1.5); \draw[color=blue, thick, dashed] (1.9,.5) -- (1.9,1.5); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (1.5,.5) {\small$\quad$\blacktriangleright$eta\quad$}; $\blacktriangleleft$nd{tikzpicture} \] and simplifying using zig-zag identities gives the equation: \[ \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,1.5); \draw[thick] (-0.5,-1) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.5,1); \draw[thick, ->] (1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0); \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1.5); \draw[color=blue, thick, dashed] (DOT) to [out=170, in=-90] (0 ,1.5); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=1]{$\blacktriangleright$bullet}; \node at (-1.2,-0.5) {$\mu$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \mathfrak{g}amma_0 \;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,1.5); \draw[thick] (-0.5,-1) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.5,1); \draw[thick, ->] (1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0) node[pos=.9, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1.5); \draw[color=blue, thick, dashed] (DOT) .. controls ++(-.6,.8) and ++(-1.6,0) .. (1,-1) .. controls ++(1.6,0) and ++(0,-1) .. 
(2.25,1.5); \node at (DOT) {$\bullet$}; \node at (-1.2,-0.5) {$\mu$}; \end{tikzpicture} } \;\; + \;\; \hackcenter{\begin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,1.5); \draw[thick] (-0.5,-1) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=1, shape=coordinate](DOT){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.5,1); \draw[thick, ->] (1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0); \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1.5); \node at (-1.2,-0.5) {$\mu$}; \draw[color=blue, thick, dashed] (2.5,.5) -- (2.5,1.5); \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] () at (2.5,0.5) {\small$\gamma_1$}; \end{tikzpicture}}. \] The right-curl diagram on the right is zero by Lemma~\ref{lem:E} since it is a 2-morphism of degree $-2$. Simplifying the remaining equation using the odd nilHecke dot slide equation and removing terms containing a dot composed with a degree $-2$ right twist curl we find that $\gamma_0=-1$. Substituting this into \eqref{eq:bubble} with $m=\mu+1=0$ shows that $\gamma_1$ is the zero 2-morphism. \end{proof} \subsubsection{Half cyclicity relations} \label{subsubsec-half_cyclic} It is often more convenient to use Lemma~\ref{lem-x-fc} in the following form. \begin{cor}\label{cor-dot-cyclicity} The following local relations hold in any strong supercategorical action. \begin{equation}\label{eqn-dot-cyclicity-left-cap} \hackcenter{\begin{tikzpicture} [scale=0.9] \draw[thick, ->] (1,0) .. controls (1,.8) and (0,.8) ..
(0,0) node[pos=.8, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) [out=-30, in=90] to (.5,0); \node at (DOT){$\blacktriangleright$bullet}; \node at (1.2,0.8) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (1,0) .. controls (1,.8) and (0,.8) .. (0,0) node[pos=.2, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) [out=--140, in=90] to (.5,0); \node at (DOT){$\blacktriangleright$bullet}; \node at (1.2,0.8) {$\lambda-1$}; $\blacktriangleleft$nd{tikzpicture}} \qquad \qquad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (1,0) .. controls (1,-.8) and (0,-.8) .. (0,0) node[pos=.8, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(-.6,.5) and ++(0,.7) ..(-.25,-1); \node at (DOT){$\blacktriangleright$bullet}; \node at (1.2,-0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (1,0) .. controls (1,-.8) and (0,-.8) .. (0,0) node[pos=.2, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) to[out=-30, in=90] (1.25,-1); \node at (DOT){$\blacktriangleright$bullet}; \node at (-.3,-0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-dot-cyclicity-right-cap} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0,0) .. controls (0,.8) and (1,.8) .. 
(1,0) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.15, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (.5,1.5) node[pos=0.8,blue, right](){$\scriptstyle \lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (-.25,1.5); \node at (DOT){$\blacktriangleright$bullet}; \node at (1.4,0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \lambdaeft\{ $\blacktriangleright$egin{array}{ll} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0,0) .. controls (0,.8) and (1,.8) .. (1,0) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (.5,1.5) node[pos=0.8,blue, left](){$\scriptstyle \lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(.6,-.5) and ++(0,-1) .. (1.25,1.5); \draw (DOT) -- (DOT) node(){$\blacktriangleright$bullet}; \node at (-.7,0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad+\quad 2\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-2,-.5) .. controls ++(0,.7) and ++(0,.7) .. (-1,-.5) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (-1.5,1) node[pos=0.8,blue, left](){$\scriptstyle \lambdaanglembda$}; \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle \lambda$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (1,-.5) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} & \lambda>0 \\ & \\ \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0,0) .. controls (0,.8) and (1,.8) .. (1,0) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (.5,1.5) node[pos=0.8,blue, left](){$\scriptstyle \lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(.6,-.5) and ++(0,-1) .. (1.25,1.5); \draw (DOT) -- (DOT) node(){$\blacktriangleright$bullet}; \node at (-.7,0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad+\quad 2\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-1.4,-0.5) .. controls ++(-.1,1.5) and ++(.1,1.5) .. (1.4,-0.5) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (0,1.25) node[pos=0.8,blue, left](){$\scriptstyle \lambdaanglembda$}; \draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.4) and ++(.1,-1) .. 
(1,1) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (2,.3) {$\lambdaanglembda+1$}; $\blacktriangleleft$nd{tikzpicture} } & \lambda<0 \\ & \\-\;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0,0) .. controls (0,.8) and (1,.8) .. (1,0) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (.5,1.5) node[pos=0.8,blue, left](){$\scriptstyle \lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(.6,-.5) and ++(0,-1) .. (1.25,1.5); \draw (DOT) -- (DOT) node(){$\blacktriangleright$bullet}; \node at (-.7,0.8) {$\lambda+1$}; $\blacktriangleleft$nd{tikzpicture}}& \lambda=0 $\blacktriangleleft$nd{array} \right. $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-dot-cyclicity-right-cup} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick] (0,1) .. controls (0,.3) and (1,.3) .. (1,1) node[pos=.5, shape=coordinate](BOTTOMCUP){} node[pos=.85, shape=coordinate](DOT){}; \draw[thick, ->] (1,1) -- (1,1.5); \draw[thick] (0,1) -- (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (.75,1.5); \draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) -- (.5,1.5) node[pos=.8, left, blue](){$\scriptstyle \lambdaanglembda$}; \node at (DOT){$\blacktriangleright$bullet};; \node at (1.5,.3) {$\lambdaanglembda-1$}; $\blacktriangleleft$nd{tikzpicture}} \quad=\quad \lambdaeft\{ $\blacktriangleright$egin{array}{ll} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick] (0,1) .. controls (0,.3) and (1,.3) .. 
(1,1) node[pos=.5, shape=coordinate](BOTTOMCUP){} node[pos=.15, shape=coordinate](DOT){}; \draw[thick, ->] (1,1) -- (1,1.5); \draw[thick] (0,1) -- (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=45, in=-90] to (.25,1.5); \draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) -- (.5,1.5) node[pos=.8, right, blue](){$\scriptstyle \lambdaanglembda$}; \node at (DOT){$\blacktriangleright$bullet}; \node at (1.5,.3) {$\lambdaanglembda-1$}; $\blacktriangleleft$nd{tikzpicture}} \quad + \quad 2 \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick] (-1.6,0.5) -- (-1.6,1); \draw[thick, ->] (1.6,0.5) -- (1.6,1); \draw[thick] (-1.6,0.5) .. controls ++(-.1,-2) and ++(.1,-2) .. (1.6,0.5) node[pos=.5, shape=coordinate](TOPCAP){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) .. controls ++(0,.5) and ++(0,-1.5) .. (1,1) node[pos=0.8,blue, right](){$\scriptstyle \lambdaanglembda$}; \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (2,-.6) {$\lambdaanglembda-1$}; $\blacktriangleleft$nd{tikzpicture} }& \lambda>0 \\ & \\ \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick] (0,1) .. controls (0,.3) and (1,.3) .. 
(1,1) node[pos=.5, shape=coordinate](BOTTOMCUP){} node[pos=.15, shape=coordinate](DOT){}; \draw[thick, ->] (1,1) -- (1,1.5); \draw[thick] (0,1) -- (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=45, in=-90] to (.25,1.5); \draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) -- (.5,1.5) node[pos=.8, right, blue](){$\scriptstyle \lambda$}; \node at (DOT){$\bullet$}; \node at (1.5,.3) {$\lambda-1$}; \end{tikzpicture}} \quad + \quad 2\;\; \hackcenter{\begin{tikzpicture} \draw[thick, ->] (.5,1) .. controls ++(0,-.8) and ++(0,-.8) .. (2,1) node[pos=.5, shape=coordinate](BOTTOMCUP){}; \draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) to[out=90, in=-90] (1.5,1); \node[ blue] at (1.65,.8){$\scriptstyle \lambda$}; \draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.4) and ++(.1,-1) .. (1,1) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \node at (1.8,-.3) {$\lambda-1$}; \end{tikzpicture}} & \lambda<0 \\ & \\ - \;\; \hackcenter{\begin{tikzpicture}[scale=0.9] \draw[thick] (0,1) .. controls (0,.3) and (1,.3) ..
(1,1) node[pos=.5, shape=coordinate](BOTTOMCUP){} node[pos=.15, shape=coordinate](DOT){}; \draw[thick, ->] (1,1) -- (1,1.5); \draw[thick] (0,1) -- (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=45, in=-90] to (.25,1.5); \draw[thick, color=blue, double distance=1pt, dashed] (BOTTOMCUP) -- (.5,1.5) node[pos=.8, right, blue](){$\scriptstyle \lambda$}; \node at (DOT){$\bullet$}; \node at (1.5,.3) {$\lambda-1$}; \end{tikzpicture}}& \lambda=0 \end{array} \right. \end{equation} \end{cor} \begin{proof} Applying a right cap to equation \eqref{eqn-x-fc-right} gives \begin{equation} (-1)^{\lambda}\;\; \hackcenter{\begin{tikzpicture} \draw[color=blue, thick, double distance=1pt, dashed] (.25,.6) .. controls (.25,1.8) and (.75,1.8) .. (.75,1.4); \draw[thick, <-] (1,0) [out=90, in=-90] to (1,1) .. controls (1,1.5) and (.5,1.5) .. (.5,1) .. controls (.5,.5) and (0,.5) .. (0,1) [out=90, in=-90] to (0,2) node[pos=0, shape=coordinate](DOT){}; \draw[color=blue, thick, dashed] (DOT) [out=135, in=-90] to (.2, 2.5); \draw[thick] (0,2) .. controls (0,2.35) and (-.5,2.35) .. (-.5,2) node[pos=.5, shape=coordinate](TOPCAP2){}; \draw[thick] (-.5,0) -- (-.5,2); \draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP2) -- (-.25,2.5) node[pos=0.9, left, blue](){$\scriptstyle \lambda$}; \draw (DOT) -- (DOT) node[pos=0](){$\bullet$}; \node at (1.5,2) {$\lambda+1$}; \end{tikzpicture}} \quad=\quad \hackcenter{\begin{tikzpicture} \draw[thick, <-] (0,0) [out=90, in=-90] to (0,1) .. controls (0,1.5) and (.5,1.5) .. (.5,1) .. controls (.5,.5) and (1,.5) .. (1,1) [out=90, in=-90] to (1,1.25) node[pos=.1, shape=coordinate](DOT){}; \draw[thick] (-.5,1.25) .. controls (-.5,2.3) and (1,2.3) ..
(1,1.25) node[pos=.5, shape=coordinate](TOPCAP2){}; \draw[thick] (-.5,0) -- (-.5,1.25); \draw[thick, color=blue, double distance=1pt, dashed] (TOPCAP2) -- (.25,2.5) node[pos=0.9,left, blue](){$\scriptstyle \lambda$}; \draw[color=blue, thick, dashed] (DOT) [out=-135, in=90] to (.25, .5) .. controls (.25,0) and (1.25,0) .. (1.25,1) [out=80, in=-90] to (1.5,2.5); \draw (DOT) -- (DOT) node[pos=0](){$\bullet$}; \node at (1.8,0.2) {$\lambda+1$}; \end{tikzpicture}} \quad+\quad 2 \;\; \hackcenter{\begin{tikzpicture} \draw[thick, ->] (-2,-.5) .. controls ++(0,.7) and ++(0,.7) .. (-1,-.5) node[pos=.5, shape=coordinate](TOPCAP){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed, double distance=1pt] (TOPCAP) -- (-1.5,1) node[pos=0.8,blue, left](){$\scriptstyle \lambda$}; \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\bullet$}; \node at (Z) {$\bullet$}; \node at (1,-.5) {$\lambda+1$}; \end{tikzpicture}} \end{equation} for $\lambda>0$. Using isotopy and resolving $\lambda$ dashed-dashed crossings on the left-hand side, this becomes exactly \eqref{eqn-dot-cyclicity-right-cap}. Equation~\eqref{eqn-dot-cyclicity-right-cap} for other values of $\lambda$ is proven similarly.
The left cap equation \eqref{eqn-x-fc-right} can be proven in a similar manner using \eqref{eqn-x-fc-right} and the right cap equation proven above. The cup equations are proven similarly. \end{proof} \subsection{Odd cyclicity for crossings} \subsubsection{Defining downward oriented crossings} In any strong supercategorical action we can define a downward oriented crossing using the easy adjunctions as follows. \begin{equation} \hackcenter{\begin{tikzpicture} \begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5); \draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){}; \draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5); \node at (.8,.75) {$\lambda$}; \end{scope} \end{tikzpicture} } \quad := \quad \hackcenter{ \begin{tikzpicture}[scale=0.6] \draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0); \draw[thick] (-0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (2.5,0); \draw[thick, ->-=0.5] (2.5,2) -- (2.5,0); \draw[thick, ->-=0.5] (1.5,2) -- (1.5,0); \draw[thick] (-0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,1); \draw[thick] (0.5,1) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,1); \draw[thick, ->-=0.5] (-2.5,1) -- (-2.5,-1.5); \draw[thick, ->-=0.5] (-1.5,1) -- (-1.5,-1.5); \draw[color=blue, thick, dashed] (X) .. controls ++(-1,.4) and ++(0,1)..
(-.75,-1.5); \node at (4,-1) {$\lambda+4$}; \end{tikzpicture}} \end{equation} It follows from Corollary~\ref{cor-dot-cyclicity} and the definition of the downward oriented dot from \eqref{eqn-x-fc-prime} that with this definition of the downward oriented crossing we have the downward oriented nilHecke axioms. In particular, the equations \[ \hackcenter{\begin{tikzpicture}\begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-1,1.5); \draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5); \node() at (DOT) {$\bullet$};\end{scope} \end{tikzpicture}} \quad-\quad \hackcenter{\begin{tikzpicture}\begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5); \draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-1,1.5); \draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-.5,1.5); \node() at (DOT) {$\bullet$}; \end{scope} \end{tikzpicture}} \quad = \quad \hackcenter{\begin{tikzpicture} \begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5); \draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) ..
(0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-.5,1.5); \draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-1,1.5); \node() at (DOT) {$\bullet$}; \end{scope} \end{tikzpicture}} \quad-\quad \hackcenter{\begin{tikzpicture} \begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) .. controls (0,.75) and (.5,.75) .. (.5,1.5); \draw[thick, ->] (.5,0) .. controls (.5,.75) and (0,.75) .. (0,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) [out=180, in=-90] to (-.5,1.5); \draw[thick, color=blue, dashed] (CROSSING) [out=180, in=-90] to (-1,1.5); \node() at (DOT) {$\bullet$}; \end{scope} \end{tikzpicture}} \quad=\quad \hackcenter{\begin{tikzpicture} \begin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (0,0) -- (0,1.5); \draw[thick, ->] (.5,0) -- (.5,1.5); \draw[thick, color=blue, dashed] (-.75,1.5) .. controls (-.75,1.15) and (-.25,1.15) .. (-.25,1.5); \end{scope} \end{tikzpicture}} \] hold in any strong supercategorical action. \subsubsection{Half cyclicity for crossings} \begin{prop} The following relations hold in any strong supercategorical action. \begin{alignat}{2}\label{eq-half-crossing-cycl} \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0); \draw[thick] (-0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) ..
(2.5,0); \draw[thick, ->-=0.5] (2.5,1) -- (2.5,0); \draw[thick, ->-=0.5] (1.5,1) -- (1.5,0); \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1.5,1); \node at (1.5,-1.5) {$\lambda$}; \end{tikzpicture}} \;\; &= \hackcenter{ \begin{tikzpicture} [scale=0.7] \draw[thick,->] (-0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,0); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0); \draw[thick] (0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (-2.5,0); \draw[thick, ->-=0.5] (-2.5,0) -- (-2.5,1); \draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,1); \draw[color=blue, thick, dashed] (X) .. controls ++(1.5,0) and ++(1,0) .. (0,-1.5) .. controls ++(-3.5,-.5) and ++(0,-1) .. (-3,1); \node at (1.5,-1) {$\lambda$}; \end{tikzpicture}} &\qquad \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick] (-0.5,1) .. controls ++(-0,-0.5) and ++(0,0.5) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,1) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,0); \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (-0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (2.5,0) node[pos=0.44, shape=coordinate](L){} node[pos=0.51, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (2.5,0) -- (2.5,1); \draw[thick, ->-=0.5] (1.5,0) -- (1.5,1); \draw[color=blue, thick, dashed] (X) .. controls ++(.4,-.2) and ++(0,-.5) .. (1,1); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,.7) .. (.5,-0.5) .. controls ++(0,-0.3) and ++(.1,.6) .. (outCAP); \node at (2,-1.5) {$\lambda$}; \end{tikzpicture}} \;\; &= \;\; (-1)^{\lambda+1} \;\; \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick, ->] (0.5,0) ..
controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (0.5,0) .. controls ++(0,-1.5) and ++(0,-1.5) .. (-2.5,0) node[pos=0.44, shape=coordinate](L){} node[pos=0.51, shape=coordinate](R){} node[pos=0.55, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (-2.5,1) -- (-2.5,0); \draw[thick, ->-=0.5] (-1.5,1) -- (-1.5,0); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,.7) .. (-1.5,-0.5) .. controls ++(0,-0.3) and ++(0,.5) .. (outCAP); \draw[color=blue, thick, dashed] (X) to [out=180, in=-90](-1,1); \node at (0,-1.5) {$\lambda$}; \end{tikzpicture}} \notag \\ \hackcenter{ \begin{tikzpicture} [scale=0.7] \draw[thick, ->] (-0.5,0) .. controls ++(-0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1); \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0); \draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (2.5,0); \draw[thick, ->-=0.5] (2.5,-1) -- (2.5,0); \draw[thick, ->-=0.5] (1.5,-1) -- (1.5,0); \draw[color=blue, thick, dashed] (X) to [out=0, in=90](1,-1); \node at (2.7,1) {$\lambda$}; \end{tikzpicture}} \;\; &= \;\; \;\; \hackcenter{ \begin{tikzpicture} [scale=0.7] \draw[thick,->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0); \draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) ..
(-2.5,0); \draw[thick, ->-=0.5] (-2.5,0) -- (-2.5,-1); \draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-.4,0.2) and ++(0,0.5) .. (-1,-1); \node at (1,1) {$\lambda$}; \end{tikzpicture} } & \qquad \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,->] (0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0); \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (2.5,0) -- (2.5,-1); \draw[thick, ->-=0.5] (1.5,0) -- (1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.5,0) and ++(0,-1) .. (-0.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (0,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \node at (2.3,1.5) {$\lambda$}; \end{tikzpicture}} \;\; &= \;\; (-1)^{\lambda-1} \;\; \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,->] (-0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (-2.5,-1) -- (-2.5,0); \draw[thick, ->-=0.5] (-1.5,-1) -- (-1.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(1.5,-.2) and ++(0,-1) ..
(.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (-2,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \node at (-.25,1.5) {$\lambda$}; \end{tikzpicture}} \end{alignat} \end{prop} \begin{proof} Using Lemma~\ref{lem:EE} together with the biadjoint structure, it follows that each of the equations above must hold up to a scalar multiple since each of the hom-spaces is non-zero and each map involved is adjoint to a crossing which is assumed to be non-zero. For example, \begin{align*} &{\rm Hom}(\mathbbm{1}_{\lambda+4}, \Pi \mathtt{E}^2 \mathtt{F}^2 \mathbbm{1}_{\lambda+4} \langle -2(\lambda +3)\rangle) \\ &= {\rm Hom}(\mathbbm{1}_{\lambda+4}(\mathtt{F}\mathbbm{1}_{\lambda+4})^R, \Pi \mathtt{E}^2 \mathbbm{1}_{\lambda} \langle -2(\lambda +2)\rangle) = {\rm Hom}(\mathtt{E}^2\mathbbm{1}_{\lambda}, \Pi \mathtt{E}^2 \mathbbm{1}_{\lambda} \langle -2\rangle) \cong \Bbbk. \end{align*} To solve for the exact scalar we (pre)compose with a dot, or a dot and a crossing, and simplify using the odd nilHecke relations together with the dot cyclicity relations from Corollary~\ref{cor-dot-cyclicity}. For example, to prove the last equality in \eqref{eq-half-crossing-cycl} assume that \begin{equation} \label{eq-c2} \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,->] (0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0); \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) ..
(2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (2.5,0) -- (2.5,-1); \draw[thick, ->-=0.5] (1.5,0) -- (1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.5,0) and ++(0,-1) .. (-0.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (0,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \node at (2.3,1.5) {$\lambda$}; \end{tikzpicture}} \quad = \quad \kappa \;\; \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,->] (-0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (-2.5,-1) -- (-2.5,0); \draw[thick, ->-=0.5] (-1.5,-1) -- (-1.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(1.5,-.2) and ++(0,-1) .. (.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (-2,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \node at (-2.5,1.5) {$\lambda$}; \end{tikzpicture}} \end{equation} for some non-zero scalar $\kappa$. This implies \[ \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,<-] (2.5,-2) -- (2.5,-1); \draw[thick, <-] (1.5,-2) -- (1.5,-1); \draw[thick] (0.5,-2) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,-1) node[pos=1, shape=coordinate](DOT){} node[pos=0.5, shape=coordinate](Y){}; \draw[thick, ->] (-0.5,-2) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,-1); \draw[thick,->] (0.5,-1) ..
controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,-1) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,0); \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (-0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick] (2.5,0) -- (2.5,-1); \draw[thick] (1.5,0) -- (1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.5,0) and ++(0,-1) .. (-0.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (0,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \draw[thick, color=blue, dashed] (DOT) .. controls ++(-1.2,.5) and ++(0,-.5) .. (-1,1.75); \node at (2.3,1.5) {$\lambda$}; \node at (DOT){$\bullet$}; \end{tikzpicture}} \quad = \quad \kappa \;\; \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick,<-] (0.5,-2) -- (0.5,-1); \draw[thick, <-] (-0.5,-2) -- (-0.5,-1); \draw[thick] (-1.5,-2) .. controls ++(0,0.5) and ++(0,-0.5) .. (-2.5,-1) node[pos=1, shape=coordinate](DOT){} node[pos=0.5, shape=coordinate](Y){}; \draw[thick] (-2.5,-2) .. controls ++(0,0.5) and ++(0,-0.5) .. (-1.5,-1); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (-0.5,-1); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0) node[pos=0.5, shape=coordinate](inCAP){}; \draw[thick] (0.5,0) .. controls ++(0,1.5) and ++(0,1.5) .. (-2.5,0) node[pos=0.51, shape=coordinate](L){} node[pos=0.56, shape=coordinate](R){} node[pos=0.5, shape=coordinate](outCAP){}; \draw[thick, ->-=0.5] (-2.5,-1) -- (-2.5,0); \draw[thick, ->-=0.5] (-1.5,-1) -- (-1.5,0); \draw[color=blue, thick, dashed] (X) ..
controls ++(1.5,-.2) and ++(0,-1) .. (.5,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (inCAP) .. controls++(0,.7) and ++(0,-.7) .. (-2,1) .. controls ++(0,.5) and ++(0,.5) .. (outCAP); \draw[thick, color=blue, dashed] (DOT) .. controls ++(-1,.3) and ++(0,-.5) .. (-3.25,1.75); \node at (-2.5,1.5) {$\lambda$}; \node at (DOT){$\bullet$}; \end{tikzpicture}} \] which after simplification implies that $\kappa = (-1)^{\lambda-1}$. \end{proof} \subsubsection{Cyclicity for sideways crossings} \label{subsec-half-sideways} We now fix the free scalar multiple in the definition of sideways crossings from \eqref{eq:sideways}. Define sideways crossings using the adjoint structure \begin{equation} \hackcenter{\begin{tikzpicture}[scale=0.7] \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.5); \draw[thick, ->] (.5,1.5) .. controls ++(0,-.75) and ++(0,.75)..(-.5,0) node[pos=.5, shape=coordinate](CROSSING){}; \draw[thick, color=blue, dashed] (CROSSING) to (0,-0); \node at (1,.75) {$\lambda$}; \end{tikzpicture} } \quad := \quad \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0); \draw[thick] (.5,2) -- (.5,1); \draw[thick, ->-=0.5] (1.5,2) -- (1.5,0); \draw[thick] (-0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,1); \draw[thick] (-.5,0) -- (-.5,-1); \draw[thick, ->-=0.5] (-1.5,1) -- (-1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-1,.4) and ++(0,1)..
(-1,-1); \node at (1.8,-0.8) {$\lambda$}; \end{tikzpicture}} \qquad \qquad \hackcenter{\begin{tikzpicture}[scale=0.7] \draw[thick, ->] (-.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (.5,1.5); \draw[thick, ->] (-.5,1.5) .. controls ++(0,-.75) and ++(0,.75)..(.5,0) node[pos=.5, shape=coordinate](CROSSING){}; \draw[thick, color=blue, dashed] (CROSSING) to (0,1.5); \node at (1,.75) {$\lambda$}; \end{tikzpicture} } \quad := \quad \hackcenter{ \begin{tikzpicture}[scale=0.7] \draw[thick, ->] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=.48, shape=coordinate](RD){} node[pos=.48, shape=coordinate](LD){} node[pos=.5, shape=coordinate](bCROSS){}; \draw[thick] (-.5,2) -- (-.5,1); \draw[thick, ->-=0.5] (-1.5,2) -- (-1.5,0); \draw[thick] (0.5,1) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,1) node[pos=.5, shape=coordinate](tCROSS){}; \draw[thick] (.5,0) -- (.5,-1); \draw[thick, ->-=0.5] (1.5,1) -- (1.5,-1); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.3,0) and ++(0,-1).. (-1,2); \draw[color=blue, thick, double distance=1pt, dashed] (bCROSS) .. controls ++(-.1,3) and ++(0.1,.75) ..(tCROSS); \node at (2.1,.5) {$\lambda$}; \node[blue] at (1.5,1.8) {$\scriptstyle \lambda-1$}; \end{tikzpicture}} \end{equation} for all weights $\lambda$. It follows immediately from these definitions that the following relations hold. \begin{align} \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0).. controls ++(-0,-0.5) and ++(0,0.5) ..(0.5,-1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (0.5,0).. controls ++(0,-0.5) and ++(0,0.5) ..(-0.5,-1); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) ..
(-1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (-1.5,-1) -- (-1.5,0); \draw[thick, ->] (.5,0) -- (.5,0.75); \draw[color=blue, thick, dashed] (X) -- (0,.75); \draw[color=blue, thick, double distance=1pt, dashed] (CUP) -- (-1,.75); \node at (1,-.5) {$\lambda$}; \end{tikzpicture} } \quad &= \quad \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->](-0.5,-1) .. controls ++(-0,0.5) and ++(0,-0.5) ..(0.5,-.25) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,-.25) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1); \draw[thick] (0.5,-.25) .. controls ++(0,0.3) and ++(0,0.3) .. (1.5,-.25) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (1.5,-.25) -- (1.5,-1); \draw[thick, ->] (-.5,-.25) to[out=90, in=-90] (0.5,0.75); \draw[color=blue, thick, dashed] (X) .. controls ++(-.5,0) and ++(0,-.5) .. (-.75,0) .. controls ++(0,.3) and ++(0,-.4) .. (0,.75); \draw[color=blue, thick, double distance=1pt, dashed] (CUP) .. controls ++(0,.3) and ++(0,-.6).. (-.75,.75); \node at (1.3,.5) {$\lambda$}; \end{tikzpicture} } \quad & \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->](-0.5,1.25) .. controls ++(-0,-0.5) and ++(0,0.5) ..(0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1.25); \draw[thick] (0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (1.5,0 ) -- (1.5,1.25); \draw[thick] (-.5,0) -- (-.5,-0.5); \draw[color=blue, thick, double distance=1pt, dashed] (CUP) -- (1,1.25); \draw[color=blue, thick, dashed] (X) to (0,1.25); \node at (0,-.25) {$\lambda$}; \end{tikzpicture} } \quad &= \quad \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1.25) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) ..
controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1.25); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (-1.5,1.25) -- (-1.5,0); \draw[thick] (.5,0) -- (.5,-0.5); \draw[color=blue, thick, double distance=1pt, dashed] (CUP) .. controls ++(0,1.25) and ++(0,-.5) .. (0,1.25); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.3,0) and ++(0,-.3).. (-1,1.25); \node at (1,.5) {$\lambda$}; \end{tikzpicture} } \\ \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,0).. controls ++(-0,-0.5) and ++(0,0.5) ..(-0.5,-1.25) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,0.5) .. (0.5,-1.25); \draw[thick] (0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (1.5,-1.25) -- (1.5,0); \draw[thick, ->] (-.5,0) -- (-.5,0.5); \draw[color=blue, thick, dashed] (X) -- (0,-1.25); \node at (-1,-.5) {$\lambda+2$}; \end{tikzpicture} } \quad &= \quad \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,-1.25) .. controls ++(-0,0.5) and ++(0,-0.5) ..(-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,-1.25).. controls ++(0,0.5) and ++(0,-0.5) ..(0.5,0); \draw[thick] (-0.5,0) .. controls ++(0,0.5) and ++(0,0.5) .. (-1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->] (-1.5,0) -- (-1.5,-1.25); \draw[thick, ->] (.5,0) -- (.5,0.5); \draw[color=blue, thick, dashed] (X) .. controls ++(-.3,.1) and ++(0,.5) .. (-1,-1.25); \node at (1,-.5) {$\lambda$}; \end{tikzpicture} } \quad & \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, <-](-0.5,1.25) .. controls ++(-0,-0.5) and ++(0,0.5) ..(0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1.25); \draw[thick] (0.5,0) ..
controls ++(0,-0.5) and ++(0,-0.5) .. (1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (1.5,1.25) -- (1.5, 0); \draw[thick] (-.5,0) -- (-.5,-0.5); \draw[color=blue, thick, dashed] (X) .. controls ++(-.5,.3) and ++(0,.5)..(-1,-.5); \node at (1,.5) {$\lambda$}; \end{tikzpicture} } \quad &= \quad \hackcenter{ \begin{tikzpicture}[scale=0.8] \draw[thick, ->] (0.5,1.25) .. controls ++(-0,-0.5) and ++(0,0.5) ..(-0.5,0) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0).. controls ++(0,0.5) and ++(0,-0.5) ..(-0.5,1.25); \draw[thick] (-0.5,0) .. controls ++(0,-0.5) and ++(0,-0.5) .. (-1.5,0) node[pos=.5, shape=coordinate](CUP){}; \draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,1.25); \draw[thick] (.5,0) -- (.5,-0.5); \draw[color=blue, thick, dashed] (X) -- (0,-.5); \node at (1,.5) {$\lambda$}; \end{tikzpicture} } \end{align} Similar equations hold for a downward oriented line in the middle of a cap and cup. It is straightforward to derive directly from the definitions that the following equalities hold. \begin{equation}\label{eq:leftdotslide} \hackcenter{\begin{tikzpicture} \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. (-.5,0) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.25); \draw[thick, color=blue, dashed] (DOT) to[out=-20, in=90] (-.2,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (.2,0); \node() at (DOT) {$\bullet$}; \end{tikzpicture}} \quad-\quad \hackcenter{\begin{tikzpicture} \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. (-.5,0) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) ..
(-.5,1.25); \draw[thick, color=blue, dashed] (DOT) .. controls ++(.3,-.3) and ++(0,.4) .. (-.2,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (.2,0); \node() at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. (-.5,0); \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.25) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.2, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(-.1,.2) and ++(0,.4) .. (0,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (-.25,0); \node() at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad- \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. (-.5,0); \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.25) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, color=blue, dashed] (DOT) .. controls ++(-1.3,.5) and ++(0,.4) .. (.2,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (-.25,0); \node() at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (.5,1.25).. controls ++(0,-.6) and ++(0,-.6) .. (-.5,1.25); \draw[thick, ->] (.5,0) .. controls ++(0,.6) and ++(0,.6) .. (-.5,0); \draw[thick, color=blue, dashed] (.2,0) .. controls ++(0,.3) and ++(0,.3) .. (-.2,0); $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation}\lambdaanglebel{eq:rightdotslide} \hackcenter{$\blacktriangleright$egin{tikzpicture} $\blacktriangleright$egin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. 
(-.5,0) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.75, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.25); \draw[thick, color=blue, dashed] (DOT) to[out=-20, in=90] (-.2,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (.2,0); \node() at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{scope} $\blacktriangleleft$nd{tikzpicture}} \quad-\quad \hackcenter{$\blacktriangleright$egin{tikzpicture} $\blacktriangleright$egin{scope}[shift={(0,0)},rotate=180] \draw[thick, ->] (.5,1.25).. controls ++(0,-.75) and ++(0,.75) .. (-.5,0) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, shape=coordinate](DOT){}; \draw[thick, ->] (.5,0) .. controls ++(0,.75) and ++(0,-.75) .. (-.5,1.25); \draw[thick, color=blue, dashed] (DOT) to[out=-20, in=90] (-.2,0); \draw[thick, color=blue, dashed] (CROSSING) to[out=-90, in=90] (.2,0); \node() at (DOT) {$\blacktriangleright$bullet};$\blacktriangleleft$nd{scope} $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-.5,1.5).. controls ++(0,-.8) and ++(0,-.8) .. (.5,1.5) node[pos=.5, shape=coordinate](tCUP){}; \draw[thick, ->] (-.5,0) .. controls ++(0,.4) and ++(0,.4) .. (.5,0) node[pos=.5, shape=coordinate](bCAP){}; \draw[color=blue, thick, double distance=1pt, dashed] (tCUP) .. controls ++(0,.4) and ++(0,.4)..(.5,.7) .. controls ++(0,-.2) and ++(0,.3) .. (bCAP) ; \draw[thick, color=blue, dashed] (.2,1.5) .. controls ++(0,-.25) and ++(0,-.25) .. (-.2,1.5); $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} Note that the other version of $\blacktriangleleft$qref{eq:rightdotslide} is more complicated and the formula for sliding a dot on the downward oriented strand has additional terms. 
\section{Proof of the extended ${\mathfrak{sl}}_2$ relations}\label{sec:proofsl2}

In this section we show that the formal inverses of the $\mathfrak{sl}_2$ isomorphisms defined in Corollary~\ref{cor:1} agree with the version of the inverses given by the 2-category $U_qc$.

\subsection{Fake bubbles} \label{subsec:fake-bubbles}

We introduce a shorthand notation for representing certain 2-morphisms in $U_qc$. These 2-morphisms are defined inductively by an equation analogous to the equation
\[
\sum_{r=0}^m (-1)^r e_r h_{m-r} = \delta_{m,0}
\]
relating elementary and complete symmetric functions.

\subsubsection{The case of $\lambda>0$}

For all $\lambda>0$ the equation
\begin{equation} \label{eq:fake-bubble-p}
\sum_{j=0}^{m} (-1)^j
\xy (0,0)*{
\begin{tikzpicture}[scale=0.9]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.5,0) {$B_j$};
\draw[color=blue, thick, double distance=1pt, dashed] (Fj) to (1.5,1.25);
\node[blue] at (1.8,0.8){$\scriptstyle j$\;};
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.4,1.2){$\scriptstyle m-j$\;};
\node at (Y) {$\bullet$}; \node at (Z) {$\bullet$};
\node at (2.3,0) {$\lambda$};
\end{tikzpicture}
}; \endxy
\;\; = \;\; (-1)^m \delta_{m,0}\,\mathrm{id}_{\mathbf{1}_\lambda}
\end{equation}
inductively defines 2-morphisms $B_j \colon \mathbf{1}_\lambda \rightarrow \Pi^j \mathbf{1}_\lambda \langle 2j\rangle$ for $0 \leq j \leq m$. It is clear that this definition is independent of the weight $\lambda$ for all $\lambda>0$.

\begin{example}
Several examples are given below.
\begin{align*}
\hackcenter{
\begin{tikzpicture}[scale=0.8]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$B_0$};
\node at (.7,0) {$\lambda$};
\end{tikzpicture} }
& \;\; =\;\; \mathrm{id}_{\mathbf{1}_\lambda} \\
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$B_1$};
\draw[color=blue, thick, dashed] (Fj) to (-0,1.25);
\node[blue] at (.3,0.8){};
\node at (.7,0) {$\lambda$};
\end{tikzpicture} }
& \;\; =\;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) ..
(Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (.8,-.25) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \\ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$B_2$}; \draw[color=blue, thick, dashed, double distance=1pt,] (Fj) to (-0,1.25); \node[blue] at (.3,0.8){$\scriptstyle 2$\;}; \node at (.7,0) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } & \;\; =\;\; \; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (.8,-.25) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \;\; - \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.4,0) .. controls ++(-0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt,dashed] (Z) to[bend left] (-1,1) ; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (.8,-.25) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{align*} $\blacktriangleleft$nd{example} \subsubsection{The case of $\lambdaanglembda <0$} For $\lambda<0$ inductively define 2-morphisms $\overline{B_j} {\rm co}lon 1bl \rightarrow {\mathbbm P}i^j 1bl\lambdaangle 2j\rangle$ by the equation $\blacktriangleright$egin{equation} \lambdaanglebel{eq:fake-bubble-n} \sum_{j=0}^{m} (-1)^{j}\;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (2,0) {$\overline{B_j}$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to (2,1.25); \node[blue] at (2.3,0.8){$\scriptstyle j$\;}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. 
(Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ;
\node[blue] at (.4,0.9){$\scriptstyle m-j$\;};
\node at (Y) {$\bullet$}; \node at (X) {$\bullet$};
\node at (-1,-.3) {$\lambda$};
\end{tikzpicture} }
\;\; = \;\; \delta_{m,0}\,\mathrm{id}_{\mathbf{1}_\lambda}.
\end{equation}
Again it is clear that these 2-morphisms are independent of the weight $\lambda$ for all $\lambda<0$, except for the case when $\lambda=-1$. In this case, the definition of the 2-morphisms $\overline{B_j}$ depends on the free parameter $c_{-1}$ from \eqref{eq_defcmone} corresponding to the degree zero bubble.

\begin{example}
Several examples for $\lambda<-1$ are given below.
\begin{align*}
\hackcenter{
\begin{tikzpicture}[scale=0.8]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$\overline{B_0}$};
\node at (.7,0) {$\lambda$};
\end{tikzpicture} }
& \;\; =\;\; \mathrm{id}_{\mathbf{1}_\lambda} \\
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$\overline{B_1}$};
\draw[color=blue, thick, dashed] (Fj) to (-0,1.25);
\node[blue] at (.3,0.8){};
\node at (.7,0) {$\lambda$};
\end{tikzpicture} }
& \;\; =\;\;\;\;
\hackcenter{\begin{tikzpicture}
\draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){};
\draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, dashed] (Z) ..
controls ++(-1,.4) and ++(.1,-1) .. (1,1) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (1.5,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \\ \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$\overline{B_2}$}; \draw[color=blue, thick, dashed, double distance=1pt,] (Fj) to (-0,1.25); \node[blue] at (.3,0.8){$\scriptstyle 2$\;}; \node at (.7,0) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } & \;\; =\;\; \; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.4) and ++(.1,-1) .. (1,1) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, dashed] (Z) .. controls ++(-1,.4) and ++(.1,-1) .. 
(1,1) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (1.5,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \;\; - \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (-0.4,0) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (0.4,0) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (-0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, dashed,double distance=1pt,] (Z) .. controls ++(-1,.4) and ++(.1,-1) .. (1,1) node[pos=0.9, left] {$\scriptstyle 2$} ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (1.5,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{align*} $\blacktriangleleft$nd{example} \subsubsection{The case of $\lambdaanglembda =0$} By Lemma~\ref{lem:E} the degree zero curls in weight $\lambda=0$ must be scalar multiples of the identity map. We define parameters by the equations $\blacktriangleright$egin{equation} \lambdaanglebel{eq:c0} \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (0.5,1) -- (0.5,1.5); \draw[thick] (0.5,-.5) -- (0.5,0); \draw[thick] (-1.5,0) -- (-1.5,1); \draw[thick,->] (0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,1); \draw[thick, ->] (-0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (-1.5,1); \draw[thick, ->] (-1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (-0.5,0) node[pos=0.5, shape=coordinate](CUP){}; \draw[color=blue, thick, dashed] (X) .. 
controls ++(-1.2,0) and ++(0,.9) ..(CUP); \node at (0,-0.25) {$0$}; \node at (1,-0.25) {$+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad c^-_0 \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick,->] (0,-.5) -- (0,1.5); \node at (-.5,-0.25) {$0$}; \node at (.5,-0.25) {$+2$}; $\blacktriangleleft$nd{tikzpicture}}, \qquad \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick] (-0.5,1) -- (-0.5,1.5); \draw[thick] (-0.5,-.5) -- (-0.5,0); \draw[thick] (1.5,0) -- (1.5,1); \draw[thick,->] (-0.5,0) .. controls ++(-0,0.5) and ++(0,-0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,1); \draw[thick, ->] (0.5,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.5,1) node[pos=0.5, shape=coordinate](CUP){}; \draw[thick, ->] (1.5,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,0); \draw[color=blue, thick, dashed] (X) .. controls ++(-2,.3) and ++(.1,.7) ..(CUP); \node at (0,-0.25) {$0$}; \node at (-1,-0.25) {$+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad -c^+_0 \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} [scale=0.8] \draw[thick,->] (0,-.5) -- (0,1.5); \node at (-.5,-0.25) {$-2$}; \node at (.5,-0.25) {$0$}; $\blacktriangleleft$nd{tikzpicture}}, $\blacktriangleleft$nd{equation} for some coefficients $c_0^+$ and $c_0^-$. It is convenient to introduce fake bubbles in weight $\lambda=0$ defined by setting $\blacktriangleright$egin{align} \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. 
(Y) node[pos=0.15,right]{$\scriptstyle -1$\;};
\node at (Y) {$\bullet$}; \node at (1,.75) {$\lambda$};
\end{tikzpicture} }
&\;\; = \;\; c_0^+\,\mathrm{id}_{\mathbf{1}_{0}} &
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -1$\;};
\node at (Y) {$\bullet$}; \node at (1,.65) {$\lambda$};
\end{tikzpicture}
}; \endxy
&\;\; = \;\; c_0^-\,\mathrm{id}_{\mathbf{1}_{0}}.
\end{align}

\subsubsection{Notation for all $\lambda$}

We refer to the maps $B_j$ for $0 \leq j \leq \lambda-1$ and $\overline{B_j}$ for $0 \leq j \leq -\lambda-1$ as \emph{odd fake bubbles} because they are analogues of fake bubbles from the even case. It is convenient to introduce a different notation for these odd fake bubbles. This new notation makes it possible to express various equations in a uniform manner independent of whether the weight $\lambda$ is positive or negative. We write
\[
\begin{array}{ccc}
\lambda>0 & \qquad \qquad & \lambda<0 \\
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){};
\draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) ..
controls ++(-1,.7) and ++(.1,-1) .. (1,1) ; \node[blue] at (1.3,0.8){$\scriptstyle j$\;}; \draw[line width=0mm] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.2]{$\blacktriangleright$bullet}; \draw[line width=0mm] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0.0]{$\blacktriangleright$bullet}; \node at (-1,.3) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} } \quad := \quad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$B_j$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to (0,1.25); \node at (.7,.5) {$\lambda$}; \node[blue] at (.3,1.1){$\scriptstyle j$\;}; $\blacktriangleleft$nd{tikzpicture} } & \qquad \qquad & \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ; \node[blue] at (-.65,1.2){$\scriptstyle j$\;}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\blacktriangleright$bullet}; \draw[line width=0mm] (-0.5,0) (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. 
(0.5,0) node[pos=0.1]{$\bullet$};
\node at (1.2,0) {$\lambda$};
\end{tikzpicture}}
\quad := \quad
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-0,0) {$\overline{B_j}$};
\draw[color=blue, thick, double distance=1pt, dashed] (Fj) to (0,1.25);
\node at (.7,.5) {$\lambda$};
\node[blue] at (.3,1.1){$\scriptstyle j$\;};
\end{tikzpicture} }
\end{array}
\]
for all $0 \leq j \leq |\lambda|-1$. The drawback of this notation is that it appears to involve a negative number of dots. We never allow negative dots in diagrams involving 2-morphisms. Whenever a negative number of dots is encountered in a dotted bubble diagram, the dotted bubble is interpreted as an odd fake bubble defined above. The advantage of introducing this notation for odd fake bubbles is that equations \eqref{eq:fake-bubble-p} and \eqref{eq:fake-bubble-n} can be expressed as:
\begin{align}\label{eq:fake-bubble-last}
\sum_{f+g=m} (-1)^{g}
\hackcenter{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){};
\draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.6,1.2){$\scriptstyle f$\;};
\node at (Y) {$\bullet$}; \node at (Z) {$\bullet$};
\end{tikzpicture} }
\xy (0,-2)*{
\begin{tikzpicture}[scale=0.9]
\draw[thick, ->] (0.5,0) ..
controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ; \node[blue] at (1.3,0.9){$\scriptstyle g$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (-.5,1.1) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy\;\; &\;\; = \;\; \partialta_{m,0}1b_{1bl} &\text{for $\lambda >0$,} \\ \sum_{f+g=m} (-1)^{g} \xy (0,-2)*{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (0.5,0) .. controls (0.5,0.8) and (-0.5,0.8) .. (-0.5,0) node[pos=0, shape=coordinate](Z){}; \draw[thick] (0.5,0) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.7) and ++(-.2,.4) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-1,.7) and ++(.1,-1) .. (1,1.25) ; \node[blue] at (1.3,1.1){$\scriptstyle f$\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node at (-.25,1.1) {$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture} }; $\blacktriangleleft$ndxy \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.9] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.5,0) .. controls (-0.5,-0.8) and (0.5,-0.8) .. 
(0.5,0) node[pos=0.1, shape=coordinate](Z){};
\draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;};
\draw[color=blue, thick, double distance=1pt, dashed] (Z) to[out=180, in=90] (-1,1.25) ;
\node[blue] at (-.6,1.2){$\scriptstyle g$\;};
\node at (Y) {$\bullet$}; \node at (Z) {$\bullet$};
\end{tikzpicture} }
&\;\; = \;\; \delta_{m,0}\,\mathrm{id}_{\mathbf{1}_\lambda} & \text{for $\lambda<0$,}
\end{align}
for $0 \leq m \leq |\lambda|-1$. The relations in the 2-category $U_qc$ are also conveniently expressed in terms of fake bubbles.

\subsection{A general form for the inverse map}

By Corollary~\ref{cor:1} the map
\begin{equation}
\zeta \;\;:=\;\;
\hackcenter{\begin{tikzpicture}[scale=0.6]
\draw[semithick, <-] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){};
\draw[semithick, ->] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1);
\draw[color=blue, thick, dashed] (X) to (0,0);
\end{tikzpicture}}\;\;
\bigoplus_{k=0}^{\lambda-1}
\hackcenter{\begin{tikzpicture}[scale=0.6]
\draw[thick, ->-=0.15, ->] (0.5,.2) .. controls (0.6,-0.8) and (-0.6,-0.8) .. (-0.5,.2) node[pos=0.85, shape=coordinate](Y){};
\draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.2) and ++(0,.4) .. (-1,-1) node[pos=0.75,left]{$\scriptstyle k$};
\draw[line width=0mm] (0.5,.2) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,.2) node[pos=0.85]{\tikz \draw[fill=black] circle (0.4ex);};
\end{tikzpicture} }
\colon \mathtt{F}\Pi\mathtt{E}\,\mathbf{1}_\lambda \bigoplus_{k=0}^{\lambda-1} \Pi^k\,\mathbf{1}_\lambda \langle \lambda-1-2k \rangle \rightarrow \mathtt{E}\mathtt{F}\,\mathbf{1}_\lambda
\end{equation}
is invertible.
We describe its inverse $\zeta^{-1}$ diagrammatically as follows: $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.55] \draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0,0.75) and ++(0,-0.75) .. (0.6,2.1); \draw[thick, ->-=0.12, ->-=0.95] (-0.6,2.1).. controls ++(0,-0.75) and ++(0,0.75) ..(0.6,-0.1); \draw[color=blue, thick, dashed] (0,1) -- (0,2.1) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda)$}; $\blacktriangleleft$nd{tikzpicture} }\;\; $\blacktriangleright$igoplus_{k=0}^{\lambda-1} \lambdaeft( \quad\;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.55] \draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0.1,1.3) and ++(0.1,1.3) .. (0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; $\blacktriangleleft$nd{tikzpicture} } \;\; \;\;\right) : \mathtt{E} \mathtt{F} 1bl \rightarrow\mathtt{F}{\mathbbm P}i \mathtt{E} 1bl $\blacktriangleright$igoplus_{k=0}^{\lambdaanglembda-1} {\mathbbm P}i^k1bl \lambdaangle \lambda-1-2k \rangle . $\blacktriangleleft$nd{equation} Likewise, for $\lambda \lambdaeq 0$ the inverse of $\blacktriangleright$egin{equation} \zeta\;\;:=\;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[semithick, ->] (-0.5,0) .. controls (-0.5,0.5) and (0.5,0.5) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[semithick, <-] (0.5,0) .. controls (0.5,0.5) and (-0.5,0.5) .. (-0.5,1); \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.5) and ++(0,.5) .. (-.5,.5) .. controls ++(0,-.3) and ++(0,.3) .. (0,0); $\blacktriangleleft$nd{tikzpicture}} \;\; $\blacktriangleright$igoplus_{k=0}^{-\lambda-1} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.6] \draw[thick, ->-=0.15, ->] (-0.7,.5) .. 
controls ++(-.1,-1) and ++(.1,-1) .. (0.7,.5) node[pos=0.85, shape=coordinate](Y){} node[pos=0.55, shape=coordinate](M){} node[pos=0.44, shape=coordinate](X){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.3) and ++(0,.5) .. (M) node[pos=0.15,above]{$\scriptstyle k$}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls ++(0,.55) and ++(0,.55) .. (-.6,-.25) .. controls ++(0,-.3) and ++(0,.4) ..(0,-1); \node at (Y){\tikz \draw[fill=black] circle (0.4ex);}; $\blacktriangleleft$nd{tikzpicture} }:\mathtt{E}{\mathbbm P}i\mathtt{F} 1bl $\blacktriangleright$igoplus_{k=0}^{-\lambda-1}{\mathbbm P}i^{\lambda+1+k} 1bl \lambdaangle -\lambda-1-2k \rangle \rightarrow \mathtt{F} \mathtt{E} 1bl $\blacktriangleleft$nd{equation} can be expressed as $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (0.6,-0.1) .. controls ++(0,0.75) and ++(0,-0.75) .. (-0.6,2.1); \draw[thick, ->-=0.12, ->-=0.95] (0.6,2.1).. controls ++(0,-0.75) and ++(0,0.75) ..(-0.6,-0.1); \draw[color=blue, thick, dashed] (0,1) -- (0,2.1) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda)$}; $\blacktriangleleft$nd{tikzpicture} }\;\; $\blacktriangleright$igoplus_{k=0}^{-\lambda-1} \lambdaeft( \quad\;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (0.6,-0.1) .. controls ++(0.1,1.3) and ++(-0.1,1.3) .. (-0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(-\lambda-1-k)$}; \node[blue] at (1.1,2.1) {$\scriptstyle \lambda+1+k$}; $\blacktriangleleft$nd{tikzpicture} } \;\; \;\;\right) : \mathtt{F} \mathtt{E} 1bl \rightarrow\mathtt{E}{\mathbbm P}i\mathtt{F} 1bl $\blacktriangleright$igoplus_{k=0}^{-\lambda-1}{\mathbbm P}i^{\lambda+1+k} 1bl \lambdaangle -\lambda-1-2k \rangle . 
\end{equation}
Condition (3) of Definition~\ref{def_strong} only requires the \emph{existence} of isomorphisms between the two 1-morphisms on either side. However, the space of 2-morphisms between a pair of 1-morphisms in $\mathcal{C}$ could contain maps that cannot be expressed using 2-morphisms from the strong supercategorical action of $\mathfrak{sl}_2$, i.e.\ using dots, crossings, caps, and cups. In the next proposition we show that this is not the case for the 2-morphisms giving the isomorphism $\zeta^{-1}$.

\begin{prop} \label{prop_form-of-inv}
The isomorphism $\zeta^{-1}$ for $\lambda \geq 0$ has the form
\begin{equation} \label{eq_phi-inverses-U}
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0,0.75) and ++(0,-0.75) .. (0.6,2.1);
\draw[thick, ->-=0.12, ->-=0.95] (-0.6,2.1).. controls ++(0,-0.75) and ++(0,0.75) ..(0.6,-0.1);
\draw[color=blue, thick, dashed] (0,1) -- (0,2.1);
\node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda)$};
\node at (1.3,0.5) {$\lambda$};
\end{tikzpicture} }
\;\;=\;\; \beta_{\lambda} \;
\hackcenter{\begin{tikzpicture}[scale=0.5]
\draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0,0.75) and ++(0,-0.75) .. (0.6,2.1);
\draw[thick, ->-=0.12, ->-=0.95] (-0.6,2.1).. controls ++(0,-0.75) and ++(0,0.75) ..(0.6,-0.1);
\draw[color=blue, thick, dashed] (0,1) -- (0,2.1) ;
\node at (1,1) {$\lambda$};
\end{tikzpicture} }\;\;
\qquad \qquad
\hackcenter{
\begin{tikzpicture}[scale=0.5]
\draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0.1,1.3) and ++(0.1,1.3) ..
(0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; \node at (2.3,0) {$\lambda$}; \end{tikzpicture} } \;\; = \;\; \sum_{j=0}^{\lambda-1-k} (-1)^j \xy (0,0)*{ \begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.25,0.65) {$\scriptstyle B_j$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to [out=90, in=90] (R); \draw[color=blue, thick, double distance=1pt, dashed] (M) to (0,1.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\bullet$}; \node[blue] at (-.3,1.5){$\scriptstyle k$}; \node[blue] at (1.15,1.4){$\scriptstyle j$}; \node[blue] at (-1.45,.20){$\scriptstyle \lambda-1 -k-j$}; \node at (-1,1.2) {$\lambda$}; \end{tikzpicture} }; \endxy \end{equation} for some $\lambda$-dependent coefficients $\beta_{\lambda} \in \Bbbk^{\times}$. For $\lambda \leq 0$ the isomorphism $\zeta^{-1}$ has the form \begin{equation} \hackcenter{\begin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (0.6,-0.1) .. controls ++(0,0.75) and ++(0,-0.75) .. (-0.6,2.1); \draw[thick, ->-=0.12, ->-=0.95] (0.6,2.1)..
controls ++(0,-0.75) and ++(0,0.75) ..(-0.6,-0.1); \draw[color=blue, thick, dashed] (0,1) -- (0,2.1) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda)$}; $\blacktriangleleft$nd{tikzpicture} } \;\;=\;\; $\blacktriangleright$eta_{\lambda} \; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[color=blue, thick, dashed] (0,1) .. controls ++(0,-.3) and ++(0,.3) .. (-.6,.4) .. controls ++(.1,-.4) and ++(.1,-.4) .. (X); $\blacktriangleleft$nd{tikzpicture} } \;\; \qquad \qquad \hackcenter{ $\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (0.6,-0.1) .. controls ++(0.1,1.3) and ++(-0.1,1.3) .. (-0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(-\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; \node at (2.3,0) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \;\; = \;\; \sum_{j=0}^{-\lambda-1-k}(-1)^{j} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick,->-=0.8] (0.5,.25) -- (0.5,.5); \draw[thick,->-=0.55] (-0.5,.5) -- (-0.5,.25); \draw[thick] (0.5,.5) .. controls ++(.1,.8) and ++(-.1,.8) .. (-0.5,.5) node[pos=0.1, shape=coordinate](DOT){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.5,0.75) {$\scriptstyle \overline{B_j}$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) .. controls ++(0,.4) and ++(0,-.6) .. (1.25,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.5,.4) and ++(0,-1) .. 
(-.75,1.75); \node at (DOT){$\bullet$}; \node[blue] at (1.5,1.6){$\scriptstyle j$}; \node[blue] at (0,1.60){$\scriptstyle \lambda-1 -k-j$}; \node at (-1,.7) {$\lambda$}; \end{tikzpicture} } \end{equation} for $\beta_{\lambda} \in \Bbbk^{\times}$. \end{prop} \begin{proof} The first equation in \eqref{eq_phi-inverses-U} follows immediately from Lemma~\ref{lem:homs}. For the second claim take adjoints in Lemma~\ref{lem:main} equation \eqref{eq:main1} so that \begin{equation} \label{eq_phi-inverses} \hackcenter{ \begin{tikzpicture}[scale=0.55] \draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0.1,1.3) and ++(0.1,1.3) .. (0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; \node at (2.3,0) {$\lambda$}; \end{tikzpicture} } \;\; = \;\; \sum_{j=0}^{\lambda-1-k} \xy (0,3)*{ \begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (-1.75,0.75) {$\scriptstyle f_j'(\lambda -1 -k)$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to [out=90, in=90] (M); \draw[color=blue, thick, double distance=1pt, dashed] (R) to[bend right] (.5,1.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) ..
(0.5,0) node[pos=0.1]{$\bullet$}; \node[blue] at (.7,1.5){$\scriptstyle k$}; \node[blue] at (-1.55,1.6){$\scriptstyle j$}; \node[blue] at (-1.45,.20){$\scriptstyle \lambda-1 -k-j$}; \end{tikzpicture} }; \endxy \;\; :\mathtt{E}\mathtt{F} \mathbf{1}_{\lambda} \rightarrow \mathbf{1}_{\lambda} \langle \lambda-1-2k \rangle \end{equation} for some 2-morphisms $f_j'(\lambda-1-k) \in {\rm Hom}_{U_qc}(\mathbf{1}_{\lambda},\Pi^k\mathbf{1}_{\lambda}\langle 2j \rangle)$. After absorbing additional scalars we can rewrite this as \begin{equation} \hackcenter{ \begin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (-0.6,-0.1) .. controls ++(-0.1,1.3) and ++(0.1,1.3) .. (0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; \node at (2.3,0) {$\lambda$}; \end{tikzpicture} } \;\; = \;\; \sum_{j=0}^{\lambda-1-k} (-1)^{j} \xy (0,3)*{ \begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.55,0.5) {$\scriptstyle f_j(\lambda -1 -k)$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to [out=90, in=80] (R); \draw[color=blue, thick, double distance=1pt, dashed] (M) to (0,1.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) ..
(0.5,0) node[pos=0.1]{$\bullet$}; \node[blue] at (-.35,1.4){$\scriptstyle k$}; \node[blue] at (1.35,1.4){$\scriptstyle j$}; \node[blue] at (-1.45,.20){$\scriptstyle \lambda-1 -k-j$}; \end{tikzpicture} }; \endxy \end{equation} for some 2-morphisms $f_j(\lambda-1-k) \in {\rm Hom}_{U_qc}(\mathbf{1}_{\lambda},\Pi^k\mathbf{1}_{\lambda}\langle 2j \rangle)$. The component of $\zeta^{-1} \zeta$ mapping the summand $\Pi^{\ell}\mathbf{1}_{\lambda}\langle \lambda-1-2\ell\rangle$ to the summand $\Pi^k\mathbf{1}_{\lambda}\langle \lambda-1-2k\rangle$ is given by the composite \begin{equation} \xy (-45,0)*+{\Pi^{\ell}\mathbf{1}_{\lambda}\langle \lambda-1-2\ell\rangle}="1"; (0,0)*+{\mathtt{E}\mathtt{F}\mathbf{1}_{\lambda}}="2"; (75,0)*+{\Pi^{k}\mathbf{1}_{\lambda}\langle \lambda-1-2k \rangle .}="3"; {\ar^{ \hackcenter{\begin{tikzpicture}[scale=0.9] \draw[thick, ->-=0.15, ->] (0.5,.2) .. controls (0.6,-0.8) and (-0.6,-0.8) .. (-0.5,.2) node[pos=0.85, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (Y) .. controls++(-.5,.2) and ++(0,.4) .. (-1,-1) node[pos=0.75,left]{$\scriptstyle \ell$}; \draw[line width=0mm] (0.5,.2) .. controls (0.5,-0.8) and (-0.5,-0.8) .. (-0.5,.2) node[pos=0.85]{\tikz \draw[fill=black] circle (0.4ex);}; \end{tikzpicture} } } "1";"2"}; {\ar^-{ \xy (0,3)*{ \begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) ..
(0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.55,0.5) {$\scriptstyle f_j(\lambda -1 -k)$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to [out=90, in=80] (R); \draw[color=blue, thick, double distance=1pt, dashed] (M) to (0,1.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\bullet$}; \node[blue] at (-.35,1.4){$\scriptstyle k$}; \node[blue] at (1.35,1.4){$\scriptstyle j$}; \node[blue] at (-1.45,.20){$\scriptstyle \lambda-1 -k-j$}; \node at (-1.5,.80){$\sum_j$}; \end{tikzpicture} }; \endxy } "2";"3"}; \endxy \end{equation} The condition $\zeta^{-1} \zeta = \mathbf{1}$ implies that this composite must equal $\delta_{\ell,k} \mathbf{1}_{\Pi^k \mathbf{1}_{\lambda} \langle \lambda-1-2k\rangle}$. Assume that $\ell \geq k+j \geq k$ or else the composite will contain a negative degree bubble, which is always equal to zero. Then closing off $k$ of the dashed lines and bending up the remaining $\ell-k$ dashed lines coming out of the first diagram we get \[ \sum_{j=0}^{\lambda-1-k} (-1)^j \xy (0,3)*{ \begin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.41, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \draw[thick] (-0.5,0) .. controls (-0.5,-1) and (0.5,-1) ..
(0.5,0) node[pos=0.05, shape=coordinate](mDOT){} node[pos=0.2, shape=coordinate](lDOT){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.75,0.5) {$\scriptstyle f_j(\lambda -1 -k)$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) to [out=90, in=70] (R); \draw[color=blue, thick, double distance=1pt, dashed] (mDOT) .. controls++(-1.5,.3) and ++(-0.1,1) .. (M); \draw[color=blue, thick, double distance=1pt, dashed] (lDOT) .. controls++(-1.5,.3) and ++(-0,-1) .. (-1,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. (L); \draw[line width=0mm] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1]{$\bullet$}; \draw[line width=0mm] (-0.5,0) .. controls (-0.5,-1) and (0.5,-1) .. (0.5,0) node[pos=0.05]{$\bullet$} node[pos=0.2]{$\bullet$}; \node[blue] at (-.1,1.1){$\scriptstyle k$}; \node[blue] at (1.75,1.3){$\scriptstyle j$}; \node[blue] at (-.65,1.6){$\scriptstyle \ell-k$}; \node at (1.3,-.25){$\lambda$}; \end{tikzpicture} }; \endxy \quad = \quad \delta_{\ell,k} \mathbf{1}_{\mathbf{1}_{\lambda} \langle \lambda-1-2k\rangle}. \] By varying $\ell$ for fixed $k$, it is possible to rewrite each of the 2-morphisms $f_j(\ell-1-k)$ as products of bubbles. For example, by setting $\ell=k$ it follows that the degree zero 2-morphism $f_{0}(\ell-1-k)$ is multiplication by the scalar $1$. Continuing by induction, decreasing $\ell$ shows that all the $f_j(\ell-1-k)$ can be rewritten as a linear combination of 2-morphisms in the image of the generating 2-morphisms of the 2-category $U_qc$.
Notice that the above diagram contains a negative degree bubble if $j>(\ell-k)$, so we can restrict the sum to $0 \leq j \leq (\ell-k)$. Comparing this equation to the defining equations \eqref{eq:fake-bubble-last} for the fake bubbles we must have $f_j(\lambda-1-k) = B_j$ for $0 \leq \ell, k \leq \lambda-1$. For $\lambda \leq 0$ the adjoint of Lemma~\ref{lem:main} equation \eqref{eq:main2} implies \begin{equation} \hackcenter{ \begin{tikzpicture}[scale=0.5] \draw[thick, ->-=0.12, ->-=0.95] (0.6,-0.1) .. controls ++(0.1,1.3) and ++(-0.1,1.3) .. (-0.6,-0.1); \draw[color=blue, thick, dashed, double distance=1pt] (0,1) -- (0,2.4) ; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (fi) at (0,1) {$\zeta(-\lambda-1-k)$}; \node[blue] at (.3,2.1) {$\scriptstyle k$}; \node at (2.3,0) {$\lambda$}; \end{tikzpicture} } \quad = \quad \sum_{j=0}^{-\lambda-1-k} \hackcenter{\begin{tikzpicture} \draw[thick,->-=0.8] (0.5,.25) -- (0.5,.5); \draw[thick,->-=0.55] (-0.5,.5) -- (-0.5,.25); \draw[thick] (0.5,.5) .. controls ++(.1,.8) and ++(-.1,.8) .. (-0.5,.5) node[pos=0.1, shape=coordinate](DOT){}; \node[draw, thick, fill=blue!20,rounded corners=4pt,inner sep=3pt] (Fj) at (1.75,0.75) {$\scriptstyle f_j'(-\lambda-1-k)$}; \draw[color=blue, thick, double distance=1pt, dashed] (Fj) .. controls ++(0,.4) and ++(0,-.6) .. (1.25,1.75); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.5,.4) and ++(0,-1) ..
(-.75,1.75); \node at (DOT){$\bullet$}; \node[blue] at (1.5,1.6){$\scriptstyle j$}; \node[blue] at (0,1.60){$\scriptstyle \lambda-1 -k-j$}; \node at (-1,.7) {$\lambda$}; \end{tikzpicture} } \end{equation} for some 2-morphisms $f_j'(-\lambda-1-k)\colon \mathtt{F}\mathtt{E}\mathbf{1}_{\lambda} \rightarrow \Pi^j\mathbf{1}_{\lambda}\langle 2j \rangle$. Again, by considering the composite map $\Pi^{\lambda+1+\ell}\mathbf{1}_{\lambda}\langle -\lambda-1-2\ell\rangle \rightarrow \mathtt{F}\mathtt{E}\mathbf{1}_{\lambda} \rightarrow \Pi^{\lambda+1+k}\mathbf{1}_{\lambda}\langle -\lambda-1-2k\rangle$ the 2-morphisms $f_j'(-\lambda-1-k)$ can be related to the fake bubbles $\overline{B_j}$. \end{proof} \subsection{Relations resulting from the ${\mathfrak{sl}}_2$ commutator relation} In this section we collect a set of relations that follow from the general form of the inverse $\zeta^{-1}$ of $\zeta$. We also uniquely solve for the coefficients $\beta_{\lambda}$ and $c_{-1}$. Observe that $\zeta^{-1}$ in Proposition~\ref{prop_form-of-inv} is the inverse of $\zeta$ if and only if the following relations hold in $\mathcal{C}$: \subsubsection{Relations for $\lambda >0$} In addition to the equations relating part of the inverse to fake bubbles, we have the following relations. \[ \hackcenter{\begin{tikzpicture} \draw[thick, ->] (-0.5,0) to (-0.5,2); \draw[thick, <-] (0.5,0) to (0.5,2); \node at (1,1.5) {$\lambda$}; \end{tikzpicture}} \quad = \quad \beta_{\lambda} \;\; \hackcenter{\begin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) ..
controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) -- (X); \node at (1,1.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \quad + \quad \sum_{ \xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = \lambda-1}; $\blacktriangleleft$ndxy} (-1)^{f_3} \;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.8) and (0.5,0.8) .. (0.5,0) node[pos=0.1, shape=coordinate](DOT){} node[pos=0.42, shape=coordinate](L){} node[pos=0.5, shape=coordinate](M){} node[pos=0.58, shape=coordinate](R){}; \draw[thick, ->] (1.9,1) .. controls ++(0,0.6) and ++(0,0.6) .. (1.1,1) node[pos=0.05, shape=coordinate](Z){}; \draw[thick] (1.9,1) .. controls ++(0,-0.6) and ++(-0,-0.6) .. (1.1,1) node[pos=0.5, shape=coordinate](X){} node[pos=0.2, shape=coordinate](Y){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(-.1,.5) and ++(-.2,.3) .. (Y) node[pos=0.9,right]{$\scriptstyle -\lambda-1$\;}; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.5,.4) and ++(.2,.8) .. (R) ; \node[blue] at (1.25,0.8){$\scriptstyle $\;}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \draw[thick, <-] (-0.5,2.25) .. controls ++(0,-.8) and ++(0,-.8) .. (0.5,2.25) node[pos=0.2, shape=coordinate](tDOT){}; \draw[color=blue, thick, double distance=1pt, dashed] (M) .. controls ++(.4,1.4) and ++(-.5,-1) .. (-1.1,1.8) to[out=90, in=140] (tDOT); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls++(-.65,0) and ++(-.25,.3) .. 
(L); \node at (tDOT){$\blacktriangleright$bullet}; \node at (DOT){$\blacktriangleright$bullet}; \node[blue] at (.6,1.4){$\scriptstyle f_3$}; \node[blue] at (-1.35,1.45){$\scriptstyle f_1$}; \node[blue] at (-1.0,.30){$\scriptstyle f_2$}; \node at (1,2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \] $\blacktriangleright$egin{equation} \lambdaanglebel{eq:FEtEF-beta} $\blacktriangleright$eta_{\lambda} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (X) -- (0,0); \draw[color=blue, thick, dashed] (Y) -- (0,2); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,2) to (-0.5,0); \draw[thick, <-] (0.5,2) to (0.5,0); \draw[color=blue, thick, dashed] (0,0) -- (0,2); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} We simplify the remaining relations omitting several relations that follow from those below using odd nilHecke relations. For all $0 \lambdaeq m < \lambda$ $\blacktriangleright$egin{align} \lambdaanglebel{eq:curldiep} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=1, shape=coordinate](DOT){};; \draw[thick, ->] (-0.5,1) .. controls ++(0,.6) and ++(0,.6) .. 
(0.5,1) node[pos=0.5, shape=coordinate](Y){}; \draw[color=blue, thick, dashed] (X) -- (0,0); \draw[color=blue, thick, double distance=1pt,dashed] (Y) -- (0,2); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) to[bend left](-1,2); \node at (1,0.5) {$\lambda$}; \node[blue] at (-.7,1.8) {$\scriptstyle m$}; \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} &\quad =\quad 0 & \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (-0.5,1) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=0, shape=coordinate](DOT){};; \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (Y) -- (0,2); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) to[bend left](-1,2); \node[blue] at (-.7,1.8) {$\scriptstyle m$}; \node at (1,0.5) {$\lambda$}; \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} &\quad =\quad 0 . $\blacktriangleleft$nd{align} Note that the two equations above already follow from Lemma~\ref{lem:E} using the adjunctions. \subsubsection{Relations for $\lambdaanglembda <0$} In addition to the equations relating part of the inverse to fake bubbles in weights $\lambda<0$, we have the following relations. $\blacktriangleright$egin{equation} \lambdaanglebel{eq:EFp} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (-0.5,0) to (-0.5,2); \draw[thick, ->] (0.5,0) to (0.5,2); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad $\blacktriangleright$eta_{\lambda} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick, ->] (-0.5,1) .. 
controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,.4) and ++(.1,.4) .. (-.6,1.6) .. controls ++(0,-.3) and ++(0,.3) ..(0,1) .. controls ++(0,-.3) and ++(0,.3) .. (-.6,.4) .. controls ++(.1,-.4) and ++(.1,-.4) .. (X); $\blacktriangleleft$nd{tikzpicture} } \quad + \quad \sum_{ \xy (0,2)*{\scriptstyle f_1+f_2+f_3}; (0,-1)*{\scriptstyle = -\lambda-1}; $\blacktriangleleft$ndxy} (-1)^{f_3} \;\; \hackcenter{ $\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls ++(0,0.8) and ++(0,0.8) .. (-0.5,0) node[pos=0.15, shape=coordinate](DOT){}; \draw[thick, ->] (1.1,.75) .. controls ++(-0,0.6) and ++(0,0.6) .. (1.9,.75) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (1.1,.75) .. controls ++(0,-0.6) and ++(0,-0.6) .. (1.9,.75) node[pos=0.1, shape=coordinate](Z){}; \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda-1$\;}; \draw[thick, ->] (0.8,2.25) -- (0.8,2.5); \draw[thick] (-0.8,2.25) -- (-0.8,2.5); \draw[thick] (0.8,2.25) .. controls ++(0,-.8) and ++(0,-.8) .. (-0.8,2.25) node[pos=0.15, shape=coordinate](tDOT){} node[pos=0.42, shape=coordinate](RCUP){} node[pos=0.5, shape=coordinate](MCUP){} node[pos=0.58, shape=coordinate](LCUP){}; \draw[color=blue, thick, double distance=1pt, dashed] (tDOT) ..controls ++(-.3,.3) and ++(0,.4) .. (RCUP) ; \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls ++(-.3,.2) and ++(0,-.5) .. (-1,1) .. controls ++(0,1.7) and ++(.1,.7) .. (MCUP) ; \draw[color=blue, thick, double distance=1pt, dashed] (Z) .. controls ++(-.3,.4) and ++(0,-.4) .. (-.75,1.5) .. controls ++(0,.5) and ++(0,.4) .. 
(LCUP) ; \node at (tDOT){$\blacktriangleright$bullet}; \node at (DOT){$\blacktriangleright$bullet}; \node at (Y) {$\blacktriangleright$bullet}; \node at (Z) {$\blacktriangleright$bullet}; \node[blue] at (.5,1.25){$\scriptstyle f_3$}; \node[blue] at (.6,2.35){$\scriptstyle f_1$}; \node[blue] at (-1.0,.40){$\scriptstyle f_2$}; \node at (1,2) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } $\blacktriangleleft$nd{equation} $\blacktriangleright$egin{equation} $\blacktriangleright$eta_{\lambda} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,-.5) and ++(-.1,-.5) .. (-.6,1.5) .. controls ++(0,.3) and ++(0,-.4) ..(0,2); \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.4) and ++(-.1,.4) .. (-.6,.5) .. controls ++(0,-.3) and ++(0,.4) ..(0,0); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) to (-0.5,2); \draw[thick, <-] (0.5,0) to (0.5,2); \draw[color=blue, thick, dashed] (0,0) -- (0,2); $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} For $0 \lambdaeq m < -\lambda$ the curl relations $\blacktriangleright$egin{align} \lambdaanglebel{eq:curldien} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=1, shape=coordinate](DOT){};; \draw[thick, ->] (0.5,1) .. controls ++(0,.6) and ++(0,.6) .. 
(-0.5,1) node[pos=0.5, shape=coordinate](Y){}; \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.4) and ++(-.1,.4) .. (-.6,.5) .. controls ++(0,-.3) and ++(0,.4) ..(0,0); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) to[bend right](1,2); \node at (-1,0.5) {$\lambda$}; \node[blue] at (.7,1.8) {$\scriptstyle m$}; \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} &\quad =\quad 0 & \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick] (-0.5,1) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){} node[pos=0, shape=coordinate](DOT){};; \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, double distance=1pt,dashed] (X) .. controls ++(0,1.25) and ++(0,-.75) .. (-1,2); \draw[color=blue, thick, dashed] (Y) -- (0,2); \draw[color=blue, thick, double distance=1pt,dashed] (DOT) .. controls ++(-.75,0) and ++(0,-1) ..(-1.5,2); \node[blue] at (-1.8,1.8) {$\scriptstyle m$}; \node at (1,0.5) {$\lambda$}; \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} &\quad =\quad 0 . $\blacktriangleleft$nd{align} hold in any strong supercategorical action. \subsubsection{Relations for $\lambdaanglembda =0$} Invertibility of $\zeta$ for $\lambda=0$ implies that in any strong supercategorical action the relations $\blacktriangleright$egin{equation} \lambdaanglebel{eq:EFtoFEzero} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) to (-0.5,2); \draw[thick, <-] (0.5,0) to (0.5,2); \node at (1,1.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad $\blacktriangleright$eta_{0} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. 
controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) -- (X); \node at (1,1.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture} } \qquad \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,2) to (-0.5,0); \draw[thick, <-] (0.5,2) to (0.5,0); \draw[color=blue, thick, dashed] (0,0) -- (0,2); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad $\blacktriangleright$eta_{0} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (X) -- (0,0); \draw[color=blue, thick, dashed] (Y) -- (0,2); \node at (1,0.5) {$\lambda$}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{equation} holds. Note this is equation is consistent with the alternative equation $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (-0.5,0) to (-0.5,2); \draw[thick, <-] (0.5,0) to (0.5,2); \draw[color=blue, thick, dashed] (0,0) -- (0,2); $\blacktriangleleft$nd{tikzpicture}} \quad = \quad $\blacktriangleright$eta_{0} \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. 
(-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,-.5) and ++(-.1,-.5) .. (-.6,1.5) .. controls ++(0,.3) and ++(0,-.4) ..(0,2); \draw[color=blue, thick, dashed] (X) .. controls ++(.1,.4) and ++(-.1,.4) .. (-.6,.5) .. controls ++(0,-.3) and ++(0,.4) ..(0,0); \node at (1,0.5) {$\lambda$}; \end{tikzpicture}} \qquad \qquad\hackcenter{\begin{tikzpicture} \draw[thick, <-] (-0.5,0) to (-0.5,2); \draw[thick, ->] (0.5,0) to (0.5,2); \node at (1,0.5) {$\lambda$}; \end{tikzpicture}} \quad = \quad \beta_{0} \;\; \hackcenter{\begin{tikzpicture} \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick, ->] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[color=blue, thick, dashed] (Y) .. controls ++(.1,.4) and ++(.1,.4) .. (-.6,1.6) .. controls ++(0,-.3) and ++(0,.3) ..(0,1) .. controls ++(0,-.3) and ++(0,.3) .. (-.6,.4) .. controls ++(.1,-.4) and ++(.1,-.4) .. (X); \end{tikzpicture} } \end{equation} \subsection{Finding the free parameters} We can now solve for the remaining free parameters using the relations derived so far. \begin{prop} \label{prop_free_param} In any strong supercategorical action the free parameters must be fixed as follows. \begin{enumerate} \item The coefficient $c_{-1}$ from \eqref{eq_defcmone} is equal to 1.
\item For all values of $\lambda$ where $\mathbf{1}_{\lambda}$ is not zero the coefficients from Proposition~\ref{prop_form-of-inv} satisfy $\beta_{\lambda}=-1$ for all $\lambda$. \item The coefficients $c_0^+$ and $c_0^-$ from \eqref{eq:c0} are both equal to one. \end{enumerate} \end{prop} \begin{proof} To find the coefficient $c_{-1}$ note that \[ 0 \;\; =\;\; \hackcenter{\begin{tikzpicture}[scale=0.8] \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, ->] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[thick] (-0.5,2) .. controls ++(0,.4) and ++(0,.4) .. (-1.5,2) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (-0.5,0) .. controls ++(0,-.4) and ++(0,-.4) .. (-1.5,0); \draw[thick, ->-=0.5] (-1.5,2) -- (-1.5,0); \draw[thick, ->] (.5,2) -- (.5,2.5); \draw[thick] (.5,0) -- (.5,-.5); \draw[color=blue, thick, dashed] (X) .. controls ++(-1.2,-.2) and ++(-1.2,.2) .. (Y); \node at (1,0.5) {$-1$}; \end{tikzpicture}} \quad = \quad \hackcenter{\begin{tikzpicture}[scale=0.8] \draw[thick, <-] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1); \draw[thick] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2); \draw[thick] (0.5,2) .. controls ++(0,.4) and ++(0,.4) .. (1.5,2) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (0.5,0) .. controls ++(0,-.4) and ++(0,-.4) ..
(1.5,0); \draw[thick, ->-=0.5] (1.5,0) -- (1.5,2) node[pos=0.85, shape=coordinate](DOT){}; \draw[thick, ->] (-.5,2) -- (-.5,2.5); \draw[thick] (-.5,0) -- (-.5,-.5); \draw[color=blue, thick, dashed] (X) -- (Y); \node at (-1,0.5) {$+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \frac{1}{$\blacktriangleright$eta_1} \;\; \lambdaeft( (c_- -1) \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, ->] (0,-1) -- (0,2); \node at (-.4,0.5) {$+1$}; $\blacktriangleleft$nd{tikzpicture}} \quad \right) \] where the last equality follows from $\blacktriangleleft$qref{eq:EFp}. Using $\blacktriangleleft$qref{eq:FEtEF-beta} and the relations from Section~\ref{subsec-half-sideways} we have $\blacktriangleright$egin{eqnarray}\lambdaanglebel{eqn-one-over-betal} \frac{1}{$\blacktriangleright$eta_{\lambda}} \; \hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1.5,-1) -- (1.5,1); \draw[color=blue, thick, dashed] (1,-1) -- (1,1); \draw[thick, ->] (-0.4,0) .. controls ++(0,0.6) and ++(0,0.6) .. (0.4,0) node[pos=0.5, shape=coordinate](X){} node[pos=0.1, shape=coordinate](Y){}; \draw[thick] (-0.4,0) .. controls ++(0,-0.6) and ++(0,-0.6) .. (0.4,0); \draw[color=blue, thick, double distance=1pt, dashed] (X) .. controls++(0,.65) and ++(-.65,.3) .. (Y) node[pos=0.15,right]{$\scriptstyle \lambda+1$\;}; \node at (.25,-0.75) {$\lambdaanglembda+2$}; \node at (Y) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad = \;\; \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick, <-] (-0.5,0) .. controls (-0.5,0.4) and (0.5,0.6) .. (0.5,1) node[pos=0.5, shape=coordinate](X){}; \draw[thick, ->] (0.5,0) .. controls (0.5,0.4) and (-0.5,0.6) .. (-0.5,1); \draw[thick] (-0.5,1) .. controls (-0.5,1.4) and (0.5,1.6) .. (0.5,2) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, <-] (0.5,1) .. controls (0.5,1.4) and (-0.5,1.6) .. (-0.5,2); \draw[thick] (-0.5,2) .. controls ++(0,.4) and ++(0,.4) .. 
(-1.5,2) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (-0.5,0) .. controls ++(0,-.4) and ++(0,-.4) .. (-1.5,0); \draw[thick, ->-=0.5] (-1.5,0) -- (-1.5,2) node[pos=0.85, shape=coordinate](DOT){}; \draw[thick, ->] (.5,2) -- (.5,2.5); \draw[thick] (.5,0) -- (.5,-.5); \draw[color=blue, thick, dashed] (X) -- (0,-.5); \draw[color=blue, thick, dashed] (Y) -- (0,2.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls ++(-.9,.3) and ++(.1,.7) .. (CAP) node[pos=0.35,left]{$\scriptstyle \lambda+1$\;}; \node at (1,0.5) {$\lambda$}; \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad = \quad \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick] (0.5,-.25) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,.75) node[pos=0.5, shape=coordinate](X){} node[pos=0.9, shape=coordinate](LDOT){}; \draw[thick] (-0.5,-.25) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,.75); \draw[thick] (0.5,1.25) .. controls ++(0,.5) and ++(0,-.5) .. (-0.5,2.25) node[pos=0.5, shape=coordinate](Y){}; \draw[thick, ->] (-0.5,1.25) .. controls ++(0,.5) and ++(0,-.5) .. (0.5,2.25) node[pos=0.15, shape=coordinate](DOT){}; \draw[thick] (0.5,2.25) .. controls ++(0,.4) and ++(0,.4) .. (1.5,2.25) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (0.5,-.25) .. controls ++(0,-.4) and ++(0,-.4) .. (1.5,-.25); \draw[thick, ->-=0.5] (1.5,2.25) -- (1.5,-.25); \draw[thick, ->] (-.5,2.25) -- (-.5,3); \draw[thick] (-.5,-.25) -- (-.5,-.5); \draw[thick] (-.5,.75) -- (-.5,1.25) node[pos=0.6, shape=coordinate](MDOT){}; \draw[thick] (.5,.75) -- (.5,1.25); \draw[color=blue, thick, dashed] (X) to[out=180, in=90] (-1,-.5); \draw[color=blue, thick, dashed] (Y) to[out=190, in=-100] (-1,3); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls ++(-1,.8) and ++(.1,.7) .. (CAP) node[pos=0.85,above]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (LDOT) .. controls ++(-.6,.4) and ++(-.6,.5) .. 
(MDOT); \node at (2,0.5) {$\lambda$}; \node at (DOT) {$\blacktriangleright$bullet};\node at (LDOT) {$\blacktriangleright$bullet};\node at (MDOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} $\blacktriangleleft$nd{eqnarray} for all $\lambda \mathfrak{g}eq 0$. Now carefully applying the inductive dot slide formula, we can slide all of the $\lambda+1$ dots through the top crossing. The term in which all the dots slide through the crossing is zero by the quadratic odd nilHecke relation. What remains is a symmetric sum of terms with $\lambda$ dots where the crossing has been resolved. By (the adjoint of) $\blacktriangleleft$qref{eq:curldiep}, all the terms in this sum are zero except for the term in which all of the $\lambda$ dots are in the curl. Hence the rightmost diagram in $\blacktriangleleft$qref{eqn-one-over-betal} equals $\blacktriangleright$egin{equation} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.8] \draw[thick] (0.5,-.25) .. controls ++(0,0.5) and ++(0,-0.5) .. (-0.5,.75) node[pos=0.5, shape=coordinate](X){}; \draw[thick] (-0.5,-.25) .. controls ++(0,0.5) and ++(0,-0.5) .. (0.5,.75); \draw[thick] (0.5,1.5) .. controls ++(0,.4) and ++(0,.4) .. (1.5,1.5) node[pos=0.5, shape=coordinate](CAP){}; \draw[thick] (0.5,-.25) .. controls ++(0,-.4) and ++(0,-.4) .. (1.5,-.25); \draw[thick, ->-=0.5] (1.5,1.5) -- (1.5,-.25); \draw[thick, ->] (-.5,1.5) -- (-.5,3); \draw[thick] (-.5,-.25) -- (-.5,-.5); \draw[thick] (-.5,.75) -- (-.5,1.5); \draw[thick] (.5,.75) -- (.5,1.5) node[pos=0.65, shape=coordinate](DOT){} node[pos=0, shape=coordinate](LDOT){}; \draw[color=blue, thick, dashed] (X) to[out=180, in=90] (-1,-.5); \draw[color=blue, thick, double distance=1pt, dashed] (DOT) .. controls ++(-1,.8) and ++(.1,.7) .. (CAP) node[pos=0.85,above]{$\scriptstyle \lambda-1$\;}; \draw[color=blue, thick, dashed] (LDOT) .. controls ++(-1,.4) and ++(.2,-.5) .. (-1.25,1.5) .. controls ++(0,.4) and ++(0,.4) .. (-1.85,1.75) .. 
controls ++(0,-.5) and ++(0,-1.5) .. (-1.25,3); \node at (2,0.5) {$\lambda$}; \node at (DOT) {$\bullet$};\node at (LDOT) {$\bullet$}; \end{tikzpicture}} \quad = \quad - \;\; \hackcenter{\begin{tikzpicture} \draw[thick, ->] (1.75,-1) -- (1.75,1); \draw[color=blue, thick, dashed] (1,-1) -- (1,1); \node at (2,-0) {$\lambda$}; \end{tikzpicture}}, \end{equation} by the odd nilHecke dot slide and the fact that the curl with only $\lambda-1$ dots is zero. Since the degree zero bubble is equal to multiplication by 1, this shows that $\beta_{\lambda}=-1$ for $\lambda>0$ and $1/\beta_{0}=c_0^+$. A similar calculation for $\lambda<0$ implies that $\beta_{\lambda}=-1$ for $\lambda<0$ and that $1/\beta_0 = c_0^-$. Capping off \eqref{eq:EFtoFEzero} with no dots and simplifying implies that $-\beta_0 c_0^+ c_0^- =1$, completing the proof. \end{proof} \subsection{Main theorem of 2-representations} We summarize the results of this section with the following theorem. \begin{thm} \label{thm-main2rep} A strong supercategorical action of $\mathfrak{sl}_2$ on $\mathcal{C}$ induces a 2-representation $U_qdotc \rightarrow \mathcal{C}$. \end{thm} \section{The action on cyclotomic quotients} \subsection{Defining the action} Let $R(n)$ be the odd nilHecke algebra on $n$ strands and, for a dominant integral weight $\Lambda$, let $R^\Lambda(n)$ be the corresponding cyclotomic quotient. Let $\mathcal{ONH}^\Lambda$ be the full sub-super-2-category of $\mathcal{SB}im$ whose objects are the algebras $R^\Lambda(n)$ for all $n\geq0$. Set $\lambda=\Lambda-2n$; this is the weight corresponding to $R^\Lambda(n)$ when we view $R^\Lambda(n)$ as a categorified weight space.
There are three steps to the argument in this section: \begin{enumerate} \item Kang, Kashiwara, and Oh nearly prove in \cite{KKO,KKO2} that there is a strong supercategorical action of $\mathfrak{sl}_2$ on $\mathcal{ONH}^\Lambda$. They omit only the brick condition \eqref{co:hom} of Definition~\ref{def_strong}. The 1-morphisms $\mathtt{E},\mathtt{F}$ are the bimodule kernels of restriction and induction along the maps $\iota_{n,1}$ of \eqref{eqn-iota-onh-ab}, respectively. The parity functor is the bimodule ${\mathbbm P}i R^\Lambda(n)$ as defined in Subsection~\ref{subsec-conventions} (parity shift and twist the left action by the parity involution). \item To verify condition \eqref{co:hom} of Definition~\ref{def_strong}, observe that in the action of \cite{KKO,KKO2}, a weight $\lambda$ is mapped to the cyclotomic quotient ring $R^\Lambda(n)$. Since $R^\Lambda(n)$ is graded Morita equivalent to an odd Grassmannian ring \cite{EKL} and odd Grassmannian rings are graded local, \begin{equation*} \dim_q\left({\mathbbm P}i{\rm HOM}_{\mathcal{ONH}^\Lambda}(R^\Lambda(n),R^\Lambda(n))\right)\in1+{\mathbbm N}[\pi,q]. \end{equation*} In other words, $R^\Lambda(n)$ is a brick. \item By Theorem~\ref{thm-main2rep}, this gives a 2-representation $U_qdotc\rightarrow\mathcal{ONH}^\Lambda$. \end{enumerate} We spend the rest of this section writing down how the resulting 2-functor acts, mostly explicitly (right-oriented caps and cups are difficult to write down). A subset of these details gives an explicit description of the strong supercategorical action of \cite{KKO,KKO2}. For short, we will sometimes abbreviate $R^\Lambda(n)$ by simply $n$ when there is no chance of confusion.
For instance, ``$(n,n+1)$-bimodule'' means ``$(R^\Lambda(n),R^\Lambda(n+1))$-bimodule.'' There is a morphism of super-2-categories mapping $\mathcal{SB}im$ to $\mathcal{SC}at$ sending a superalgebra to its supermodule category and a super bimodule to the superfunctor of tensoring with the super bimodule. We will go between these two languages freely. In particular, we will sometimes identify the $(A,B)$-bimodule $M$ with the superfunctor $M\otimes\mbox{--}$ from $B\text{-mod}$ to $A\text{-mod}$ (for us, ${\mathbbm Z}$-graded supermodule categories). \subsubsection{On objects} The integral weight $\lambdaanglembda$ is sent to the superalgebra $R^\Lambda(n)$, where $\lambdaanglembda=\Lambda-2n$. Note that $R^\Lambda(n)=0$ unless $0\lambdaeq n\lambdaeq\Lambda$. \subsubsection{On 1-morphisms} $\blacktriangleright$egin{itemize} \item $\mathcal{E}1bbl{\rm co}lonto{\mathrm{Res}}^{n+1}_n$, or the $(n,n+1)$-bimodule $_nR^\Lambda(n+1)$ \item $1bbl\mathcal{F}{\rm co}lonto{\mathrm{Ind}}^{n+1}_n$, or the $(n+1,n)$-bimodule $R^\Lambda(n+1)_n$ \item ${\mathbbm P}i1bbl{\rm co}lonto{\mathbbm P}i_n$ $\blacktriangleleft$nd{itemize} In the above, ${\mathbbm P}i_n$ is short for ${\mathbbm P}i_{R^\Lambda(n)}$; the endofunctor ${\mathbbm P}i_A$ of $A\text{-mod}$ shifts the ${\mathbbm Z}_{2}$-grading by one and twists the left action by the parity involution $\iota_A$ (see \ref{eqn-parity-involution}). \subsubsection{On 2-morphisms} The super-2-category structure morphisms $\xi^{\pm1},\alpha_F^{\pm1}$ are sent to the corresponding structure morphisms in $\mathcal{SB}im$, but we will write all of them explicitly for completeness. $\blacktriangleright$egin{itemize} \item On $\xi^{\pm1}$: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-action-xi}$\blacktriangleright$egin{split} &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,0) .. controls (0,.7) and (1,.7) .. 
(1,0); $\blacktriangleleft$nd{tikzpicture}} \quad\lambdaanglembda\quad\text{is sent to}\quad\xi:{\mathbbm P}i^21bl\rightarrow1bl,\\ &{\mathbbm P}i^2R^\Lambda(n)\rightarrow R^\Lambda(n),\qquad x{\rm co}lonto x,\\ &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, color=blue, dashed] (0,1) .. controls (0,.3) and (1,.3) .. (1,1); $\blacktriangleleft$nd{tikzpicture}} \quad\lambdaanglembda\quad\text{is sent to}\quad\xi^{-1}:1bl\rightarrow{\mathbbm P}i^21bl,\\ &R^\Lambda(n)\rightarrow{\mathbbm P}i^2R^\Lambda(n),\qquad x{\rm co}lonto x, $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \item On $\alpha_\mathtt{E}^{\pm1}$, $\alpha_\mathtt{F}^{\pm1}$, and $\alpha_{\mathbbm P}i=\alpha_{\mathbbm P}i^{-1}$: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-action-alpha}$\blacktriangleright$egin{split} \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, ->] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, left](){\small$\lambdaanglembda+2$\;\;} node[pos=.5, right](){\;\;\small$\lambdaanglembda$}; \draw[thick, color=blue, dashed] (1,0) .. controls (1,1) and (0,1) .. (0,2); $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \alpha_\mathtt{E}:\mathtt{E}{\mathbbm P}i1bl&\rightarrow{\mathbbm P}i\mathtt{E}1bl,\\ _{n-1}R^\Lambda(n){\mathbbm P}i&\rightarrow{\mathbbm P}i_{n-1}R^\Lambda(n),\qquad y{\rm co}lonto (-1)^{p(y)}y\\ \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, color=blue, dashed] (0,0) .. controls (0,1) and (1,1) .. (1,2); \draw[thick, ->] (1,0) .. controls (1,1) and (0,1) .. 
(0,2) node[pos=.5, left](){\small$\lambdaanglembda+2$\;\;} node[pos=.5, right](){\;\;\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \alpha_\mathtt{E}^{-1}:{\mathbbm P}i\mathtt{E}1bl&\rightarrow\mathtt{E}{\mathbbm P}i1bl,\\ {\mathbbm P}i_{n-1}R^\Lambda(n)&\rightarrow_{n-1}R^\Lambda(n){\mathbbm P}i,\qquad y{\rm co}lonto(-1)^{p(y)}y\\ \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, <-] (0,0) .. controls (0,1) and (1,1) .. (1,2) node[pos=.5, left](){\small$\lambdaanglembda$\;\;} node[pos=.5, right](){\;\;\small$\lambdaanglembda+2$}; \draw[thick, color=blue, dashed] (1,0) .. controls (1,1) and (0,1) .. (0,2); $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \alpha_\mathtt{F}:1bl\mathtt{F}{\mathbbm P}i&\rightarrow1bl{\mathbbm P}i\mathtt{F},\\ _nR^\Lambda(n-1){\mathbbm P}i&\rightarrow{\mathbbm P}i_{n-1}R^\Lambda(n),\qquad y{\rm co}lonto (-1)^{p(y)}y\\ \hackcenter{$\blacktriangleright$egin{tikzpicture}[scale=0.5] \draw[thick, color=blue, dashed] (0,0) .. controls (0,1) and (1,1) .. (1,2); \draw[thick, <-] (1,0) .. controls (1,1) and (0,1) .. (0,2) node[pos=.5, left](){\small$\lambdaanglembda$\;\;} node[pos=.5, right](){\;\; \small$\lambdaanglembda+2$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \alpha_\mathtt{F}^{-1}:1bl{\mathbbm P}i\mathtt{F}&\rightarrow1bl\mathtt{F}{\mathbbm P}i,\\ {\mathbbm P}i R^\Lambda(n)_{n-1}&\rightarrow R^\Lambda(n)_{n-1}{\mathbbm P}i,\qquad y{\rm co}lonto(-1)^{p(y)}y. 
$\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \item On dots: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-action-dots}$\blacktriangleright$egin{split} &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) -- (0,1.5) node[pos=.5, shape=coordinate](DOT){} node[pos=.25, left](){\small$\lambdaanglembda+2$\;\;} node[pos=.25, right](){\;\;\small$\lambdaanglembda$}; \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (-.5,1.5); \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad x_\mathtt{E}:\mathtt{E}1bl\rightarrow{\mathbbm P}i\mathtt{E}1bl,\\ _{n-1}R^\Lambda(n)&\rightarrow{\mathbbm P}i_{n-1}R^\Lambda(n),\qquad y{\rm co}lonto x_ny,\\ &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) -- (0,1.5) node[pos=.5, shape=coordinate](DOT){} node[pos=.25, left](){\small$\lambdaanglembda$\;\;} node[pos=.25, right](){\;\;\small$\lambdaanglembda+2$}; \draw[thick, color=blue, dashed] (DOT) [out=135, in=-90] to (.5,1.5); \node at (DOT) {$\blacktriangleright$bullet}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad x_\mathtt{F}:1bl\mathtt{F}\rightarrow1bl\mathtt{F}{\mathbbm P}i,\\ R^\Lambda(n)_{n-1}&\rightarrow R^\Lambda(n)_{n-1}{\mathbbm P}i,\qquad y{\rm co}lonto yx_n. $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \item On up- and down-crossings: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-action-crossings}$\blacktriangleright$egin{split} &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, left](){\small$\lambdaanglembda+4$\;\;}; \draw[thick, color=blue, dashed] (CROSSING) [out=135, in=-90] to (-.5,1.5); \draw[thick, ->] (.5,0) .. controls (.5,.5) and (0,1) .. 
(0,1.5) node[pos=.25, right](){\;\;\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \tau_\mathtt{E}:\mathtt{E}^21bl\rightarrow{\mathbbm P}i\mathtt{E}^21bl,\\ _{n-2}R^\Lambda(n)&\rightarrow{\mathbbm P}i_{n-2}R^\Lambda(n),\qquad y{\rm co}lonto \tau_{n-1}y\\ &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, <-] (0,0) .. controls (0,.5) and (.5,1) .. (.5,1.5) node[pos=.5, shape=coordinate](CROSSING){} node[pos=.25, left](){\small$\lambdaanglembda$\;\;}; \draw[thick, color=blue, dashed] (CROSSING) [out=135, in=-90] to (-.5,1.5); \draw[thick, <-] (.5,0) .. controls (.5,.5) and (0,1) .. (0,1.5) node[pos=.25, right](){\;\;\small$\lambdaanglembda+4$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad \tau_\mathtt{F}:1bl\mathtt{F}^2\rightarrow1bl\mathtt{F}^2{\mathbbm P}i,\\ R^\Lambda(n)_{n-2}&\rightarrow R^\Lambda(n)_{n-2}{\mathbbm P}i,\qquad y{\rm co}lonto y\tau_{n-1}. $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \item On the left cap and left cup: $\blacktriangleright$egin{equation}\lambdaanglebel{eqn-action-left-cap-cup}$\blacktriangleright$egin{split} &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1,0) .. controls (1,.8) and (0,.8) .. (0,0) node[pos=.25, right](){\;\;\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad$\blacktriangleleft$psilonsilon:\mathtt{F}\mathtt{E}1bl\rightarrow1bl,\\ &R^\Lambda(n)\otimes_{n-1}R^\Lambda(n)\rightarrow R^\Lambda(n),\qquad w\otimes y{\rm co}lonto wy\\ &\hackcenter{$\blacktriangleright$egin{tikzpicture} \draw[thick, ->] (1,1) .. controls (1,.2) and (0,.2) .. 
(0,1) node[pos=.25, right](){\;\;\small$\lambdaanglembda$}; $\blacktriangleleft$nd{tikzpicture}} \quad\text{is sent to}\quad\widetilde{e}a:1bl\rightarrow\mathtt{E}\mathtt{F}1bl,\\ &R^\Lambda(n)\rightarrow\hspace{.01in}_nR^\Lambda(n+1)_n,\qquad w{\rm co}lonto w.\\ $\blacktriangleleft$nd{split}$\blacktriangleleft$nd{equation} \item The action of the right cap and right cup is determined, as described in Section \ref{sec-formal}, by the action of the left cap and left cup. See Section 8 of \cite{KKO} for descriptions of these maps. $\blacktriangleleft$nd{itemize} \subsubsection{Summary} We summarize this subsection as follows. $\blacktriangleright$egin{thm} For each dominant integral weight $\Lambda$, there is a super-2-functor $\blacktriangleright$egin{equation} U_qdotc\rightarrow\mathcal{ONH}^\Lambda $\blacktriangleleft$nd{equation} defined by equations $\blacktriangleleft$qref{eqn-action-xi}--$\blacktriangleleft$qref{eqn-action-left-cap-cup}. After taking Grothendieck groups, this action becomes the action of $U_qdotpi$ on its integrable simple module $_{\mathcal{A}_\pi}V^\Lambda$.$\blacktriangleleft$nd{thm} \subsection{Consequences of the action}\lambdaanglebel{subsec-consequences} We have constructed an upper bound for the space of Homs in the 2-category $U_qc$ in Section~\ref{subsec:upper}. The existence of the 2-representation of $U_qc$ into cyclotomic quotients provides a lower bound for the space of Homs that can be explicitly computed in certain degrees. For example, by varying the integral weight $\Lambda$ the fact that $R^\Lambda(n)$ is a brick in the cyclotomic 2-category immediately implies the following corollary. $\blacktriangleright$egin{cor}\lambdaanglebel{cor-ef-bricks} In the 2-category $U_qdotc$ the 1-morphisms $1bbl$ are bricks. 
That is, \[ {\rm Hom}_{U_qdotc}(1bbl, 1bbl \langle \ell\rangle) \cong \left\{\begin{array}{ll} 0 & \ell<0 \\ \Bbbk & \ell=0. \end{array}\right. \] \end{cor} \section{(De)categorification} \subsection{Indecomposables in $U_qdotc$} \begin{prop} The divided power 1-morphisms $\mathcal{E}^{(a)}1bbl$ and $\mathcal{F}^{(b)}1bbl$ are bricks for all $a,b \geq 0$. \end{prop} \begin{proof} This follows by induction from Corollaries~\ref{cor:homonel} and \ref{cor-ef-bricks} following arguments similar to those in \cite[Lemma 4.9]{CKL2}. \end{proof} \begin{prop} \label{prop_indecomp} The 1-morphisms \begin{enumerate}[(i)] \item $\mathcal{E}^{(a)}\mathcal{F}^{(b)}1bbl\langle s\rangle \quad $ for $a$,$b\in {\mathbbm N}$, $\lambda,s \in{\mathbbm Z}$, $\lambda\leq b-a$, \item $\mathcal{F}^{(b)}\mathcal{E}^{(a)}1bbl\langle s\rangle \quad$ for $a$,$b\in{\mathbbm N}$, $\lambda,s \in{\mathbbm Z}$, $\lambda\geq b-a$, \end{enumerate} are indecomposable. Furthermore, these indecomposables are not isomorphic unless $\lambda=b-a$ in which case $ \mathcal{E}^{(a)}\mathcal{F}^{(b)}1bb_{b-a}\langle s\rangle \cong \mathcal{F}^{(b)}\mathcal{E}^{(a)}1bb_{b-a}\langle s\rangle$. In other words, the indecomposable projectives of the super-2-category $U_qdotc$ categorify the canonical basis of \cite{ClarkWang}. \end{prop} \begin{proof} It is straightforward to show that these 1-morphisms are bricks using adjointness and the fact that divided powers are bricks (see for example \cite[Proposition 9.9]{Lau1}). This implies indecomposability.
Furthermore, one can show that the space of Homs between any two such elements is positively graded (in $q{\mathbbm N}[\pi,q]$) implying that they are pairwise non-isomorphic (see for example \cite[Proposition 9.9]{Lau1}). \end{proof} \begin{prop} \label{prop_Uindec}\hspace{2in} \begin{enumerate}[(i)] \item \label{item_indec1} Every 1-morphism $x$ in ${\rm Hom}_{U_qdotc}(\lambda,\lambda')$ decomposes as a direct sum of indecomposable 1-morphisms of the form \begin{eqnarray} 1bb_{\lambda'}{\mathbbm P}i^s \mathcal{E}^{(a)}\mathcal{F}^{(b)}1bbl\langle t\rangle &\quad& \text{for $a$,$b\in {\mathbbm N}$, $s,t \in{\mathbbm Z}$, $\lambda\leq b-a$, } \notag\\ 1bb_{\lambda'}{\mathbbm P}i^s \mathcal{F}^{(b)}\mathcal{E}^{(a)}1bbl\langle t\rangle &\quad& \text{for $a$,$b\in{\mathbbm N}$, $s,t \in{\mathbbm Z}$, $\lambda \geq b-a$,} \label{eq_Bdot} \end{eqnarray} where $\lambda'=\lambda-2(b-a)$. \item \label{item_indec2} The direct sum decomposition of $x \in {\rm Hom}_{U_qdotc}(\lambda,\lambda')$ is essentially unique, meaning that the indecomposables and their multiplicities are unique up to reordering the factors. \item \label{item_indec3} The morphisms in \eqref{eq_Bdot} of \eqref{item_indec1} above are the only indecomposables in $U_qdotc$ up to isomorphism. \item \label{item_indec4} The 1-morphisms $\mathcal{E}^{(a)}\mathcal{F}^{(b)}1bb_{b-a}\langle t \rangle$ and $\mathcal{F}^{(b)}\mathcal{E}^{(a)}1bb_{b-a}\langle t\rangle$ are isomorphic in $U_qdotc$.
\end{enumerate} \end{prop} \begin{proof} To prove \eqref{item_indec1} it suffices to show that any element $x=1bb_{\lambda'} {\mathbbm P}i^{s_1}\mathcal{E}^{\alpha_1}\mathcal{F}^{\beta_1}{\mathbbm P}i^{s_2}\mathcal{E}^{\alpha_2} \cdots \mathcal{F}^{\beta_{k-1}}{\mathbbm P}i^{s_k}\mathcal{E}^{\alpha_k}\mathcal{F}^{\beta_k}1bbl\langle t \rangle$ in $U_qc$ decomposes as a sum of elements in \eqref{eq_Bdot}. Using the super-2-category structure the 1-morphism $x$ is isomorphic to a 1-morphism of the form $x'=1bb_{\lambda'} {\mathbbm P}i^{s'}\mathcal{E}^{\alpha_1}\mathcal{F}^{\beta_1}\mathcal{E}^{\alpha_2} \cdots \mathcal{F}^{\beta_{k-1}}\mathcal{E}^{\alpha_k}\mathcal{F}^{\beta_k}1bbl\langle t \rangle$ for $s'=s_1+s_2+\dots + s_k$. Following the arguments in \cite[Proposition 9.10]{Lau1} completes the proof of \eqref{item_indec1}. The Krull-Schmidt theorem then establishes \eqref{item_indec2}, and \eqref{item_indec3} (see Chapter I of \cite{Benson}). The proof of \eqref{item_indec4} is identical to the proof of \cite[Corollary 9.11]{Lau1}. \end{proof} \subsection{Main theorem---categorification} \begin{thm} \label{thm_Groth} The split Grothendieck group $K_0(U_qdotc)$ is isomorphic as an $\mathcal{A}_\pi$-module to the integral covering algebra $_{{\mathbbm A}c}U_qdotpi$ introduced by Clark and Wang \cite{ClarkWang}. \end{thm} \begin{proof} Since $U_qdotc$ has the Krull-Schmidt property (Proposition~\ref{prop_Uindec}), its Grothendieck group is freely generated as an $\mathcal{A}_\pi$-module by the isomorphism classes of indecomposables with no shifts.
We have shown that these isomorphism classes of indecomposables correspond bijectively to elements in the canonical basis of the covering algebra introduced by Clark and Wang. Therefore, the homomorphism $\gamma \colon _{{\mathbbm A}c}U_qpi \rightarrow K_0(U_qdotc)$ from Proposition~\ref{prop-gamma} is an isomorphism. \end{proof} \begin{thebibliography}{10} \bibitem{BN1} D.~Bar-Natan. \newblock On {K}hovanov's categorification of the {J}ones polynomial. \newblock {\em Algebr. Geom. Topol.}, 2:337--370 (electronic), 2002. \newblock \href{http://arxiv.org/abs/0201043}{arXiv:0201043}. \bibitem{BN2} D.~Bar-Natan. \newblock Khovanov's homology for tangles and cobordisms. \newblock {\em Geom. Topol.}, 9:1443--1499, 2005. \newblock \href{http://arxiv.org/abs/0410495}{arXiv:0410495}. \bibitem{Benson} D.~Benson. \newblock {\em Representations and cohomology. {I}: {B}asic representation theory of finite groups and associative algebras}, volume~30 of {\em Cambridge Studies in Advanced Mathematics}. \newblock Cambridge U. Press, second edition, 1998. \bibitem{BFK} J.~Bernstein, I.~B. Frenkel, and M.~Khovanov. \newblock A categorification of the {T}emperley-{L}ieb algebra and {S}chur quotients of {U}(${\mathfrak{sl}}_2$) via projective and {Z}uckerman functors. \newblock {\em Selecta Math. (N.S.)}, 5(2):199--241, 1999. \bibitem{Bor} F.~Borceux. \newblock {\em Handbook of categorical algebra. 1}, volume~50 of {\em Encyclopedia of Mathematics and its Applications}. \newblock Cambridge University Press, Cambridge, 1994. \bibitem{BrSt2} J.~Brundan and C.~Stroppel. \newblock Highest weight categories arising from {K}hovanov's diagram algebra. {II}. {K}oszulity.
\newblock {\em Transform. Groups}, 15(1):1--45, 2010. \newblock \href{http://arxiv.org/abs/0806.3472}{arXiv:0806.3472}. \bibitem{BrSt1} J.~Brundan and C.~Stroppel. \newblock Highest weight categories arising from {K}hovanov's diagram algebra {I}: cellularity. \newblock {\em Mosc. Math. J.}, 11(4):685--722, 821--822, 2011. \newblock \href{http://arxiv.org/abs/0806.1532}{arXiv:0806.1532}. \bibitem{BrSt3} J.~Brundan and C.~Stroppel. \newblock Highest weight categories arising from {K}hovanov's diagram algebra {III}: category {$\mathcal{O}$}. \newblock {\em Represent. Theory}, 15:170--243, 2011. \newblock \href{http://arxiv.org/abs/0812.1090}{arXiv:0812.1090}. \bibitem{BrSt4} J.~Brundan and C.~Stroppel. \newblock Highest weight categories arising from {K}hovanov's diagram algebra {IV}: the general linear supergroup. \newblock {\em J. Eur. Math. Soc. (JEMS)}, 14(2):373--419, 2012. \newblock \href{http://arxiv.org/abs/0907.2543}{arXiv:0907.2543}. \bibitem{Cap4} C.~Caprau. \newblock {$\rm sl(2)$} tangle homology with a parameter and singular cobordisms. \newblock {\em Algebr. Geom. Topol.}, 8(2):729--756, 2008. \newblock \href{http://arxiv.org/abs/0707.3051}{arXiv:0707.3051}. \bibitem{Cautis} S.~Cautis. \newblock Clasp technology to knot homology via the affine {G}rassmannian. \newblock 2012. \newblock \href{http://arxiv.org/abs/1207.2074}{arXiv:1207.2074}. \bibitem{CK01} S.~Cautis and J.~Kamnitzer. \newblock Knot homology via derived categories of coherent sheaves. {I}. {T}he {${\mathfrak{sl}}(2)$}-case. \newblock {\em Duke Math. J.}, 142(3):511--588, 2008. \newblock \href{http://arxiv.org/abs/0701194}{arXiv:0701194}. \bibitem{CK3} S.~Cautis and J.~Kamnitzer. \newblock Braiding via geometric {L}ie algebra actions.
\newblock {\em Compos. Math.}, 148(2):464--506, 2012. \newblock arXiv:1001.0619. \bibitem{CKL3} S.~Cautis, J.~Kamnitzer, and A.~Licata. \newblock Derived equivalences for cotangent bundles of grassmannians via categorical sl(2) actions. \newblock 2009. \newblock \href{http://arxiv.org/abs/0902.1797}{arXiv:0902.1797}. \bibitem{CKL2} S.~Cautis, J.~Kamnitzer, and A.~Licata. \newblock Coherent sheaves and categorical {$\mathfrak{sl}_2$} actions. \newblock {\em Duke Math. J.}, 154(1):135--179, 2010. \newblock \href{http://arxiv.org/abs/0902.1796}{arXiv:0902.1796}. \bibitem{CKL4} S.~Cautis, J.~Kamnitzer, and A.~Licata. \newblock Coherent sheaves on quiver varieties and categorification. \newblock 2011. \newblock \href{http://arxiv.org/abs/1104.0352}{arXiv:1104.0352}. \bibitem{CL} S.~Cautis and A.~D. Lauda. \newblock Implicit structure in 2-representations of quantum groups. \newblock 2011. \newblock \href{http://arxiv.org/abs/1111.1431}{arXiv:math.QA/1111.1431}. \bibitem{ChK} Y.~Chen and M.~Khovanov. \newblock An invariant of tangle cobordisms via subquotients of arc rings, 2006. \newblock arXiv:math/0610054. \bibitem{CR} J.~Chuang and R.~Rouquier. \newblock Derived equivalences for symmetric groups and sl(2)-categorification. \newblock {\em Annals of Mathematics}, (167):245--298, 2008. \newblock \href{http://arxiv.org/abs/math/0407205}{arXiv:math.RT/0407205}. \bibitem{CMW} D.~Clark, S.~Morrison, and K.~Walker. \newblock Fixing the functoriality of {K}hovanov homology. \newblock {\em Geom. Topol.}, 13(3):1499--1582, 2009. \newblock \href{http://arxiv.org/abs/0701339}{arXiv:0701339}. \bibitem{CFLW} S.~Clark, Z.~Fan, Y.~Li, and W.~Wang. \newblock Quantum supergroups {III}. {T}wistors.
\newblock To appear in {\em Communications in Mathematical Physics}. \newblock 2013. \newblock \href{http://arxiv.org/abs/1307.7056}{arXiv:1307.7056}. \bibitem{CHW} S.~Clark, D.~Hill, and W.~Wang. \newblock Quantum supergroups {I}. {F}oundations. \newblock To appear in {\em Transformation Groups}. \newblock 2013. \newblock \href{http://arxiv.org/abs/1301.1665}{arXiv:1301.1665}. \bibitem{CHW2} S.~Clark, D.~Hill, and W.~Wang. \newblock Quantum supergroups {II}. {C}anonical basis. \newblock 2013. \newblock \href{http://arxiv.org/abs/1304.7837}{arXiv:1304.7837}. \bibitem{ClarkWang} S.~Clark and W.~Wang. \newblock Canonical basis for quantum {$\mathfrak{osp}(1|2)$}. \newblock {\em Lett. Math. Phys.}, 103(2):207--231, 2013. \newblock \href{http://arxiv.org/abs/1204.3940}{arXiv:math.QA/1204.3940}. \bibitem{EQ} B.~Elias and Y.~Qi. \newblock An approach to categorification of some small quantum groups {II}. \newblock 2013. \newblock \href{http://arxiv.org/abs/1302.5478}{arXiv:1302.5478}. \bibitem{EOddLR} A.~P. Ellis. \newblock The odd {L}ittlewood-{R}ichardson rule. \newblock {\em Journal of Algebraic Combinatorics}, 2012. \newblock \href{http://arxiv.org/abs/1111.3932}{arXiv:math.QA/1111.3932}. \bibitem{EK} A.~P. Ellis and M.~Khovanov. \newblock The {H}opf algebra of odd symmetric functions. \newblock {\em Advances in Mathematics}, 231(2):965--999, 2012. \newblock \href{http://arxiv.org/abs/1107.5610}{arXiv:math.QA/1107.5610}. \bibitem{EKL} A.~P. Ellis, M.~Khovanov, and A.~Lauda. \newblock The odd nil{H}ecke algebra and its diagrammatics. \newblock {\em International Mathematics Research Notices}, 2012. \newblock \href{http://arxiv.org/abs/1111.1320}{arXiv:math.QA/1111.1320}. \bibitem{FKS} I.~B.
Frenkel, M.~Khovanov, and C.~Stroppel. \newblock A categorification of finite-dimensional irreducible representations of quantum sl(2) and their tensor products. \newblock {\em Selecta Math. (N.S.)}, 12(3-4):379--431, 2006. \newblock \href{http://arxiv.org/abs/0511467}{math.QA/0511467}. \bibitem{HS} D.~Hill and J.~Sussan. \newblock The {K}hovanov-{L}auda 2-category and categorifications of a level two quantum {$\mathfrak{sl}_n$} representation. \newblock {\em Int. J. Math. Math. Sci.}, 2010. \newblock \href{http://arxiv.org/abs/0910.2496}{arXiv:0910.2496}. \bibitem{HillWang} D.~Hill and W.~Wang. \newblock Categorification of quantum {K}ac-{M}oody superalgebras. \newblock 2012. \newblock To appear in {\em Trans. AMS}, \href{http://arxiv.org/abs/1202.2769}{arXiv:math.QA/1202.2769}. \bibitem{KK} S.-J. Kang and M.~Kashiwara. \newblock Categorification of highest weight modules via {K}hovanov-{L}auda-{R}ouquier algebras. \newblock 2011. \newblock \href{http://arxiv.org/abs/1102.4677}{arXiv:math.QA/1102.4677}. \bibitem{KKO} S.-J. Kang, M.~Kashiwara, and S.-J. Oh. \newblock Supercategorification of quantum {K}ac--{M}oody algebras. \newblock {\em Adv. Math.}, 242:116--162, 2013. \newblock \href{http://arxiv.org/abs/1206.5933}{arXiv:math.RT/1206.5933}. \bibitem{KKO2} S.-J. Kang, M.~Kashiwara, and S.-J. Oh. \newblock Supercategorification of quantum {K}ac-{M}oody algebras {II}. \newblock 2013. \newblock \href{http://arxiv.org/abs/1303.1916}{arXiv:math.RT/1303.1916}. \bibitem{KKT} S.-J. Kang, M.~Kashiwara, and S.~Tsuchioka. \newblock Quiver {H}ecke superalgebras. \newblock 2011. \newblock \href{http://arxiv.org/abs/1107.1039}{arXiv:math.QA/1107.1039v1}. \bibitem{Kash} M.~Kashiwara. \newblock Biadjointness in cyclotomic {K}hovanov-{L}auda-{R}ouquier algebras. 
\newblock {\em Publ. Res. Inst. Math. Sci.}, 48(3):501--524, 2012. \newblock \href{http://arxiv.org/abs/1111.5898}{arXiv:1111.5898}. \bibitem{ks1} G.~M. Kelly and R.~Street. \newblock Review of the elements of {$2$}-categories. \newblock In {\em Category Seminar (Proc. Sem., Sydney, 1972/1973)}, pages 75--103. Lecture Notes in Math., Vol. 420. Springer, Berlin, 1974. \bibitem{KW1} T.~Khongsap and W.~Wang. \newblock {H}ecke-{C}lifford algebras and spin {H}ecke algebras {I}: {T}he classical affine type. \newblock {\em Transf. Groups}, 13:389--412, 2008. \newblock \href{http://arxiv.org/abs/0704.0201}{arXiv:math.RT/0704.0201}. \bibitem{KW2} T.~Khongsap and W.~Wang. \newblock {H}ecke-{C}lifford algebras and spin {H}ecke algebras {II}: {T}he rational double affine type. \newblock {\em Pacific J. Math.}, 238:73--103, 2008. \newblock \href{http://arxiv.org/abs/0710.5877}{arXiv:math.RT/0710.5877}. \bibitem{KW4} T.~Khongsap and W.~Wang. \newblock {H}ecke-{C}lifford algebras and spin {H}ecke algebras {IV}: {O}dd double affine type. \newblock {\em SIGMA}, 5, 2009. \newblock \href{http://arxiv.org/abs/0810.2068}{arXiv:math.RT/0810.2068}. \bibitem{Kh1} M.~Khovanov. \newblock A categorification of the {J}ones polynomial. \newblock {\em Duke Math. J.}, 101(3):359--426, 2000. \newblock math.QA/9908171. \bibitem{Kh2} M.~Khovanov. \newblock A functor-valued invariant of tangles. \newblock {\em Algebr. Geom. Topol.}, 2:665--741 (electronic), 2002. \newblock math.QA/0103190. \bibitem{KhSp} M.~Khovanov. \newblock Crossingless matchings and the cohomology of {$(n,n)$} {S}pringer varieties. \newblock {\em Commun. Contemp. Math.}, 6(4):561--577, 2004. \newblock arXiv:math/0202110. 
\bibitem{KhHopf} M.~Khovanov. \newblock Hopfological algebra and categorification at a root of unity: the first steps. \newblock 2005. \newblock \href{http://arxiv.org/abs/0509083}{arXiv:math/0509083}. \bibitem{KhDiagrammatics} M.~Khovanov. \newblock Categorifications from planar diagrammatics. \newblock 2010. \newblock \href{http://arxiv.org/abs/1008.5084}{arXiv:math.QA/1008.5084}. \bibitem{KL1} M.~Khovanov and A.~Lauda. \newblock A diagrammatic approach to categorification of quantum groups {I}. \newblock {\em Representation Theory}, 13:309--347, 2009. \newblock \href{http://arxiv.org/abs/0803.4121}{arXiv:math.QA/0803.4121}. \bibitem{KL3} M.~Khovanov and A.~Lauda. \newblock A diagrammatic approach to categorification of quantum groups {III}. \newblock {\em Quantum Topology}, 1:1--92, 2010. \newblock \href{http://arxiv.org/abs/0807.3250}{arXiv:math.QA/0807.3250}. \bibitem{KQ} M.~Khovanov and Y.~Qi. \newblock An approach to categorification of some small quantum groups. \newblock 2012. \newblock \href{http://arxiv.org/abs/1208.0616}{arXiv:math.QA/1208.0616}. \bibitem{KhR} M.~Khovanov and L.~Rozansky. \newblock Matrix factorizations and link homology. \newblock {\em Fund. Math.}, 199(1):1--91, 2008. \newblock \href{http://arxiv.org/abs/0401268}{arXiv:0401268}. \bibitem{KhR2} M.~Khovanov and L.~Rozansky. \newblock Matrix factorizations and link homology. {II}. \newblock {\em Geom. Topol.}, 12(3):1387--1425, 2008. \newblock \href{http://arxiv.org/abs/0505056}{arXiv:0505056}. \bibitem{Lau1} A.~Lauda. \newblock A categorification of quantum sl(2). \newblock {\em Adv. Math.}, 225(6):3327--3424, 2010. \newblock \href{http://arxiv.org/abs/0803.3652}{arXiv:math.QA/0803.3652}. \bibitem{LR} A.~Lauda and H.~Russell. 
\newblock Oddification of the cohomology of type {A} {S}pringer varieties. \newblock 2012. \newblock \href{http://arxiv.org/abs/1203.0797}{arXiv:math.RT/1203.0797}. \bibitem{Lau4} A.~D. Lauda. \newblock An introduction to diagrammatic algebra and categorified quantum ${\mathfrak{sl}}_2$. \newblock {\em Bulletin Inst. Math. Academia Sinica}, 7:165--270, 2012. \newblock \href{http://arxiv.org/abs/1106.2128}{arXiv:1106.2128}. \bibitem{LP3} A.~D. Lauda and H.~Pfeiffer. \newblock Open-closed {TQFTS} extend {K}hovanov homology from links to tangles. \newblock {\em J. Knot Theory Ramifications}, 18(1):87--150, 2009. \bibitem{LQR} A.D. Lauda, H.~Queffelec, and D.E.V. Rose. \newblock Khovanov homology is a skew howe 2-representation of categorified quantum sl (m). \newblock {\em arXiv preprint arXiv:1212.6076}, 2012. \newblock \href{http://arxiv.org/abs/1212.6076}{arXiv:1212.6076}. \bibitem{Lus4} G.~Lusztig. \newblock {\em Introduction to quantum groups}, volume 110 of {\em Progress in Mathematics}. \newblock Birkh\"auser Boston Inc., Boston, MA, 1993. \bibitem{Mac} M.~Mackaay. \newblock sl(3)-foams and the {K}hovanov-{L}auda categorification of quantum sl(k). \newblock 2009. \newblock \href{http://arxiv.org/abs/0905.2059}{arXiv:0905.2059}. \bibitem{MPT} M.~Mackaay, W.~Pan, and D.~Tubbenhauer. \newblock The $sl_3$ web algebra. \newblock 2012. \newblock \href{http://arxiv.org/abs/1206.2118}{arXiv:1206.2118v2}. \bibitem{ORS} P.~Ozsv\'{a}th, J.~Rasmussen, and Z.~Szab\'{o}. \newblock Odd {K}hovanov homology. \newblock 2007. \newblock \href{http://arxiv.org/abs/0710.4300}{arXiv:math.QA/0710.4300}. \bibitem{Putyra} K.~Putyra. \newblock A 2-category of chronological cobordisms and odd {K}hovanov homology. \newblock 2013. 
\newblock \href{http://arxiv.org/abs/1310.1895}{arXiv:math.QA/1310.1895}. \bibitem{QYHopf} Y.~Qi. \newblock Hopfological algebra. \newblock 2012. \newblock \href{http://arxiv.org/abs/1205.1814}{arXiv:math.KT/1205.1814}. \bibitem{Rin} C.~Ringel. \newblock Tame algebras and integral quadratic forms. \newblock {\em Lecture Notes in Mathematics}, 1099, 1984. \bibitem{Rou2} R.~Rouquier. \newblock 2-{K}ac-{M}oody algebras, 2008. \newblock \href{http://arxiv.org/abs/0812.5023}{arXiv:math.RT/0812.5023}. \bibitem{SeSm} P.~Seidel and I.~Smith. \newblock A link invariant from the symplectic geometry of nilpotent slices. \newblock {\em Duke Math. J.}, 134(3):453--514, 2006. \newblock \href{http://arxiv.org/abs/0405089}{arXiv:0405089}. \bibitem{Shum} A.~Shumakovitch. \newblock Patterns in odd {K}hovanov homology. \newblock {\em J. Knot Theory Ramifications}, 20(1):203--222, 2011. \newblock \href{http://arxiv.org/abs/1101.5607}{arXiv:1101.5607}. \bibitem{Strop2} C.~Stroppel. \newblock Categorification of the {T}emperley-{L}ieb category, tangles, and cobordisms via projective functors. \newblock {\em Duke Math. J.}, 126(3):547--596, 2005. \bibitem{Strop1} C.~Stroppel. \newblock Parabolic category {$O$}, perverse sheaves on {G}rassmannians, {S}pringer fibres and {K}hovanov homology. \newblock {\em Compos. Math.}, 145(4):954--992, 2009. \newblock \href{http://arxiv.org/abs/0608234}{arXiv:0608234}. \bibitem{SW} C.~Stroppel and B.~Webster. \newblock 2-block {S}pringer fibers: convolution algebras and coherent sheaves, 2008. \newblock \href{http://arxiv.org/abs/0802.1943}{arXiv:0802.1943}. \bibitem{Sussan} J.~Sussan. \newblock Category {O} and sl(k) link invariants. \newblock page 131, 2007. 
\newblock Thesis (Ph.D.)--Yale University. \bibitem{MW} V.~Mikhaylov and E.~Witten. \newblock Branes and supergroups, 2013. \newblock To appear. \bibitem{Wang2} W.~Wang. \newblock Spin {H}ecke algebras of finite and affine types. \newblock {\em Adv. Math.}, 212(2):723--748, 2007. \newblock \href{http://arxiv.org/abs/math/0611950}{arXiv:math/0611950}. \bibitem{Wang} W.~Wang. \newblock Double affine {H}ecke algebras for the spin symmetric group. \newblock {\em Math. Res. Lett.}, 16:1071--1085, 2009. \newblock \href{http://arxiv.org/abs/math/0608074}{arXiv:math.RT/0608074}. \bibitem{Web} B.~Webster. \newblock Knot invariants and higher representation theory {I}: {D}iagrammatic and geometric categorification of tensor products, 2010. \newblock \href{http://arxiv.org/abs/1001.2020}{arXiv:math.QA/1001.2020}. \bibitem{Web2} B.~Webster. \newblock Knot invariants and higher representation theory {II}: {T}he categorification of quantum knot invariants, 2010. \newblock \href{http://arxiv.org/abs/1005.4559}{arXiv:math.QA/1005.4559}. \bibitem{Witten2} E.~Witten. \newblock Khovanov homology and gauge theory, 2011. \newblock \href{http://arxiv.org/abs/1108.3103}{arXiv:1108.3103}. \bibitem{Witten} E.~Witten. \newblock Fivebranes and knots. \newblock {\em Quantum Topol.}, 3(1):1--137, 2012. \end{thebibliography} \end{document}
\begin{document} \baselineskip 13pt \title{Deterministic Quantum Distribution of a {\bm d}\,-ary key} \author{Anita Eusebi} \email{anita.eusebi(at)unicam.it} \affiliation{Dipartimento di Matematica ed Informatica, Universit\`{a} di Camerino, I-62032 Camerino, Italy} \author{Stefano Mancini} \email{stefano.mancini(at)unicam.it} \affiliation{Dipartimento di Fisica, Universit\`{a} di Camerino, I-62032 Camerino, Italy} \begin{abstract} We present an extension to a $d$-ary alphabet of a recently proposed deterministic quantum key distribution protocol. It relies on the use of mutually unbiased bases in prime power dimension $d$, for which we provide an explicit expression. Then, by considering a powerful individual attack, we show that the security of the protocol is maximal for $d=3$. \end{abstract} \pacs{03.67.Dd, 03.65.Fd} \maketitle \section{Introduction} Quantum Key Distribution (QKD) is recognized to complement the One Time Pad to a secure system for reliable transfer of confidential information \cite{PW98}. A paradigm for QKD (not exploiting entanglement) is the pioneering BB84 protocol \cite{BB84}. It allows two remote parties (Alice and Bob) to share a secret key by a \emph{unidirectional} use of a quantum channel (supplemented by a public authenticated classical channel). Protocols like BB84 have a \emph{probabilistic} character, in the sense that, on each use of the quantum channel, the sender (Alice) is not sure that the encoded symbol will be correctly decoded by the receiver (Bob). Typically, this only happens with probability $1/2$. Recently a new generation of protocols has been introduced making the QKD process \emph{deterministic} \cite{BEKW02, BF02, CL04, LM05}. In this case Alice is sure about the fact that Bob will exactly decode the symbol she has encoded. This paradigm shift has been realized by a \emph{bidirectional} use of the quantum channel. 
These new generation protocols are more versatile than the old generation ones and are supposed to outperform them. Just as extensions of BB84 to larger alphabets have been developed \cite{BT99,CBKG01}, there is a persistent aim to also extend the protocol of \cite{LM05} to larger alphabets, that is to higher dimensions. A construction has been recently devised for a tri-dimensional alphabet \cite{SLW06, SW07}, and then another for a continuous infinite-dimensional alphabet \cite{PMBL06}. Here we present a protocol that realizes an extension of the deterministic protocol of \cite{LM05} to a $d$-ary alphabet. Since our construction is based on Mutually Unbiased Bases (MUB) \cite{I81, WF89, BBRV01, KR03}, it holds only for prime power dimensions $d$. We will provide an explicit expression for MUB encompassing powers of both even and odd primes, by correcting the one given in \cite{D05}. We then consider a powerful individual attack on the forward and backward path of the quantum channel and we show that the security for $d=3,4,5$ is higher than that at $d=2$ and is maximal for $d=3$. \section{Qudits and Mutually Unbiased Bases} Let us consider a qudit, i.e., a $d$-dimensional quantum system, and indicate with $\mathcal{H}_{d}$ the associated Hilbert space. A set of orthonormal bases in $\mathcal{H}_{d}$ is called a set of \emph{Mutually Unbiased Bases} (MUB) if the absolute value of the inner product of any two vectors from different bases is $1/\sqrt{d}$ \cite{I81, WF89, BBRV01, KR03}. It is known that in $\mathcal{H}_{d}$, when $d$ is prime power, there exists a maximal set of $d+1$ MUB \cite{I81, WF89, BBRV01, KR03}. Here, we focus on this case. From now on we assume that $d=p^m$, with $p$ a prime number and $m$ positive integer, and we denote the $d+1$ MUB of $ \mathcal{H}_{d}$ by $\ket{v_t^{k}}$, with $k = 0, 1, \ldots, d$ and $t = 0, 1, \ldots, d-1$ labelling the basis and the vector in it respectively. 
Thus, for every $k, k' = 0, 1, \ldots, d$ and every $t, t' = 0, 1, \ldots, d-1$, the following equality holds: \begin{equation} \label{mub} \left| \strut\smash{\bracket{v_t^{k}}{v_{t'}^{k'}}} \right| =\frac{1}{\sqrt{d}}\left(1-\delta_{k,k'}\right) +\delta_{t,t'}\delta_{k,k'}, \end{equation} where $\delta$ stands for the Kronecker delta. We deal with the Galois field $G=\mathbb{F}(p^m)$ of $d$ elements. We denote by $\oplus$ and $\odot$ respectively the addition and the multiplication in the field $G$ (by $\ominus$ and $\oslash$ the subtraction and the division in $G$). Usually, an element of $G$ is represented by a $m$-tuple $(g_0, g_1, \ldots, g_{m-1})$ of integers modulo $p$. According to this representation, $\oplus$ corresponds to the componentwise addition modulo $p$. Following \cite{D05}, we identify $G$ with $\{0, 1, \ldots, d-1\}$, paying attention to distinguish the operations in the field from the usual ones. Namely, we identify $(g_0, g_1, \ldots, g_{m-1})$ with the integer $g=\sum_{n=0}^{m-1}g_{n}p^{n}$. This allows us to consider the vector label $t$ in $\ket{v_t^{k}}$ as an element of $G$. Let us denote the $p$-th root of unity by \begin{equation} \omega = e^{i2\pi /p}. \end{equation} It is proved in \cite{D05} that \begin{equation} \label{OperExp_G} \omega^{j} \cdot \omega^{l} = \omega^{j \oplus l} \quad \textnormal{with } j, l \in G \end{equation} and \begin{equation} \label{la4} \sum_{j=0}^{d-1} \omega^{j \odot l} = d\,\delta_{l,0} \quad \textnormal{with } l \in G. \end{equation} We choose $\{\ket{v_t^0}\}_{t=0, \ldots, d-1}$ as the computational basis and use the explicit formula given in \cite{D05} to express the vectors of any other basis in the following compact way: \begin{equation} \label{ket_d} \ket{v^k_t} = {1 \over \sqrt d} \sum_{q=0}^{d-1} \omega^{\ominus q \odot t} (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}} \ket{v^0_q}, \end{equation} where $k = 1, \ldots, d$ and $t = 0, 1, \ldots, d-1$. 
In particular for $k=1$: \begin{equation} \ket{v^1_t} = {1 \over \sqrt d} \sum_{q=0}^{d-1} \omega^{\ominus q \odot t} \ket{v^0_q}. \end{equation} As it is pointed out in \cite{D05}, for $p$ odd the square root coincides with the division of the exponent by 2 in $G$ and it is uniquely determined. On the contrary, for $p=2$ it is necessary to unambiguously determine the square root's sign. This is given by (see Appendix) \begin{equation} \label{srs} (\omega^{(j-1) \odot q \odot q})^{\frac{1}{2}} = \prod_{\textstyle{n=0 \atop q_n \neq 0}}^{m-1} \! i^{(j-1)\odot 2^n \odot 2^n} \omega^{(j-1) \odot 2^n \odot (q \text{ mod } 2^n)}. \end{equation} With this in mind, the expression (\ref{ket_d}) satisfies the condition (\ref{mub}) of MUB, for $d$ any prime power, both even and odd (see Appendix for the proof). Notice that this does not happen in \cite{D05} in the even case. Hence, in the following we will make use of (\ref{ket_d}) without distinguishing the two cases. 
Then, Alice encoding operation will be the shift operation realized by the operator $V^a_0$ with $a\in A$ on all the MUB but the computational one, that is for $k > 0$: \begin{equation} V^a_0\, \ket{v^k_t} = {1 \over \sqrt d} \sum_{q=0}^{d-1} \omega^{\ominus q \odot (t \ominus a)} (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}} \ket{v^0_q} = \ket{v^k_{t \ominus a}}. \end{equation} In such a case, Bob receiving back the state $\ket{v^k_{t \ominus a}}$ can unambiguously determine $a$ by means of a projective measurement onto the $k$-th basis. In fact, he will get the value \begin{equation} \label{bval} b = t \ominus a \end{equation} from which, knowing $t$, he can extract $a$. Then, the protocol runs as follows: \begin{itemize} \item[1.] Bob randomly prepares one of the $d^2$ qudit states $\ket{v_{t}^{k}}$, with $k = 1, \ldots, d$ and $t = 0, \ldots, d-1$, and sends it to Alice. \item[2.] Alice, upon receiving the qudit state has two options. \begin{itemize} \item[a)] With probability $c \neq 0$, she performs a measurement by projecting over a randomly chosen basis among the $d$ bases with $k = 1, \ldots, d$ (\textit{Control Mode}). She then sends back to Bob the resulting state. \item[b)] With probability $1- c$, she encodes a symbol $a \in A$ by applying the unitary operator $V_0^{a}$ (\textit{Message Mode}). She then sends back to Bob the resulting state. \end{itemize} \item[3.] Bob, upon receiving back the qudit state, performs a measurement by projecting over the basis to which the qudit state initially belonged. \item[4.] At the end of the transmission, Alice publicly declares on which runs she performed the control mode and on which others the message mode. In the first case, Alice announces the bases over which she measured. Then, by public discussion, a comparison of Alice's and Bob's measurements results is performed over coincident bases. In the ideal case (noiseless channels and no eavesdropping) their results must coincide. 
In the message mode runs, Bob gets the encoded symbol $a$ as discussed above. \end{itemize} Notice at the above point 2. the deterministic character of the protocol given by the possibility for Alice, besides deciding when to encode, to determine the message (key) sequence, since she knows that Bob will unambiguously decode each character of the message (key). \section{Security of the protocol} Among individual attacks the most elementary one is the \emph{Intercept-Resend}. Suppose Eve, to learn Alice's operation, performs projective measurements on both paths of the traveling qudit, randomly choosing the measuring basis. She will steal the whole information for each message mode run, independently of the chosen basis. However, in each control mode run with coincident bases for Alice and Bob, she can guess the correct basis with probability $1/d$, and in this case she is not detected at all. If otherwise Eve chooses the wrong basis, she still has a probability $1/d$ to evade detection on the forward path and probability $1/d$ on the backward path, leading to an overall probability $1/d^{2}$ to remain undetected. This means that the double test of Alice and Bob reveals Eve with probability $(d^2-1)(d-1)/d^4$, including the cases of non-coincident bases. We are going to prove the security of the protocol against a more powerful individual attack. Quite generally, in individual attacks Eve lets the carrier of information interact with an ancilla system she has prepared and then try to gain information by measuring the ancilla. In this protocol she has to do that two times, in the forward path (to gain information about the state Bob sends to Alice) and in the backward path (to gain information about the state Alice sends back to Bob, hence about Alice's transformation). Moreover, by using the same ancilla in the forward and backward path, Eve could benefit from quantum interference effects (see Fig.~\ref{protocol_scheme}). 
In particular, we consider the unitary transformation describing the attack as controlled shifts $\{V^l_0\}_{l\in A}$, where the controller is the traveling qudit, while the target is in the Eve's hands. That is, $C\{V^l_0\}_{l\in A}:\mathcal{H}_{d}\otimes\mathcal{H}_{d} \to\mathcal{H}_{d}\otimes\mathcal{H}_{d}$ defined as follows: \begin{equation} \label{CV^a_0} \ket{v^1_{t_1}} \ket{v^1_{t_2}}\longarrow{C\{V^l_0\}_{l\in A}} \ket{v^1_{t_1}} V^{l=t_1}_0 \ket{v^1_{t_2}} = \ket{v^1_{t_1}} \ket{v^1_{{t_2} \ominus {t_1}}}. \end{equation} We remark that, in this definition, the controller as well as the target states are considered in the dual basis for the sake of simplicity. Other choices (except the computational basis) will give the same final results. Then, we consider Eve intervening in the forward path with $(C\{V^l_0\}_{l\in A})^{-1}$, defined by \begin{equation} \ket{v^1_{t_1}}\ket{v^1_{t_2}}\longarrow{(C\{V^l_0\}_{l\in A})^{-1}} \ket{v^1_{t_1}} V^{\ominus {t_1}}_0 \ket{v^1_{t_2}} = \ket{v^1_{t_1}} \ket{v^1_{{t_2} \ominus ({\ominus t_1})}} = \ket{v^1_{t_1}} \ket{v^1_{{t_2} \oplus {t_1}}}, \end{equation} and with $C\{V^l_0\}_{l\in A}$ in the backward path. \begin{figure} \caption{The scheme summarizing our protocol. Labels $\mathcal{B} \label{protocol_scheme} \end{figure} \subsection{Message Mode} Now, let us analyze in detail the transformations of the quantum states on an entire message mode run. \noindent {\it Attack on the forward path.} The initial Bob state is one of the $d^{2}$ states $\ket{v_{t}^{k}}$, with $k = 1, \ldots, d$ and $t = 0, \ldots, d-1$. Then, Eve initially prepares the ancilla state $\ket{v_{0}^{1}}_{\mathcal E}$ in the dual basis and performs the controlled operation. 
Hence, we get \begin{equation} \label{1attack} \ket{v^k_t}_{\mathcal B} \ket{v^1_0}_{\mathcal E} \longarrow{(C\{V^l_0\}_{l\in A})^{-1}} \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_h}_{\mathcal B} \ket{v^1_0}_{\mathcal E} = \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_h}_{\mathcal B} \ket{v^1_h}_{\mathcal E}. \end{equation} \noindent {\it Encoding.} The Bob's qudit state undergoes the shift $V_0^a$ with $a \in A$, then from (\ref{1attack}) we get \begin{equation} \label{cod} \arrow{V^a_0} \ \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_{h \ominus a}}_{\mathcal B} \ket{v^1_h}_{\mathcal E}. \end{equation} \noindent {\it Attack on the backward path.} The state (\ref{cod}) undergoes a $C\{V^l_0\}_{l\in A}$ operation, hence we have \begin{equation} \label{2attack} \longarrow{C\{V^l_0\}_{l\in A}} \ \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_{h \ominus a}}_{\mathcal B} \ket{v^1_{h \ominus (h \ominus a)}}_{\mathcal E} = \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_{h \ominus a}}_{\mathcal B} \ket{v^1_a}_{\mathcal E} = \ket{v^k_{t \ominus a}}_{\mathcal B} \ket{v^1_a}_{\mathcal E}. \end{equation} Then, Eve measures her ancilla system by projecting in the dual basis, according to the chosen initial ancilla state. We notice that the controlled operations performed by Eve, as well as her final measurement, left unchanged Bob's qudit state. Hence, Bob's measurement by projection in the $k$-th basis to which the initial state belonged, always allows him to obtain the symbol $a$ Alice has encoded [see (\ref{bval})]. On the other hand, Eve gets $\ket{v^1_a}$ with probability 1 as the result of her measurement. Therefore, she is able to exactly determine the encoded symbol $a$ as well and she steals the whole information, quantified in bits, \begin{equation} \label{I_E} I_\mathcal{E} = \log_2{d} \end{equation} on each message mode run. 
\subsection{Control Mode} We would like to evaluate the probability $P_{\mathcal E}$ Alice and Bob have to reveal Eve on each control mode run. Alice and Bob only compare the results of their measurements when, by public discussion, they agree on the used basis. Let us focus on the case Alice and Bob use the same basis $k$, keeping in mind that it happens with probability $1/d$. The situation is different for $k=1$ and $k \neq 1$, due to Eve's choice of using the dual basis for her ancilla. \begin{itemize} \item[1)] For $k=1$, on the forward path we have \begin{equation} \ket{v_t^{1}}_{\mathcal B}\ket{v^1_0}_{\mathcal E} \longarrow{(C\{V^l_0\}_{l\in A})^{-1}} \ket{v^1_t}_{\mathcal B} \ket{v^1_t}_{\mathcal E}. \end{equation} Alice, measuring in the dual basis, gets ${\bar t}$ with probability 1 and projects into $\ket{\bar t}_{\mathcal B}$. On the backward path we have \begin{equation} \ket{v^1_t}_{\mathcal B} \ket{v^1_t}_{\mathcal E} \; \longarrow{C\{V^l_0\}_{l\in A}} \; \ket{v^1_t}_{\mathcal B} \ket{v^1_{t \ominus t}}_{\mathcal E} = \ket{v^1_t}_{\mathcal B} \ket{v^1_0}_{\mathcal E}. \end{equation} Bob, in turn, by measuring in the dual basis gets $t$ with probability 1. Thus, Alice and Bob have perfect correlation and $P_{\mathcal E}=0$. \item[2)] For $k = 2, \ldots, d$, we get on the forward path \begin{equation} \label{CM_2)1} \ket{v^k_t}_{\mathcal B} \ket{v^1_0}_{\mathcal E} = \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_h}_{\mathcal B} \ket{v^1_0}_{\mathcal E} \longarrow{(C\{V^l_0\}_{l\in A})^{-1}} \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \ket{v^1_h}_{\mathcal B} \ket{v^1_h}_{\mathcal E}. \end{equation} By expressing the vectors of the dual basis in terms of the basis $k$ used by Bob, we rewrite the right hand side of (\ref{CM_2)1}) as \begin{equation} \label{CM_2)k} \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \sum_{s=0}^{d-1} \bracket{v^k_s}{v^1_h} \ket{v^k_s}_{\mathcal B} \ket{v^1_h}_{\mathcal E}. 
\end{equation} At this point Alice measures in the basis $k$. The result of her measurement is to project into $\ket{v^k_{t'}}$, whatever $t' \in A$ is, with probability \begin{equation} \sum_{h=0}^{d-1} |\bracket{v^1_h}{v^k_t} \bracket{v^k_{t'}}{v^1_h}|^2 = \sum_{h=0}^{d-1} |\bracket{v^1_h}{v^k_t}|^2 |\bracket{v^k_{t'}}{v^1_h}|^2 = \sum_{h=0}^{d-1} {1 \over d^2} = {1 \over d} \end{equation} according to definition of MUB. Among the $d$ possibilities we distinguish two cases. \begin{itemize} \item [a)] $t' = t$, occurring with probability $1/d$, for which the resulting state from (\ref{CM_2)k}) is \begin{equation} \label{result_a} \sqrt d \sum_{h=0}^{d-1} \bracket{v^1_h}{v^k_t} \bracket{v^k_t}{v^1_h} \ket{v^k_t}_{\mathcal B} \ket{v^1_h}_{\mathcal E} = \frac{1}{\sqrt d} \sum_{h=0}^{d-1} \ket{v^k_t}_{\mathcal B} \ket{v^1_h}_{\mathcal E}. \end{equation} We have now to apply the $C\{V^l_0\}_{l\in A}$ operation of the backward path. Thus, (\ref{result_a}) transforms as follows \begin{equation} \ket{v^k_t}_{\mathcal B} {1 \over {\sqrt d}} \sum_{h=0}^{d-1} \ket{v^1_h}_{\mathcal E} = \sum_{h'=0}^{d-1} \bracket{v^1_{h'}}{v^k_t} \ket{v^1_{h'}}_{\mathcal B} {1 \over {\sqrt d}} \sum_{h=0}^{d-1} \ket{v^1_h}_{\mathcal E} \end{equation} \begin{equation} \longarrow{C\{V^l_0\}_{l\in A}} \sum_{h'=0}^{d-1} \bracket{v^1_{h'}}{v^k_t} \ket{v^1_{h'}}_{\mathcal B} {1 \over {\sqrt d}} \sum_{h=0}^{d-1} \ket{v^1_{h \ominus h'}}_{\mathcal E} = \ket{v^k_t}_{\mathcal B} {1 \over {\sqrt d}} \sum_{r=0}^{d-1} \ket{v^1_r}_{\mathcal E}, \end{equation} where $r = h \ominus h'$. It results that Eve's attack does not alter the eigenvector $\ket{v_t^k}_{\mathcal B}$. Hence, Bob upon his measurement will get $t$ with probability 1. Then, neither Alice nor Bob outwit Eve's attacks. \item [b)] $t' \neq t$, occurring with probability $(d-1)/d$, for which Alice, getting a state different from the one initially sent by Bob, outwits Eve in the forward path. 
Hence, in this case, we do not need to explicitly evaluate the state change in the backward path. \end{itemize} \end{itemize} In summary, from the analyzed cases, we have: \begin{itemize} \item $1/d$ the probability with which Bob and Alice measure in the same basis $k$; \item $(d-1)/d$ the probability of Bob choosing the initial state $\ket{v_t^k}$ from any basis but the dual one, that is $k \neq 1$; \item $(d-1)/d$ the probability that the state $\ket{v_t^k}$ sent by Bob gives a measurement result $\ket{v_{t'}^k}$ with $t' \neq t$ to Alice. \end{itemize} We then conclude that the probability for Alice and Bob to outwit Eve on each control mode run is \begin{equation} \label{P_E} P_{\mathcal E}= \frac{1}{d} \cdot \frac{d-1}{d} \cdot \frac{d-1}{d}= {\frac{(d-1)^{2}}{d^{3}}} \, . \end{equation} In Fig.~\ref{graf_Pe(new)} we show the behavior of $P_{\mathcal E}$ versus the order $d$ of the alphabet. Interestingly enough, the values of $P_{\mathcal E}$ at $d=3,4,5$ are higher than that at $d=2$. In particular, $P_{\mathcal E}$ has a maximum at $d=3$ showing that this dimension represents the optimal compromise between two different trends. On the one hand, the probability $(d-1)^2/d^2$ of revealing Eve in each successful control mode run (that is when the bases of Alice and Bob coincide) increases towards 1 when increasing the dimension $d$. On the other hand, the efficiency of the whole control process decreases according to the probability $1/d$ for each control mode run to succeed. \begin{figure} \caption{The probability $P_{\mathcal E} \label{graf_Pe(new)} \end{figure} \section{Concluding remarks} We have proposed a deterministic cryptographic protocol working with a $d$-ary alphabet and exploiting a bidirectional quantum channel. 
When considering an attack performed by means of controlled operations on both directions of the quantum channel, we have found that Eve can steal the total amount of information $I_{\mathcal E}$ (see (\ref{I_E})), while the probability $P_{\mathcal E}$ to outwit her presents a maximum for $d=3$ (see (\ref{P_E})). Contrarily to probabilistic protocols, the deterministic nature of this protocol also allows the realization of Quantum Direct Communication (QDC) between legitimate users \cite{BEKW02, BF02, CL04, LM05}. In this case Alice and Bob (after authentication) can communicate directly the meaningful message without encryption. However, for this kind of communication only an asymptotic security can be proven. In fact, if we assume that Eve wants to perform her attack on each message mode run, without having been detected in the previous control mode runs, then the probability is given by following geometric series: \begin{equation} (1-c) + c(1-P_{\mathcal E})(1-c) + c^{2}(1-P_{\mathcal E})^{2}(1-c) + \ldots =\frac{1-c}{1-c(1-P_{\mathcal E})}. \end{equation} Thus, being $I_{\mathcal E}$ the quantity of information that Eve eavesdrops in a single attack, the probability that she successfully eavesdrops an amount of information $I$ is \begin{equation} \label{fmla_QDC} \left(\frac{1-c}{1-c\big(1-P_{\mathcal E}\big)}\right) ^{\!I/I_{\mathcal E}}, \end{equation} with $I_{\mathcal E}$ and $P_{\mathcal E}$ given in (\ref{I_E}) and (\ref{P_E}) respectively. We observe that such a probability exponentially decreases towards 0 as a function of $I$ for each given dimension $d$. So, (\ref{fmla_QDC}) expresses the asymptotic security of the direct communication use of the protocol. However, in this case the probability for Alice and Bob to detect Eve before she can eavesdrop a fixed amount of information, that is the complement of probability in (\ref{fmla_QDC}), is maximal for $d=2$. 
It is interesting to notice that the optimal dimension depends on the specific task of the protocol (QKD or QDC). Therefore, we believe that this work might open up new horizons for deterministic cryptographic protocols involving finite dimensional systems. \appendix \section{} By referring to \cite{D05}, let us denote by $V^{j}_{l}$ the operators given by the composition of the shifts in the computational and the dual basis, that is \begin{equation} V^{j}_{l} = V^{j}_{0} \cdot V^{0}_{l} = \sum_{t=0}^{d-1}\omega^{(t\oplus l)\odot j} \ket{t \oplus l}\bra{t}. \end{equation} This set of operators coincides with the Generalized Pauli Group (see \cite{BBRV01}). The $V^{j}_{l}$'s are $d^2$ unitary transformations which satisfy the following composition law \begin{equation} \label{prV} V^{j}_{l} \cdot V^{j'}_{l'} = \omega^{(l \odot j')} V^{j \oplus j'}_{l \oplus l'}, \end{equation} and, up to phases, they form $d+1$ commuting subgroups of $d$ elements that have only the identity in common. The $k$-th subgroup, with $k=0, \ldots, d$, admits $\{\ket{v_t^k}\}_{t=0,\ldots, d-1}$ as diagonalizing basis. Its elements are denoted by $U^{k}_{l}$ with $l=0, \ldots, d-1$, and they are required to satisfy: \begin{equation} \label{prU} U^{k}_{l \oplus l'} = U^{k}_{l} \cdot U^{k}_{l'} \, , \end{equation} \begin{equation} \label{diag} U^{k}_{l} = \sum_{t=0}^{d-1}\omega^{t\odot l}\ket{v_t^k}\bra{v_t^k} \, , \end{equation} \begin{equation} \label{ph} U^{k}_{l} = V^{(k-1)\odot l}_{l} \quad \text{up to a phase which is 1 for $l=0$}. \end{equation} It is important to point out that (\ref{prV}), (\ref{prU}), (\ref{diag}) and (\ref{ph}) must be guaranteed at the same time. In \cite{D05}, the following relation is obtained from them: \begin{equation} \label{DetSgn} U^{k}_{l} = (\omega^{\ominus (k-1)\odot l \odot l})^{\frac{1}{2}} \, V^{(k-1)\odot l}_{l} \, . \end{equation} In the odd prime power case such an expression is completely determined and the phase is a $p$-th root of unity. 
In fact the square root can be interpreted as the division of the exponent by 2 in the Galois field $G$. This is no longer true in the even prime power case. In this case the phase is not a $2$-nd root of unity but a $4$-th root of unity, that is it can also assume the values $\pm i$, other than $\pm 1$. Moreover, the sign of it is still undetermined. The determination of such sign provided in \cite{D05} is incorrect. Below we correctly develop the last step of (32) in \cite{D05} getting the right sign, and consequently the square root's sign in (\ref{ket_d}), as indicated in (\ref{srs}). First of all, we observe that for $p=2$ we have $\omega=-1$. In \cite{D05} it has been implicitly chosen the determination of the square root of $\omega^{(k-1) \odot 2^n \odot 2^n}$ as to be $i^{(k-1)\odot 2^n \odot 2^n}$. Then, we have: \begin{eqnarray} U^k_l & \!\!\! = \!\!\! & \prod^{m-1}_{n=0} U^k_{l_n \odot 2^n} = \prod^{m-1}_{n=0} {(U^k_{2^n})}^{l_n} \nonumber \\ & \!\!\! = \!\!\! & \prod^{m-1}_{\textstyle{n=0 \atop l_n \neq 0}} \! {(\omega^{(k-1) \odot 2^n \odot 2^n})}^{\frac{1}{2}} (V^{(k-1) \odot 2^n}_{2^n}) \nonumber \\ & \!\!\! = \!\!\! & \left( \prod^{m-1}_{\vrule height9pt width0pt \smash{\textstyle{n=0 \atop l_n \neq 0}}} \! i^{(k-1) \odot 2^n \odot 2^n} \! \right) \! \! \left( \prod^{m-1}_{\vrule height9pt width0pt \smash{\textstyle{n=0 \atop l_n \neq 0}}} \! V^{(k-1) \odot 2^n}_{2^n} \! \right). \end{eqnarray} Let $n_0, n_1, \ldots n_h$ be the indices $n_j$ such that $l_{n_j}=1$. By taking into account (\ref{prV}), the second product can be rewritten as follows. \begin{eqnarray} \prod^{m-1}_{\textstyle{n=0 \atop l_n \neq 0}} (V^{(k-1) \odot 2^n}_{2^n}) & \!\!\! = \!\!\! & \prod^{h}_{j=0} V^{(k-1) \odot 2^{n_j}}_{2^{n_j}} \nonumber \\[-6pt] & \!\!\! = \!\!\! & \left( \prod^{h}_{j=1} \omega^{(k-1) \odot (2^{n_0} \oplus 2^{n_1} \oplus \ldots \oplus 2^{n_{j-1}}) \odot 2^{n_j}} \! 
\right) V^{(k-1) \odot (2^{n_0} \oplus \ldots \oplus 2^{n_{h}})} _{(2^{n_0} \oplus \ldots \oplus 2^{n_{h}})} \nonumber \\[2pt] & \!\!\! = \!\!\! & \left( \prod^{h}_{j=0} \omega^{(k-1) \odot 2^{n_j} \odot (l \text{ mod } 2^{n_j})} \! \right) V^{(k-1) \odot l}_{l} \nonumber \\[2pt] & \!\!\! = \!\!\! & \left( \prod^{m-1}_{\vrule height9pt width0pt \smash{\textstyle{n=0 \atop l_n \neq 0}}} \omega^{(k-1) \odot 2^{n} \odot (l \text{ mod } 2^{n})} \! \right) V^{(k-1) \odot l}_{l}. \end{eqnarray} Then, we have: \begin{equation} U^k_l = \left( \prod^{m-1}_{\vrule height9pt width0pt \smash{\textstyle{n=0 \atop l_n \neq 0}}} \! i^{(k-1) \odot 2^n \odot 2^n} \omega^{(k-1) \odot 2^{n} \odot (l \text{ mod } 2^{n})} \! \right) V^{(k-1) \odot l}_{l}. \end{equation} This gives the correct determination of square root's sign in the phase as in (\ref{srs}), which can be rewritten as \begin{equation} \label{srs2} \prod_{\vrule height9pt width0pt \smash{\textstyle{n=0 \atop l_n \neq 0}}}^{m-1} \! i^{(k-1)\odot 2^n \odot 2^n} \omega^{(k-1) \odot 2^n \odot (l \text{ mod } 2^n)}= \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1}l_n l_h (k-1) \odot 2^n \odot 2^h} i^{l_n(k-1)\odot 2^n \odot 2^n}. \end{equation} Now, by referring to (\ref{OperExp_G}), we remark that an analogous property does not hold for powers of $i$ with exponents in $G$. The reader can easily check that \begin{equation} \label{*} i^j \cdot i^l = (-1)^{j l} i^{j \oplus l} = (-1)^{j_0 l_0} i^{j \oplus l}. \end{equation} From (\ref{*}) it follows that \begin{equation} \label{**} (\omega^{(k-1) \odot l \odot l})^{1/2} (\omega^{(k'-1) \odot l \odot l})^{1/2} = \phi(k,k',l) \, (\omega^{((k-1) \oplus (k'-1)) \odot l \odot l})^{\frac{1}{2}}, \end{equation} where we have defined \begin{equation} \phi(k,k',l) = (-1)^{\sum_{n=0}^{m-1}l_n((k-1)\odot 2^n \odot 2^n)((k'-1)\odot 2^n \odot 2^n)}. 
\end{equation} In fact, by using (\ref{srs2}) and (\ref{*}), \begin{equation}a & & \kern-75pt (\omega^{(k-1) \odot l \odot l})^{\frac{1}{2}} (\omega^{(k'-1) \odot l \odot l})^{\frac{1}{2}} \nonumber \\[2pt] & & \kern-75pt \kern20pt {} = \, \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1}l_nl_h (k-1) \odot 2^n \odot 2^h} i^{l_n(k-1)\odot 2^n \odot 2^n} \nonumber \\[-6pt] & & \kern-75pt \kern60pt {} \times (-1)^{\sum_{h=0}^{n-1}l_nl_h (k'-1) \odot 2^n \odot 2^h} i^{l_n(k'-1)\odot 2^n \odot 2^n} \nonumber \end{equation}a \begin{equation}a & & \kern20pt {} = \, \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1}l_n l_h ((k-1)\oplus(k'-1)) \odot 2^n \odot 2^h} \nonumber \\[-6pt] & & \kern60pt {} \times (-1)^{(l_n(k-1)\odot 2^n \odot 2^n)(l_n(k'-1)\odot 2^n \odot 2^n)} \, i^{l_n((k-1) \oplus(k'-1))\odot 2^n \odot 2^n} \nonumber \\[8pt] & & \kern20pt {} = \, \phi(k,k',l) \, (\omega^{((k-1) \oplus (k'-1)) \odot l \odot l})^{\frac{1}{2}}. \end{equation}a \noindent In particular, by assuming $k' = k$ in (\ref{**}), we get the conjugate of $(\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}}$ as \begin{equation} \phi(k,k,q) \, (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}}. \end{equation} Consequently, the correct expression for the inner products $\bracket{v_{t'}^{k'}}{v_t^k}$ with $k,k' \geq 1$ is the following (which does not coincide with (28) in \cite{D05}): \begin{equation}a \label{InnerProd} \bracket{v_{t'}^{k'}}{v_t^k} & \!\!\! = \!\!\! & {1 \over d} \sum_{q=0}^{d-1} \omega^{q \odot t} (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}} \, \omega^{q \odot t'} \phi(k',k',q) \, (\omega^{(k'-1) \odot q \odot q})^{\frac{1}{2}} \nonumber \\ & \!\!\! = \!\!\! & {1 \over d} \sum_{q=0}^{d-1} \phi(k,k',q) \, \phi(k',k',q) \, \omega^{q \odot (t \oplus t')} (\omega^{((k-1)\oplus (k'-1)) \odot q \odot q})^{\frac{1}{2}}. \end{equation}a In order to prove the MUB condition, we state the following elementary properties of the function $\phi$: \begin{equation}a \phi(k,k',0) & \!\!\! = \!\!\! 
& 1 \label{prop1}\\ \phi(k',k,q) & \!\!\! = \!\!\! & \phi(k,k',q) \label{prop2}\\ \phi(k,k',q) \, \phi(k,k',q') & \!\!\! = \!\!\! & \phi(k,k',q\oplus q') \label{prop3}\\ \phi(k,k,q) & \!\!\! = \!\!\! & \omega^{(k-1) \odot q \odot q} \label{prop4} \end{equation}a The first and the second one come from the very definition of $\phi$, the third one comes from the fact that $q_n + q'_n \text{ mod } 2 = (q \oplus q')_n$ and the fourth one from (\ref{**}) for $k'=k$. We also need to verify that the following equality, corresponding to (37) in \cite{D05}, \begin{equation} \label{37Durt} (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}} (\omega^{(k-1) \odot q' \odot q'})^{\frac{1}{2}} = \omega^{(k-1) \odot q \odot q'} (\omega^{(k-1) \odot (q \oplus q') \odot (q \oplus q')})^{\frac{1}{2}} \end{equation} holds with the correct determination of square root's sign given by (\ref{srs2}) (this does not happen with wrong determination of the sign given in \cite{D05}). Let us consider the left hand side. It turns out to be \begin{equation}a & & \kern-10pt (\omega^{(k-1) \odot q \odot q})^{\frac{1}{2}} (\omega^{(k-1) \odot q' \odot q'})^{\frac{1}{2}} \nonumber \\ & & \kern-5pt {} = \, \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1} q_n q_h (k-1) \odot 2^n \odot 2^h} (-1)^{\sum_{h=0}^{n-1}q'_n q'_h (k-1) \odot 2^n \odot 2^h} i^{q_n (k-1) \odot 2^n \odot 2^n} i^{q'_n (k-1) \odot 2^n \odot 2^n} \nonumber \\ & & \kern-5pt {} = \, \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1} (q_n q_h + q'_n q'_h ) (k-1) \odot 2^n \odot 2^h} (-1)^{q_n q'_n (k-1) \odot 2^n \odot 2^n} i^{(q \oplus q')_n (k-1) \odot 2^n \odot 2^n} \nonumber \\[4pt] & & \kern-5pt {} = \, (-1)^{ \left( \sum_{n=0}^{m-1}q_n q'_n (k-1) \odot 2^n \odot 2^n \right) + \left( \sum_{n=0}^{m-1}\sum_{h=0}^{n-1} (q_n q_h + q'_n q'_h ) (k-1) \odot 2^n \odot 2^h \right)} \nonumber \\[-4pt] & & \kern-5pt \kern220pt {} \times \prod_{n=0}^{m-1} \! i^{(q \oplus q')_n (k-1) \odot 2^n \odot 2^n}. 
\end{equation}a For the right hand side, we have: \begin{equation}a & & \omega^{(k-1) \odot q \odot q'} (\omega^{(k-1) \odot (q \oplus q') \odot (q \oplus q')})^{\frac{1}{2}} \nonumber \\[4pt] & & \kern20pt {} = \, (-1)^{(k-1) \odot (\sum_{n=0}^{m-1} q_n 2^n) \odot (\sum_{n=0}^{m-1} q'_n 2^n)} \nonumber \\[-2pt] & & \kern60pt {} \times \prod_{n=0}^{m-1} (-1)^{\sum_{h=0}^{n-1}(q \oplus q')_n (q \oplus q')_h (k-1) \odot 2^n \odot 2^h} i^{(q \oplus q')_n (k-1)\odot 2^n \odot 2^n} \nonumber \\[4pt] & & \kern20pt {} = \, (-1)^{ \left(\sum_{n=0}^{m-1}\sum_{h=0}^{m-1} q_n q'_h (k-1) \odot 2^n \odot 2^h \right) + \left(\sum_{n=0}^{m-1}\sum_{h=0}^{n-1} (q \oplus q')_n (q \oplus q')_h (k-1) \odot 2^n \odot 2^h \right)} \nonumber \\[-2pt] & & \kern60pt {} \times \prod_{n=0}^{m-1} i^{(q \oplus q')_n (k-1)\odot 2^n \odot 2^n}. \end{equation}a At this point, (\ref{37Durt}) derives from the following sequence of equalities mod $2$: \begin{equation}a & & \kern-20pt \left( \sum_{n=0}^{m-1}\sum_{h=0}^{m-1} q_n q'_h 2^n \odot 2^h \! \right) + \left( \sum_{n=0}^{m-1}\sum_{h=0}^{n-1} (q \oplus q')_n (q \oplus q')_h 2^n \odot 2^h \! \right) \nonumber \\ & & {} = \, \left( \sum_{n=0}^{m-1}\sum_{h=0}^{m-1} q_n q'_h 2^n \odot 2^h \! \right) + \left( \sum_{n=0}^{m-1}\sum_{h=0}^{n-1} (q_n + q'_n) (q_h + q'_h) 2^n \odot 2^h \! \right) \nonumber \\ & & {} = \, \left( \sum_{n=0}^{m-1} q_n q'_n 2^n \odot 2^n \! \right) + \left( \sum_{n=0}^{m-1}\sum_{h=0}^{n-1} (q_n q_h + q'_n q'_h) 2^n \odot 2^h \! \right). \end{equation}a Finally, we can prove the MUB condition for even prime power. 
From (\ref{InnerProd}), by using in the order (\ref{prop2}), (\ref{prop3}), (\ref{37Durt}) and (\ref{prop4}), and then relabelling the sum indices, we have \begin{equation}a & & \kern-10pt \bracket{v_{t'}^{k'}}{v_t^k} \bracket{v_t^k}{v_{t'}^{k'}} \nonumber \\[4pt] & & \kern-10pt \kern20pt {} = \, {1 \over d^2} \sum_{q,q'=0}^{d-1} \phi(k,k',q) \, \phi(k',k',q) \, \phi(k,k',q') \, \phi(k,k,q') \nonumber \\[-4pt] & & \kern-10pt \kern80pt {} \times \omega^{q \odot (t \oplus t')} \omega^{q' \odot (t \oplus t')} (\omega^{((k-1)\oplus (k'-1)) \odot q \odot q})^{\frac{1}{2}} (\omega^{((k-1) \oplus (k'-1)) \odot q' \odot q'})^{\frac{1}{2}} \nonumber \\ & & \kern-10pt \kern20pt {} = \, {1 \over d^2} \sum_{q,q'=0}^{d-1} \phi(k,k',q \oplus q') \, \phi(k,k,q \oplus q') \, \phi(k,k,q) \, \phi(k',k',q) \nonumber \\[-4pt] & & \kern-10pt \kern80pt {} \times \omega^{(q \oplus q') \odot (t \oplus t')} \omega^{((k-1) \oplus (k'-1)) \odot q \odot q'} (\omega^{((k-1)\oplus (k'-1)) \odot (q \oplus q') \odot (q \oplus q')})^{\frac{1}{2}} \nonumber \\ & & \kern-10pt \kern20pt {} = \, {1 \over d^2} \sum_{q,h=0}^{d-1} \phi(k,k',h) \, \phi(k,k,h) \, \omega^{(k-1) \odot q \odot q} \omega^{(k'-1) \odot q \odot q} \nonumber \\[-4pt] & & \kern-10pt \kern80pt {} \times \omega^{h \odot (t \oplus t')} \omega^{((k-1) \oplus (k'-1)) \odot q \odot (q \oplus h)} (\omega^{((k-1)\oplus (k'-1)) \odot h \odot h})^{\frac{1}{2}}. 
\nonumber \end{equation}a \noindent Now, by collecting the terms without $q$ and then using (\ref{la4}), the previous expression can be rewritten as \begin{equation}a & & \kern-20pt {1 \over d^2} \sum_{h=0}^{d-1} \phi(k,k',h) \, \phi(k,k,h) \, \omega^{h \odot (t \oplus t')} (\omega^{((k-1)\oplus (k'-1)) \odot h \odot h})^{\frac{1}{2}} \sum_{q=0}^{d-1} \omega^{((k-1) \oplus (k'-1)) \odot q \odot h} \nonumber \end{equation}a \begin{equation}a & & {} = \, {1 \over d} \sum_{h=0}^{d-1} \phi(k,k',h) \, \phi(k,k,h) \, \omega^{h \odot (t \oplus t')} (\omega^{((k-1)\oplus (k'-1)) \odot h \odot h})^{\frac{1}{2}} \delta_{((k-1) \oplus (k'-1)) \odot h,0}. \nonumber \end{equation}a At this point we can conclude as follows, by separating the cases $k \neq k'$ and $k = k'$ and then using (\ref{prop1}), (\ref{prop3}) and (\ref{la4}). \begin{equation}a & & \kern-40pt {1 \over d} (1 - \delta_{k,k'}) \phi(k,k',0) \, \phi(k,k,0) + {1 \over d} \delta_{k,k'} \sum_{h=0}^{d-1} \phi(k,k,h) \, \phi(k,k,h) \, \omega^{h \odot (t \oplus t')} \nonumber \\ & & \kern-40pt \kern20pt {} = \, {1 \over d} (1 - \delta_{k,k'}) + {1 \over d} \delta_{k,k'} \sum_{h=0}^{d-1} \omega^{h \odot (t \oplus t')} = \, {1 \over d} (1 - \delta_{k,k'}) + \delta_{k,k'} \delta_{t,t'}. \end{equation}a This gives (\ref{mub}), q.e.d. \acknowledgments We are grateful to T. Durt for correspondence on the subject of MUB and to M. Lucamarini, R. Piergallini and C. Toffalori for interesting discussions and a careful reading of the ms. \end{document}
\begin{document} \title[Derived $A$-infinity algebras in an operadic context]{Derived $A$-infinity algebras in an operadic context} \author{Muriel Livernet} \address{Universit\'e Paris 13, Sorbonne Paris Cit\'e, LAGA, CNRS (UMR 7539), 99 avenue Jean-Baptiste Cl\'ement, F-93430 Villetaneuse, France} \email{[email protected]} \author{Constanze Roitzheim} \address{School of Mathematics, Statistics and Actuarial Science, University of Kent, Cornwallis, Canterbury, Kent, CT2 7NF, UK} \email{[email protected]} \author{Sarah Whitehouse} \address{School of Mathematics and Statistics, Hicks Building, University of Sheffield, S3 7RH, England} \email{[email protected]} \keyword{Operads} \keyword{A-infinity algebras} \keyword{Koszul duality} \subject{primary}{msc2000}{18D50} \subject{primary}{msc2000}{16E45} \subject{primary}{msc2000}{18G55} \subject{primary}{msc2000}{18D10} \date{\today} \begin{abstract} Derived $A$-infinity algebras were developed recently by Sagave. Their advantage over classical $A$-infinity algebras is that no projectivity assumptions are needed to study minimal models of differential graded algebras. We explain how derived $A$-infinity algebras can be viewed as algebras over an operad. More specifically, we describe how this operad arises as a resolution of the operad ${\mathrm d}As$ encoding bidgas, i.e. bicomplexes with an associative multiplication. This generalises the established result describing the operad $A_\infty$ as a resolution of the operad ${\mathcal A}s$ encoding associative algebras. We further show that Sagave's definition of morphisms agrees with the infinity-morphisms of $dA_\infty$-algebras arising from operadic machinery. We also study the operadic homology of derived $A$-infinity algebras. \end{abstract} \maketitle \setcounter{tocdepth}{2} \tableofcontents \section*{Introduction} Mathematical areas in which $A_\infty$-structures arise range from geometry, topology and representation theory to mathematical physics. 
One important application is to the study of differential graded algebras via $A_\infty$-structures on their homology algebras. This is the theory of minimal models established by Kadeishvili in the 1980s \cite{Kad79}. However, the results concerning minimal models all have rather restrictive projectivity assumptions. To bypass these projectivity assumptions, Sagave recently developed the notion of derived $A_\infty$-algebras~\linebreak \cite{Sag10}. Compared to classical $A_\infty$-algebras, derived $A_\infty$-algebras are equipped with an additional grading. Using this definition one can define projective resolutions that are compatible with $A_\infty$-structures. With these, Sagave established a notion of minimal models for differential graded algebras (dgas) whose homology is not necessarily projective. Sagave's descriptions of derived $A_\infty$-structures are largely formula-based. In this paper, we provide an alternative description of these structures using operads. It is not hard to write down an operad $dA_\infty$ that encodes derived $A_\infty$-structures, but we also explain the context into which this operad fits. The category we are going to work in is the category ${\rm BiCompl}_v$ of bicomplexes with no horizontal differential. We will start from an operad ${\mathrm d}As$ in this category encoding bidgas, that is, monoids in bicomplexes (see Definition~\ref{def:bidga}). Our main theorem shows that derived $A_\infty$-algebras are algebras over the operad $$dA_\infty=({\mathrm d}As)_\infty= \Omega((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}).$$ This means that the operad $dA_\infty$ is a minimal model of a well-known structure. We can summarize our main result and its relation to the classical case in the following table. 
\begin{center} \begin{tabular}{ccc} underlying category&operad $\mathcal{O}$&$\mathcal{O}$-algebra\\ \hline differential graded $\mathbf k$-modules&${\mathcal A}s$&dga\\ &$A_\infty$&$A_\infty$-algebra\\ \hline ${\rm BiCompl}_v$&${\mathrm d}As$&bidga\\ &$dA_\infty$&derived $A_\infty$-algebra\\ \hline \end{tabular} \end{center} We hope that this provides a useful way of thinking about derived $A_\infty$-structures. It should allow many operadic techniques to be applied to their study and we give two examples. Firstly, we note a simple consequence of the homotopy transfer theorem. Secondly we develop operadic homology of derived $A_\infty$-algebras and relate this to formality of dgas. This paper is organised as follows. We start by recalling some previous results in Section \ref{sec:review}. In the first part we summarise some definitions, conventions and results about derived $A_\infty$-algebras. The second part is concerned with classical $A_\infty$-algebras. We look at the operad ${\mathcal A}s$ encoding associative algebras and summarise how to obtain the operad $A_\infty$ as a resolution of ${\mathcal A}s$. In Section~\ref{sec:dAs} we generalise this to the operad ${\mathrm d}As$. More precisely, this operad lives in the category of bicomplexes with trivial horizontal differential. It encodes bidgas and can be described as the composition of the operad of dual numbers and ${\mathcal A}s$ using a distributive law. The main result of this section is computing its Koszul dual cooperad. Section~\ref{sec:dAinfty} contains our main result. We describe the operad $dA_\infty$ encoding derived $A_\infty$-algebras and show that it agrees with the cobar construction of the reduced Koszul dual cooperad of ${\mathrm d}As$. In Section~\ref{sec:infmorphims} we consider $\infty$-morphisms and show that they coincide with the derived $A_\infty$-morphisms defined by Sagave. 
We also give an immediate application of the operadic approach, by deducing the existence of a $dA_\infty$-algebra structure on the vertical homology of a bidga over a field from the homotopy transfer theorem. In Section~\ref{sec:HH}, we study the operadic homology of derived $A_\infty$-algebras. By comparing this to the previously defined Hochschild cohomology of~\cite{RoiWhi11}, we deduce a criterion for intrinsic formality of a dga. We conclude with a short section outlining some areas for future investigation. The second author was supported by EPSRC grant EP/G051348/1. \section{A review of known results}\label{sec:review} Throughout this paper let $\mathbf k$ denote a commutative ring unless stated otherwise. All operads considered are non-symmetric. \subsection{Derived \texorpdfstring{$A_\infty$}{A-infinity}-algebras} We are going to recall some basic definitions and results regarding derived $A_\infty$-algebras. This is just a brief recollection; we refer to~\cite{Sag10} and~\cite{RoiWhi11} for more details. We start by considering $(\mathbb{N},\mathbb{Z})$-bigraded $\mathbf k$-modules \[ A = \bigoplus\limits_{i \in \mathbb{N}, j \in \mathbb{Z}} A^j_i. \] The lower grading is called the \emph{horizontal degree} and the upper grading the \emph{vertical degree}. Note that the horizontal grading is homological whereas the vertical grading is cohomological. A morphism of bidegree $(u,v)$ is then a morphism of bigraded modules that lowers the horizontal degree by $u$ and raises the vertical degree by $v$. We are observing the \emph{Koszul sign rule}, that is \[ (f \otimes g)(x\otimes y) = (-1)^{pi+qj} f(x) \otimes g(y) \] if $g$ has bidegree $(p,q)$ and $x$ has bidegree $(i,j)$. Here we have adopted the grading conventions used in~\cite{RoiWhi11}. We can now say what a derived $A_\infty$-algebra is. 
\begin{defn}\cite{Sag10} A {\it derived $A_\infty$-structure} (or {\it $dA_\infty$-structure} for short) on an $(\mathbb{N}$,$\mathbb{Z})$-bigraded $\mathbf k$-module $A$ consists of $\mathbf k$-linear maps \[ m_{ij}: A^{\otimes j} \longrightarrow A \] of bidegree $(i,2-(i+j))$ for each $i \ge 0$, $j\ge 1$, satisfying the equations \begin{equation}\label{dobjectequation} \sum\limits_{\substack{ u=i+p, v=j+q-1 \\ j=1+r+t}} (-1)^{rq+t+pj} m_{ij} (1^{\otimes r} \otimes m_{pq} \otimes 1^{\otimes t}) = 0 \end{equation} for all $u\ge 0$ and $v \ge 1$. A {\it $dA_\infty$-algebra} is a bigraded $\mathbf k$-module together with a $dA_\infty$-structure. \end{defn} \begin{defn}\cite{Sag10}\label{def:morphism} A map of $dA_\infty$-algebras from $(A, m^A)$ to $(B, m^B)$ consists of a family of $\mathbf k$-module maps $f_{ij}: A^{\otimes j}\rightarrow B$ of bidegree $(i,1-i-j)$ with $i\geq 0, j\geq 1$, satisfying \begin{equation}\label{dmapequation} \sum\limits_{\substack{ u=i+p, v=j+q-1 \\ j=1+r+t}} (-1)^{rq+t+pj} f_{ij} (1^{\otimes r} \otimes m^A_{pq} \otimes 1^{\otimes t}) = \\ \sum\limits_{\substack{ u=i+p_1+\cdots+p_j,\\ v=q_1+\cdots+q_j }} (-1)^{\sigma} m^{B}_{ij}(f_{p_1q_1}\otimes\cdots\otimes f_{p_jq_j}), \end{equation} with $$ \sigma=u+\sum\limits_{k=1}^{j-1}(p_k+q_k)(j+k)+q_k(\sum\limits_{s=k+1}^j p_s+q_s). $$ \end{defn} Sagave does not define composition of maps of $dA_\infty$-algebras directly in terms of this definition. Instead this is done via a certain reformulation as maps on the reduced tensor algebra; see~\cite[4.5]{Sag10}. It follows that $dA_\infty$-algebras form a category. Examples of $dA_\infty$-algebras include classical $A_\infty$-algebras, which are derived $A_\infty$-algebras concentrated in horizontal degree 0. Other examples are bicomplexes and bidgas, in the sense of the following definition. \begin{defn} \label{def:bidga} A \emph{bidga} is a derived $A_\infty$-algebra with $m_{ij}=0$ for $i+j \geq 3$. 
A \emph{morphism of bidgas} is a morphism of derived $A_\infty$-algebras $f_{ij}$ with $f_{ij}=0$ for $i+j\geq 2$. \end{defn} Sagave notes that this is equivalent to saying that a bidga is a monoid in the category of bicomplexes. For derived $A_\infty$-algebras, the analogue of a quasi-isomorphism is called an $E_2$-equivalence. To explain this, we need to discuss twisted chain complexes. The terminology \emph{multicomplex} is also used for a twisted chain complex. \begin{defn}\label{def:twistedchaincx} A \emph{twisted chain complex} $C$ is an $({\mathbb N},{\mathbb Z})$-bigraded $\mathbf k$-module with differentials $d_i^C:C\longrightarrow C$ of bidegree $(i, 1-i)$ for $i\geq 0$ satisfying $$\sum_{i+p=u} (-1)^{i}d_i^Cd_p^C=0$$ for $u\geq 0$. A \emph{map of twisted chain complexes} $C\longrightarrow D$ is a family of maps $f_i:C\longrightarrow D$ of bidegree $(i,-i)$ satisfying $$ \sum_{i+p=u} (-1)^{i} f_id^C_p = \sum_{i+p=u} d^D_i f_p. $$ The composition of maps $f:E\to F$ and $g: F\to G$ is defined by $(gf)_u=\sum_{i+p=u}g_if_p$ and the resulting category is denoted ${\rm{tCh}}_k$. \end{defn} A derived $A_\infty$-algebra has an underlying twisted chain complex, specified by the maps $m_{i1}$ for $i\geq 0$. If $f: C\longrightarrow D$ is a map of twisted chain complexes, then $f_0$ is a $d_0$-chain map and $H_*^v(f_0)$ induces a $d_1$-chain map. \begin{defn}\label{def:equivs} A map $f: C\longrightarrow D$ of twisted chain complexes is an \emph{$E_1$-equivalence} if $H_t^v(f_0)$ is an isomorphism for all $t\in{\mathbb Z}$ and an \emph{$E_2$-equivalence} if $H_s^h(H_t^v(f_0))$ is an isomorphism for all $s\in{\mathbb N}$, $t\in{\mathbb Z}$. \end{defn} The first main advantage of derived $A_\infty$-structures over $A_\infty$-structures is that one has a reasonable notion of a minimal model for differential graded algebras without any projectivity assumptions on the homology. \begin{thm}\cite{Sag10} Let $A$ be a dga over $\mathbf k$. 
Then there is a degreewise $\mathbf k$-projective $dA_\infty$-algebra $E$ together with an $E_2$-equivalence $E \longrightarrow A$ such that \begin{itemize} \item $E$ is minimal (i.e.~$m_{01}=0$), \item $E$ is unique up to $E_2$-equivalence, \item together with the differential $m_{11}$ and the multiplication $m_{02}$, $E$ is a termwise $\mathbf k$-projective resolution of the graded algebra $H^*(A)$. \end{itemize} \end{thm} The second and third authors then gave the analogue of Kadeishvili's formality criterion for dgas using Hochschild cohomology. They describe derived $A_\infty$-structures in terms of a Lie algebra structure on morphisms of the underlying $\mathbf k$-module $A$. Then they use this Lie algebra structure to define Hochschild cohomology for a large class of derived $A_\infty$-algebras and eventually reach the following result~\cite[Theorem 4.4]{RoiWhi11}. Recall that a dga is called intrinsically formal if any other dga $A'$ such that $H^*(A)\cong H^*(A')$ as associative algebras is quasi-isomorphic to $A$. \begin{thm}\cite{RoiWhi11} Let $A$ be a dga and $E$ its minimal model with $dA_\infty$-structure $m$. By $\tilde{E}$, we denote the underlying bidga of $E$, i.e. $\tilde{E}=E$ as $\mathbf k$-modules together with $dA_\infty$-structure $\tilde{m}=m_{11}+m_{02}$. If \[ HH^{m,2-m}_{bidga}(\tilde{E},\tilde{E})=0 \quad\quad\mbox{for\ } m \ge 3, \] then $A$ is intrinsically formal. \end{thm} \subsection{The operad \texorpdfstring{${\mathcal A}s$}{As}} The goal of our paper is to describe derived $A_\infty$-algebras as algebras over an operad, and to show that this operad is a minimal model of a certain Koszul operad. The operad in question is an operad called ${\mathrm d}As$ (defined in Section~\ref{sec:dAs}), which is a generalisation of the operad ${\mathcal A}s$ that encodes associative algebras. So let us recall this strategy for ${\mathcal A}s$ itself. For this subsection only, let $\mathbf k$ be a field. 
We work in the category of (cohomologically) differential graded $\mathbf k$-vector spaces, denoted ${\mathrm d}gkvs$. We will use the notation $\mathcal F(M)$ for the free (non-symmetric) operad generated by a collection $M=\{M(n)\}_{n\geq 1}$ of graded $\mathbf k$-vector spaces. It is weight graded by the number $s$ of vertices in the planar tree representation of elements of $\mathcal F(M)$ and we denote by $\mathcal F_{(s)}(M)$ the corresponding graded $\mathbf k$-vector space. We denote by $\mathcal P(M,R)$ the operad defined by generators and relations, $\mathcal F(M)/(R)$. A \emph{quadratic operad} is an operad such that $R\subset \mathcal F_{(2)}(M)$. \begin{defn} The operad ${\mathcal A}s$ in ${\mathrm d}gkvs$ is given by \[ {\mathcal A}s= \mathcal P(\mathbf k\mu, \mathbf k as) \] where $\mu$ is a binary operation concentrated in degree zero, and $as=\mu \circ_1 \mu - \mu \circ_2 \mu$. The differential is trivial. \end{defn} It is easy to verify that an ${\mathcal A}s$-algebra structure on the differential graded $\mathbf k$-vector space $A$, i.e. a morphism of dg operads $${\mathcal A}s \xrightarrow{\Phi} {{\rm End}}_A,$$ endows $A$ with the structure of an associative dga, with multiplication $$\Phi(\mu): A^{\otimes 2} \longrightarrow A.$$ \begin{thm} The operad ${\mathcal A}s$ is a Koszul operad, i.e.~the map of operads in ${\mathrm d}gkvs$ \[ \Omega({\mathcal A}s^{\scriptstyle \text{\rm !`}}) \longrightarrow {\mathcal A}s \] is a quasi-isomorphism. Furthermore, an algebra over $\Omega({\mathcal A}s^{\scriptstyle \text{\rm !`}})$ is precisely an $A_\infty$-algebra. \end{thm} Here, a quasi-isomorphism of operads is a quasi-isomorphism of dg-$\mathbf k$-vector spaces in each arity degree. We do not recall the definitions of the Koszul dual cooperad $(-)^{\scriptstyle \text{\rm !`}}$ or the cobar construction $\Omega(-)$ here. (This is going to be discussed in greater detail for our computations later). 
Let us just mention now that the cobar construction of a cooperad is a free graded operad endowed with a differential built from the cooperad structure, so we can think of the map above as a free resolution of the operad ${\mathcal A}s$. This result can be proved using beautiful geometric and combinatorial methods such as the Stasheff cell complex. Unfortunately, the derived case will not be as obviously geometric. Our aim is to create an analogue of the above for the derived case. The first step is to consider working in a different category - instead of differential graded $\mathbf k$-vector spaces, we consider a category of graded chain complexes over a commutative ring $\mathbf k$. The role of ${\mathcal A}s$ in this case is going to be played by an operad ${\mathrm d}As$, which encodes bidgas rather than associative dgas. The first goal is showing that ${\mathrm d}As$ is a Koszul operad, i.e. that \[ ({\mathrm d}As)_\infty := \Omega((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}) \longrightarrow {\mathrm d}As \] is a quasi-isomorphism of operads in an appropriate category. We are going to achieve this by ``splitting'' ${\mathrm d}As$ into two parts, namely the operad of dual numbers and ${\mathcal A}s$ itself, via a distributive law. Secondly, we are going to compute the generators and differential of $({\mathrm d}As)_\infty$ explicitly, so we can read off that $({\mathrm d}As)_\infty$-algebras give exactly derived $A_\infty$-algebras in the sense of Sagave. Our work will show that the operad controlling derived $A_\infty$-algebras can be seen as a free resolution of the operad encoding bidgas, in the same sense that the classical $A_\infty$-operad is a free resolution of the operad encoding associative dgas. \section{The operad \texorpdfstring{${\mathrm d}As$}{dAs}}\label{sec:dAs} In the first part of this section, we recall some basic notions about the Koszul dual cooperad of a given operad and we compute the Koszul dual of ${\mathrm d}As$. 
Further details can be found in~\cite{Fresse04}, which covers Koszul duality for operads over a general commutative ground ring. We also refer to the book of Loday and Vallette~\cite{LodVal12}. We are first going to specify the category we work in. Again, let $\mathbf k$ be a commutative ring. \subsection{Vertical bicomplexes and operads in vertical bicomplexes}\label{SS:verticalbicomplex} \begin{defn} The category of \emph{vertical bicomplexes} ${\rm BiCompl}_v$ consists of bigraded $\mathbf k$-modules as above together with a vertical differential \[ d_A: A^j_i \longrightarrow A^{j+1}_{i} \] of bidegree $(0,1)$. The morphisms are those morphisms of bigraded modules commuting with the vertical differential. We denote by ${\rm Hom}(A,B)$ the set of morphisms (preserving the bigrading) from $A$ to $B$. If $c,d \in A$ have bidegree $(c_1,c_2)$ and $(d_1,d_2)$ respectively we denote by $|c||d|$ the integer $c_1d_1+c_2d_2$. We define a degree shift operation on ${\rm BiCompl}_v$ as follows. Let $A \in {\rm BiCompl}_v$. Then $sA$ is defined as $$(sA)_i^j=A_i^{j+1}$$ with $$d_{sA}(sx)=-s(d_Ax).$$ So if $c \in A$ is of bidegree $(c_1, c_2)$, then $sc \in sA$ is of bidegree $(c_1, c_2-1)$. This shift is compatible with the embedding of differential graded complexes into ${\rm BiCompl}_v$ given by $C^l_0=C^l$ and $C^l_k=0$, if $k>0$. The tensor product of two vertical bicomplexes $A$ and $B$ is given by $$ (A\otimes B)_u^v=\bigoplus_{i+p=u,\, j+q=v}A_i^j\otimes B_p^q, $$ with $d_{A\otimes B}=d_A\otimes 1+1\otimes d_B:(A\otimes B)_u^v\to (A\otimes B)_u^{v+1}$. \end{defn} Note that ${\rm BiCompl}_v$ is isomorphic to the category of $\mathbb{N}$-graded chain complexes of $\mathbf k$-modules. There are two other sorts of morphism that we will consider later and we introduce notation for these now. (Various alternative choices of notation are used in the literature.) Let $A$ and $B$ be two vertical bicomplexes. We write ${\rm Hom}_\mathbf k$ for morphisms of $\mathbf k$-modules. 
We will denote by ${\rm Mor}(A,B)$ the vertical bicomplex given by $${\rm Mor}(A,B)_u^{v}=\prod_{\alpha,\beta} {\rm Hom}_\mathbf k(A^\beta_{\alpha},B^{\beta+v}_{\alpha-u}),$$ with vertical differential given by $\partial_{{\rm Mor}}(f)= d_Bf - (-1)^{j}f d_A$ for $f$ of bidegree $(l,j)$. We will denote by ${\textbf{Hom}}(A,B)$ the (cohomologically) graded complex given by $${\textbf{Hom}}(A,B)^k=\prod_{\alpha,\beta} {\rm Hom}_\mathbf k(A^\beta_{\alpha},B^{\beta+k}_{\alpha}),$$ with the same differential as above. One has $$ {\rm Hom}(A,B)={\rm Mor}(A,B)_0^0 \qquad\text{ and }\qquad {\textbf{Hom}}(A,B)^*={\rm Mor}(A,B)_0^*. $$ \begin{defn} A \emph{collection} in ${\rm BiCompl}_v$ is a collection $\{A(n)\}_{n\geq 1}$ of vertical bicomplexes. We denote by $\mathcal C{\rm BiCompl}_v$ the category of collections of vertical bicomplexes. This category is endowed with a monoidal structure, the plethysm given by, for any two collections $M$ and $N$, \[ (M\circ N)(n)=\bigoplus_{k,\ l_1+\cdots+l_k=n} M(k)\otimes N(l_1)\otimes\cdots\otimes N(l_k). \] The unit for the plethysm is given by the collection \[ I(n)=\begin{cases} 0, & \text{ if } n\not=1, \\ \mathbf k \text{ concentrated in bidegree } (0,0), & \text{ if } n=1.\end{cases} \] Given two collections $A$ and $B$ in ${\rm BiCompl}_v$, one can consider again the three collections \begin{itemize} \item ${\rm Hom}(A,B)(n):=\{{\rm Hom}(A(n),B(n))\}_{n\geq 1}$ in the category of $\mathbf k$-modules, \item ${\rm Mor}(A,B)(n):=\{{\rm Mor}(A(n),B(n))\}_{n\geq 1}$ in the category of vertical bicomplexes and \item ${\textbf{Hom}}(A,B)(n):=\{{\textbf{Hom}}(A(n),B(n))\}_{n\geq 1}$ in the category of complexes. \end{itemize} \end{defn} \begin{defn} A (non-symmetric) \emph{operad} in ${\rm BiCompl}_v$ is a monoid in $\mathcal C{\rm BiCompl}_v$. This is the usual definition of operads in the symmetric monoidal category $({\rm BiCompl}_v,\otimes)$.
\end{defn} For a vertical bicomplex $A$, the \emph{endomorphism operad} ${\rm End}_A$ is the operad in vertical bicomplexes given by ${\rm End}_A(n)={\rm Mor}(A^{\otimes n},A)$, where the operad structure is given by the composition of morphisms, as usual. \subsection{The operad \texorpdfstring{${\mathrm d}As$}{dAs}} We now describe the operad in ${\rm BiCompl}_v$ that encodes bidgas. \begin{defn}The operad ${\mathrm d}As$ in ${\rm BiCompl}_v$ is defined as $\mathcal P(M_{{\mathrm d}As},R_{{\mathrm d}As})$ where $$M_{{\mathrm d}As}(n)=\begin{cases} 0, & \text{if } n>2, \\ \mathbf k m_{02} \text{ concentrated in bidegree } (0,0), & \text{if } n=2, \\ \mathbf k m_{11} \text{ concentrated in bidegree } (1,0), & \text{if } n=1, \end{cases}$$ and $$R_{{\mathrm d}As}=\mathbf k (m_{02}\circ_1 m_{02}-m_{02}\circ_2 m_{02})\oplus \mathbf k m_{11}^2 \oplus \mathbf k (m_{11}\circ_1 m_{02}-m_{02}\circ_1 m_{11} - m_{02}\circ_2 m_{11}),$$ with trivial vertical differential. \end{defn} This operad is clearly quadratic. The following result is now essentially a matter of definitions, but we include the details for completeness. \begin{prop}\label{prop:bidga} The category of ${\mathrm d}As$-algebras in ${\rm BiCompl}_v$ is isomorphic to the category of bidgas. \end{prop} \begin{proof} A ${\mathrm d}As$-algebra structure on a vertical bicomplex $A$ is given by a morphism of operads \[ \theta: {\mathrm d}As \longrightarrow {\rm End}_A. \] Since $A$ is a vertical bicomplex, it is $({\mathbb N},{\mathbb Z})$-graded and comes with a vertical differential $d_A=d^v$ of bidegree $(0,1)$. From the images of the operad generators we have morphisms \begin{align*} m&=\theta(m_{02}):A^{\otimes 2}\longrightarrow A,\\ d^h&=\theta(m_{11}):A\longrightarrow A, \end{align*} of bidegree $(0,0)$ and $(1,0)$ respectively. The operad relations tell us precisely that $m$ is associative, that $d^h$ is a differential and that $d^h$ is a derivation with respect to $m$.
The fact that $\theta$ is a morphism of operads in ${\rm BiCompl}_v$, and that the differential on each ${\mathrm d}As(n)$ is trivial, gives us two further relations: \begin{align*} \partial_{{\rm Mor}}(m)&=0,\\ \partial_{{\rm Mor}}(d^h)&=0. \end{align*} The first of these relations tells us that $d^v$ is a derivation with respect to $m$ and the second that $d^vd^h-d^hd^v=0$. This gives $A$ precisely the structure of a bidga (with exactly Sagave's sign conventions). A morphism of ${\mathrm d}As$-algebras $f:A\longrightarrow B$ is a map of vertical bicomplexes which also commutes with $m$ and $d^h$. This is precisely a morphism of bidgas. \end{proof} Let us describe the operad ${\mathrm d}As$ in a little more detail. Let $m_k$ denote any $(k-1)$-fold composite of $m_{02}$. (Because of the associativity relation, $m_k$ does not depend on the choice of composition.) Due to the ``Leibniz rule relation'' every element of ${\mathrm d}As$ in arity $k$ can be written as a $\mathbf k$-linear combination of the elements \[ m_k(m_{11}^{\epsilon_1},...,m_{11}^{\epsilon_k}) \] with $\epsilon_i \in \mathbb{Z}/2$. 
The partial composition is given by \begin{multline} m_l(m_{11}^{\epsilon_1},...,m_{11}^{\epsilon_l}) \circ_i m_k(m_{11}^{\delta_1},...,m_{11}^{\delta_k} )\\ = (-1)^{\alpha}\nonumber \begin{cases} \sum\limits_{s=1}^k (-1)^{\beta} m_{k+l-1}(m_{11}^{\epsilon_1},...,m_{11}^{\epsilon_{i-1}},m_{11}^{\delta_1},...,m_{11}^{\delta_s + 1},...,m_{11}^{\delta_k}, m_{11}^{\epsilon_{i+1}},...,m_{11}^{\epsilon_l}), & \text{ if } \epsilon_i=1, \\ m_{k+l-1}(m_{11}^{\epsilon_1},...,m_{11}^{\epsilon_{i-1}},m_{11}^{\delta_1},...,m_{11}^{\delta_s},...,m_{11}^{\delta_k}, m_{11}^{\epsilon_{i+1}},...,m_{11}^{\epsilon_l}), & \text{ if } \epsilon_i=0, \end{cases} \end{multline} where $\alpha = \left(\sum\limits_{j=i+1}^l \epsilon_j\right)\left(\sum\limits_{r=1}^k \delta_r\right)$ and $\beta= \sum\limits_{r=1}^{s-1} \delta_r$. We see that we have an isomorphism of bigraded $\mathbf k$-modules, \[ {\mathrm d}As(n) \cong \mathbf k[x_1,...,x_n]/(x_1^2,...,x_n^2), \qquad |x_i|=(1,0) \] determined by assigning the monomial $x_1^{\epsilon_1}\dots x_n^{\epsilon_n}$ to the element $m_n(m_{11}^{\epsilon_1},...,m_{11}^{\epsilon_n})$. Let $\mathcal D$ denote the operad of dual numbers in the category of vertical bicomplexes, namely \[ \mathcal D = \mathcal P(\mathbf k m_{11}, \mathbf k m_{11}^2) \] with trivial differential. We can now reformulate the above description of ${\mathrm d}As$ in terms of plethysm and distributive laws; see~\cite[8.6]{LodVal12}. \begin{lem}\label{lem:distributive} The map \[ \varphi: \mathcal D \circ {\mathcal A}s \longrightarrow {\mathcal A}s \circ \mathcal D \] determined by \[ \varphi: m_{11}\circ_1 m_{02} \mapsto m_{02} \circ_1 m_{11} +m_{02} \circ_2 m_{11} \] defines a distributive law, such that the induced operad structure on $ {\mathcal A}s \circ \mathcal D$ coincides with the operad ${\mathrm d}As$.
\end{lem} \begin{proof} We adopt the notation and terminology of~\cite[8.6.3]{LodVal12}. We define $$ \varphi:\mathbf k m_{11}\circ_{(1)}\mathbf k m_{02}\longrightarrow \mathbf k m_{02}\circ_{(1)}\mathbf k m_{11} $$ as above. This gives a rewriting rule for the quadratic operads $\mathcal{D}$ and ${\mathcal A}s$ and it is clear that ${\mathrm d}As$ is isomorphic to ${\mathcal A}s\vee_\varphi \mathcal D$. From the description of the operad ${\mathrm d}As$ above, we see that the induced map ${\mathcal A}s\circ\mathcal{D}\longrightarrow {\mathcal A}s\vee_\varphi \mathcal D\cong {\mathrm d}As$ is an isomorphism. So, by~\cite[Proposition 8.6.4]{LodVal12}, $\varphi$ induces a distributive law and an isomorphism of operads ${\mathcal A}s\circ\mathcal{D}\longrightarrow {\mathcal A}s\vee_\varphi \mathcal D$. \end{proof} For $\mathcal P = \mathcal P(M,R)$ a quadratic operad, the \emph{Koszul dual cooperad} $\mathcal P^{\scriptstyle \text{\rm !`}}$ of $\mathcal P$ is given by \[ \mathcal P^{\scriptstyle \text{\rm !`}} = {\mathcal C}^c(sM,s^2 R). \] Here ${\mathcal C}^c(E,R)$ denotes the cooperad cogenerated by $E$ with corelations $R$. (For a description see \cite[Section 7.1.4]{LodVal12}.) There are two ways of describing the cooperad $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$, either by describing the distributive law $$\mathcal D^{\scriptstyle \text{\rm !`}}\circ {\mathcal A}s^{\scriptstyle \text{\rm !`}}\rightarrow {\mathcal A}s^{\scriptstyle \text{\rm !`}}\circ \mathcal D^{\scriptstyle \text{\rm !`}}$$ or by describing the elements of ${\mathcal C}^c(s(\mathbf k m_{11}\oplus\mathbf k m_{02}), s^2R_{{\mathrm d}As})$ in the cofree cooperad ${\mathcal F}^c(s(\mathbf k m_{11}\oplus\mathbf k m_{02}))$. The first description implies that for every $n$, $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(n)$ is a free $\mathbf k$-module.
\begin{prop}\label{prop:distributivedual} The underlying collection of the cooperad ${{\mathrm d}As}^{\scriptstyle \text{\rm !`}}$ is isomorphic to that of $${\mathcal D}^{\scriptstyle \text{\rm !`}}\circ {\mathcal A}s^{\scriptstyle \text{\rm !`}}=\mathbf k[\mu_{11}]\circ {\mathcal A}s^{\scriptstyle \text{\rm !`}}$$ where $\mu_{11}$ has bidegree $(1,-1)$. Hence, as a $\mathbf k$-module, $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(n)$ is free with basis given by elements $\nu_{in}$ of bidegree $(i,1-i-n)$. These elements are in 1-to-1 correspondence with the elements $s(m_{11})^i\circ \mu_n$ in $\mathcal D^{\scriptstyle \text{\rm !`}}\circ{\mathcal A}s^{\scriptstyle \text{\rm !`}}$. \end{prop} \begin{proof} The first part of the claim follows from Lemma~\ref{lem:distributive}, since ${\mathrm d}As\cong {\mathcal A}s\vee_{\varphi}\mathcal{D}$ and by~\cite[Proposition 8.6.15]{LodVal12}, there is an isomorphism of underlying collections $({\mathcal A}s\vee_{\varphi}\mathcal{D})^{\scriptstyle \text{\rm !`}}\cong \mathcal{D}^{\scriptstyle \text{\rm !`}}\circ{\mathcal A}s^{\scriptstyle \text{\rm !`}}$. The cooperad structures of $\mathcal D^{\scriptstyle \text{\rm !`}}$ and ${\mathcal A}s^{\scriptstyle \text{\rm !`}}$ are well-known and can be shown by induction with the methods used in Theorem~\ref{T:cooperad}. In arity $n$, ${\mathcal A}s^{\scriptstyle \text{\rm !`}}(n)$ is a free $\mathbf k$-module on the generator $\mu_n$. The element $\mu_n$ has bidegree $(0,1-n)$. The cooperad $\mathcal D^{\scriptstyle \text{\rm !`}}$ is concentrated in arity 1. It is the free cooperad on the generator $sm_{11}$. 
This implies that $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(n)$ is free on the images $\nu_{in}$ in $({\mathrm d}As)^{\scriptstyle \text{\rm !`}}(n)$ of the generators $$(sm_{11})^i\circ\mu_n \in (\mathcal D^{\scriptstyle \text{\rm !`}}\circ{\mathcal A}s^{\scriptstyle \text{\rm !`}})(n).$$ We can read off a generator's bidegree as \[ |\nu_{in}| = i(|m_{11}| + |s|) + |\mu_n| = (i,1-i-n). \] \end{proof} \begin{nota} Let $\mathcal{C}$ be a cooperad and $c \in \mathcal{C}(n)$. We are going to describe the cocomposition \[ \Delta: \mathcal{C} \longrightarrow \mathcal{C} \circ \mathcal{C}. \] We write \[ \Delta(c)=\sum\limits_{j,|I|=n} c_j; c_I. \] Here, $I = (i_1,...,i_j)$ is a $j$-tuple with $|I|=i_1+\cdots+i_j$, and \[ c_I=c_{i_1}\otimes\cdots\otimes c_{i_j} \in \mathcal{C}^{\otimes j}. \] If $\mathcal{C}={\mathcal F}^c(V)$ is a cofree cooperad cogenerated by a collection $V$, then it has a description in terms of trees whose vertices are labelled by elements of $V$; see~\cite[5.8.7]{LodVal12}. Moreover if $V(n)$ is a free $\mathbf k$-module for each $n$, then so is $\mathcal{C}(n)$, and a basis as a free $\mathbf k$-module is given by planar trees whose vertices are labelled by a basis of $V$. If the root of such a tree has arity $k$ and is labelled by $v$ we denote it by $v(t^1,\ldots,t^k)$ where $t^1,\ldots, t^k$ are elements of $\mathcal{C}={\mathcal F}^c(V)$. Remembering that \[ \Delta(t^r)=\sum t^r_{j_r};t^r_{I_r} \] one obtains the formula \begin{equation}\label{F:deltacoop} \Delta(v(t^1,\ldots,t^k))=1;v(t^1,\ldots,t^k)+ \sum (-1)^{\sum\limits_{r=1}^{k-1} |t^r_{I_r}|(\sum\limits_{s=r+1}^k |t^s_{j_s}|)} v(t^1_{j_1},\ldots,t^k_{j_k}); t^1_{I_1}\otimes\cdots\otimes t^k_{I_k}. \end{equation} \end{nota} We now compute the full structure of $({\mathrm d}As)^{\scriptstyle \text{\rm !`}}$.
From Proposition~\ref{prop:distributivedual} we already know the structure of its underlying bigraded $\mathbf k$-modules, and we can use (\ref{F:deltacoop}) to write down the cocomposition of its basis elements. We remark that we have chosen to work directly with the cooperad $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$, rather than with the operad $({\mathrm d}As)^{!}$. This is to avoid taking linear duals, which can be badly behaved over a general ground ring. \begin{thm}\label{T:cooperad} The cooperad $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$ is a sub-cooperad of ${\mathcal F}^c(sM_{{\mathrm d}As})$ with trivial differential. Its underlying collection consists of free $\mathbf k$-modules with basis $\{\mu_{ij}, i\geq 0,j\geq 1\}$ such that $\mu_{01}$ is the identity of the cooperad, $\mu_{02}=sm_{02}$ and $\mu_{11}=sm_{11}\in {\mathcal F}^c(sM_{{\mathrm d}As})$. The other $\mu_{ij}$ are defined inductively via $$\begin{array}{rcll} \mu_{i1}&=&\mu_{11}(\mu_{i-1,1}), & \text{for\ } i\geq 1, \\ \mu_{0n}&=&\sum\limits_{p+q=n}(-1)^{p(q+1)}\mu_{02}(\mu_{0p},\mu_{0q}), & \text{for\ } n\geq 2, \\ \mu_{ij}&=&\mu_{11}(\mu_{i-1,j})+\sum\limits_{r+t=i\atop{s+w=j}}(-1)^{|s\mu_{rs}||\mu_{tw}|+rw} \mu_{02}(\mu_{rs},\mu_{tw}), & \text{for\ } i\geq 1, j\geq 2. \end{array}$$ The element $\mu_{ij}$ has bidegree $(i,1-i-j)$. These elements satisfy \begin{equation}\label{F:Delta} \Delta(\mu_{uv})=\sum\limits_{i+p_1+\cdots+p_j=u\atop{q_1+\cdots+q_j=v}} (-1)^{X\left((p_1,q_1), \dots, (p_j,q_j)\right)} \mu_{ij};\mu_{p_1q_1}\otimes\cdots\otimes \mu_{p_j q_j}, \end{equation} where \begin{equation} \begin{aligned}\label{F:sign} X\left((p_1,q_1), \dots, (p_j,q_j)\right) &= \sum\limits_{k=1}^{j-1} |s\mu_{p_kq_k}|(\sum\limits_{l=k+1}^j |\mu_{p_lq_l}|)+ \sum\limits_{k=1}^{j-1} p_k(\sum\limits_{l=k+1}^j q_l)\\ &= \sum\limits_{k=1}^{j-1} \Big( (p_k+q_k)(j+k)+ q_k\sum\limits_{l=k+1}^j (p_l + q_l) \Big).
\end{aligned} \end{equation} \end{thm} \begin{proof} Firstly we are going to show that those inductively defined elements form a sub-cooperad of $\mathcal F^c(sM_{{\mathrm d}As})$. Then we will see that this sub-cooperad contains the quadratic relations $s^2R_{{\mathrm d}As}$. Together with Proposition \ref{prop:distributivedual}, this means that it must be $({\mathrm d}As)^{\scriptstyle \text{\rm !`}}$ itself. For the first part we have to prove formula (\ref{F:Delta}), which is done by induction on $u+v$. One has $$\Delta(\mu_{u1})=\sum\limits_{i+p=u} \mu_{i1};\mu_{p1}$$ which is proved by induction from the definition $$\mu_{u1}=\mu_{11}(\mu_{u-1,1}).$$ The case of $\Delta(\mu_{0v})$ is similar to the general case $\Delta(\mu_{uv})$, so we only prove formula (\ref{F:Delta}) for $u\geq 1, v\geq 2$. We would like to prove that $$\Delta(\mu_{uv})=\sum (-1)^{X(I)} \mu_{ij};\mu_I,$$ where the sum is taken over $i,j, I=((p_1,q_1),\ldots,(p_j,q_j))$ such that $i+\sum_k p_k=u, \sum_k q_k=v$. \noindent By formula~(\ref{F:deltacoop}) we have \begin{equation}\label{F:sumformula} \Delta(\mu_{uv})=\Delta\left(\mu_{11}(\mu_{u-1,v})+\sum\limits_{r+t=u, s+w=v} (-1)^{|s\mu_{rs}||\mu_{tw}|+rw} \mu_{02}(\mu_{rs},\mu_{tw})\right). \end{equation} We will evaluate the summands on the right hand side of the above formula separately using induction together with formula~(\ref{F:deltacoop}). Assume that we have proved (\ref{F:Delta}) for all $\mu_{kl}$ with $k+l<u+v$. This implies that \[ \Delta(\mu_{u-1,v})=\sum\limits (-1)^{X(I)} \mu_{i-1,j};\mu_I. \] Applying formula~(\ref{F:deltacoop}) allows us to relate this to $\Delta(\mu_{11}(\mu_{u-1,v}))$ with the result that \[ \Delta(\mu_{11}(\mu_{u-1,v}))=\mu_{01};\mu_{11}(\mu_{u-1,v})+\sum\limits_{}(-1)^0(-1)^{X(I)}\mu_{11}(\mu_{i-1,j});\mu_I. \] Thus we have computed the first summand of (\ref{F:sumformula}). 
As for the second summand, the induction assumption gives us \[ \Delta(\mu_{rs})=\sum\limits_{}(-1)^{X(I_1)}\mu_{\rho\tau};\mu_{I_1} \,\,\,\,\mbox{and}\,\,\,\, \Delta(\mu_{tw})=\sum\limits_{}(-1)^{X(I_2)}\mu_{\gamma\delta};\mu_{I_2} \] with $I_1=((p_1,q_1),\ldots,(p_\tau,q_\tau))$ and $I_2=((p_{\tau+1},q_{\tau+1}),\ldots,(p_j,q_j))$. Putting this in~(\ref{F:deltacoop}) gives \begin{align*} \Delta(\mu_{02}(\mu_{rs},\mu_{tw}))= &\sum\limits_{} (-1)^{\sum\limits_{k=1}^\tau|\mu_{p_kq_k}||\mu_{\gamma\delta}|}(-1)^{X(I_1)+X(I_2)} \mu_{02}(\mu_{\rho\tau},\mu_{\gamma\delta});\mu_{I_1}\otimes\mu_{I_2}\\ &+\mu_{01};\mu_{02}(\mu_{rs},\mu_{tw}). \end{align*} We will feed these computations back into (\ref{F:sumformula}) and work out the signs to obtain the desired (\ref{F:Delta}). Let $i\geq 1$ and $j\geq 2$. We are interested in computing the signs in front of elements of the type $\mu_{11}(\mu_{i-1,j});\mu_I$ and of the type $\mu_{02}(\mu_{\rho\tau},\mu_{\gamma\delta});\mu_I$ where \[ \begin{array}{rll} \rho+\gamma &=&i, \\ \tau+\delta&=&j, \\ I&=&((p_1,q_1),\ldots,(p_j,q_j)). \end{array} \] \noindent For the first type the sign is $(-1)^{X(I)}$. For the second type the sign is of the form $(-1)^Y$ where $Y$ is computed mod 2: \begin{align*} Y&=|s\mu_{rs}||\mu_{tw}|+rw + \sum\limits_{k=1}^{\tau}|\mu_{p_kq_k}||\mu_{\gamma\delta}|+ X(I_1)+X(I_2) \\ &=|s\mu_{rs}||\mu_{tw}|+rw + \sum\limits_{k=1}^{\tau} |\mu_{p_kq_k}||\mu_{\gamma\delta}|+ \sum\limits_{k=1}^{\tau-1} |s\mu_{p_kq_k}|(\sum\limits_{l=k+1}^\tau |\mu_{p_lq_l}|)+ \sum\limits_{k=1}^{\tau-1} p_k(\sum\limits_{l=k+1}^\tau q_l)\\ &\quad+\sum\limits_{k=\tau+1}^{j-1} |s\mu_{p_kq_k}|(\sum\limits_{l=k+1}^j |\mu_{p_lq_l}|)+ \sum\limits_{k=\tau+1}^{j-1} p_k(\sum\limits_{l=k+1}^j q_l). \end{align*} Let us now simplify the sign $Y$.
Using the equalities \begin{align*} |\mu_{tw}|&=|\mu_{\gamma\delta}|+\sum\limits_{k=\tau+1}^j |\mu_{p_kq_k}|, &\rho+\sum\limits_{k=1}^\tau p_k&=r,\\ |\mu_{rs}|&=|\mu_{\rho\tau}|+\sum\limits_{k=1}^\tau |\mu_{p_kq_k}|, &\sum\limits_{l=\tau+1}^{j} q_l&=w, \end{align*} one gets \begin{align*} Y&=X(I)+|s\mu_{rs}||\mu_{tw}|+rw + \sum\limits_{k=1}^{\tau} |s\mu_{p_kq_k}|(\sum\limits_{l=\tau+1}^j |\mu_{p_lq_l}|)\\ &\qquad\qquad\qquad\qquad\qquad+\sum\limits_{k=1}^{\tau} |\mu_{p_kq_k}|(|\mu_{\gamma\delta}|)+(\sum\limits_{k=1}^\tau p_k)(\sum_{l=\tau+1}^{j} q_l) \\ &=X(I)+|s\mu_{\rho\tau}||\mu_{\gamma\delta}|+(|s\mu_{\rho\tau}|+\tau|s|)(\sum\limits_{k=\tau+1}^j| \mu_{p_kq_k}|)+\rho w\\ &=X(I)+|s\mu_{\rho\tau}||\mu_{\gamma\delta}|+\rho(\delta-w)+\rho w \\ &=X(I)+|s\mu_{\rho\tau}||\mu_{\gamma\delta}|+\rho\delta. \end{align*} Putting this together, we obtain a summand of the form \[ (-1)^{X(I)} ( \mu_{11}(\mu_{i-1, j});\mu_I+\sum\limits_{\rho+\gamma=i\atop{\tau+\delta=j}}(-1)^{|s\mu_{\rho\tau}||\mu_{\gamma\delta}|+\rho\delta} \mu_{02}(\mu_{\rho\tau},\mu_{\gamma\delta});\mu_I)=(-1)^{X(I)} \mu_{ij};\mu_I, \] for $i\geq 1$ and $j\geq 2$. If $j=1$, we are interested in computing the sign in front of the element of the type $\mu_{11}(\mu_{i-1,1});\mu_{u-i,v}$ if $i\geq 1$ or in front of $\mu_{01};\mu_{uv}$ if $i=0$. In the first case one still gets $(-1)^{X(I)}$ with $I=(u-i,v)$ as well as in the second case. If $i=0$ and $j>1$ we are interested in computing the sign in front of the elements of the type $\mu_{02}(\mu_{0\tau},\mu_{0\delta});\mu_I$ where $\tau+\delta=j$ which has already been computed and coincides with the desired sign. Consequently formula~(\ref{F:Delta}) is proved. Hence the collection of $\mu_{ij}$'s forms a sub-cooperad of the free cooperad ${\mathcal F}^c(sM_{{\mathrm d}As})$.
Furthermore it contains $s^2R_{{\mathrm d}As}$, since \[ \begin{array}{rll} \mu_{03} &=&sm_{02}\circ_1 sm_{02}-sm_{02}\circ_2 sm_{02}, \\ \mu_{12}&=&sm_{11}\circ_1 sm_{02}-sm_{02}\circ_1sm_{11}-sm_{02}\circ_2sm_{11}, \\ \mbox{and}\,\,\, \mu_{21}&=&sm_{11}\circ_1sm_{11}. \end{array} \] We also know that its $\mathbf k$-module structure coincides with the $\mathbf k$-module structure of $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$, since the $\mathbf k$-basis elements $\mu_{in}$ are in bijection with the $\nu_{in}$ of Proposition~\ref{prop:distributivedual}. As a consequence, the cooperad described is the cooperad $({\mathrm d}As)^{\scriptstyle \text{\rm !`}}$. \end{proof} \begin{cor}\label{cor:infinitesimal} The infinitesimal cocomposition on $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$ is given by \[ \Delta_{(1)}(\mu_{uv}) =\sum\limits_{i+p=u \atop{r+q+t=v, r+1+t=j}}(-1)^{r(1-p-q)+pt}\mu_{ij};1^{\otimes r}\otimes \mu_{pq}\otimes 1^{\otimes t}. \] \qed \end{cor} \section{Derived \texorpdfstring{$A_\infty$}{A-infinity}-structures}\label{sec:dAinfty} In this section we will prove our main result, Theorem~\ref{thm:main}, describing derived $A_\infty$-algebras as algebras over the operad $({\mathrm d}As)_\infty$. Again~\cite{Fresse04} is our main reference for the cobar construction of a cooperad over a general ground ring. We will also interpret our description in terms of coderivations and compare with Sagave's approach. \subsection{The operad \texorpdfstring{$dA_\infty$}{dA-infinity}} We would now like to encode derived $A_\infty$-algebras via an operad. Recall from Section~\ref{sec:review} that a derived $A_\infty$-structure on a bigraded module $A$ consists of morphisms \[ m_{uv}: (A^{\otimes v})^*_* \longrightarrow A^{*+2-u-v}_{*-u} \] such that for $u\geq 0, v\geq 1$, \[ \sum\limits_{\substack{u=i+p, v=j+q-1,\\ j=1+r+t}} (-1)^{rq+t+pj} m_{ij}(1^{\otimes r} \otimes m_{pq} \otimes 1^{\otimes t}) =0. 
\] If one considers $-m_{01}$ as an internal differential of $A$ the relation reads \begin{multline*} (-m_{01})(m_{uv})- (-1)^{u+v} \sum_{r+t+1=v}m_{uv}(1^{\otimes r} \otimes (-m_{01}) \otimes 1^{\otimes t})=\\ (-1)^{u}\sum\limits_{\substack{u=i+p, v=j+q-1\\ j=1+r+t, (i,j)\not=(0,1),(p,q)\not=(0,1)}} (-1)^{rq+t+pj} m_{ij}(1^{\otimes r} \otimes m_{pq} \otimes 1^{\otimes t}). \end{multline*} \begin{defn}\label{def:dainfty} The operad $dA_\infty$ in ${\rm BiCompl}_v$ is defined as the free operad $$ \mathcal F(\mathbf k m_{uv}:u\geq 0, v\geq 1, (u,v)\neq (0,1)), $$ together with the differential \[ \partial_\infty(m_{uv})= (-1)^{u}\sum\limits_{\substack{u=i+p, v=j+q-1,\\ j=1+r+t,\\ (i,j),(p,q)\neq(0,1)}} (-1)^{rq+t+pj} m_{ij}(1^{\otimes r} \otimes m_{pq} \otimes 1^{\otimes t}). \] \end{defn} Hence it is easily verified that an algebra over the operad $dA_\infty$ in ${\rm BiCompl}_v$ is a derived $A_\infty$-algebra in the above sense. For a coaugmented cooperad $\mathcal C$, the \emph{cobar construction} $\Omega(\mathcal C)$ of $\mathcal C$ is the operad defined as $\mathcal F(s^{-1} \overline{\mathcal C})$, where $\overline{\mathcal C}$ is the cokernel of the coaugmentation, together with the differential $\partial_\Omega = d_1 + d_2$. Here, $d_2$ is induced by the infinitesimal cocomposition map $\Delta_{(1)}$ of $\mathcal C$ and $d_1$ is induced by the internal differential of $\mathcal C$ itself. Note that in our case $\mathcal C = d{\mathcal A}s$, this internal differential is trivial. We can now state the main result of our paper. \begin{thm}\label{thm:main} The operads $({\mathrm d}As)_\infty = \Omega((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}})$ and $dA_\infty$ agree. Hence, a derived $A_\infty$-algebra is a $({\mathrm d}As)_\infty$-algebra. \end{thm} \begin{proof} By definition, $\Omega((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}})$ is the free operad on the shift of $\overline{(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}}$. 
Let us denote its generators by \[ \rho_{ij}=s^{-1}\mu_{ij}, \qquad\text{for\ }i\geq 0, j\geq 1, i+j\not=1. \] The elements $\mu_{ij}$ were described in Theorem \ref{T:cooperad}. The element $\rho_{ij}$ obviously has bidegree $(i,2-i-j)$. Recall that if $\mathcal C$ is a coaugmented cooperad then the differential on $\Omega(\mathcal C)$ is obtained from $\Delta_{(1)}$ as follows. Assume $$\Delta_{(1)}(c)=\sum c_i;1^{\otimes r}\otimes c_j\otimes 1^{\otimes t},$$ then $$\partial_\Omega (s^{-1} c)=\sum (-1)^{|s^{-1}||c_i|} s^{-1}c_i(1^{\otimes r}\otimes s^{-1}c_j\otimes 1^{\otimes t}).$$ From Corollary \ref{cor:infinitesimal} one gets \begin{equation}\label{E:useful} \begin{aligned} \partial_\Omega(\rho_{uv})&=-\sum\limits_{\substack{u=i+p, v=j+q-1,\\ j=1+r+t,\\ (i,j),(p,q)\neq(0,1)}} (-1)^{r(1-p-q)+pt+i+j} \rho_{ij}(1^{\otimes r} \otimes \rho_{pq} \otimes 1^{\otimes t})\\ &=(-1)^{u}\sum\limits_{\substack{u=i+p, v=j+q-1,\\ j=1+r+t,\\ (i,j),(p,q)\neq(0,1)}} (-1)^{rq+pj+t} \rho_{ij}(1^{\otimes r} \otimes \rho_{pq} \otimes 1^{\otimes t}). \end{aligned} \end{equation} This is the definition \ref{def:dainfty} of the operad $dA_\infty$. \end{proof} Recall that a quadratic operad $\mathcal P$ is \emph{Koszul} if the map of operads \[ \mathcal P_\infty := \Omega(\mathcal P^{\scriptstyle \text{\rm !`}}) \longrightarrow \mathcal P \] is a quasi-isomorphism. \begin{prop}\label{prop:Koszul} The operad ${\mathrm d}As$ is Koszul. Thus, $dA_\infty$ is a minimal model of ${\mathrm d}As$. \end{prop} \begin{proof} We know that ${\mathrm d}As = \mathcal D \circ {\mathcal A}s$ by Proposition~\ref{prop:distributivedual}. The operads $\mathcal D$ and ${\mathcal A}s$ are Koszul. Using Theorem 8.6.11 of~\cite{LodVal12}, ${\mathrm d}As$ is Koszul. 
\end{proof} \begin{rem} If we do not put in the multiplication and consider just the operad $\mathcal{D}_\infty=\Omega\mathcal{D}^{\scriptstyle \text{\rm !`}}$ in ${\rm BiCompl}_v$, we obtain an operad whose algebras are precisely the twisted chain complexes. This can be seen either directly as a bigraded version of~\cite[10.3.17]{LodVal12} or by tracing just the $j=1$ parts of the structure through our results. \end{rem} \subsection{Coderivations and Sagave's approach} We now relate derived $A_\infty$-structures to coderivations. In the classical case, an $A_\infty$-structure on the differential graded $\mathbf k$-module $A$ is equivalent to a coderivation of degree $+1$ on the reduced tensor coalgebra \[ d: \overline{\mathcal{T}}^c(sA) \longrightarrow \overline{\mathcal{T}}^c(sA) \,\,\,\,\,\mbox{such that}\,\,\,\,\, d^2=0. \] Sagave generalised this viewpoint to derived $A_\infty$-algebras in the following way \cite[Section 4]{Sag10}. A derived $A_\infty$-structure on the bigraded $\mathbf k$-module $A$ is equivalent to a coderivation of degree $+1$ \[ \xymatrix{ \overline{\mathcal{T}}^c(sA) \ar[rr]^d \ar[d]_{\Delta} && \overline{\mathcal{T}}^c(sA) \ar[d]_{\Delta} \\ \overline{\mathcal{T}}^c (sA) \otimes \overline{\mathcal{T}}^c(sA)\ar[rr]^{d\otimes 1+1\otimes d} &&\overline{\mathcal{T}}^c(sA) \otimes \overline{\mathcal{T}}^c(sA)\\ } \] such that $(\overline{\mathcal{T}}^c(sA),d)$ is a twisted chain complex, see Definition \ref{def:twistedchaincx}, \cite[Lemma 4.1]{Sag10}. The definition of a differential of a twisted cochain complex differs from the condition $d^2=0$ by signs. Our approach differs from this. In the setting of associative algebras in dg-$\mathbf k$-modules, one has $$ {\mathcal A}s^{\scriptstyle \text{\rm !`}}(A)=\overline{\mathcal{T}}^c(sA). $$ However, $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A)$ is \emph{not} given by $\overline{\mathcal{T}}^c(sA)$ in the derived setting---we showed its structure in Theorem \ref{T:cooperad}.
So in our setting, a derived $A_\infty$-structure on the vertical bicomplex $A$ is given by a coderivation of degree $+1$ \[ \xymatrix{ (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A) \ar[d]_{\Delta_{(1)}} \ar[rr]^{d} && (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A) \ar[d]_{\Delta_{(1)}} \\ ((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \circ_{(1)} (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}})(A) \ar[rr]^{d\circ_{(1)}1+1\circ_{(1)} d} && ((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \circ_{(1)} (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}})(A) \\ } \] such that $d^2=0$. Comparing those two equivalent conditions we see the following. Sagave's description has the advantage of a much easier coalgebra structure while the complexity of the derived $A_\infty$-structure is encoded in the more complicated condition that a coderivation has to satisfy. In our description, a coderivation has to satisfy the relatively simple condition $d^2=0$ while the complexity lies in the more complicated coalgebra structure. \section{Infinity morphisms and an application}\label{sec:infmorphims} The main purpose of this section is to describe $\infty$-morphisms of $(d{\mathcal A}s)_\infty=dA_\infty$-algebras, and to prove that they coincide with the derived $A_\infty$-morphisms defined by Sagave. At the end of the section, we give an application of the homotopy transfer theorem. \subsection{Infinity morphisms} Using the language of operads, the natural notion of morphism between two $dA_\infty$-algebras $A$ and $B$ is a map $f:A\rightarrow B$ respecting the algebra structure. This is the notion of a strict morphism. However, in the context of $\mathcal P_\infty$-algebras where $\mathcal P$ is a Koszul operad, there is also a more general notion of $\infty$-morphism, which is more relevant to the homotopy theory of $P_\infty$-algebras; see, for example,~\cite[Section 10.2]{LodVal12}. 
In the case of $A_\infty$-algebras, this gives rise to the usual notion of $A_\infty$-morphism between two $A_\infty$-algebras $A$ and $B$ and this can be formulated as a morphism of differential graded coalgebras between the bar constructions of $A$ and $B$. As seen at the end of the previous section, a $dA_\infty$-structure $m$ on the vertical bicomplex $A$ is equivalent to a square-zero coderivation $D_m$ of degree $+1$ on the $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$-coalgebra $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A)$. This coalgebra corresponds to the bar construction for $A_\infty$-algebras in our framework. This lends itself to the following definition. \begin{defn} Let $(A,m)$ and $(B,m')$ be $dA_\infty$-algebras. An \emph{$\infty$-morphism of $dA_\infty$-algebras} is a morphism \[ F: ((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A), D_m) \longrightarrow ((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(B),D_{m'}) \] of $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$-coalgebras. \end{defn} We will interpret this definition in terms of twisting morphisms, but first, we give a recollection of some facts based on the book of Loday and Vallette, adapted to the category of vertical bicomplexes. We will need these as a basis for our computation. \begin{defn}\label{def:convolution} Let $(\mathcal C, d_{\mathcal C})$ be a cooperad and $(\mathcal P, d_{\mathcal P})$ an operad in vertical bicomplexes. Following the notation of Section~\ref{SS:verticalbicomplex}, we consider the collection in complexes ${\textbf{Hom}}(\mathcal C,\mathcal P)$. It is a differential graded operad called the \emph{convolution operad}. 
There is an operation $\star$ on ${\textbf{Hom}}(\mathcal C,\mathcal P)$ defined by \[ f \star g: \mathcal C \xrightarrow{\Delta_{(1)}} \mathcal C \circ_{(1)} \mathcal C \xrightarrow{f \circ_{(1)} g} \mathcal P \circ_{(1)} \mathcal P \xrightarrow{\gamma_{(1)}} \mathcal P, \] where $\Delta_{(1)}$ and $\gamma_{(1)}$ are respectively the infinitesimal cocomposition and composition maps. As in~\cite[6.4.4]{LodVal12}, this determines the structure of a differential graded pre-Lie algebra on $\prod_n{\textbf{Hom}}(\mathcal C, \mathcal P)(n)$. The associated differential graded Lie algebra is called the \emph{convolution Lie algebra}. \end{defn} \begin{defn} A \emph{twisting morphism} is an element $\alpha$ of degree $1$ in the complex ${\textbf{Hom}}(\mathcal C, \mathcal P)$ satisfying the Maurer-Cartan equation \[ \partial(\alpha) + \alpha \star\alpha = 0. \] We denote the set of twisting morphisms by ${\rm Tw}(\mathcal C, \mathcal P)$. \end{defn} By construction, the cobar construction $\Omega$ satisfies \[ {\rm Hom}_{{\rm BiCompl}_v-op}(\Omega(\mathcal C), \mathcal P) \cong {\rm Tw}(\mathcal C, \mathcal P), \] where the left-hand side means morphisms of operads in vertical bicomplexes. This means that a $dA_\infty$-structure $m$ on the vertical bicomplex $A$, that is, a square-zero coderivation $D_m$ of degree $+1$ on the $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$-coalgebra $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A)$ as seen at the end of the previous section, is equivalent to a twisting morphism \[ \varphi_m \in {\rm Tw}( (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A). \] Let $A$ and $B$ be vertical bicomplexes, and let ${\rm End}^A_B$, a collection in vertical bicomplexes, be given by \[ {{\rm End}}^A_B(n)={\rm Mor}(A^{\otimes n}, B). \] The vertical differential is given by \[ \partial(f)= d_B f -(-1)^j \sum\limits_{v=0}^{n-1} f(1^{\otimes v} \otimes d_A \otimes 1^{n-v-1}) \] for $f$ in arity $n$ and bidegree $(i,j)$. 
For $f \in {\textbf{Hom}}( (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}^A_B)$ and $\varphi \in {\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)$, the map $f\ast \varphi$ is given by the composite \[ f \ast \varphi: (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \xrightarrow{\Delta_{(1)}} (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \circ_{(1)} (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \xrightarrow{f \circ_{(1)} \varphi} {\rm End}^A_B \circ_{(1)} {\rm End}_A \xrightarrow{\rho} {\rm End}^A_B \] where $\rho$ is induced by the composition of maps. Similarly, for $\psi \in {\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_B)$ and $f$ as above, $\psi \circledast f$ is given by \[ \psi \circledast f: (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \xrightarrow{\Delta} (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \circ (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \xrightarrow{\psi \circ f} {\rm End}_B \circ {\rm End}^A_B \xrightarrow{\lambda} {\rm End}^A_B \] where $\lambda$ is given by composition of maps. Now let $$\varphi_{m^A} \in {\rm Tw}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A) \,\,\,\mbox{and}\,\,\, \varphi_{m^B} \in {\rm Tw}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_B)$$ be $dA_\infty$-structures on the vertical bicomplexes $A$ and $B$ respectively. By \cite[Theorem 10.2.6]{LodVal12}, an $\infty$-morphism \[ F: (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(A) \longrightarrow (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(B) \] of $dA_\infty$-algebras is equivalent to an element $f \in {\textbf{Hom}}( (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}^A_B)$ of degree $0$ such that \[ f \ast \varphi_{m^A} - \varphi_{m^B} \circledast f = \partial(f). \] (note that the vertical bicomplex $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}(n)$ has trivial differential). Taking this into account we arrive at the following. 
\begin{thm} An $\infty$-morphism $f: A \longrightarrow B$ of $dA_\infty$-algebras is a morphism of derived $A_\infty$-algebras as defined by Sagave, that is, a collection of maps \[ f_{uv}: A^{\otimes v} \longrightarrow B \] of bidegree $(u, 1-u-v)$ satisfying equation ~(\ref{dmapequation}) of Definition~\ref{def:morphism}. \end{thm} \begin{proof} Assume that $f: (d{\mathcal A}s)^{\scriptstyle \text{\rm !`}} \longrightarrow {\rm End}^A_B$ satisfies \[ f \ast \varphi_{m^A} - \varphi_{m^B} \circledast f = \partial(f). \] We know the structure of $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$ from Theorem \ref{T:cooperad}. The underlying $\mathbf k$-module of $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$ is free on generators $\mu_{uv}$ of bidegree $(u,1-u-v)$. Write \[ f_{uv} := f(\mu_{uv}) \] and recall that $\varphi_{m^A}(\mu_{ij})=m^A_{ij}$ and $\varphi_{m^B}(\mu_{ij})=m^B_{ij}$. Using the formulas given by Theorem \ref{T:cooperad}, Corollary \ref{cor:infinitesimal} and because $\varphi_{m^A}$ is of bidegree $(0,1)$ we obtain \begin{align*} (f \ast \varphi_{m^A})(\mu_{uv})=&\sum\limits_{u=i+p \atop{v=j+q-1, j=r+t+1}} (-1)^{r(1-p-q)+pt+1+i+j} f_{ij}(1^{\otimes r} \otimes m^A_{pq} \otimes 1^{\otimes t})\\ =&\sum\limits_{u=i+p \atop{v=j+q-1, j=r+t+1}}(-1)^{rq+pj+t+u} f_{ij}(1^{\otimes r} \otimes m^A_{pq} \otimes 1^{\otimes t}) \end{align*} and \[ (\varphi_{m^B} \circledast f)(\mu_{uv})=\sum\limits_{} (-1)^X m_{ij}^B(f_{p_1q_1} \otimes \cdots \otimes f_{p_j q_j}) \] where \[ X=X((p_1,q_1),...,(p_j,q_j)) = \sum\limits_{k=1}^{j-1} \Big( (p_k+q_k)(j+k)+ q_k\sum\limits_{l=k+1}^j (p_l + q_l)\Big). \] Also, \[ \partial_{End}(f)(\mu_{uv})= d_B f_{uv} -(-1)^{1+u+v} \sum\limits_{l=0}^{v-1} f_{uv}(1^{\otimes l} \otimes d_A \otimes 1^{v-l-1}). \] With $d_A=m_{01}^A$ and $d_B=m_{01}^B$, this equals \[ \partial_{End}(f)(\mu_{uv})= m_{01}^B(f_{uv}) -(-1)^{1+u+v} \sum\limits_{l=0}^{v-1} f_{uv}(1^{\otimes l} \otimes m^A_{01} \otimes 1^{v-l-1}). 
\]
Putting this together, we arrive at
\[
(-1)^u\sum\limits_{u=i+p \atop{v=j+q-1, 1+r+t=j}}(-1)^{rq+t+pj} f_{ij}(1^{\otimes r} \otimes m^A_{pq} \otimes 1^{\otimes t}) = \sum\limits_{} (-1)^u(-1)^\sigma m_{ij}^B(f_{p_1q_1} \otimes \cdots \otimes f_{p_j q_j})
\]
which is exactly formula~(\ref{dmapequation}) of Sagave's definition.
\end{proof}

\subsection{The homotopy transfer theorem for \texorpdfstring{${\mathrm d}As$}{dAs} }

As an immediate application of our operadic description, we can apply the homotopy transfer theorem; see~\cite[Section 10.3]{LodVal12}. To do so, we will now need to work over a ground field. Although this takes us out of the context which motivated the introduction of derived $A_\infty$-algebras, it nonetheless gives us a new family of examples.

Let $\mathcal{P}$ be a Koszul operad, $W$ a $\mathcal{P}_\infty$-algebra and $V$ a homotopy retract of $W$. Recall that a $\mathcal{P}_\infty$-structure on $W$ is equivalent to an element $\varphi \in {\rm Tw}(\mathcal{P}^{\scriptstyle \text{\rm !`}}, {\rm End}_W)$. The homotopy transfer theorem~\cite[Theorem 10.3.6]{LodVal12} says that the homotopy retract $V$ can be given a $\mathcal{P}_\infty$-structure by the twisting morphism given by the following composite
\[
\mathcal{P}^{\scriptstyle \text{\rm !`}} \xrightarrow{\Delta} \mathcal{F}^c(\bar{\mathcal{P}}^{\scriptstyle \text{\rm !`}}) \xrightarrow{\mathcal{F}^c(s\varphi) } \mathcal{F}^c(s{\rm End}_W) \xrightarrow{\Psi} {\rm End}_V.
\]
(The map $\Delta$ is the coproduct map defined in \cite[5.8.12]{LodVal12}.) Moreover there is a standard way to interpret this formula in terms of the combinatorics of trees. We adopt the usual notation for this setting: we have the inclusion $i:V\to W$ and projection $p:W\to V$ such that $pi$ is the identity on $V$, and a homotopy $h:W\to W$ between $ip$ and the identity on $W$, $1_W- ip = d_Wh + hd_W$.

As a special case, we consider $\mathcal{P}={\mathrm d}As$ and we let $W=A$ be a bidga over a field. The vertical homology $V=H^v(A)$ of $A$ is a homotopy retract and we therefore obtain a derived $A_\infty$-algebra structure on it. Write $d_h=m_{11}$ for the horizontal differential and $m=m_{02}$ for the multiplication. Making the transferred structure explicit for this special case yields the following.

\begin{prop}
There is a derived $A_\infty$-algebra structure on the vertical homology $H^v(A)$ of a bidga $A$ over a field, which can be described as follows. We obtain $m_{ij}$ as a (suitably signed) sum over the maps corresponding to planar trees with $j$ leaves, where each vertex has been assigned a weight of either $2$ or $3$, and the number of vertices of weight $2$ is $i$. The procedure for assigning a map to such a tree is as follows. We adorn the trees with the map $i$ on the leaves, the map $p$ at the root and the map $h$ on internal edges. On vertices, we put the multiplication $m$ at every vertex of weight $3$ and the horizontal differential $d_h$ at every vertex of weight $2$. \qed
\end{prop}

This construction specializes to the $A_\infty$-case which involves binary trees with no vertices of degree $2$. That is, we recover the expected $A_\infty$-algebra structure on the part concentrated in degrees $(0,j)$; see~\cite[9.4.4, 10.3.8]{LodVal12}. The signs can be calculated recursively from the explicit signs appearing in the formula~(\ref{F:Delta}) for $\Delta$.

\section{Operadic and Hochschild cohomology}\label{sec:HH}

In this section, we compute the tangent complex of a derived $A_\infty$-algebra $A$, define the Hochschild cohomology of $A$ and make the link with the formality theorem of~\cite{RoiWhi11}. Hochschild cohomology has previously only been defined, in~\cite{RoiWhi11}, for a special class of derived $A_\infty$-algebras, the ``orthogonal'' ones.

Given a vertical bicomplex $A$, the trigraded $\mathbf k$-module $C_*^{*,*}(A,A)$ is defined by
\[
C_k^{n,i}(A,A)={\rm Mor}(A^{\otimes n},A)_k^i.
\] We will describe a graded Lie structure on $CH^{*+1}(A,A)$, where the grading is the total grading \[ CH^N(A,A)=\prod\limits_{n\geq 1}\ \prod\limits_{k,j| k+j+n=N} C_k^{n,j}(A,A), \] that is, an element in $ C_k^{n,j}(A,A)$ has {\it total degree} $j+k+n$. \subsection{Lie structures} Let us make explicit Definition~\ref{def:convolution} for the differential graded pre-Lie structure on $\prod_n{\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)(n)$. From Corollary \ref{cor:infinitesimal}, knowing the infinitesimal cocomposition on $(d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}$, the $\star$ operation on ${\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)$ is given by \begin{equation}\label{eq:prelie} (f \star g)(\mu_{uv})= \sum\limits_{j=1+r+t, u=i+p, v=r+q+t} (-1)^{r(1+p+q)+pt+|g||\mu_{ij}|} f(\mu_{ij})(1^{\otimes r} \otimes g(\mu_{pq}) \otimes 1^{\otimes t}), \end{equation} where $|g|$ denotes the vertical grading. For every $N$, there is a bijection \[ \Phi=\prod_n\Phi_n: \prod_n{\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)(n)^N \longrightarrow \prod_n\prod\limits_{u}C^{n,N+1-n-u}_u(A,A) \] where $\Phi_n: {\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)(n)^N \longrightarrow \prod\limits_{u}C^{n,N+1-n-u}_u(A,A)$ is given by evaluation: \[ \Phi_n(f_n) = \prod\limits_{u}f_n(\mu_{un}). \] The unique preimage of a family $(G_n)_n$, where $G_n=(G_u^{n,N+1-n-u})_u$, is given by the family $g=(g_n)_n=(\Phi_n^{-1}(G_n))_n$ in degree $N$ defined via \[ g_n(\mu_{un}) = G_u^{n,N+1-n-u}. \] We can now transport the pre-Lie structure on $\prod_n{\textbf{Hom}}((d{\mathcal A}s)^{\scriptstyle \text{\rm !`}}, {\rm End}_A)(n)$ to $CH^{*+1}(A,A)$ as follows: let $F=(F_n)_{n\geq 1}$ be of total degree $N+1$ and let $G=(G_m) _{m\geq 1}$ be of total degree $M+1$. There are unique families $f=(f_n)_n, g=(g_m)_m$ of degree $N$ and $M$ respectively such that $F=\Phi(f)$ and $G=\Phi(g)$. 
Then \[ F \star G := \Phi(f\star g). \] Note that the total degree of $F\star G$ is $N+M+1$. Hence the pre-Lie product decreases the total degree by one. That is, this pre-Lie product endows $CH^{*+1}(A,A)$ with the structure of a graded pre-Lie algebra. Naturally, this gives rise to a graded Lie algebra structure on $CH^{*+1}(A,A)$ via \[ [F,G] = F\star G - (-1)^{(N+1)(M+1)} G \star F. \] Let us now compare the pre-Lie structure above with the pre-Lie structure on $C^{*,*}_*(A,A)$ built in \cite{RoiWhi11}. Let $\mathfrak{f} \in C^{n,i}_k(A,A)$ and $\mathfrak{g} \in C^{m,j}_l(A,A)$. Then \[ \mathfrak{f}=f_n(\mu_{kn})\,\,\,\mbox{with}\,\,\, |f_n|=n+i+k-1 \] and \[ \mathfrak{g}=g_m(\mu_{lm})\,\,\,\mbox{with}\,\,\, |g_m|=m+j+l-1. \] Putting this into formula (\ref{eq:prelie}) yields \[ \mathfrak{f} \star \mathfrak{g}= \sum\limits_{r=0}^{n-1} (-1)^{(n+1)(m+1)+r(m+1)+j(n+1)+k(m+j+l+1)} \mathfrak{f}(1^{\otimes r} \otimes \mathfrak{g} \otimes 1^{\otimes n-r-1}) \in C^{n+m-1,i+j}_{k+l}. \] Hence we can see that the sign in this formula differs from the sign in the other pre-Lie algebra structure $\mathfrak{f} \circ_{RW} \mathfrak{g}$ given in~\cite[Definition 2.11]{RoiWhi11} by the sign $(-1)^{k(m+j+l+1)}$. We can read off the following. \begin{lem}\label{lem:squarezero} Let $m \in CH^2(A,A)$. Then $m$ defines a $dA_\infty$-structure on $A$ if and only if $m \star m=0$. \qed \end{lem} \subsection{Hochschild cohomology} We now use this new Lie structure to define another notion of Hochschild cohomology of derived $A_\infty$-algebras. This definition differs from that constructed in~\cite{RoiWhi11} by the different signs in the Lie structure, as explained above. It has the advantage that it applies to all $dA_\infty$-algebras rather than just the ``orthogonal'' ones. \begin{defn}\label{def:hochschild} Let $(A,m)$ be a $dA_\infty$-algebra. Then the \emph{Hochschild cohomology of $A$} is defined as \[ \hh^*(A,A) := \h^*(CH(A,A), [m,-] ). 
\]
\end{defn}

The morphism
\[
[m,-]: CH^*(A,A) \longrightarrow CH^*(A,A)
\]
is indeed a differential. Since $m$ has total degree 2 and $[-,-]$ has total degree $-1$, it raises degree by 1. By~\cite[Lemma 1.10]{Liv11} (with respect to the pre-Lie product $\circ$), one has $[m,[m,-]]= [m\star m,-]$, and the right-hand side vanishes because of Lemma \ref{lem:squarezero}. In the case of $(A,m)$ being an associative algebra, this definition recovers the classical definition of Hochschild cohomology of associative algebras.

\begin{rem}
Because of the bijection $\Phi$, the complex computing the Hochschild cohomology of $A$ coincides with the operadic cohomology. Recall that given a $\mathcal P$-algebra $A$, its operadic cohomology with coefficients in itself is $H^*({\textbf{Hom}}(\mathcal P^{\scriptstyle \text{\rm !`}}(A),A),\partial_\pi)$ where $\pi$ depends on the twisting cochain defining the structure on $A$. As an example, when $A$ is a bidga with $m=m_{11}+m_{02}$, i.e. if $A$ is a bidga with trivial vertical differential, the external grading is preserved by both bracketing with $m_{11}$ and $m_{02}$. Hence we can, as in~\cite[Section 3.1]{RoiWhi11}, consider bigraded Hochschild cohomology
\begin{equation}
\hh^{s,r}(A,A) = \h^s(\prod\limits_n C^{n,r}_{*-n}(A,A), [m,-]). \nonumber
\end{equation}
We denote this special case by $\hh^{*,*}_{bidga}(A,A)$. It corresponds to the operadic cohomology with respect to the operad ${\mathrm d}As$. When $\mathcal P$ is a Koszul operad, given a $\mathcal P_\infty$-algebra, one can still define its operadic cohomology as the homology of the complex
\begin{equation}\label{F:hhcomplex}
({\textbf{Hom}}(\mathcal P^{\scriptstyle \text{\rm !`}}(A),A),\partial_\pi),
\end{equation}
where $\pi$ represents the twisting cochain associated to the $\mathcal P_\infty$-structure on $A$. If $A$ is a derived $A_\infty$-algebra, the complex (\ref{F:hhcomplex}) is exactly the complex of Definition~\ref{def:hochschild}.
That is, operadic cohomology for derived $A_\infty$-algebras is Hochschild cohomology as defined at the beginning of the subsection. Note however, that in order to identify this cohomology theory with the Andr\'e-Quillen cohomology of derived $A_\infty$-algebras as in~\cite[Proposition 12.4.11]{LodVal12} one needs to assume that $A$ is bounded below for the vertical grading and is free as a $\mathbf k$-module. \end{rem} This more compact definition of Hochschild cohomology has some structural advantage over $\hh^*_{RW}$, the Hochschild cohomology defined in \cite{RoiWhi11}. In particular, we see that the Lie bracket $[-,-]$ on $CH^*(A,A)$ induces a Lie bracket on $$\hh^*(A,A)=\h^*(CH^*(A,A),D=[m,-]).$$ This is the case because $D$ is an inner derivation with respect to $[-,-]$ due to the graded Jacobi identity. Hence, the bracket of two cycles is again a cycle, and the bracket of a boundary and a cycle is a boundary. \begin{prop} The (shifted) Hochschild cohomology of a $dA_\infty$-algebra $\hh^{*+1}(A,A)$ has the structure of a graded Lie algebra. \qed \end{prop} \subsection{Uniqueness and formality} \begin{defn} Let $A$ be a bidga with $m_{01}=0, \partial = m_{11}, \mu=m_{02}$. Then \[ a = \sum\limits_{i,j} a_{ij}, \,\,\, a_{ij} \in C^{j,2-i-j}_i(A,A), \,\, i+j \ge 3 \] is a \emph{twisting cochain} if $\partial + \mu + a$ is a derived $A_\infty$-structure. \end{defn} One can read off the following result immediately. \begin{lem} The element $a$ is a twisting cochain if and only if \[ -D(a)=a\star a \] for $D=[\partial+\mu,-]$. \qed \end{lem} The above is the \emph{Maurer-Cartan formula}. A key step in the obstruction theory leading to uniqueness of $dA_\infty$-structures is perturbing an existing twisting cochain by an element $b$ of total degree 1. 
Roughly speaking, this new perturbed $dA_\infty$-structure satisfies the following- it equals the existing $dA_\infty$-structure below a certain bidegree, is modified using $b$ in this bidegree and $E_2$-equivalent to the ``old'' $dA_\infty$-structure. This has been shown in detail in \cite[Lemma 3.6]{RoiWhi11}, but we verify briefly that this also works with our new Lie bracket. \begin{lem}\label{lem:perturb} Let $A$ be a bidga with multiplication $\mu$, horizontal differential $\partial$ and trivial vertical differential. Let $a$ be a twisting cochain. Let either \begin{description} \item[(A)] $b \in C^{n-1,2-(n+k)}_k(A,A)$ for some $k, n$ such that $k+n \ge 3$, satisfying $[\partial,b]=0$ \end{description} or \begin{description} \item[(B)] $b \in C^{n,2-(n+k)}_{k-1}(A,A)$, for some $k, n$ with $k+n \ge 3$, satisfying $[\mu,b]=0$. \end{description} Then there is a twisting cochain $\overline{a}$ satisfying \begin{itemize} \item the $dA_\infty$-structures $\partial + \mu + a$ and $\overline{m} = \partial + \mu + \overline{a}$ are $E_2$-equivalent, \item $\overline{a}_{uv} = a_{uv}$ for $u<k$ or $v<n-1$ or $(u,v)=(k,n-1)$ in case {\bf (A)} and for $u<k-1$ or $v<n$ or $(u,v)=(k-1,n)$ in case {\bf (B)}, \item $\overline{a}_{kn} = a_{kn} - [\mu,b]$ in case {\bf (A)}, \item $\overline{a}_{kn}= a_{kn} - [\partial,b] $ in case {\bf (B)}. \end{itemize} \end{lem} \begin{proof} A quick check of the signs in both Lie brackets shows that \[ [\partial, b]_{RW} = [\partial, b] \,\,\,\mbox{and}\,\,\, [\mu,b]_{RW}=[\mu,b]. \] Hence this is identical to \cite[Lemma 3.6]{RoiWhi11}, where the $\overline{a}_{uv}$ are constructed inductively. \end{proof} We can now proceed to our uniqueness theorem, which has been shown in the context of $[-,-]_{RW}$ and $\hh^{*,*}_{RW}$ in \cite[Theorem 3.7]{RoiWhi11}. \begin{thm} Let $A$ be a bidga with multiplication $\mu$, horizontal differential $\partial$ and trivial vertical differential. 
If \[ \hh^{r,2-r}_{bidga}(A,A)=0 \,\,\,\mbox{for}\,\,\, r\ge 3, \] then every $dA_\infty$-structure on $A$ with $m_{01} =0$, $m_{11}= \partial$ and $m_{02}=\mu$ is $E_2$-equivalent to the trivial one. \end{thm} \begin{proof} Let $m$ be a $dA_\infty$-structure on $A$ as given in the statement. We want to show that it is equivalent to the $dA_\infty$-structure $\partial + \mu$. We can write $m=\partial + \mu + a$ with $a$ a twisting cochain. We look at $a_{kn}$, $k+n=t\ge 3$. We show that $m$ is equivalent to a $dA_\infty$-structure $\bar{m}= \partial + \mu + \bar{a}$ with $\bar{a}_{kn}=0$ for fixed $t$ by induction on $k$. To start this induction we assume that \[ a_{ij}=0\,\,\,\mbox{for}\,\,\, i+j<t \,\,\,\mbox{and for}\,\,\, i+j=t, \mbox{if}\,\,\, i<k. \] The new equivalent $dA_\infty$-structure $\overline{m}$ will also satisfy \[ \bar{a}_{ij}=a_{ij}=0 \,\,\,\mbox{for}\,\,\, i+j<t \,\,\,\mbox{and for}\,\,\, i+j=t, \mbox{if}\,\,\, i<k \] as well as further \[ \bar{a}_{kn}=0. \] So to construct $\overline{m}$, we ``kill'' $a_{kn}$ but leave the trivial lower degree $a_{ij}$ invariant. Since $a$ is a twisting cochain, it satisfies the Maurer-Cartan formula \[ -D(a)= a\star a. \] However, an argument similar to \cite[Theorem 3.7]{RoiWhi11} shows that this implies $D(a_{kn})=0$ for degree reasons. Hence $a_{kn}$ is a cycle and gives us a class \[ [a_{kn}] \in \hh^{k+n,2-k-n}_{bidga}(A,A) \] in the Hochschild cohomology of $A$. This cohomology group has been assumed to be zero, hence $a_{kn}$ must be a boundary too. Thus, there is a $b$ of total degree 1 with $D(b)=a_{kn}$. For degree reasons, this $b$ has to be of the form \[ b = b_0 + b_1, \,\,\, b_0 \in C^{n,2-n-k}_{k-1}(A,A), \,\,\, b_1 \in C^{n-1,2-n-k}_k(A,A) \] with \[ [\mu, b_0] = 0 \,\,\,\,\mbox{and}\,\,\,\, [\partial, b_1]=0, \] meaning that \[ D(b) = D(b_0 + b_1)= [\mu, b_1] + [\partial, b_0]. 
\] Then, just as in the proof of~\cite[Theorem 3.7]{RoiWhi11}, applying Lemma~\ref{lem:perturb} to $b_1$ yields a $dA_\infty$-structure $\overline{m}=\partial+\mu+\overline{a}$ with \[ \bar{a}_{kn}=a_{kn} - [\mu, b_1] - [\partial, b_0] = a_{kn} -D(b)=0. \] \end{proof} It was shown in \cite[Section 4]{RoiWhi11} that $\hh^{*,*}_{RW}(A,A)$ is invariant under $E_2$-equivalences. Since this argument is independent of choice of signs in the Lie bracket, it also holds for our $\hh^{*,*}_{bidga}(A,A)$. Hence we can now give a criterion for intrinsic formality of a dga. (Recall that a dga $A$ is intrinsically formal if for any other dga $B$ with $\h^*(A) \cong \h^*(B)$ as associative algebras, $A$ and $B$ are quasi-isomorphic.) \begin{cor} Let $A$ be a dga and $E$ its minimal model with $dA_\infty$-structure $m$. By $\tilde{E}$, we denote the underlying bidga of $E$, i.e. $\tilde{E}=E$ as $k$-modules together with $dA_\infty$-structure $\tilde{m}=m_{11}+m_{02}$. If \[ \hh^{m,2-m}_{bidga}(\tilde{E},\tilde{E})=0 \quad\quad\mbox{for}\ m \ge 3, \] then $A$ is intrinsically formal.\qed \end{cor} \section{Directions for further work}\label{sec:further} In this paper we have given an operadic perspective on derived $A_\infty$-structures, allowing us to view derived $A_\infty$-algebras as algebras over an operad. By results of various authors~\cite{Fresse09, Har10,Mur11}, it follows from our description that there is a model category structure on derived $A_\infty$-algebras such that the weak equivalences are the $E_1$-equivalences (see Definition~\ref{def:equivs}). However, we do not expect this model structure to be homotopically meaningful. Indeed, in order to view Sagave's minimal models as some kind of cofibrant replacement, one would need a model structure in which the weak equivalences are the $E_2$-equivalences. Producing such a model structure will involve a change of underlying category, probably to the category of twisted chain complexes. 
One would then need a suitable model structure on this underlying category and also to develop the appropriate notion of cobar construction. The apparent complication in carrying out such a programme explains our choice to work with vertical bicomplexes in this paper. We expect to return to this in future work. \end{document}
\begin{document}
% NOTE(review): the macro definitions below were garbled in extraction (macro names in
% the first argument of \newcommand were replaced by their expansions). The short names
% chosen here are plausible reconstructions -- confirm against the authors' source.
\newcommand{\ci}[1]{_{ {}_{\scriptstyle #1}}}
\newcommand{\norm}[1]{\ensuremath{\|#1\|}}
\newcommand{\abs}[1]{\ensuremath{\vert#1\vert}}
\newcommand{\p}{\ensuremath{\partial}}
\newcommand{\pr}{\mathcal{P}}
\newcommand{\pbar}{\ensuremath{\bar{\partial}}}
\newcommand{\dbar}{\overline\partial}
\newcommand{\D}{\mathcal{D}}
\newcommand{\B}{\mathbb{B}}
\newcommand{\bS}{\mathbb{S}}
\newcommand{\T}{\mathbb{T}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\cH}{\mathcal{H}}
\newcommand{\cL}{\mathcal{L}}
\newcommand{\tDelta}{\widetilde\Delta}
\newcommand{\la}{\langle }
\newcommand{\ra}{\rangle }
\newcommand{\rk}{\operatorname{rk}}
\newcommand{\card}{\operatorname{card}}
\newcommand{\Ran}{\operatorname{Ran}}
\newcommand{\OSC}{\operatorname{OSC}}
\renewcommand{\Im}{\operatorname{Im}}
\renewcommand{\Re}{\operatorname{Re}}
\newcommand{\tr}{\operatorname{tr}}
\renewcommand{\phi}{\varphi}
\newcommand{\f}[2]{\ensuremath{\frac{#1}{#2}}}
\newcommand{\entrylabel}[1]{\mbox{#1} }
\newenvironment{entry}
{\begin{list}{X}
{\renewcommand{\makelabel}{\entrylabel}
\setlength{\labelwidth}{55pt}
\setlength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
}
}
{\end{list}}
\numberwithin{equation}{section}
\newtheorem{dfn}{Definition}[section]
\newtheorem{thm}{Theorem}[section]
\newtheorem{lm}[thm]{Lemma}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{conj}[thm]{Conjecture}
\newtheorem{prob}[thm]{Problem}
\newtheorem{prop}[thm]{Proposition}
\newtheorem*{prop*}{Proposition}
\theoremstyle{remark}
\newtheorem{rem}[thm]{Remark}
\newtheorem*{rem*}{Remark} \newtheorem{quest}[thm]{Question} \title{Multilinear Dyadic Operators And Their Commutators} \author{Ishwari Kunwar} \address{Ishwari Kunwar, School of Mathematics\\ Georgia Institute of Technology\\ 686 Cherry Street\\ Atlanta, GA USA 30332-0160} \email{[email protected]} \subjclass[2000]{Primary } \keywords{Multilinear Paraproducts, multilinear Haar Multipliers, dyadic BMO functions, Commutators.} \begin{abstract} We introduce multilinear analogues of dyadic paraproduct operators and Haar Multipliers, and study boundedness properties of these operators and their commutators. We also characterize dyadic $BMO$ functions via boundedness of certain paraproducts and also via boundedness of the commutators of multilinear Haar Multipliers and paraproduct operators. \end{abstract} \maketitle \setcounter{tocdepth}{1} \tableofcontents \section{Introduction and statement of main results} \noindent Dyadic operators have attracted a lot of attention in the recent years. The proof of so-called $A_2$ theorem (see \cite{Hyt}) consisted in representing a general Calder$\acute{\text{o}}$n-Zygmund operator as an average of dyadic shifts, and then verifying some testing conditions for those simpler dyadic operators. It seems reasonable to believe that, taking a similar approach, general multilinear Calder$\acute{\text{o}}$n-Zygmund operators can be studied by studying multilinear dyadic operators. Regardless of this possibility, multilinear dyadic operators in their own right are an important class of objects in Harmonic Analysis. Statements regarding those operators can be translated into the non-dyadic world, and are sometimes simpler to prove.\\ \noindent In this paper we introduce multilinear analogues of dyadic operators such as paraproducts and Haar multipliers, and study their boundedness properties. Corresponding theory of linear dyadic operators, which we will be using very often, can be found in \cite{Per}. 
In \cite{BMNT}, the authors have studied boundedness properties of bilinear paraproducts defined in terms of so-called ``smooth molecules". The paraproduct operators we study are more general multilinear operators, but defined in terms of indicators and Haar functions of dyadic intervals. In \cite{CRW} Coifman, Rochberg and Weiss proved that the commutator of a $BMO$ function with a singular integral operator is bounded in $L^p$, $1<p<\infty.$ The necessity of $BMO$ condition for the boundedness of the commutator was also established for certain singular integral operators, such as the Hilbert transform. S. Janson \cite{Jan} later studied its analogue for linear martingale transforms. In this paper we study commutators of multilinear dyadic operators, and characterize dyadic $BMO$ functions via boundedness of these commutators. For the corresponding theory for general multilinear Calder$\acute{\text{o}}$n-Zygmund operators we refer to \cite{GT} and \cite{LOPTT}.\\ \noindent We organize the paper as follows:\\ \noindent In section 2, we present an overview of some of the main tools we will be using in this paper. These include: the Haar system, linear Haar multipliers, dyadic maximal/square functions, linear/bilinear paraproduct operators and the space of dyadic $BMO$ functions. For more details we refer to \cite{Per}.\\ \noindent In section 3, we obtain a decomposition of the pointwise product of $m$ functions, $m \geq 2,$ which generalizes the paraproduct decomposition of two functions. On the basis of this decomposition we define multilinear paraproducts and investigate their boundedness properties as operators on products of Lebesgue spaces. 
We also define the multilinear analogue of the linear paraproduct operator $\pi_b$, and characterize dyadic $BMO$ functions via boundedness of certain multilinear paraproduct operators.\\

\noindent In section 4, we define multilinear Haar multipliers in a way consistent with the definition of linear Haar multipliers and multilinear paraproducts, and then investigate their boundedness properties. We also study boundedness properties of their commutators with dyadic $BMO$ functions, and provide a characterization of dyadic $BMO$ functions via the boundedness of those multilinear commutators. In particular, we show that the commutators of the multilinear paraproducts with a function $b$ are bounded if and only if $b$ is a dyadic $BMO$ function. \\

\noindent Our main results involve the following operators:\\
\begin{itemize}
\item $\displaystyle P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = \sum_{I\in\mathcal{D}} \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{\sigma(\vec{\alpha})}, \quad \vec{\alpha} \in \{0,1\}^m \backslash\{(1,1,\ldots,1)\}.
$\\
\item $\displaystyle \pi_b^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) = \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{1+\sigma(\vec{\alpha})},\quad \vec{\alpha} \in \{0,1\}^m.$\\
\item $\displaystyle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) := \sum_{I\in \mathcal{D}} \epsilon_I \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{\sigma(\vec{\alpha})},$ \\
$ \quad \vec{\alpha} \in \{0,1\}^m \backslash \{(1,1,\ldots,1)\}, \, \epsilon = \{\epsilon_I\}_{I\in \mathcal{D}} \text{ bounded}.$\\
\item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) := b(x)T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)(x) - T_\epsilon^{\vec{\alpha}}(f_1, \ldots, bf_i,\ldots,f_m)(x),$\\
$ 1 \leq i \leq m$, $ \vec{\alpha} \in \{0,1\}^m \backslash\{(1,1,\ldots,1)\},\, \epsilon = \{\epsilon_I\}_{I\in \mathcal{D}} \text{ bounded and } b\in BMO^d.$\\
\end{itemize}

\noindent In the above definitions, $\mathcal{D} := \{[m2^{-k}, (m+1)2^{-k}): m,k\in \mathbb{Z}\}$ is the standard dyadic grid on $\mathbb{R}$ and $h_I$'s are the Haar functions defined by $h_I = \displaystyle \frac{1}{\abs{I}^{1/2}}\left(\mathsf{1}_{I_+} - \mathsf{1}_{I_-}\right),$ where $I_-$ and $I_+$ are the left and right halves of $I.$ With $\left< \;,\;\right>$ denoting the standard inner product in $L^2(\mathbb{R}),$ $f_i(I,0) := \left< f_i,h_I\right>$ and $\displaystyle f_i(I,1) := \langle f_i, h_I^2\rangle = \frac{1}{\abs{I}} \int_I f_i,$ the average of $f_i$ over $I.$ The Haar coefficient $\langle f_i, h_I\rangle$ is sometimes denoted by $\widehat{f_i}(I)$ and the average of $f_i$ over $I$ by $\langle f_i \rangle_I$. For $\vec{\alpha} \in \{0,1\}^m,$ $\sigma(\vec{\alpha})$ denotes the number of $0$ components in $\vec{\alpha}$.
For convenience, we will denote the set $\{0,1\}^m \backslash\{(1,1,\ldots,1)\}$ by $U_m.$\\ \noindent In the following main results $L^p$ stands for the Lebesgue space $L^p(\mathbb{R}):= \left\{f:\norm{f}_p < \infty \right\} $ with $\displaystyle\norm{f}_p = \norm{f}_{L^p} := \left(\int_\mathbb{R} \abs{f(x)}^p dx \right)^{1/p}.$ The Weak $L^p$ space, also denoted by $L^{p,\infty}$, is the space of all functions $f$ such that $$ \norm{f}_{L^{p,\infty}(\mathbb{R})}:= \sup_{t>0} t \, \left\vert \{x\in \mathbb{R}: f(x) >t \} \right\vert^{1/p} < \infty.$$ Moreover, $ \displaystyle \norm{b}_{BMO^d}:=\sup_{I\in \mathcal{D}}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \mathbb{R}a_I} \,dx < \infty, $ is the dyadic $BMO$ norm of $b.$\\ \noindent We now state our main results:\\ \noindent \noindent \textbf{Theorem:} Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m$ and $ 1 < p_1, p_2, \ldots, p_m < \infty$ with $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $\displaystyle \left\Vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}.$ \item For $\sigma(\vec{\alpha}) \leq 1,$ $\displaystyle \left\Vert\ensuremath{\partial}i_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $b \in BMO^d.$\\ \item For $\sigma(\vec{\alpha}) > 1,$ $\displaystyle\left\Vert\ensuremath{\partial}i_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq C_b \ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} In each of the above cases, the paraproducts are weakly bounded if $1\leq p_1, p_2, \ldots, p_m < \infty$.\\ \noindent \textbf{Theorem:} Let $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}}$ be a given 
sequence and let $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in U_m.$ Let $1<p_1,p_2, \ldots,p_m<\infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Then $T_\epsilon^{\vec{\alpha}}$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$ to $L^r$ if and only if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\ Moreover, $T_\epsilon^{\vec{\alpha}}$ has the corresponding weak-type boundedness if $1 \leq p_1,p_2, \ldots,p_m<\infty.$\\ \noindent \textbf{Theorem:} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ $1\leq i \leq m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose $b \in L^p$ for some $p \in (1,\infty).$ Then the following two statements are equivalent. \begin{enumerate}[label = $(\alph*)$] \item $b\in BMO^d.$\\ \item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r $ is bounded for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}.$\\ \end{enumerate} \noindent In particular, $b\in BMO^d$ if and only if $[b,P^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded.\\ \noindent \textbf{Acknowledgement:} The author would like to thank Brett Wick for suggesting this research project to him, and for providing valuable suggestions. 
\section{Notation and preliminaries} \subsection{The Haar System and the Haar multipliers:} Let $\mathcal{D}$ denote the standard dyadic grid on $\mathbb{R},$ $$\mathcal{D} = \{[m2^{-k}, (m+1)2^{-k}): m,k\in \mathbb{Z}\}.$$ Associated to each dyadic interval $I$ there is a Haar function $h_I$ defined by $$h_I(x) = \frac{1}{\abs{I}^{1/2}}\left(\mathsf{1}_{I_+} - \mathsf{1}_{I_-}\right),$$ where $I_-$ and $I_+$ are the left and right halves of $I.$\\ \noindent The collection of all Haar functions $\{h_I: I \in \mathcal{D}\}$ is an orthonormal basis of $L^2(\mathbb{R}),$ and an unconditional basis of $L^p$ for $ 1 < p < \infty.$ In fact, if a sequence $\epsilon = \{\epsilon_I\}_{I \in \mathcal{D}}$ is bounded, the operator $T_\epsilon$ defined by $$T_\epsilon f(x) = \sum_{I \in \mathcal{D}} \epsilon_I \langle f, h_I \mathbb{R}a h_I $$ is bounded in $L^p$ for all $1 < p < \infty.$ The converse also holds. The operator $T_\epsilon$ is called the Haar multiplier with symbol $\epsilon.$ \\ \subsection{The dyadic maximal function:} Given a function $f$, the dyadic Hardy-Littlewood maximal function $M^df$ is defined by $$M^df(x):= \sup_{x\in I\in \mathcal{D}} \frac{1}{\abs{I}} \int_I \abs{f(t)}\,dt.$$ \noindent For the convenience of notation, we will just write $M$ to denote the dyadic maximal operator. 
Clearly, $M$ is bounded on $L^\infty.$ It is well-known that $M$ is of weak type $(1,1)$ and strong type $(p,p)$ for all $1<p<\infty.$\\ \subsection{The dyadic square function:} The dyadic Littlewood-Paley square function of a function $f$ is defined by $$S f(x):= \left(\sum_{I \in \mathcal{D}} \frac{\abs{\langle f,h_I \mathbb{R}a}^2}{\abs{I}} \mathsf{1}_I(x) \right)^{1/2}.$$ For $f\in L^p$ with $1<p<\infty,$ we have $\norm{Sf}_p \approx \norm{f}_p$ with equality when $p=2.$\\ \subsection{BMO Space} A locally integrable function $b$ is said to be of bounded mean oscillation if $$\norm{b}_{BMO}:=\sup_{I}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \mathbb{R}a_I} \,dx < \infty, $$ where the supremum is taken over all intervals in $\mathbb{R}.$ The space of all functions of bounded mean oscillation is denoted by $BMO.$\\ \noindent If we take the supremum over all dyadic intervals in $\mathbb{R},$ we get a larger space of dyadic BMO functions which we denote by $BMO^d.$\\ \noindent For $0<r<\infty,$ define $$ BMO_r = \left\{b \in L_{loc}^r(\mathbb{R}): \norm{b}_{BMO_r} < \infty \right\},$$ where, $\displaystyle \norm{b}_{BMO_r} := \left(\sup_{I}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \mathbb{R}a_I}^r \,dx \right)^{1/r}.$\\ \noindent For any $0<r<\infty,$ the norms $\norm{b}_{BMO_r}$ and $\norm{b}_{BMO}$ are equivalent. The equivalence of norms for $r > 1$ is well-known and follows from John-Nirenberg's lemma (see \cite{JN}), while the equivalence for $0<r<1$ has been proved by Hanks in \cite{HR}. 
(See also \cite{SE}, page 179.)\\ \noindent For $r=2$, it follows from the orthogonality of Haar system that $$ \norm{b}_{BMO_2^d} = \left(\sup_{I \in \mathcal{D}} \frac{1}{\abs{I}} \sum_{J \subseteq I} \abs{\widehat{b}(J)}^2\right)^{1/2}.$$ \subsection{The linear/ bilinear paraproducts:} Given two functions $f_1$ and $f_2$, the point-wise product $f_1f_2$ can be decomposed into the sum of bilinear paraproducts: $$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2),$$ where for $\vec{\alpha} = (\alpha_1, \alpha_2) \in \{0,1\}^2$, $$ P^{\vec{\alpha}}(f_1,f_2) = \displaystyle \sum_{I \in \mathcal{D}} f_1(I,\alpha_1) f_2(I, \alpha_2) h_I^{\sigma(\vec{\alpha})}$$ with $ f_i(I,0) = \langle f_i, h_I \mathbb{R}a, \;\; f_i(I,1) = \langle f_i \mathbb{R}a_I,\; \sigma(\vec{\alpha}) = \# \{i: \alpha_i = 0\}, \text{ and } h_I^{\sigma(\vec{\alpha})}$ being the pointwise product $h_I h_I \ldots h_I$ of $ \sigma(\vec{\alpha})$ factors. \\ The paraproduct $P^{(0,1)}(f_1,f_2)$ is also denoted by $\ensuremath{\partial}i_{f_1}(f_2),$ i.e.,\\ $$ \ensuremath{\partial}i_{f_1}(f_2) = \sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle f_2 \mathbb{R}a_I h_I.$$ Observe that $$\langle \ensuremath{\partial}i_{f_1}(f_2), g \mathbb{R}a = \left\langle \sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle f_2 \mathbb{R}a_I h_I, g \right\mathbb{R}a = \sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle f_2 \mathbb{R}a_I \langle g, h_I \mathbb{R}a $$ which is equal to \begin{eqnarray*} \left\langle f_2, P^{(0,0)}(f_1,g) \right\mathbb{R}a & = & \left\langle f_2, \;\sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle g, h_I \mathbb{R}a h_I^2 \right\mathbb{R}a \\ & = & \sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle g,h_I \mathbb{R}a \langle f_2, h_I^2 \mathbb{R}a \\ & = & \sum_{I \in \mathcal{D}} \langle f_1, h_I \mathbb{R}a \langle f_2 \mathbb{R}a_I \langle g, h_I \mathbb{R}a . 
\end{eqnarray*} \noindent This shows that $\ensuremath{\partial}i_{f_1}^* = P^{(0,0)}(f_1, \cdot) = P^{(0,0)}(\cdot,f_1)$.\\ \noindent The ordinary multiplication operator $M_b: f \rightarrow bf$ can therefore be given by: $$M_b(f) = bf = P^{(0,0)}(b,f) + P^{(0,1)}(b,f) + P^{(1,0)}(b,f) = \ensuremath{\partial}i_b^*(f) + \ensuremath{\partial}i_b(f) + \ensuremath{\partial}i_f(b).$$ \noindent The function $b$ is required to be in $L^\infty$ for the boundedness of $M_b$ in $L^p$. However, the paraproduct operator $\ensuremath{\partial}i_b$ is bounded in $L^p$ for every $1 < p < \infty$ if $b \in BMO^d.$ Note that $BMO^d$ properly contains $L^\infty$. Detailed information on the operator $\ensuremath{\partial}i_b$ can be found in \cite{Per} or \cite{Bla}. \subsection{Commutators of Haar multipliers:} The commutator of $T_\epsilon$ with a locally integrable function $b$ is defined by $$ [b, T_\epsilon](f)(x) := T_\epsilon(bf)(x) - M_b (T_\epsilon(f))(x).$$ \noindent It is well-known that for a bounded sequence $\epsilon$ and $1<p<\infty$, the commutator $[b, T_\epsilon]$ is bounded in $L^p$ for all $p\in (1, \infty)$ if $b \in BMO^d.$\\ These commutators have been studied in \cite{Treil} in non-homogeneous martingale settings.\\ \section{Multilinear dyadic paraproducts} \subsection{Decomposition of pointwise product $\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j$} \noindent In this sub-section we obtain a decomposition of pointwise product $\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j$ of $m$ functions that is analogous to the following paraproduct decomposition : $$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2).$$ The decomposition of $\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j$ will be the basis for defining \textit{multi-linear paraproducts} and \textit{m-linear Haar multipliers}, and will also be very useful in proving boundedness properties of multilinear commutators.\\ \noindent We first introduce the following notation:\\ 
\begin{itemize} \item $f(I,0) := \widehat{f}(I) = \langle f,h_I \mathbb{R}a = \displaystyle \int_\mathbb{R} f(x) h_I(x) dx. $ \item $f(I,1) := \langle f \mathbb{R}a_I = \frac{1}{\abs{I}} \displaystyle \int_I f(x) dx. $\\ \item $U_m:= \left\{(\alpha_1, \alpha_2, \ldots,\alpha_m) \in \{0,1\}^m: (\alpha_1, \alpha_2,\ldots,\alpha_m) \neq (1,1, \ldots,1)\right\}.$\\ \item $\sigma(\vec{\alpha}) = \# \{i: \alpha_i =0\}$ for $\vec{\alpha} = (\alpha_1, \ldots,\alpha_m) \in \{0,1\}^m.$\\ \item $ (\vec{\alpha}, i) = (\alpha_1, \ldots,\alpha_m, i),\; (i,\vec{\alpha}) = (i,\alpha_1, \ldots,\alpha_m)$ for $\vec{\alpha} = (\alpha_1, \ldots,\alpha_m) \in \{0,1\}^m.$\\ \item $P_I^{\vec{\alpha}} (f_1, \ldots,f_m) = \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}$ for $\vec{\alpha} \in U_m$ and $I \in \mathcal{D}.$\\ \item $P^{\vec{\alpha}}(f_1, \ldots,f_m) = \displaystyle\sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}} (f_1, \ldots,f_m) = \displaystyle\sum_{I\in\mathcal{D}}\ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}$ for $\vec{\alpha} \in U_m.$ \end{itemize} \noindent With this notation, the paraproduct decomposition of $f_1f_2$ takes the following form: $$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2) = \sum_{\vec{\alpha} \in U_2} P^{\vec{\alpha}}(f_1,f_2).\\ $$ Note that \begin{equation} \label{IndexSetUm} U_m = \{(\alpha,1): \vec{\alpha} \in U_{m-1}\} \cup \{(\vec{\alpha},0): \vec{\alpha} \in U_{m-1}\} \cup \{(1,\ldots,1,0)\}. 
\end{equation} \noindent To obtain an analogous decomposition of $\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j,$ we need the following crucial lemma: \begin{lm} Given $m\geq 2$ and functions $f_1,f_2, \ldots, f_m,$ with $f_i \in L^{p_i}, 1<p_i<\infty,$we have $$\ensuremath{\partial}rod_{j=1}^{m} \langle f_j \mathbb{R}a_J \mathsf{1}_J = \sum_{\vec{\alpha} \in U_m} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_m) \; \mathsf{1}_J, $$ for all $J\in\mathcal{D}.$ \end{lm} \noindent \begin{proof} We prove the lemma by induction on $m.$\\ \noindent First assume that $m=2.$ We want to prove the following: \begin{eqnarray} \label{AverageProduct} \langle f_1 \mathbb{R}a_J \langle f_2\mathbb{R}a_J\mathsf{1}_J&=& \sum_{\vec{\alpha} \in U_2} \sum_{J\subsetneq I}P^{\vec{\alpha}}_I(f_1,f_2) \; \mathsf{1}_J\\ &=& \nonumber\left(\sum_{J\subsetneq I}P^{(0,1)}_I(f_1,f_2)+\sum_{J\subsetneq I}P^{(1,0)}_I(f_1,f_2)+\sum_{J\subsetneq I}P^{(0,0)}_I(f_1,f_2)\right) \; \mathsf{1}_J\\ &=& \nonumber \left(\sum_{J\subsetneq I}\widehat{f_1}(I)\langle f_2 \mathbb{R}a_I h_I+\sum_{J\subsetneq I}\langle f_1 \mathbb{R}a_I \widehat{f_2}(I)h_I+\sum_{J\subsetneq I}\widehat{f_1}(I)\widehat{f_2}(I) h_I^2\right) \; \mathsf{1}_J. 
\end{eqnarray} \noindent We have, \begin{eqnarray*} &&\langle f_1 \mathbb{R}a_J \left< f_2 \right>_J \mathsf{1}_J\\ &=& \left( \sum_{J\subsetneq I}\widehat{f_1}(I) h_I\right) \left( \sum_{J\subsetneq K}\widehat{f_2}(K) h_K\right)\mathsf{1}_J\\ &=& \sum_{J\subsetneq I}\widehat{f_1}(I) h_I \left( \sum_{I\subsetneq K}\widehat{f_2}(K) h_K + \widehat{f_2}(I) h_I+\sum_{J\subsetneq K \subsetneq I}\widehat{f_2}(K) h_K\right)\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq I}\widehat{f_1}(I) h_I \left(\sum_{J\subsetneq K \subsetneq I}\widehat{f_2}(K) h_K\right)\right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq K}\widehat{f_2}(K) h_K \left(\sum_{K \subsetneq I}\widehat{f_1}(I) h_I\right)\right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq K}\widehat{f_2}(K) \left<f_1\right>_K h_K \right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq I}\widehat{f_2}(I)\left<f_1\right>_Ih_I \right\}\mathsf{1}_J\\ &=& \left(\sum_{J\subsetneq I}\widehat{f_1}(I)\langle f_2 \mathbb{R}a_I h_I+\sum_{J\subsetneq I}\langle f_1 \mathbb{R}a_I \widehat{f_2}(I)h_I+\sum_{J\subsetneq I}\widehat{f_1}(I)\widehat{f_2}(I) h_I^2\right) \; \mathsf{1}_J.\\ \end{eqnarray*} \noindent Now assume $m > 2$ and that $$\ensuremath{\partial}rod_{j=1}^{m-1} \langle f_j \mathbb{R}a_J \mathsf{1}_J = \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \mathsf{1}_J. 
$$ \noindent Then, \begin{eqnarray*} &&\ensuremath{\partial}rod_{j=1}^{m} \langle f_j \mathbb{R}a_J \mathsf{1}_J\\ &=& \left(\ensuremath{\partial}rod_{j=1}^{m-1} \langle f_j \mathbb{R}a_J \mathsf{1}_J \right) \langle f_m\mathbb{R}a_J\mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \left( \sum_{J\subsetneq K}\widehat{f_m}(K) h_K\right) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\left( \sum_{I\subsetneq K}\widehat{f_m}(K) h_K + \widehat{f_m}(I) h_I+\sum_{J\subsetneq K \subsetneq I}\widehat{f_m}(K) h_K\right)\mathsf{1}_J\\ \end{eqnarray*} \noindent This gives \begin{eqnarray*} &&\ensuremath{\partial}rod_{j=1}^{m} \langle f_j \mathbb{R}a_J \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\langle f_m \mathbb{R}a_I \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \widehat{f_m}(I) h_I \mathsf{1}_J\\ && \quad +\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\left( \sum_{J\subsetneq K \subsetneq I}\widehat{f_m}(K) h_K\right)\mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\ && \quad+ \sum_{J\subsetneq K}\widehat{f_2}(K) h_K \left(\sum_{\vec{\alpha} \in U_{m-1}}\sum_{K\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\right) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\ && \quad+ \sum_{J\subsetneq K}\widehat{f_m}(K) h_K \langle f_1\mathbb{R}a_K \ldots\langle 
f_{m-1}\mathbb{R}a_K \mathsf{1}_J\\ &=&\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\ && \quad + \sum_{J\subsetneq I} P_I^{(1,\ldots,1,0)}(f_1,f_2,\ldots,f_m) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_m} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_m) \mathsf{1}_J. \end{eqnarray*} The last equality follows from (\operatorname{Re}f{IndexSetUm}). \end{proof} \begin{lm} Given $m\geq 2$ and functions $f_1,f_2, \ldots, f_m,$ with $f_i \in L^{p_i}, 1<p_i<\infty,$we have $$\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j = \sum_{\vec{\alpha} \in U_m} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_m). $$ \end{lm} \noindent \begin{proof} We have already seen that it is true for $m=2.$ By induction, assume that \begin{eqnarray*} \displaystyle\ensuremath{\partial}rod_{j=1}^{m-1} f_j &=& \sum_{\vec{\alpha} \in U_{m-1}} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\\ &= & \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \end{eqnarray*} \noindent Then, \begin{eqnarray*} \displaystyle\ensuremath{\partial}rod_{j=1}^m f_j &=& \left(\displaystyle\ensuremath{\partial}rod_{j=1}^{m-1} f_j \right) f_m\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \left(\sum_{J\in\mathcal{D}}\widehat{f_m}(J) h_J \right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \left( \sum_{I\subsetneq J}\widehat{f_m}(J) h_J + \widehat{f_m}(I) h_I+\sum_{J\subsetneq I }\widehat{f_m}(J) h_J\right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \langle f_m \mathbb{R}a_I + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \widehat{f_m}(I) h_I\\ && \quad + 
\sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\left(\sum_{J\subsetneq I }\widehat{f_m}(J) h_J \right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + \sum_J\widehat{f_m}(J) h_J \left(\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + \sum_J\widehat{f_m}(J) h_J \langle f_1 \mathbb{R}a_J \ldots\langle f_{m-1} \mathbb{R}a_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + P^{(1,\ldots,1,0)}(f_1,f_2, \ldots, f_m)\\ &=& \sum_{\vec{\alpha} \in U_m} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_m). \end{eqnarray*} Here the last equality follows from $(\operatorname{Re}f{IndexSetUm})$. 
\end{proof} \subsection{Multilinear dyadic paraproducts} \noindent On the basis of the decomposition of pointwise product $\ensuremath{\partial}rod_{j=1}^m f_j$ we now define multi-linear dyadic paraproduct operators, and study their boundedness properties.\\ \begin{dfn} For $m \geq 2$ and $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in \{0,1\}^m$, we define \textit{multi-linear dyadic paraproduct operators} by $$ P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = \sum_{I\in\mathcal{D}} \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} $$ where $f_i(I,0) = \langle f_i, h_I \operatorname{Ran}gle$, $f_i(I,1) = \langle f_i \operatorname{Ran}gle_I$ and $\sigma(\vec{\alpha}) = \#\{i: \alpha_i = 0\}.$\\ \end{dfn} \noindent Observe that if $\vec{\beta} = (\beta_1, \beta_2, \ldots,\beta_m)$ is some permutation of $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m)$ and $(g_1, g_2, \ldots, g_m)$ is the corresponding permutation of $(f_1, f_2, \ldots, f_m)$, then $$P^{\vec{\alpha}} (f_1, f_2, \ldots, f_m) = P^{\vec{\beta}} (g_1, g_2, \ldots, g_m).$$ \noindent Also note that $P^{(1,0)}$ and $P^{(0,1)}$ are the standard bilinear paraproduct operators: $$ P^{(0,1)}(f_1,f_2) = \sum_{I\in\mathcal{D}} \langle f_1, h_I \operatorname{Ran}gle \langle f_2 \operatorname{Ran}gle_I h_I = P(f_1,f_2)$$ $$ P^{(1,0)}(f_1,f_2) = \sum_{I\in\mathcal{D}} \langle f_1 \operatorname{Ran}gle_I \langle f_2, h_I \operatorname{Ran}gle h_I = P(f_1,f_2).$$ \noindent In terms of paraproducts, the decomposition of point-wise product $\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j$ we obtained in the previous section takes the form $$\displaystyle\ensuremath{\partial}rod_{j=1}^m f_j = \displaystyle \sum_{\substack {\vec{\alpha} \in \{0,1\}^m\\ \vec{\alpha} \neq (1,1,\ldots,1)}} P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m).$$ \noindent \begin{dfn} For a given function $b$ and $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in \{0,1\}^m$, we define the paraproduct operators 
$\pi_b^{\vec{\alpha}}$ by $$\pi_b^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) = P^{(0,\vec{\alpha})}(b,f_1, f_2, \ldots, f_m) = \sum_{I \in \mathcal{D}} \langle b , h_I \rangle \prod_{j=1}^m f_j(I, \alpha_j) \; h_I^{1+\sigma(\vec{\alpha})}$$ where $(0,\vec{\alpha}) = (0,\alpha_1,\ldots, \alpha_m) \in \{0,1\}^{m+1}.$\\ \end{dfn} \noindent Note that $$\pi_b^1(f) = P^{(0,1)}(b,f) = \sum_{I \in \mathcal{D}} b(I,0) f(I,1) h_I = \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle f \rangle_I h_I = \pi_b(f).$$ \noindent The rest of this section is devoted to the boundedness properties of these multilinear paraproduct operators $P^{\vec{\alpha}}$ and $\pi_b^{\vec{\alpha}}.$\\ \noindent \begin{lm}\label{MPPTh1} Let $1 <p_1,p_2,\ldots,p_m, r < \infty$ and \,$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$. Then for $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in U_m$, the operators $P^{\vec{\alpha}}$ map $L^{p_1} \times \cdots\times L^{p_m} \rightarrow L^{r}$ with estimates of the form: $$\norm{P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)}_r \lesssim \prod_{j=1}^m\norm{f_j}_{p_j}$$ \end{lm} \noindent \begin{proof} First we observe that, if $x\in I \in \mathcal{D},$ then $$\abs{\langle f \rangle_I} \leq \langle \abs{f}\rangle_I \leq Mf(x)$$ and that \begin{eqnarray*} \frac{\left\vert \langle f , h_I \rangle \right\vert}{\sqrt{\abs{I}}} & = & \frac{1}{\sqrt{\abs{I}}} \left\vert \int_\mathbb{R} f h_I \right\vert\\ & = & \frac{1}{\abs{I}} \left\vert \int_\mathbb{R} f \mathsf{1}_{I_+} - \int_\mathbb{R} f \mathsf{1}_{I_-} \right\vert\\ & \leq & \frac{1}{\abs{I}} \left(\int_{I_+} \abs{f} + \int_{I_-} \abs{f} \right)\\ & = & \frac{1}{\abs{I}} \int_{I} \abs{f} \\ & \leq & Mf(x). 
\end{eqnarray*} \noindent \textbf{Case I:} $\sigma(\vec{\alpha}) = 1.$\\ Let $\alpha_{j_0} = 0.$ Then \begin{eqnarray*} \displaystyle P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) &=& \sum_{I\in\mathcal{D}} \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I\in\mathcal{D}} \left(\prod_{\substack{j = 1\\j\neq j_0}}^m \langle f_j \rangle_I\right) \langle f_{j_0}, h_I\rangle h_I. \end{eqnarray*} \noindent Using square function estimates, we obtain \begin{eqnarray*} \left\Vert P^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) \right\Vert_r &\lesssim& \left\Vert\left(\sum_{I\in \mathcal{D}} \prod_{\substack{j = 1\\j\neq j_0}}^m \left\vert\langle f_j\rangle_I \right\vert^2 \abs{\langle f_{j_0}, h_I \rangle}^2 \frac{\mathsf{1}_I}{\abs{I}}\right)^{1/2}\right\Vert_r\\ &\leq& \left\Vert\left(\prod_{\substack{j = 1\\j\neq j_0}}^m Mf_j \right) \left(\sum_{I\in \mathcal{D}} \abs{\langle f_{j_0}, h_I \rangle}^2 \frac{\mathsf{1}_I}{\abs{I}}\right)^{1/2}\right\Vert_r\\ &= & \left\Vert\left(\prod_{\substack{j = 1 \\j\neq j_0}}^m Mf_j \right) (Sf_{j_0}) \right\Vert_r\\ &\leq& \prod_{\substack{j = 1\\j\neq j_0}}^m \norm{Mf_j}_{p_j} \norm{Sf_{j_0}}_{p_{j_0}}\\ &\lesssim& \prod_{j=1}^m \norm{f_j}_{p_j}, \end{eqnarray*} where we have used H\"{o}lder's inequality, and the boundedness of maximal and square function operators to obtain the last two inequalities.\\ \noindent \textbf{Case II:} $\sigma(\vec{\alpha}) > 1.$\\ Choose $j'$ and $j''$ such that $\alpha_{j'} = \alpha_{j''} = 0.$ Then \begin{eqnarray*} \left\vert P^{(0,0,\ldots,0)}(f_1,f_2,\ldots,f_m)(x) \right\vert & = & \left\vert \sum_{I\in \mathcal{D}}\left(\prod_{j:\alpha_j = 1} \langle f_j \rangle_I\right) \left( \prod_{\substack{j:\alpha_j = 0 \\j \neq j', \,j''}} \frac{\langle f_j, h_I\rangle}{\sqrt{\abs{I}}} \right) \langle 
f_{j'}, h_I\mathbb{R}a \langle f_{j''}, h_I\mathbb{R}a \frac{\mathsf{1}_I(x)}{\abs{I}} \right\vert \\ & \leq & \left(\ensuremath{\partial}rod_{ j: j \neq j',\,j''} Mf_j(x) \right) \left( \sum_{I\in \mathcal{D}} \abs{\langle f_{j'}, h_I\mathbb{R}a} \abs{\langle f_{j''}, h_I\mathbb{R}a} \frac{\mathsf{1}_I(x)}{\abs{I}} \right). \end{eqnarray*} By Cauchy-Schwarz inequality \begin{eqnarray} \nonumber && \sum_{I\in\mathcal{D}} \left\vert\langle f_{j'},h_I\mathbb{R}a \right\vert \, \left\vert\langle f_{j''},h_I \mathbb{R}a\right\vert \frac{\mathsf{1}_I(x)}{\abs{I}}\\ \label{eq:sf}&\leq& \left(\sum_{I\in\mathcal{D}}\abs{\langle f_{j'},h_I\mathbb{R}a }^2 \frac{\mathsf{1}_I(x)}{\abs{I}}\right)^{\frac{1}{2}} \left(\sum_{I\in\mathcal{D}}\abs{\langle f_{j''},h_I\mathbb{R}a }^2 \frac{\mathsf{1}_I(x)}{\abs{I}}\right)^{\frac{1}{2}}\\ \nonumber &=& Sf_{j'}(x)\, Sf_{j''}(x). \end{eqnarray} Therefore, \begin{eqnarray*} \left\vert P^{(0,0,\ldots,0)}(f_1,f_2,\ldots,f_m)(x) \right\vert & \leq & \left(\ensuremath{\partial}rod_{ j: j \neq j',\,j''} Mf_j(x) \right) Sf_{j'}(x)\, Sf_{j''}(x). \end{eqnarray*} \noindent Now using generalized H$\ddot{\text{o}}$lder's inequality and the boundedness properties of the maximal and square functions, we get \begin{eqnarray*} \left\Vert P^{(0,0,\ldots,0)}(f_1,f_2,\ldots,f_m) \right\Vert_r &\leq& \left(\ensuremath{\partial}rod_{ j: j \neq j',\,j''} \norm{Mf_j}_{p_j}\right) \norm{Sf_{j'}}_{p_{j'}}\, \norm{Sf_{j''}}_{p_{j''}}\\ & \lesssim &\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}. 
\end{eqnarray*} \end{proof} \noindent \begin{lm} \label{MPPTh2} Let $\vec{\alpha} = (\alpha_1, \ldots, \alpha_m) \in \{0,1\}^m$ and $1 <p_1, \ldots,p_m,r < \infty$ with $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ \begin{enumerate}[label = $(\alph*)$] \item For $\sigma(\vec{\alpha}) \leq 1,$ $\ensuremath{\partial}i_b^{\vec{\alpha}}$ is a bounded operator from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r}$ if and only if \qquad $b \in BMO^d.$\\ \item For $\sigma(\vec{\alpha}) > 1,$ $\ensuremath{\partial}i_b^{\vec{\alpha}}$ is a bounded operator from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r}$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} \end{lm} \begin{proof} $(a)$ We prove this part first for $\sigma(\vec{\alpha}) = 0,$ that is, for $\alpha_1 = \cdots = \alpha_m = 1.$\\ \noindent Assume that $b \in BMO^d.$ Then for $(f_1, \ldots,f_m) \in L^{p_1} \times \cdots \times L^{p_m},$ we have \begin{eqnarray*} \ensuremath{\partial}i_b^{\vec{\alpha}} (f_1,\ldots,f_m) &=& P^{(0,\vec{\alpha})}(b,f_1,\ldots,f_m) \\ &=& \sum_{I\in\mathcal{D}} \langle b, h_I \mathbb{R}a \ensuremath{\partial}rod_{j=1}^m \langle f_j \mathbb{R}a_I h_I\\ &=& \sum_{I\in\mathcal{D}} \langle \ensuremath{\partial}i_b(f_1), h_I \mathbb{R}a \ensuremath{\partial}rod_{j=2}^m \langle f_j \mathbb{R}a_I h_I\\ &=& P^{(0, \alpha_2, \ldots,\alpha_m)} \left(\ensuremath{\partial}i_b(f_1), f_2, \ldots, f_m \right). 
\end{eqnarray*} Since $b \in BMO^d$ and $f_1 \in L^{p_1}$ with $p_1 > 1,$ we have $\norm {\pi_b(f_1)}_{p_1} \lesssim \norm{b}_{BMO^d} \norm{f_1}_{p_1}.$ So, \begin{eqnarray*} \norm{\pi_b^{\vec{\alpha}} (f_1,\ldots,f_m)}_r &=& \norm{P^{(0,\alpha_2, \ldots, \alpha_m)}\left(\pi_b(f_1), f_2, \ldots, f_m \right)}_r \\ &\lesssim & \norm {\pi_b(f_1)}_{p_1} \prod_{j=2}^m\norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}, \end{eqnarray*} where the first inequality follows from Lemma \ref{MPPTh1}.\\ \noindent Conversely, assume that $\pi_b^{(1,\ldots,1)}: L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^{r}$ is bounded. Then for $f_i = \abs{J}^{-\frac{1}{p_i}}\mathsf{1}_J(x)$ with $J \in \mathcal{D},$ $$\left\Vert \pi_b^{(1,1,\ldots,1)}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq \left \Vert \pi_b^{(1,1,\ldots,1)} \right \Vert_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r}, $$ since $\norm{f_i}_{p_i} = 1$ for all $ 1 \leq i \leq m.$ For such $f_i,$ \begin{eqnarray*} \left\Vert \pi_b^{(1,1,\ldots,1)}(f_1,f_2,\ldots,f_m)\right\Vert_r &=& \left\Vert \abs{J}^{-\left(\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_m}\right)}\;\pi_b^{(1,1,\ldots,1)}(\mathsf{1}_J,\mathsf{1}_J,\ldots,\mathsf{1}_J)\right\Vert_r \\ &=& \abs{J}^{-\frac{1}{r}}\left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\rangle_I^m h_I \right\Vert_r. 
\end{eqnarray*} \noindent Taking $\epsilon_I = 1$ if $I\subseteq J$ and $\epsilon_I = 0$ otherwise, we observe that \begin{eqnarray*} \left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) h_I \right\Vert_r &=& \left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\mathbb{R}a_I^m h_I \right\Vert_r \\ &=& \left\Vert \sum_{I\in\mathcal{D}}\epsilon_I\widehat{b}(I) \langle \mathsf{1}_J\mathbb{R}a_I^m h_I \right\Vert_r\\ &\lesssim& \left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\mathbb{R}a_I^m h_I \right\Vert_r, \end{eqnarray*} where the last inequality follows from the boundedness of Haar multiplier $T_\epsilon$ on $L^r.$ Thus, we have \begin{eqnarray*} \sup_{J\in\mathcal{D}}\abs{J}^{-1/r}\left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) h_I \right\Vert_r &\lesssim & \sup_{J\in\mathcal{D}}\abs{J}^{-1/r} \left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\mathbb{R}a_I^m h_I \right\Vert_r\\ &\lesssim& \left \Vert \ensuremath{\partial}i_b^{(1,1,\ldots,1)} \right \Vert_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r}, \end{eqnarray*} proving that $b\in BMO^d.$\\ \noindent Now the proof for $\sigma(\vec{\alpha}) = 1$ follows from the simple observation that $\ensuremath{\partial}i_b^{\vec{\alpha}}$ is a transpose of $\ensuremath{\partial}i_b^{(1,\ldots,1)}$. 
For example, if $\sigma(\vec{\alpha}) = 1$ with $\alpha_1 = 0$ and $\alpha_2 = \cdots = \alpha_m =1$ and if $r'$ is the conjugate exponent of $r,$ then for $g \in L^{r'}$ \begin{eqnarray*} \left\langle \ensuremath{\partial}i_b^{\vec{\alpha}}(f_1,\ldots,f_m), g \right\mathbb{R}a &=& \left\langle \sum_{I \in \mathcal{D}} \langle b, h_I \mathbb{R}a \langle f_1, h_I \mathbb{R}a \ensuremath{\partial}rod_{j=2}^m \langle f_j \mathbb{R}a_I h_I^2, g \right\mathbb{R}a\\ &=& \sum_{I \in \mathcal{D}} \langle b, h_I \mathbb{R}a \langle f_1, h_I \mathbb{R}a \ensuremath{\partial}rod_{j=2}^m \langle f_j \mathbb{R}a_I \langle g, h_I^2\mathbb{R}a\\ &=& \sum_{I \in \mathcal{D}} \langle b, h_I \mathbb{R}a \langle f_1, h_I \mathbb{R}a \ensuremath{\partial}rod_{j=1}^m \langle f_j \mathbb{R}a_I \langle g\mathbb{R}a_I\\ &=& \left\langle \sum_{I \in \mathcal{D}} \langle b, h_I \mathbb{R}a \langle g \mathbb{R}a_I \ensuremath{\partial}rod_{j=1}^m \langle f_j \mathbb{R}a_I h_I, f_1 \right\mathbb{R}a\\ &=& \left\langle \ensuremath{\partial}i_b^{(1, \ldots, 1)}(g,f_2,\ldots,f_m), f_1 \right\mathbb{R}a. 
\end{eqnarray*} \noindent $(b)$ Assume that $ \norm {b}_*\equiv \displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}} < \infty.$ For $m =2$ we have \begin{eqnarray*} \displaystyle \int_\mathbb{R} \left\vert \ensuremath{\partial}i_b^{(0,0)}(f_1,f_2) \right\vert^r dx &=& \displaystyle\int_\mathbb{R} \left\vert \sum_{I\in\mathcal{D}}\langle b,h_I\mathbb{R}a \langle f_1,h_I\mathbb{R}a \langle f_2,h_I \mathbb{R}a h_I^3(x) \right\vert^r dx \\ &\leq & \int_\mathbb{R} \left( \sum_{I\in\mathcal{D}}\abs{\langle b,h_I\mathbb{R}a}\, \abs{\langle f_1,h_I\mathbb{R}a }\, \abs{\langle f_2,h_I \mathbb{R}a} \frac{\mathsf{1}_I(x)}{\abs{I}^{3/2}} \right)^r dx \\ &\leq & \int_\mathbb{R} \left( \sup_{I\in\mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}}\sum_{I\in\mathcal{D}} \abs{\langle f_1,h_I\mathbb{R}a }\, \abs{\langle f_2,h_I \mathbb{R}a} \frac{\mathsf{1}_I(x)}{\abs{I}} \right)^r dx\\ &=& \norm{b}_*^r \int_\mathbb{R} \left(\sum_{I\in\mathcal{D}} \abs{\langle f_1,h_I\mathbb{R}a }\, \abs{\langle f_2,h_I \mathbb{R}a} \frac{\mathsf{1}_I(x)}{\abs{I}} \right)^r dx. \end{eqnarray*} Using \eqref{eq:sf} and H$\ddot{\text{o}}$lder's inequality we obtain \begin{eqnarray*} \displaystyle \int_\mathbb{R} \left\vert \ensuremath{\partial}i_b^{(0,0)}(f_1,f_2) \right\vert^r dx &\leq& \norm{b}_*^r \int_\mathbb{R} (Sf_1)^r(x)\,(Sf_2)^r(x)\,dx\\ &\leq& \norm{b}_*^r \left(\int_\mathbb{R} \left\{(Sf_1)^r(x)\right\}^{p_1/r}\,dx\right)^{r/p_1}\left(\int_\mathbb{R} \left\{(Sf_2)^r(x)\right\}^{p_2/r}\,dx\right)^{r/p_2}\\ &\leq& \norm{b}_*^r \norm{Sf_1}_{p_1}^r \norm{Sf_2}_{p_2}^r\\ &\lesssim & \norm{b}_*^r \norm{f_1}_{p_1}^r \norm{f_2}_{p_2}^r. 
\end{eqnarray*} \noindent Thus we have, $$ \norm{\ensuremath{\partial}i_b^{(0,0)}(f_1,f_2)}_r \lesssim \norm{b}_* \norm{f_1}_{p_1} \norm{f_2}_{p_2}.$$ \noindent Observe that $$\ensuremath{\partial}i_b^{(0,0)}(f_1,f_2)(I,0) = \langle \ensuremath{\partial}i_b^{(0,0)}(f_1,f_2), h_I \mathbb{R}a = \frac{1}{\abs{I}}\langle b,h_I\mathbb{R}a \langle f_1,h_I\mathbb{R}a \langle f_2,h_I \mathbb{R}a.$$ \noindent Now consider $m > 2$ and let $\sigma(\vec{\alpha})>1$. Without loss of generality we may assume that $\alpha_1 = \alpha_2 = 0.$ Then\\ \begin{eqnarray*} \norm{\ensuremath{\partial}i_b^{\vec{\alpha}} (f_1,f_2, \ldots,f_m)}_r & = & \left\Vert \sum_{I\in\mathcal{D}} \langle b,h_I\mathbb{R}a \langle f_1,h_I\mathbb{R}a \langle f_2,h_I\mathbb{R}a\ensuremath{\partial}rod_{j=3}^m f_j(I, \alpha_j) h_I^{1+\sigma(\vec{\alpha})}\right\Vert_r\\ & = & \left\Vert \sum_{I\in\mathcal{D}} \frac{1}{\abs{I}}\langle b,h_I\mathbb{R}a \langle f_1,h_I\mathbb{R}a \langle f_2,h_I\mathbb{R}a \ensuremath{\partial}rod_{j=3}^m f_j(I, \alpha_j) h_I^{\sigma(\vec{\alpha})-1}\right\Vert_r\\ &=& \left\Vert \sum_{I\in\mathcal{D}} \langle \ensuremath{\partial}i_b^{(0,0)}(f_1,f_2), h_I \mathbb{R}a \ensuremath{\partial}rod_{j=3}^m f_j(I, \alpha_j) h_I^{\sigma(\vec{\alpha})-1}\right\Vert_r\\ &=& \left\Vert P^{\vec{\beta}}(\ensuremath{\partial}i_b^{(0,0)}(f_1,f_2),f_3,\ldots,f_m) \right\Vert_r\\ &\lesssim& \norm{\ensuremath{\partial}i_b^{(0,0)}(f_1,f_2)}_{q} \ensuremath{\partial}rod_{j=3}^m \norm{f_j}_{p_j}\\ &\lesssim& \norm{b}_* \ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}\\ \end{eqnarray*} where $\vec{\beta} =(0,\alpha_3,\ldots,\alpha_m) \in \{0,1\}^{m-1}$ and $\ensuremath{\partial}i_b^{(0,0)}(f_1,f_2) \in L^q$ with $\frac{1}{p_1}+\frac{1}{p_2}=\frac{1}{q}, q>r>1.$\\ \noindent Conversely, assume that $\ensuremath{\partial}i_b^{\vec{\alpha}}: L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^{r}$ is bounded and that $\sigma(\vec{\alpha}) > 1.$ Choose any $J \in \mathcal{D}$, and take $f_j = 
\abs{J}^{\frac{1}{2} - \frac{1}{p_j}} h_J$ if $\alpha_j = 0,$ and $f_j = \abs{J}^{-\frac{1}{p_j} } \mathsf{1}_J$ if $\alpha_j = 1$ so that $\norm{f_j}_{p_j} = 1.$ Then $$ \left \Vert \ensuremath{\partial}i_b^{\vec{\alpha}} (f_1, \ldots, f_m) \right \Vert_r \leq \left \Vert \ensuremath{\partial}i_b^{\vec{\alpha}} \right \Vert _{L^{p_1} \times \cdots \times L^{p_m}}. $$ We also have \begin{eqnarray*} \left \Vert \ensuremath{\partial}i_b^{\vec{\alpha}} (f_1, \ldots, f_m) \right \Vert_r &=& \left \Vert \abs{J}^{\frac{\sigma{(\vec{\alpha})}}{2} - \sum_{j=1}^m \frac{1}{p_j}} \langle b, h_J \mathbb{R}a h_J^{1+\sigma(\vec{\alpha})} \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma{(\vec{\alpha})}}{2} - \frac{1}{r}} \abs{\langle b, h_J \mathbb{R}a} \left \Vert h_J^{1+\sigma(\vec{\alpha})} \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma(\vec{\alpha})}{2} - \frac{1}{r}} \abs{\langle b, h_J \mathbb{R}a} \abs{J}^{-\frac{1+\sigma(\vec{\alpha})}{2}}\left \Vert \mathsf{1}_J \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma(\vec{\alpha})}{2} - \frac{1}{r}} \abs{\langle b, h_J \mathbb{R}a} \abs{J}^{-\frac{1+\sigma(\vec{\alpha})}{2}}\abs{J}^{\frac{1}{r}}\\ &=& \frac{\abs{\langle b, h_J \mathbb{R}a}}{\sqrt{\abs{J}}}. 
\end{eqnarray*} \noindent Thus $ \frac{\abs{\langle b, h_J \rangle}}{\sqrt{\abs{J}}} \leq \left \Vert \pi_b^{\vec{\alpha}}\right \Vert _{L^{p_1} \times \cdots \times L^{p_m}}.$ Since this holds for any $J \in \mathcal{D},$ we have $$ \displaystyle \sup_{J \in \mathcal{D}} \frac{\abs{\langle b, h_J \rangle}}{\sqrt{\abs{J}}} \leq \left \Vert \pi_b^{\vec{\alpha}} \right \Vert _{L^{p_1} \times \cdots \times L^{p_m}} < \infty,$$ as desired. \end{proof} \noindent Now that we have obtained strong type $L^{p_1} \times\cdots\times L^{p_m} \rightarrow L^r$ boundedness estimates for the paraproduct operators $P^{\vec{\alpha}}$ and $\pi_b^{\vec{\alpha}}$ when $1 < p_1, p_2, \ldots, p_m,r < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$, we are interested in investigating estimates corresponding to $\frac{1}{m} \leq r < \infty$. We will prove in Lemma \ref{MPPL} that we obtain weak type estimates if one or more of the $p_i$'s are equal to 1. In particular, we obtain $L^{1} \times\cdots\times L^{1} \rightarrow L^{\frac{1}{m},\infty}$ estimates for those operators. 
Then it follows from multilinear interpolation that the paraproduct operators are strongly bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^r$ for $1 < p_1, p_2, \ldots, p_m < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r},$ even if $\frac{1}{m} < r \leq 1.$\\ \noindent We first prove the following general lemma, which when applied to the operators $P^{\vec{\alpha}}$ and $\ensuremath{\partial}i_b^{\vec{\alpha}}$ gives aforementioned weak type estimates.\\ \noindent \begin{lm}\label{WBL} Let $T$ be a multilinear operator that is bounded from the product of Lebesgue spaces $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}$ for some $1 < p_1, p_2, \ldots, p_m < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose that for every $I \in \mathcal{D}$, $T(f_1, \ldots, f_m)$ is supported in $I$ if $f_i = h_I$ for some $i \in \{1, 2, \ldots, m\}$. Then $T$ is bounded from $L^1 \times \cdots \times L^1 \times L^{p_{k+1}} \times \cdots\times L^{p_m} \rightarrow L^{\frac{q_k}{q_k + 1},\infty}$ for each $k = 1, 2, \ldots,m,$ where $q_k$ is given by $$\frac{1}{q_k} = (k-1) + \frac{1}{p_{k+1}} + \cdots+\frac{1}{p_{m}}.$$ In particular, $T$ is bounded from $L^{1} \times \cdots \times L^{1}$ to $L^{\frac{1}{m},\infty}$. \end{lm} \noindent \begin{proof} We first prove that $T$ is bounded from $L^{1} \times L^{p_2}\times \cdots \times L^{p_m} $ to $ L^{\frac{q_1}{q_{1}+1},\infty}.$\\ Let $\lambda > 0$ be given. 
We have to show that $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \left(\frac{\norm{f_1}_1 \prod_{j=2}^m\norm{f_j}_{p_j}}{\lambda}\right)^{\frac{q_1}{1+q_1}}$$ for all $(f_1, f_2, \ldots,f_m) \in L^{1} \times L^{p_2} \times \cdots \times L^{p_m}$.\\ Without loss of generality, we assume $\norm{f_1}_{1} = \norm{f_2}_{p_2}= \cdots = \norm{f_m}_{p_m} =1,$ and prove that $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$ For this, we apply the Calder\'on--Zygmund decomposition to the function $f_1$ at height $\lambda^{\frac{q_1}{q_{1}+1}}$ to obtain `good' and `bad' functions $g_1$ and $b_1$, and a sequence $\{I_{1,j}\}$ of disjoint dyadic intervals such that $$ f_1 = g_1 + b_1, \;\;\; \norm{g_1}_{p_1} \leq \left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{1/p_1'} \norm{f_1}_1^{1/p_1} = \left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{\frac{p_1-1}{p_1}}\;\;\; \text{ and } \;\;\; b_1 = \sum_j b_{1,j},$$ where $$\text{supp}(b_{1,j}) \subseteq I_{1,j},\;\; \; \int_{I_{1,j}} b_{1,j} dx = 0,\;\; \text{ and } \;\; \sum_j\abs{I_{1,j}} \leq {\lambda}^{-\frac{q_1}{q_1+1}}\norm{f_1}_1 = {\lambda}^{-\frac{q_1}{q_1+1}}.$$\\ Multilinearity of $T$ implies that $$\left|\left\{x:|T(f_1,\ldots,f_m)(x)| > \lambda \right\} \right|$$ $$ \leq \left|\left\{x:|T(g_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2}\right\}\right| \; + \; \left|\left \{x:|T(b_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2} \right \}\right|.$$ Since $g_1 \in L^{p_1}$ and $T$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}$, we have \begin{eqnarray*} \abs {\{ x: \abs {T(g_1, f_2, \ldots,f_m)(x)} > \lambda/2 \}} & \lesssim & \left(\frac{2\norm{g_1}_{p_1} \displaystyle\prod_{j=2}^m \norm{f_j}_{p_j}}{\lambda}\right)^r\\ & \leq & \left(\frac{2\left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{\frac{p_1 -1}{p_1}}} {\lambda}\right)^r\\ & \lesssim & \lambda^{r\left(\frac{q_1(p_1 -1)}{p_1(q_{1}+1)} -1 
\right)} \end{eqnarray*} Now, $\frac{1}{r} = \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{p_1} + \frac{1}{q_1}$ implies that $r = \frac{p_1 q_1} {p_1+q_1}.$ So,\\ \begin{eqnarray*} r\left(\frac{q_1(p_1 -1)}{p_1(q_{1}+1)} -1\right) &=& \frac{p_1 q_1}{(p_1+q_1)}\left(\frac{p_1q_1 - q_1 - p_1q_1 - p_1}{p_1(q_{1}+1)}\right)\\ &=& \frac{p_1 q_1}{(p_1+q_1)}\frac{(-p_1 - q_1)}{p_1(q_{1}+1)}\\ &=& -\frac{q_1}{q_1+1}. \end{eqnarray*} \noindent Thus we have: $$\abs {\{ x: \abs {T(g_1, f_2, \ldots,f_m)(x)} > \lambda/2 \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$ \noindent From the properties of `bad' function $b_1$ we deduce that $\langle b_1, h_I \mathbb{R}a \neq 0$ only if $I \subseteq I_{1,j}$ for some $j$. The hypothesis of the lemma on the support of $T(f_1, \ldots, f_m)$ then implies that $$ \text{supp}\left(T(b_1,f_2, \ldots, f_m)\right) \subseteq \cup_j I_{1,j}.$$ Thus, $$ \left|\left \{x:|T(b_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2} \right \}\right| \leq \left| \cup_j I_{1,j} \right| \leq \lambda^{-\frac{q_1}{1+q_1}}.$$ \noindent Combining these estimates corresponding to $g_1$ and $b_1$, we have the desired estimate $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$ \noindent Now beginning with the $L^{1} \times L^{p_2}\times \cdots \times L^{p_m} \rightarrow L^{\frac{q_1}{q_{1}+1},\infty}$ estimate, we use the same argument to lower the second exponent to 1 proving that $T$ is bounded from $L^{1} \times L^{1}\times L^{p_3} \times \cdots \times L^{p_m} $ to $ L^{\frac{q_2}{q_{2}+1},\infty}, $ where $q_2$ is given by $\frac{1}{q_2} = 1 + \frac{1}{p_{3}} + \cdots +\frac{1}{p_{m}}.$\\ \noindent We continue the same process until we obtain $L^{1} \times L^{1}\times \cdots \times L^{1} \rightarrow L^{\frac{q_m}{q_{m}+1},\infty}$ boundedness of $T$ with $\frac{1}{q_m} = 1+1+ \cdots+1 \; (m-1 \text{ terms}) = m-1.$ This completes the proof since $\frac{q_m}{q_m+1} = \frac{1}{m}.$ \end{proof} \noindent \begin{lm} \label{MPPL} 
Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m, 1 \leq p_1, p_2, \ldots, p_m < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $P^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \item If $b \in BMO^d$ and $\sigma(\vec{\alpha}) \leq 1, \,\ensuremath{\partial}i_b^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \item If $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}} < \infty$ and $\sigma(\vec{\alpha}) > 1, \,\ensuremath{\partial}i_b^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \end{enumerate} \end{lm} \begin{proof} By orthogonality of Haar functions, $h_I(J,0) = \langle h_I, h_J \mathbb{R}a = 0 $ for any two distinct dyadic intervals $I$ and $J.$ The Haar functions have mean value 0, so it is easy to see that $$\langle h_I \mathbb{R}a_J \neq 0 \text{ only if } J \subsetneq I$$ since any two dyadic intervals are either disjoint or one is contained in the other.\\ \noindent Consequently, if some $f_i = h_I,$ then $$P^{\vec{\alpha}}(f_1, f_2,\ldots,f_m) = \sum_{J\subseteq I}\ensuremath{\partial}rod_{j=1}^m f_j(J,\alpha_j) h_J^{\sigma(\vec{\alpha})}$$ and, $$\ensuremath{\partial}i_b^{\vec{\alpha}}(f_1, f_2,\ldots,f_m) = \sum_{J\subseteq I}\langle b,h_J \mathbb{R}a \ensuremath{\partial}rod_{j=1}^m f_j(J,\alpha_j) h_J^{1+ \sigma(\vec{\alpha})},$$ which are both supported in $I.$ Since the paraproducts are strongly (and hence weakly) bounded from $L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^r$, the proof follows immediately from Lemma $\operatorname{Re}f{WBL}.$ \end{proof} \noindent Combining the results of Lemmas \operatorname{Re}f{MPPTh1}, \operatorname{Re}f{MPPTh2} and \operatorname{Re}f{MPPL}, and using multilinear interpolation (see \cite{GLLZ}), we have the following 
theorem: \begin{thm} Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m$ and $ 1 < p_1, p_2, \ldots, p_m < \infty$ with $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $\displaystyle \left\Vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}.$ \item For $\sigma(\vec{\alpha}) \leq 1,$ $\displaystyle \left\Vert\ensuremath{\partial}i_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $b \in BMO^d.$\\ \item For $\sigma(\vec{\alpha}) > 1,$ $\displaystyle\left\Vert\ensuremath{\partial}i_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq C_b \ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\mathbb{R}a}}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} In each of the above cases, the paraproducts are weakly bounded if $1\leq p_1, p_2, \ldots, p_m < \infty$.\\ \end{thm} \section {Multilinear Haar multipliers and multilinear commutators} \subsection{Multilinear Haar Multipliers} \noindent In this subsection we introduce multilinear Haar multipliers, and study their boundedness properties.\\ \noindent \begin{dfn} Given $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in \{0,1\}^m,$ and a symbol sequence $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}},$ we define \textit{m-linear Haar multipliers} by $$ T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \equiv \sum_{I\in \mathcal{D}} \epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}.$$ \end{dfn} \noindent \begin{thm}\label{MHMTh} Let $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}}$ be a given sequence and let $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in U_m.$ Let $1<p_1,p_2, \ldots,p_m<\infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = 
\frac{1}{r}.$$ Then $T_\epsilon^{\vec{\alpha}}$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$ to $L^r$ if and only if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\ Moreover, $T_\epsilon^{\vec{\alpha}}$ has the corresponding weak-type boundedness if $1 \leq p_1,p_2, \ldots,p_m<\infty.$ \end{thm} \noindent \begin{proof} To prove this theorem we use the fact that the linear Haar multiplier $$T_\epsilon(f) = \sum_{I\in\mathcal{D}} \epsilon_I \langle f,h_I\rangle h_I$$ is bounded on $L^p$ for all $1<p<\infty$ if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty,$ and that $\langle T_\epsilon(f),h_I \rangle = \epsilon_I \langle f,h_I\rangle.$\\ \noindent By assumption $\sigma(\vec{\alpha})\geq 1$. Without loss of generality we may assume that $\alpha_i = 0$ if $1\leq i \leq \sigma(\vec{\alpha})$ and $\alpha_i = 1$ if $\sigma(\vec{\alpha}) < i \leq m.$ In particular, we have $\alpha_1 = 0.$ Then $$\epsilon_I f_1(I,\alpha_1) = \epsilon_I \langle f_1,h_I\rangle = \langle T_\epsilon(f_1),h_I \rangle = T_\epsilon(f_1)(I,\alpha_1).$$ \noindent First assume that $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\ \noindent Then, \begin{eqnarray*} \norm{T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m)}_r &=& \left\Vert \sum_{I\in \mathcal{D}} \epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r\\ &=& \left\Vert\sum_{I\in \mathcal{D}} T_\epsilon(f_1)(I,\alpha_1)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r\\ &=& \norm{P^{\vec{\alpha}}(T_\epsilon(f_1),f_2, \ldots,f_{m})}_r\\ & \lesssim & \norm{T_\epsilon(f_1)}_{p_1} \prod_{j=2}^m \norm{f_j}_{p_j}\\ & \lesssim & \prod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*} \noindent Conversely, assume that $T_\epsilon^{\vec{\alpha}}$ : $L^{p_1}\times 
L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded, and let $\sigma(\vec{\alpha}) = k.$ Recall that $\alpha_i = 0$ if $1\leq i \leq \sigma(\vec{\alpha}) = k$ and $\alpha_i = 1$ if $k=\sigma(\vec{\alpha}) < i \leq m.$ Taking $f_i = h_I$ if $1 \leq i \leq k$ and $f_i = \mathsf{1}_I$ if $k < i \leq m,$ we observe that\\ \begin{eqnarray*} {\norm{T_\epsilon^{\vec{\alpha}}(f_1,f_2, \ldots,f_m)}}_r &=& \left(\int_{\mathbb{R}} \abs{\epsilon_I h_I^k(x)}^r dx\right)^{1/r} \\ &=& \left(\frac{\abs{\epsilon_I}^r}{\abs{I}^{kr/2}}\int_{\mathbb{R}} \mathsf{1}_I(x) dx\right)^{1/r} \\ & = & \frac{\abs{\epsilon_I}}{\abs{I}^{k/2}} \abs{I}^{1/r} \end{eqnarray*} and, \begin{eqnarray*} \prod_{j=1}^m\norm{f_j}_{p_j} &=& \prod_{i=1}^k \left(\int_{\mathbb{R}} \abs{ h_I(x)}^{p_i} dx \right)^{1/p_i} \prod_{j=k+1}^m\left(\int_{\mathbb{R}} \abs{\mathsf{1}_I(x)}^{p_j} dx\right)^{1/p_j}\\ &=& \prod_{i=1}^k\left(\frac{1}{\abs{I}^{p_i/2}} \int_{\mathbb{R}} \mathsf{1}_I(x) dx \right)^{1/p_i}\prod_{j=k+1}^m\left(\int_{\mathbb{R}} \mathsf{1}_I(x) dx \right)^{1/p_j}\\ & = & \prod_{i=1}^k\left(\frac{1}{\abs{I}^{1/2}} \abs{I}^{1/p_i}\right) \prod_{j=k+1}^m \abs{I}^{1/p_j}\\ & = & \frac{\abs{I}^{1/r}}{\abs{I}^{k/2}}. \end{eqnarray*} Since $(f_1, f_2, \ldots,f_m) \in L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$, the boundedness of $T_\epsilon^{\vec{\alpha}}$ implies that $${\norm{T_\epsilon^{\vec{\alpha}}(f_1,f_2, \ldots,f_m)}}_r \leq {\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r} \prod_{j=1}^m\norm{f_j}_{p_j}.$$ That is, $$\frac{\abs{\epsilon_I}}{\abs{I}^{k/2}} \abs{I}^{1/r} \leq {\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r} \frac{\abs{I}^{1/r}}{\abs{I}^{k/2}},$$ for all $I \in \mathcal{D}.$ Consequently, $\norm{\epsilon}_\infty = \displaystyle\sup_{I\in \mathcal{D}} \abs{\epsilon_I} \leq 
{\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m}} < \infty,$ as desired. \\ If $1 \leq p_1,p_2, \ldots,p_m<\infty,$ the weak-type boundedness of $T_\epsilon^{\vec{\alpha}}$ follows from Lemma \operatorname{Re}f{WBL}. \end{proof} \subsection{Multilinear commutators} \noindent In this subsection we study boundedness properties of the commutators of $T_\epsilon^{\vec{\alpha}}$ with the multiplication operator $M_b$ when $b\in BMO^d.$ For convenience we denote the operator $M_b$ by $b$ itself. We are interested in the following commutators: $$[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) \equiv (T_\epsilon^{\vec{\alpha}}(f_1, \ldots, bf_i,\ldots,f_m) - bT_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m))(x)$$ \noindent where $1\leq i \leq m.$\\ \noindent Note that if $b$ is a constant function, $[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) = 0$ for all $x.$ Our approach to study the boundedness properties of $[b,T_\epsilon^{\vec{\alpha}}]_i: L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ with $1<p_1,p_2, \ldots,p_m < \infty$ and $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$ for non-constant $b$ requires us to assume that $b \in L^p$ for some $p\in (1,\infty),$ and that $r > 1.$ However, this restricted unweighted theory turns out to be sufficient to obtain a weighted theory, which in turn implies the unrestricted unweighted theory of these multilinear commutators. 
We will present the weighted theory of these commutators in a subsequent paper.\\ \begin{thm}\label{boc} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m.$ If $b \in BMO^d \cap L^p$ for some $1<p<\infty$ and $\norm{\epsilon}_\infty := \sup_{I\in \mathcal{D}} \abs{\epsilon_I} < \infty,$ then each commutator $[b,T_\epsilon^{\vec{\alpha}}]_i$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ for all $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r},$$ with estimates of the form: $$ \norm{[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)}_r \lesssim \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}.$$ \end{thm} \noindent \begin{proof} It suffices to prove boundedness of $[b,T_\epsilon^{\vec{\alpha}}]_1,$ as the others are identical. Moreover, we may assume that each $f_i$ is bounded and has compact support, since such functions are dense in the $L^p$ spaces.\\ \noindent Writing $bf_1 = \ensuremath{\partial}i_b(f_1) + \ensuremath{\partial}i_b^*(f_1) + \ensuremath{\partial}i_{f_1}(b)$ and using multilinearity of $T_\epsilon^{\vec{\alpha}}$, we have\\ $$T_\epsilon^{\vec{\alpha}} (bf_1, f_2, \ldots,f_m) = T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_b(f_1),f_2, \ldots,f_m) + T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_b^*(f_1),f_2, \ldots,f_m) + T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_{f_1}(b),f_2, \ldots,f_m).$$ \noindent On the other hand, \begin{eqnarray*} b T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) &=& \sum_{I \in \mathcal{D}}\epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{J \in \mathcal{D}} \widehat{b}(J) h_J\right)\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \widehat{b}(I)\ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{1+\sigma(\vec{\alpha})}\\ && +\sum_{I \in \mathcal{D}}\epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{I\subsetneq J} 
\widehat{b}(J) h_J\right)\\ && +\sum_{I \in \mathcal{D}}\epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{J\subsetneq I} \widehat{b}(J) h_J\right)\\ &=& \ensuremath{\partial}i_b^{\vec{\alpha}} (f_1, \ldots, T_\epsilon(f_i), \ldots,f_m) \\ && + \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I\ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \\ &&+ \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right) \end{eqnarray*} for some $i$ with $\alpha_i = 0.$ Indeed, some $\alpha_i$ equals 0 by assumption, and for such $i$, we have $$T_\epsilon(f_i)(I,\alpha_i) = \widehat{T_\epsilon(f_i)}(I) = \epsilon_I \widehat{f_i}(I) = \epsilon_I f_i(I,\alpha_i) .$$ \noindent For $(f_1,f_2,\ldots,f_m) \in L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m},$ we have \begin{eqnarray*} \norm{T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_b(f_1),f_2, \ldots,f_m)}_r &\lesssim & \norm{\ensuremath{\partial}i_b(f_1)}_{p_1}\ensuremath{\partial}rod_{j=2}^m \norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j} \end{eqnarray*} \begin{eqnarray*} \norm{T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_b^*(f_1),f_2, \ldots,f_m)}_r &\lesssim & \norm{\ensuremath{\partial}i_b^*(f_1)}_{p_1}\ensuremath{\partial}rod_{j=2}^m \norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}. 
\end{eqnarray*} and, \begin{eqnarray*} \norm{\pi_b^{\vec{\alpha}} (f_1, \ldots,T_\epsilon(f_i), \ldots,f_m)}_r &\lesssim & \norm{b}_{BMO^d}\norm{f_1}_{p_1} \cdots \norm{T_\epsilon(f_i)}_{p_i}\cdots\norm{f_m}_{p_m}\\ &\lesssim& \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*} \noindent So, to prove boundedness of $[b,T_\epsilon^{\vec{\alpha}}]_1$, it suffices to show similar control over the terms: \begin{equation} \label{term1}\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r \end{equation} and, \begin{equation}\label{term2}\left\Vert T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r. \end{equation} \noindent \textbf{Estimation of} $(\ref{term1})$:\\ \noindent Case I: $\sigma(\vec{\alpha})$ odd.\\ In this case, $$T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) = \sum_{I \in \mathcal{D}}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} = \sum_{I \in \mathcal{D}}\epsilon_I\abs{I}^{\frac{1-\sigma(\vec{\alpha})}{2}} \prod_{j=1}^m f_j(I,\alpha_j) h_I. $$ So, $$ \langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m), h_I \rangle h_I = \epsilon_I\abs{I}^{\frac{1-\sigma(\vec{\alpha})}{2}} \prod_{j=1}^m f_j(I,\alpha_j) h_I = \epsilon_I \prod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}. 
$$ This implies that \begin{eqnarray*} (\operatorname{Re}f{term1}) &=&\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m), h_I \mathbb{R}a h_I\right)\right\Vert_r\\ &= & \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J)\langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \mathbb{R}a_J h_J\right\Vert_r\\ &=& \left\Vert \ensuremath{\partial}i_b \left( T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \right)\right\Vert_r\\ &\lesssim & \norm{b}_{BMO^d}\left\Vert T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \right\Vert_r\\ &\lesssim & \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*} \noindent Case II: $\sigma(\vec{\alpha})$ even.\\ \noindent In this case at least two $\alpha_i's$ are equal to 0. Without loss of generality we may assume that $\alpha_1=0.$ Then denoting $T_\epsilon (f_1)$ by $g_1,$ $P^{(\alpha_2,\ldots,\alpha_m)}(f_2, \ldots,f_m)$ by $ g_2,$ and using the fact that $$\langle g_1 \mathbb{R}a_J \langle g_2 \mathbb{R}a_J \mathsf{1}_J = \left(\sum_{J\subsetneq I}\widehat{g_1}(I) \langle {g_2}\mathbb{R}a_I h_I +\sum_{J\subsetneq I}\langle g_1\mathbb{R}a_I \widehat{g_2}(I) h_I +\sum_{J\subsetneq I}\widehat{g_1}(I) \widehat{g_2}(I) h_I^2 \right)\mathsf{1}_J, $$ we have \begin{eqnarray*} && \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r\\ &=&\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\widehat{g_1}(I) \widehat{g_2}(I) h_I^2 \right)\right\Vert_r\\ &= & \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\langle g_1 \mathbb{R}a_J \langle g_2 \mathbb{R}a_J \mathsf{1}_J - \sum_{J\subsetneq I}\widehat{g_1}(I) \langle {g_2}\mathbb{R}a_I h_I -\sum_{J\subsetneq I}\langle g_1\mathbb{R}a_I \widehat{g_2}(I) h_I \right)\right\Vert_r\\ &\leq & \left\Vert \sum_{J\in\mathcal{D}} 
\widehat{b}(J) \langle g_1 \mathbb{R}a_J \langle g_2 \mathbb{R}a_J h_J\right\Vert_r +\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) \langle P^{(0,1)}(g_1,g_2) \mathbb{R}a_J h_J \right\Vert_r\\ && \qquad \qquad +\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) \langle P^{(1,0)}(g_1,g_2) \mathbb{R}a_J h_J\right\Vert_r \\ &\lesssim & \norm{b}_{BMO^d}\norm{g_1}_{p_1}\norm{g_2}_{q} + \norm{b}_{BMO^d}\norm{P^{(0,1)}(g_1,g_2)}_{r} +\norm{b}_{BMO^d}\norm{P^{(1,0)}(g_1,g_2)}_r\\ &\lesssim & \norm{b}_{BMO^d}\norm{g_1}_{p_1}\norm{g_2}_{q}\\ &\lesssim & \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*} where, $q$ is given by $\displaystyle \frac{1}{q} = \sum_{j=2}^m \frac{1}{p_j}.$ Here the last three inequalities follow from Theorems $\operatorname{Re}f{MPPTh1}$ and $\operatorname{Re}f{MPPTh2},$ and the fact that $\norm{g_1}_{p_1} = \norm{T_\epsilon (f_1)}_{p_1} \lesssim \norm{f_1}_{p_1}.$ \\ \noindent \textbf{Estimation of} $(\operatorname{Re}f{term2}):$\\ \noindent Case I: $\alpha_1 = 0.$\\ \noindent This case is easy as we observe that\\ \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \widehat{\ensuremath{\partial}i_{f_1}(b)}(I) \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \widehat{f_1}(I) \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ & =& \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \widehat{f_1}(I)\ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \widehat{f_1}(I) \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& 0. \end{eqnarray*} So there is nothing to estimate. 
\noindent Case II: $\alpha_1 = 1.$\\ \noindent In this case, \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_{f_1}(b),f_2, \ldots,f_m) - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \langle \ensuremath{\partial}i_{f_1}(b) \mathbb{R}a_I \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \langle f_1\mathbb{R}a_I \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ & =& \sum_{I \in \mathcal{D}}\epsilon_I \left(\langle \ensuremath{\partial}i_{f_1}(b) \mathbb{R}a_I - \langle b\mathbb{R}a_I \langle f_1\mathbb{R}a_I \right)\ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ \end{eqnarray*} \noindent Now, \begin{eqnarray*}\langle b \mathbb{R}a_I \langle f_1 \mathbb{R}a_I \mathsf{1}_I &=& \sum_{I\subsetneq J}\widehat{b}(J) \langle {f_1}\mathbb{R}a_J h_J \mathsf{1}_I +\sum_{I\subsetneq J}\langle b\mathbb{R}a_J \widehat{f_1}(J) h_J \mathsf{1}_I+\sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\mathsf{1}_I\\ &=& \langle \ensuremath{\partial}i_b(f_1)\mathbb{R}a_I \mathsf{1}_I +\langle \ensuremath{\partial}i_{f_1}(b)\mathbb{R}a_I \mathsf{1}_I+ \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\mathsf{1}_I. 
\end{eqnarray*} Hence, $\langle b \mathbb{R}a_I \langle f_1 \mathbb{R}a_I \mathsf{1}_I -\langle \ensuremath{\partial}i_{f_1}(b)\mathbb{R}a_I \mathsf{1}_I= \langle \ensuremath{\partial}i_b(f_1)\mathbb{R}a_I \mathsf{1}_I + \displaystyle \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \mathsf{1}_I.$\\ \noindent So we have \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\ensuremath{\partial}i_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\mathbb{R}a_I \ensuremath{\partial}rod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = -\sum_{I \in \mathcal{D}}\epsilon_I \left( \langle \ensuremath{\partial}i_b(f_1)\mathbb{R}a_I \mathsf{1}_I + \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\right)\ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = -\sum_{I \in \mathcal{D}}\epsilon_I \langle \ensuremath{\partial}i_b(f_1)\mathbb{R}a_I \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \\ && \hspace{1in} -\sum_{I \in \mathcal{D}}\epsilon_I \left( \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\right)\ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = - T_\epsilon(\ensuremath{\partial}i_b(f_1),f_2,\ldots,f_m) - \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right).\\ \end{eqnarray*} \noindent Since $$\norm{T_\epsilon(\ensuremath{\partial}i_b(f_1),f_2,\ldots,f_m)}_r \lesssim \norm{\ensuremath{\partial}i_b(f_1)}_{p_1}\ensuremath{\partial}rod_{j=2}^m f_j(J,\alpha_j) \lesssim \norm{b}_{BMO^d}\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j},$$ we are left with controlling $$\left\Vert \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \ensuremath{\partial}rod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right)\right\Vert_r. 
$$ \noindent For this we observe that $$\left\Vert T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) \right\Vert_q \lesssim \prod_{j=2}^m \norm{f_j}_{p_j},$$ and that \begin{eqnarray*} \pi_b^*(f_1)\; T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) &=& \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left( \sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\\ && + \sum_{J \in \mathcal{D}}\epsilon_J \widehat{b}(J) \widehat{f_1}(J) \prod_{j=2}^m f_j(J,\alpha_j) h_J^{2+ \sigma(\vec{\alpha})} \\ && +\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right). \end{eqnarray*} \noindent Now, following the same technique we used to control $(\ref{term1}),$ we obtain $$\displaystyle \left\Vert\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r\lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$ We also have \begin{eqnarray*} \left\Vert \pi_b^*(f_1)\; T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) \right\Vert_r &\leq& \left\Vert\pi_b^*(f_1)\right\Vert_{p_1} \left\Vert T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m)\right\Vert_q \\ &\lesssim& \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j} \end{eqnarray*} and $$ \left\Vert\sum_{J \in \mathcal{D}}\epsilon_J \widehat{b}(J) \widehat{f_1}(J) \prod_{j=2}^m f_j(J,\alpha_j) h_J^{2+ \sigma(\vec{\alpha})}\right\Vert_r \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$ 
\noindent So we conclude that $$ \left\Vert\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right)\right\Vert_r\lesssim\norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$ Thus we have strong type boundedness of $$[b,T_\epsilon^{\vec{\alpha}}]_1 : L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$$ for all $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ \end{proof} \noindent In the next theorem, we show that the BMO condition is necessary for the boundedness of the commutators.\\ \noindent \begin{thm}\label{bmonecessity} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Assume that for given $b$ and $i$, \begin{equation} \norm{[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)}_r \leq C_\epsilon \prod_{j=1}^m\norm{f_j}_{p_j}, \label{eq:bd} \end{equation} for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}},$ and for all $f_i \in L^{p_i}.$ Then $b\in BMO^d.$ \end{thm} \noindent \begin{proof} Without loss of generality we may assume that $i=1.$ Fix $I_0 \in \mathcal{D}$ and let $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}$ with $\epsilon_I =1$ for all $I\in \mathcal{D}.$ \\ \noindent \textbf{Case I:} $\alpha_1 = 0, \sigma(\vec{\alpha}) = 1.$\\ \noindent Take $f_1 = \mathsf{1}_{I_0}$ and $f_i = h_{I_0^{(1)}}$ for $i>1$, where $I_0^{(1)}$ is the parent of $I_0.$ Then, $$T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = \sum_{I\in \mathcal{D}} \langle \mathsf{1}_{I_0}, h_I\rangle \langle h_{I_0^{(1)}}\rangle_I^{m-1} h_I=0,$$ and, \begin{eqnarray*} T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots,f_m) &=& \sum_{I\in \mathcal{D}} \langle b\mathsf{1}_{I_0}, h_I\rangle \langle 
h_{I_0^{(1)}}\mathbb{R}a_I^{m-1} h_I\\ &=& \sum_{I\subseteq I_0} \langle b\mathsf{1}_{I_0}, h_I\mathbb{R}a \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}h_I\\ &=& \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}\sum_{I\subseteq I_0} \langle b, h_I\mathbb{R}a h_I, \end{eqnarray*} where $ K(I_0,I_0^{(1)})$ is either $1$ or $-1$ depending on whether $I_0$ is the right or left half of $I_0^{(1)}.$ \\ \noindent For the second to last equality we observe that, if $I$ is not a proper subset of $I_0^{(1)},$ $ \langle h_{I_0^{(1)}}\mathbb{R}a_I = 0,$ and that if $I$ is a proper subset of $I_0^{(1)}$ but is not a subset of $I_0$, then $\langle b\mathsf{1}_{I_0}, h_I\mathbb{R}a =0.$ Moreover, for $I \subseteq I_0,$ $\langle b\mathsf{1}_{I_0}, h_I\mathbb{R}a = \int_\mathbb{R}{b\mathsf{1}_{I_0} h_I} = \int_\mathbb{R}{b h_I} = \langle b, h_I\mathbb{R}a.$\\ \noindent Now from inequality \eqref{eq:bd}, we get $$ \left\Vert \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}\sum_{I\subseteq I_0} \langle b, h_I\mathbb{R}a h_I \right\Vert_r \leq C_\epsilon \abs{I_0}^{\frac{1}{p_1}} \ensuremath{\partial}rod_{i=2}^{m}\frac{\abs{I_0^{(1)}}^{\frac{1}{p_i}}} {\sqrt{\abs{I_0^{(1)}}}}$$ $$i.e. 
\quad \left\Vert \sum_{I\subseteq I_0} \langle b, h_I\mathbb{R}a h_I \right\Vert_r \leq 2^{\frac{1}{p_2}+ \cdots+\frac{1}{p_m}} C_\epsilon \abs{I_0}^{\frac{1}{r}}.$$ Thus for every $I_0 \in \mathcal{D},$ $$\frac{1}{\abs{I_0}^{\frac{1}{r}}}\left\Vert \sum_{I\subseteq I_0} \langle b, h_I\mathbb{R}a h_I \right\Vert_r \leq 2^{\frac{1}{p_2}+ \cdots+\frac{1}{p_m}} C_\epsilon ,$$ and hence $b \in BMO^d.$\\ \noindent \textbf{Case II:} $\alpha_1 \neq 0$ \, or \, $\sigma(\vec{\alpha}) > 1.$\\ \noindent Taking $f_i = \begin{cases} h_{I_0}, &\text{if }\alpha_i = 0\\ \mathsf{1}_{I_0}, \;\;\; & \text{if }\alpha_i = 1,\\ \end{cases} $\; we observe that $$T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)) = h_{I_0}^{\sigma(\vec{\alpha})} \;\;\text{ and } \;\;\;T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots, \ldots,f_m) = (bf_1)(I_0, \alpha_1)h_{I_0}^{\sigma(\vec{\alpha})}. $$ \noindent If $\alpha_1 = 0, $ $$ (bf_1)(I_0, \alpha_1) = {bh_{I_0}}(I_0, 0) = \widehat{bh_{I_0}}(I_0) = \int_\mathbb{R}{ bh_{I_0}h_{I_0}} = \frac{1}{\abs{I_0}}\int_\mathbb{R}{ b \mathsf{1}_{I_0}} = \langle b \mathbb{R}a_{I_0}.$$ \noindent If $\alpha_1 = 1,$ $$ (bf_1)(I_0, \alpha_1) = {b\mathsf{1}_{I_0}}(I_0, 1) = \langle {b\mathsf{1}_{I_0}}\mathbb{R}a_{I_0} = \langle {b}\mathbb{R}a_{I_0}.$$ \noindent So in each case, \begin{eqnarray*} \norm{[b,T_\epsilon^{\vec{\alpha}}]_1(f_1,f_2,\ldots,f_m)}_r &=& \left\Vert{bT_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)) - T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots, \ldots,f_m)}\right\Vert_r\\ &=& \left\Vert{b h_{I_0}^{\sigma(\vec{\alpha})} - \langle b \mathbb{R}a_{I_0} h_{I_0}^{\sigma(\vec{\alpha})}}\right\Vert_r\\ &=& \left\Vert{(b - \langle b \mathbb{R}a_{I_0}) h_{I_0}^{\sigma(\vec{\alpha})}}\right\Vert_r\\ &=& \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}}\norm{(b - \langle b \mathbb{R}a_{I_0}) \mathsf{1}_{I_0}}_r.\\ \end{eqnarray*} \noindent On the other hand, $$\ensuremath{\partial}rod_{j=1}^m\norm{f_j}_{p_j} = \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} 
\abs{I_0}^{\frac{1}{p_1}+ \cdots + \frac{1}{p_m}} = \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} \abs{I_0}^{\frac{1}{r}}. $$ Inequality \eqref{eq:bd} then gives $$ \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}}\norm{(b - \langle b \rangle_{I_0}) \mathsf{1}_{I_0}}_r \leq C_\epsilon \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} \abs{I_0}^{\frac{1}{r}}$$ $$ \text{i.e. } \quad \frac{1}{\abs{I_0}^{\frac{1}{r}}}\norm{(b - \langle b \rangle_{I_0}) \mathsf{1}_{I_0}}_r \leq C_\epsilon. $$ Since this is true for any $I_0 \in \mathcal{D}$, we have $b\in BMO^d.$ \end{proof} \noindent Combining the results from Theorems \ref{boc} and \ref{bmonecessity}, we have the following characterization of the dyadic BMO functions. Note that if $\epsilon_I = 1$ for every $I \in \mathcal{D}$, we have $T_\epsilon^{\vec{\alpha}} = P^{\vec{\alpha}},$ and that in the proof of Theorem \ref{bmonecessity}, only the boundedness of $[b, T_\epsilon^{\vec{\alpha}}]_i$ for $\epsilon$ with $\epsilon_I = 1$ for all $I\in \mathcal{D}$ was used to show that $b\in BMO^d.$\\ \begin{thm} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ $1\leq i \leq m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose $b \in L^p$ for some $p \in (1,\infty).$ Then the following two statements are equivalent. 
\begin{enumerate}[label = $(\alph*)$] \item $b\in BMO^d.$\\ \item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r $ is bounded for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}.$ \end{enumerate} \noindent In particular, $b\in BMO^d$ if and only if $[b,P^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded.\\ \end{thm} \begin{bibdiv} \begin{biblist} \normalsize \bib{BMNT}{article}{ title={Bilinear paraproducts revisited}, author={B{\'e}nyi, {\'A}.}, author={Maldonado, D.}, author={Nahmod, A. R.}, author={Torres, R. H.}, journal={Mathematische Nachrichten}, volume={283}, number={9}, pages={1257--1276}, year={2010}, publisher={Wiley Online Library}} \bib{Bla}{article}{ author={Blasco, O.}, title={Dyadic BMO, paraproducts and Haar multipliers}, journal={Contemp. Math., Vol 445, Amer. Math. Soc., Providence, RI,}, pages={11-18, MR 2381883}} \bib{CRW}{article}{ title={Factorization theorems for Hardy spaces in several variables}, author={Coifman, R.R.}, author={Rochberg, R.}, author={Weiss, G.}, journal={Ann. of Math.}, volume={103}, pages={611-635}, year={1976}} \bib{GLLZ}{article}{ author={Grafakos,L.}, author={Liu, L.}, author={Lu, S}, author={Zhao,F.}, title={The multilinear Marcinkiewicz interpolation theorem revisited: The behavior of the constant}, journal={J. Funct. Anal.}, volume={262}, year={2012}, pages={2289-2313}} \bib{GT}{article}{ author={Grafakos,L.}, author={Torres, R.H.}, title={Multilinear Calder$\acute{\text{o}}$n-Zygmund theory}, journal={Adv. Math.}, volume={165}, year={2002}, number={1}, pages={124-164.}} \bib{HR}{article}{title={Interpolation by the real method between $BMO$, $L^\alpha (0 < \alpha < \infty)$ and $H^\alpha (0 < \alpha < \infty)$}, author = {Hanks, R.}, journal={Indiana Univ. Math. 
J.}, volume ={26}, number ={4}, pages={679-689}, year = {1977}} \bib{Hyt}{article}{ author={Hyt$\ddot{\text{o}}$nen, Tuomas P.}, title={Representation of singular integrals by dyadic operators, and the $A_2$ theorem}, journal={arXiv:1108.5119}, year={2011}} \bib{Jan}{article}{ author={Janson, S.}, title={BMO and commutators of martingale transforms}, journal={Ann. Inst. Fourier}, volume={31}, number = {1}, year={1981}, pages={265-270}} \bib{JN}{article}{title={On functions of bounded mean oscillation}, author = {John, F.}, author = {Nirenberg, L.}, journal={Comm. Pure Appl. Math.}, volume = {14}, year = {1961}, pages ={415--426}} \bib{LOPTT}{article}{ title={New maximal functions and multiple weights for the multilinear Calder$\acute{\text{o}}$n-Zygmund theory}, author={Lerner, A.K.}, author={Ombrosi, S.}, author={P$\acute{\text{e}}$rez, C.}, author={Torres, R. H.}, author={Trujillo-Gonz$\acute{\text{a}}$lez, R.}, journal={Adv. in Math.}, volume={220}, number={4}, pages={1222--1264}, year={2009}, publisher={Wiley Online Library}} \bib{Per}{article}{ author={Pereyra, M.C.}, title={Lecture notes on dyadic harmonic analysis}, journal={Contemporary Mathematics}, volume={289}, date={2001}, pages={1-60}} \bib{SE}{book}{title={Harmonic Analysis: Real Variable Methods, Orthogonality, and Oscillatory Integrals}, author = {Stein, E. M.}, publisher ={Princeton Univ. Press, Princeton}, year = {1993}} \bib{Treil}{article}{ author={Treil, S.}, title={Commutators, paraproducts and BMO in non-homogeneous martingale settings}, journal={http://arxiv.org/pdf/1007.1210v1.pdf}} \end{biblist} \end{bibdiv} \end{document}
\begin{document} \maketitle \begin{abstract} We prove new results on generalized derivations on C$^*$-algebras. By considering the triple product $\{a,b,c\} =2^{-1} (a b^* c + c b^* a)$, we introduce the study of linear maps which are triple derivations or triple homomorphisms at a point. We prove that a continuous linear map $T$ on a unital C$^*$-algebra is a generalized derivation whenever it is a triple derivation at the unit element. If we additionally assume $T(1)=0,$ then $T$ is a $^*$-derivation and a triple derivation. Furthermore, a continuous linear map on a unital C$^*$-algebra which is a triple derivation at the unit element is a triple derivation. Similar conclusions are obtained for continuous linear maps which are derivations or triple derivations at zero. We also give an automatic continuity result, that is, we show that generalized derivations on a von Neumann algebra and linear maps on a von Neumann algebra which are derivations or triple derivations at zero are all continuous even if not assumed a priori to be so. \end{abstract} \section{Introduction} Automorphisms and derivations on Banach algebras are among the most intensively studied classes of operators. Recent studies are concerned with the question of finding weaker conditions to characterize these maps. One of the most fruitful lines studies maps which are derivations or automorphisms at a certain point. More concretely, a linear map $S$ from a Banach algebra $A$ to a Banach $A$-bimodule $X$ is said to be a \emph{derivation} at a point $z\in A$ if the identity \begin{equation}\label{eq derivation} S(ab ) = S(a) b + a S(b), \end{equation} holds for every $a,b\in A$ with $a b =z$. In the literature a linear map which is a derivation at a point $z$ is also called \emph{derivable} at $z$. Clearly, a linear map $D$ from $A$ into $X$ is a derivation if and only if it is a derivation at every point of $A$. 
We can similarly define linear maps which are Jordan derivations or generalized derivations at a point (see subsection \ref{subsect1} for detailed definitions). Following the terminology set by J. Alaminos, M. Bresar, J. Extremera, and A. Villena in \cite[\S 4]{AlBreExVill09} and J. Li and Z. Pan in \cite{LiPan}, we shall say that a linear operator $G$ from a Banach algebra $A$ into a Banach $A$-bimodule $X$ is a \emph{generalized derivation} if there exists $\xi\in X^{**}$ satisfying $$G(ab) = G(a) b + a G(b) - a \xi b \hbox{ ($a, b \in A$).}$$ Every derivation is a generalized derivation, however there exist generalized derivations which are not derivation. This notion is very useful when characterizing (generalized) derivations in terms of annihilation of certain products of orthogonal elements (see, for example, Theorem 2.11 in \cite[\S 2]{AyuKudPe2014}). The first results on linear maps that are derivable at zero appear in \cite[Subsection 4.2]{Bre07} and \cite[Theorem 2]{ChebKeLee}, where they were related to generalized derivations. In \cite[Theorem 4]{JingLuLi2002} W. Jing, S.J. Lu, and P.T. Li prove that the implication $$\delta \hbox{ is a derivation at zero } \Rightarrow \delta \hbox{ is a generalized derivation},$$ holds for every continuous linear map $\delta$ on a von Neumann algebra. If, under the above hypothesis $\delta (1)=0$, then $\delta$ is a derivation. We shall prove in Corollary \ref{c JingLuLi without continuity} that the hypothesis concerning continuity can be relaxed. W. Jing proves in \cite[Theorems 2.2 and 2.6]{Jing} the following result: for an infinite dimensional Hilbert space $H$, a linear map $\delta: B(H) \to B(H)$ which is a generalized Jordan derivation at zero, or at 1, is a generalized derivation. We observe that, in the latter result, $\delta$ is not assumed to be continuous. More related results read as follow. Let $X$ be a Banach $A$-bimodule over a Banach algebra $A$. In 2009, F. 
Lu establishes that a linear map $\delta : A\to X$ is a derivation whenever it is continuous and a derivation at an element which is left (or right) invertible (see \cite{Lu2009}). It is further shown that $\delta$ is a derivation if it is continuous and a derivation at an idempotent $e$ in $A$ such that for $x\in X$ the condition $e A (1-e) X =\{0\}$ implies $(1-e) X=\{0\}$ and the condition $XeA(1-e)=\{0\}$ gives $X e =\{0\}$. Here the linear map is assumed to be continuous. Concerning our goals, J. Zhu, Ch. Xiong, and P. Li prove in \cite{ZhuXiLi} a significant result showing that, for a Hilbert space $H$, a linear map $\delta :B(H)\to B(H)$ is a derivation if and only if it is a derivation at a non-zero point in $B(H)$. It is further shown that a linear map which is a derivation at zero need not be a derivation (for example, the identity mapping on $B(H)$ is a derivation at zero but it is not a derivation). We refer to \cite{Houqi, JingLuLi2002, ZhangHouQi2014, ZhangHouQi2014b, Zhu2007, ZhuXio2002, ZhuXio2005, ZhuXio2007, ZhuXion2008} and \cite{ZhuZhao2013} for additional results on linear or additive maps on JSL algebras, finite CSL algebras, nest algebras or standard operator algebras. In the present note we continue with the study of those linear maps which are derivable at zero. We shall introduce a new point of view by exploiting those properties of a C$^*$-algebra $A$ which are related to the ternary product defined by \begin{equation}\label{eq C*-triple product}\{a,b,c\}=\frac12 (a b^* c + cb^* a) \ \ \ (a,b,c\in A). \end{equation} Every C$^*$-algebra $A$ is a JB$^*$-triple (in the sense of \cite{Kaup83}) with respect to the triple product defined in \eqref{eq C*-triple product}. This is the natural triple product appearing in the study of J$^*$-algebras by L.A. Harris \cite{Harr74,Harris81} and the \emph{ternary rings of operators} (TRO's) in the sense of D.P. Blecher and M. Neal in \cite{BleNe} and M. Neal and B. Russo in \cite{NealRusso}. 
A linear map $T$ between C$^*$-algebras preserving the previous triple product is called a \emph{triple homomorphism}. A triple derivation on a C$^*$-algebra $A$ is a linear map $\delta : A\to A$ satisfying the generalized Leibniz rule $$\delta \{a,b,c\} = \{\delta(a),b,c\}+ \{a,\delta(b),c\}+\{a,b,\delta(c)\},$$ for all $a,b,c\in A$. We recall that a $^*$-derivation on a C$^*$-algebra $A$ is a derivation $D:A\to A$ satisfying $D(a)^*= D(a^*)$ for all $a\in A$. Examples of derivations on $A$ may be given by fixing $z\in A$ and defining $D_z : A\to A$ as the linear map given by $D_z (a) = [z,a] = z a - az$. It is known that every $^*$-derivation on a C$^*$-algebra is a triple derivation in the above sense. It is further known that there exist derivations on $A$ which are not triple derivations (compare \cite[Comments after Lemma 3]{BurFerGarPe2014}). On the other hand, for each $a$ in a C$^*$-algebra $A$, the mapping $\delta_a (x) := i \{a,a,x\}$ is a triple derivation on $A$, however, $2\delta_a (1) = i (a^* a + a a^*) = 0$ if and only if $a=0$, and thus $\delta_a$ is not an associative derivation on $A$ for every $a\neq 0$. In a recent paper M.J. Burgos, J. Cabello-S{\'a}nchez and the second author of this note explore those linear maps between C$^*$-algebras which are $^*$-homomorphisms at certain points of the domain, for example, at the unit element and at zero (see the introduction of section \ref{sec: triple hom at a point} for more details). In this paper we widen the scope by introducing linear maps which are triple derivations or triple homomorphisms at a certain point. Our study will be conducted around the next two notions. 
\begin{definition}\label{def triple derivation at a point} Let $T:A\to A$ be a linear map on a C$^*$-algebra, and let $z$ be an element in $A.$ We shall say that $T$ is a triple derivation at $z$ if $z=\{a,b,c\}$ in $A$ implies that $T(z)=\{T(a),b,c\}+\{a,T(b),c\}+\{a,b,T(c)\}.$ \end{definition} The set of all linear maps on $A$ which are triple derivable at an element $z\in A$ is a subspace of the space $L(A)$ of all linear operators on $A$. \begin{definition}\label{def triple homomorphism at a point} Let $T:A\to B$ be a linear map between C$^*$-algebras, and let $z$ be an element in $A.$ We shall say that $T$ is a triple homomorphism at $z$ if $z=\{a,b,c\}$ in $A$ implies that $\{T(a),T(b),T(c)\}=T(z).$ \end{definition} Let $T$ be a continuous linear map on a unital C$^*$-algebra. In Theorem \ref{t triple derivation at 1 are generalized derivations} we prove that $T$ being a triple derivation at the unit implies that $T$ is a generalized derivation. If we also assume that $T(1)=0,$ then $T$ is a $^*$-derivation and a triple derivation (see Proposition \ref{symmetric}). Among the consequences, we establish that a continuous linear map on a unital C$^*$-algebra which is a triple derivation at the unit element is a triple derivation (see Corollary \ref{c derivable at 1 are triple derivations}). When we study linear maps which are triple derivation at zero, our conclusions are stronger. We begin with an extension of \cite[Theorem 4]{JingLuLi2002} to the setting of unital C$^*$-algebras. We show that a continuous linear map $T$ on a C$^*$-algebra is a generalized derivation whenever it is a derivation or a triple derivation at zero (see Theorem \ref{t continuous triple der at zero are g der}). Moreover, a bounded linear map $T$ on a C$^*$-algebra $A$ which is a triple derivation at zero with $T(1)=0$ is a $^*$-derivation, and hence a triple derivation (compare Corollary \ref{c bl triple derivable at zero with T1=0}). 
We further show that a bounded linear map on a unital C$^*$-algebra $A$ which is a triple derivation at zero and satisfies $T(1)^*=-T(1)$ is a triple derivation (see Corollary \ref{c T1 skew}). For linear maps whose domain is a von Neumann algebra the continuity assumptions can be dropped for certain maps. More concretely, generalized derivations on a von Neumann algebra and linear maps on a von Neumann algebra which are derivations (respectively, triple derivations) at zero are all continuous (see Corollary \ref{c automatic cont gen der and triple der at zero on von Neumann}). Several characterizations of generalized derivations on von Neumann algebras are established in Corollary \ref{c non continuous generalized derivation} without assuming continuity. In this particular setting, some hypotheses in \cite[Theorem 4]{JingLuLi2002} and \cite{Lu2009} can be relaxed. In section \ref{sec: triple hom at a point} we study continuous linear maps on C$^*$-algebras which are triple homomorphisms at zero or at the unit element. Let $T:A\to B$ be a continuous linear map between C$^*$-algebras, where $A$ is unital. We prove in Theorem \ref{theorem3131} that if $T$ is a triple homomorphism at the unit of $A,$ then $T$ is a triple homomorphism. Furthermore, $T(1)$ is a partial isometry and $T: A \to B_2 (T(1))$ is a Jordan $^*$-homomorphism. For triple homomorphisms at zero, we rediscover the orthogonality preserving operators. More concretely, let $T :A \to B$ be a bounded linear map between two C$^*$-algebras. We shall revisit the main results in \cite{BurFerGarMarPe08} to show that $T$ is orthogonality preserving if, and only if, $T$ preserves zero-triple-products (i.e. $\{a,b,c\}=0$ in $A$ implies $\{T(a),T(b),T(c)\}=0$ in $B$) if, and only if, $T$ is a triple homomorphism at zero. \subsection{Basic background and definitions}\label{subsect1} \ \ \vspace*{1mm} The class of C$^*$-algebras admits a Jordan analogue in the wider category of JB$^*$-algebras. 
More concretely, a real (resp., complex) \emph{Jordan algebra} is an algebra $\mathcal{J}$ over the real (resp., complex) field whose product is commutative (but, in general, non-associative) and satisfies the \emph{Jordan identity}:\begin{equation}\label{eq Jordan idenity algebra} (a \circ b)\circ a^2 = a\circ (b \circ a^2). \end{equation} A JB$^*$-algebra is a complex Jordan algebra $\mathcal{J}$ which is also a Banach space and admits an isometric algebra involution $^*$ satisfying $\| a\circ b\| \leq \|a\| \ \|b\|,$ and $$\|\J a{a^*}a \|= \|a\|^3,$$ for all $a,b\in \mathcal{J}$, where $\J a{a^*}a =2 (a\circ a^*) \circ a - a^2 \circ a^*$. Every C$^*$-algebra is a JB$^*$-algebra with respect to its natural norm and involution and the Jordan product given by $a\circ b = \frac12( a b +b a)$. The self-adjoint part $\mathcal{J}_{sa}$ of a JB$^*$-algebra $\mathcal{J}$ is a real Jordan Banach algebra which satisfies $$\|a\|^2 = \|a^2\| \hbox{ and } \|a^2\|\leq \|a^2+b^2\|,$$ for every $a,b\in \mathcal{J}_{sa}.$ These axioms provide the precise definition of JB-algebras. A \emph{JBW$^*$-algebra} (resp., a \emph{JBW-algebra}) is a JB$^*$-algebra (resp., a JB-algebra) which is also a dual Banach space. The bidual of every JB$^*$-algebra is a JBW$^*$-algebra with a Jordan product and involution extending the original ones. The reader is referred to the monograph \cite{Hanche} for the basic background on JB$^*$- and JB-algebras. Let $\mathcal{B}$ be a JB$^*$-subalgebra of a JB$^*$-algebra $\mathcal{J}$. 
According to the notation in \cite{AlBreExVill09,BurFerGarPe2014,BurFerPe2014} a linear mapping $G: \mathcal{B}\to \mathcal{J}$ will be called a \emph{generalized Jordan derivation} if there exists $\xi\in \mathcal{J}^{**}$ satisfying \begin{equation}\label{eq generalized Jordan derivation} G (a\circ b) = G(a)\circ b + a\circ G(b) - U_{a,b} (\xi ), \end{equation} for all $a,b$ in $\mathcal{B}$, where $U_{a,b} (x) := (a\circ x) \circ b + (b\circ x)\circ a - (a\circ b) \circ x$ ($x\in \mathcal{J}$). We shall write $U_a$ instead of $U_{a,a}$. If $\mathcal{B}$ is unital, every generalized Jordan derivation $G : \mathcal{B}\to \mathcal{J}$ with $G(1) =0$ is a Jordan derivation. Jordan derivations are generalized Jordan derivations. \section{Triple derivations at a fixed point of a C$^*$-algebra} In this section we shall study linear maps between C$^*$-algebras which are triple derivations at a fixed point of the domain. There are two remarkable elements that every study should consider in a first stage: we refer to zero and the unit element of a C$^*$-algebra. We shall show later that linear maps between C$^*$-algebras which are triple derivations at zero or at the unit element are intrinsically related to generalized derivations. Let $T: A \to X$ be a bounded linear operator from a C$^*$-algebra into an essential Banach $A$-bimodule. It is proved in \cite[Theorem 4.5]{AlBreExVill09} and \cite[Proposition 4.3]{BurFerPe2014} (see also \cite[Theorem 2.11]{AyuKudPe2014}) that the following statements are equivalent:\begin{enumerate}[$(a)$] \item $T$ is a generalized derivation; \item $a T(b) c = 0$, whenever $ab=bc=0$ in $A$; \item $a T(b) c = 0$, whenever $ab=bc=0$ in $A_{sa}$. \end{enumerate} When in the above statement $X$ coincides with $A$ or with any C$^*$-algebra containing $A$ as a C$^*$-subalgebra with the same unit, the above equivalent statements admit another reformulation which is more interesting for our purposes. 
We shall isolate here an equivalence which was germinally contained in the proof of \cite[Theorem 2.8]{EssaPeRa16}. More concretely, each statement in $(a)$-$(c)$ is equivalent to any of the following:\label{eq reformulations of g der} \begin{enumerate}[$(d)$] \item[$(d)$] $a T(b) c + c T(b) a = 0$, whenever $ab=bc=0$ in $A_{sa}$; \item[$(e)$] $a T(b) a = 0$, whenever $ab=0$ in $A_{sa}$. \item[$(f)$] For each $b$ in $A_{sa}$ we have $(1-r(b)) T(b) (1-r(b))=0$ in $B^{**}$, where $r(b)$ denotes the range projection of $b$ in $A^{**}$. \end{enumerate} For the proof we observe that $(c)\Rightarrow (d)$ and $(d)\Rightarrow (e)$ are clear. We shall prove $(e)\Rightarrow (f)$. Suppose $a T(b) a = 0$, whenever $ab=0$ in $A_{sa}$. We shall focus on the commutative C$^*$-subalgebra $A_b$ generated by $b$. It is known from the Gelfand theory that $A_b \cong C_0(\sigma(b))$, where $\sigma (b)\subseteq [-\|b\|,\|b\|]$ denotes the spectrum of $b$ and $C_0(\sigma(b))$ the C$^*$-algebra of all continuous functions on $\sigma(b)$ vanishing at zero. For each natural $n$, let $p_n$ denote the projection in $A_b^{**}\subseteq A^{**}$ corresponding to the characteristic function of the set $([-\|b\|, -\frac1n]\cup [\frac1n,\|b\|])\cap \sigma(b).$ Let us also pick a function $b_n\in A_b$ such that $b_n p_n = p_n b_n = b_n=b_n^*$ and $\|b_n-b\|< \frac1n$. Clearly, $(p_n)$ converges to $r(b)$, the range projection of $b$ in the strong$^*$-topology of $A^{**}$ (see \cite[\S 1.8]{S}). Let us take $z \in ((1-p_n) A^{**} (1-p_n))\cap A_{sa}$. Since $b_n z = 0$, it follows from the hypothesis that $z T(b_n) z =0$. On the other hand, it is known that $p_n$ is a closed projection in $A_b^{**}\subseteq A^{**}$ in the sense employed in \cite[Definition III.6.19]{Tak}. It is known that, under these circumstances, $((1-p_n) A^{**} (1-p_n))\cap A_{sa}$ is weak$^*$-dense in $(1-p_n) A^{**} (1-p_n)$ (compare \cite[Proposition 3.11.9]{Ped}). 
By Kaplansky density theorem \cite[Theorem 1.9.1]{S}, we can find a bounded net $(z_\lambda)$ in $((1-p_n) A^{**} (1-p_n))\cap A_{sa}$ converging to $1-p_n$ in the strong$^*$-topology of $A^{**}$. We have seen above that $z_\lambda T(b_n) z_\lambda =0 $ for all $\lambda$. Since the product of $A$ is jointly strong$^*$-continuous (cf. \cite[Proposition 1.8.12]{S}), we deduce that $(1-p_n) T(b_n) (1-p_n) =0 $ for all natural $n$. Since $(1-p_n)\to 1-r(b)$ in the strong$^*$-topology and $T(b_n) \to T(b)$ in norm, we have $(1-r(b)) T(b) (1-r(b))=0$ in $A^{**}$. $(f)\Rightarrow (c)$ We take $a,b,c\in A_{sa}$ with $a b = bc=0$. We can easily see that $a = a (1-r(b))$ and $c = (1-r(b)) c$. Therefore, by assumptions, $a T(b) c = a (1-r(b)) T(b) (1-r(b)) c = 0$, which finishes the proof. \subsection{Triple derivations at the unit element of a C$^*$-algebra}\ Along the rest of this subsection, the symbol $A$ will denote a C$^*$-subalgebra of unital C$^*$-algebra $B$, and we shall assume that $A$ contains the unit of $B$. Continuous linear maps $T: A\to B$ which are derivations at 1 are derivations. This problem has been already studied in the literature, at least for continuous linear maps (see \cite[Theorem 2.1 or Corollary 2.3]{Lu2009}). Actually the next result follows from the just quoted reference and \cite[Theorem 6.3]{John96}. \begin{proposition}\label{p cont derivation at 1}{\rm(\cite[Theorem 2.1 or Corollary 2.3]{Lu2009}, \cite[Theorem 6.3]{John96})} Let $A$ be a unital C$^*$-algebra, and $X$ be a unital Banach $A$-bimodule. Suppose $T: A\to B$ is a continuous linear map which is a derivation at the unit element. Then $T$ is a derivation.$ \Box$ \end{proposition} A common property of triple derivations and local triple derivations on C$^*$-algebras is that they map the unit of the domain C$^*$-algebra into a skew symmetric element (cf. \cite[proof of Lemma 1]{HoMarPeRu}, \cite[Lemma 3.4]{HoPeRu} or \cite[Lemma 2.1]{KuOikPeRu14}). 
This good behavior is also true for linear maps which are derivations at the unit element. \begin{lemma}\label{lemma33} Let $T:A\to B$ be a triple derivation at the unit of $A.$ Then the following statements hold: \begin{enumerate}[$(a)$] \item $T(1)^*=-T(1);$ \item The identity $T(p)=T(p)p+pT(p)-pT(1)p,$ holds for every projection $p$ in $A.$ \end{enumerate} \end{lemma} \begin{proof}$(a)$ Since $1=\{1,1,1\},$ by assumptions, we have $$T(1)=T(\{1,1,1\})= 2\{T(1),1,1\}+\{1,T(1),1\}=2T(1)+T(1)^*,$$ which proves the statement. $(b)$ Let $p\in A$ be a projection. The identity $\{(1-2p),1,(1-2p)\}=1$ and the hypothesis prove that $$T(1)=T(\{(1-2p),1,(1-2p)\})$$ $$ =2\{T(1-2p),1,(1-2p)\}+\{(1-2p),T(1),(1-2p)\}$$ $$=(T(1)-2T(p))(1-2p)+(1-2p)(T(1)-2T(p))+(1-2p)T(1)^*(1-2p)$$ $$=2T(1)-4T(p)+4T(p)p+4pT(p)-2T(1)p$$ $$-2pT(1)-2T(1)^*p-2pT(1)^*+4pT(1)^*p+T(1)^*$$ $$=\hbox{(by $(1)$)} = T(1)- 4T(p) + 4 T(p) p + 4 p T(p) -4 p T(1) p.$$ This implies that $T(p)=T(p)p+pT(p)-pT(1)p.$ \end{proof} There exist C$^*$-algebras containing no non-zero projections. For this reason, we need to deal with unitaries. \begin{theorem}\label{t triple derivation at 1 are generalized derivations} Let $T:A\to B$ be a continuous linear map which is a triple derivation at the unit of $A.$ Then $T$ is a generalized derivation. 
\end{theorem} \begin{proof} Let us take $a\in A_{sa}.$ Since, for each $t\in {\mathbb{R}},$ $e^{it a}$ is a unitary element in $A$ and $1=\{e^{i t a},\;1,e^{-i t a}\},$ we deduce that $$T(1)= \{T(e^{i t a}),\;1,e^{-i t a}\}+\{e^{i t a},\;T(1),e^{-i t a}\}+\{e^{i t a},\;1,T(e^{-i t a})\}.$$ Taking the first derivative in $t$ we get $$0=\{T(ae^{i t a}),\;1,e^{-i t a}\}-\{T(e^{i t a}),\;1,ae^{-i t a}\}+\{ae^{i t a},\;T(1),e^{-i t a}\}$$ $$-\{e^{i t a},\;T(1),ae^{-i t a}\}+ \{ae^{i t a},\;1,T(e^{-i t a})\}-\{e^{i t a},\;1,T(ae^{-i t a})\}$$ for every $t\in {\mathbb{R}}.$ Taking a new derivative at $t=0$ in the last equality, we get $$0=2 \{T(a^2 ),\;1,1\} - 4 \{T(a),\;1,a \} + 2 \{T(1),\;1,a^2\} + 2 \{a^2 ,\;T(1),1 \}$$ $$ - 2 \{a ,\;T(1), a\},$$ or equivalently, $$2T(a^2)=2T(a)a+2aT(a)+2aT(1)^*a-T(1)a^2-a^2T(1)-T(1)^*a^2-a^2T(1)^*.$$ Lemma \ref{lemma33}$(a)$ assures that $T(1)^*=-T(1),$ which implies that \begin{equation}\label{eq 1 theorem 1} T(a^2)=T(a)a+aT(a)-aT(1)a, \end{equation} for every $a$ in $A_{sa}.$ Finally, let us take $a,b,c\in A_{sa}$ with $a b=0= bc$. If we write $b= b^+ - b^-$ with $b^+ b^- = 0$ and $b^{\sigma} \geq 0$ for all $\sigma\in \{ \pm \}$. Find $d^{\sigma} \geq 0$ in $A$ such that $(d^{\sigma})^2 = b^{\sigma}$ ($\sigma=\pm$). It is not hard to check (for example, by applying the orthogonality of the corresponding range projections) that $a d^{\sigma} = d^{\sigma} c=0$ for $\sigma = \pm$. Now applying \eqref{eq 1 theorem 1} we get $$ a T(b) c = a T(b^+) c - a T(b^-) c $$ $$= a( T(d^+) d^+ +d^+ T(d^+) )c - a( T(d^-) d^- + d^- T(d^-) )c =0.$$ We deduce from \cite[Theorem 2.11]{AyuKudPe2014} that $T$ is a generalized derivation. \end{proof} There exists generalized derivations which are not triple derivations at 1. For example, let $a$ be a non-zero symmetric element in $A$ and define $T(x)= a x$ ($\forall x\in A$). 
Then $T( xy ) = a x y = T(x) y + x T(y) - x T(1) y = a x y +x a y - x a y,$ for all $x,y\in A$, which assures that $T$ is a generalized derivation. However, $T(1) = a \in A_{sa}\backslash \{0\}$ together with Lemma \ref{lemma33} assure that $T$ is not a triple derivation at $1$. An appropriate change in the arguments given in the above theorem provide additional information when $T(1)=0$. \begin{proposition}\label{symmetric} Let $T:A\to A$ be a continuous linear map which is a triple derivation at 1 with $T(1)=0.$ Then $T$ is a $^*$-derivation and a triple derivation. \end{proposition} \begin{proof} As in the above proof, let us take $a\in A_{sa}.$ Since, for each $t\in {\mathbb{R}},$ $e^{it a}$ is a unitary element in $A$ and $1=\{e^{i t a},e^{i t a}, 1\},$ we deduce that $$0=T(1)= \{T(e^{i t a}),e^{i t a}, 1\}+\{e^{i t a}, T(e^{i t a}), 1\}+\{e^{i t a}, e^{i t a}, T(1) \}, $$ that is, $$ 0= T(e^{i t a})\circ e^{-i t a} + e^{i t a}\circ T(e^{i t a})^*.$$ Taking derivatives at $t=0$ we get $$0= T(a) - T(1) \circ a + a \circ T(1)^* - T(a)^*,$$ which proves that $T(a) = T(a)^*$ for all $a\in A_{sa}$, and thus $T$ is a symmetric map. Finally, by Theorem \ref{t triple derivation at 1 are generalized derivations}, $T$ is a generalized derivation. Furthermore, since $T(1)=0$ and $T$ is a symmetric operator, we deduce that $T$ is a $^*$-derivation and a triple derivation as well. \end{proof} \begin{corollary}\label{c derivable at 1 are triple derivations} Let $T:A\to A$ be a continuous linear map on a unital C$^*$-algebra which is a triple derivation at 1. Then $T$ is a triple derivation. \end{corollary} \begin{proof} Since, by Lemma \ref{lemma33}, $T(1)^* =-T(1),$ it is known that the mapping $\delta(T(1),1):A \to B,$ $\delta(T(1),1) (x) = \{T(1),1,x\}- \{1,T(1),x\}$ is a triple derivation (compare \cite[Proof of Lemma 1]{HoMarPeRu}). 
Since the linear combination of linear maps which are triple derivations at $1$ is a triple derivation at $1$, the mapping $\tilde{T}=T-\frac{1}{2}\delta(T(1),1)$ is a triple derivation at 1, and $\tilde{T} (1) = 0$. Applying Proposition \ref{symmetric}, we derive that $\tilde{T}$ is a $^*$-derivation. Therefore $T= \tilde{T} +\frac{1}{2}\delta(T(1),1)$ is a triple derivation. \end{proof} \begin{problem} If $T:A\to B$ is a triple derivation at the unit of a unital C$^*$-algebra $A$, is $T$ continuous? \end{problem} \subsection{Triple derivations at zero}\ We begin this subsection exploring the basic properties of linear maps which are derivations at zero. \begin{lemma}\label{lemma1.2 associative der} Suppose $A$ is a C$^*$-subalgebra of a C$^*$-algebra $B$, and let $T:A\to B$ be a linear map which is a derivation at zero. Then $$a T(b) c=0,\quad \forall \;a,\;b,\;c\in A \text{ with } a b= b c=0.$$ \end{lemma} \begin{proof} Suppose $a,b,c\in A$ satisfy the hypothesis of the lemma. Since $T$ is a derivation at zero we have $aT(b) c = (T(ab)-T(a) b) ) c =0.$ \end{proof} Let us observe that under the hypothesis of the above lemma, we are not in a position to apply \cite[Theorem 4.5]{AlBreExVill09} and \cite[Proposition 4.3]{BurFerPe2014} (see also \cite[Theorem 2.11]{AyuKudPe2014}) and the reformulations we have reviewed in page \pageref{eq reformulations of g der} because $T$ is not assumed to be continuous. We shall see later that continuity can be relaxed when the domain is a von Neumann algebra. Let $a,\;b\in A,$ we recall that $a$ and $b$ are orthogonal (written $a\perp b$) if and only if $ab^*=b^*a=0.$ \begin{lemma}\label{lemma1.2} Suppose $A$ is a C$^*$-subalgebra of a C$^*$-algebra $B$, and let $T:A\to B$ be a linear map which is a triple derivation at zero. 
Then $$\{a,T(b),c\}=0,\quad \forall \;a,\;b,\;c\in A \text{ with } a\perp b \perp c.$$ \end{lemma} \begin{proof} Let us take $a,\;b,\;c\in A,$ satisfying $a\perp b \perp c.$ Since $\{a,b,c\}=0,$ it follows from the hypothesis that $$ 0=\{T(a),b,c\}+\{a,T(b),c\}+\{a,b,T(c)\},$$ which proves the statement because $\{T(a),b,c\}= \{a,b,T(c)\}=0$. \end{proof} We can now apply the reformulations of being a generalized derivation proved in page \pageref{eq reformulations of g der}. Let us recall that as observed by J. Zhu, Ch. Xiong, and P. Li in \cite{ZhuXiLi} linear maps which are derivations at zero need not be derivations. We shall see next that continuous linear maps which are derivations at zero are always generalized derivations. \begin{theorem}\label{t continuous triple der at zero are g der} Let $A$ be a C$^*$-subalgebra of a unital C$^*$-algebra $B$. Let $T:A\to B$ be a bounded linear map. If $T$ is a derivation at zero or a triple derivation at zero, then $T$ is a generalized derivation. \end{theorem} \begin{proof} If $T$ is a triple derivation at zero, by Lemma \ref{lemma1.2}, given $a,b,c\in A_{sa}$ with $a b = b c =0$ we have $$0=2 \{a,T(b),c\}= a T(b)^* c + c T(b)^* a ,$$ or equivalently, $$0= a T(b) c + c T(b) a .$$ Lemma \ref{lemma1.2 associative der} assures that a similar conclusion holds when $T$ is a derivation at zero. It follows from the equivalence $(d)\Leftrightarrow (a)$ in page \pageref{eq reformulations of g der} that $T$ is a generalized derivation. \end{proof} We observe that Theorem \ref{t continuous triple der at zero are g der} above extends \cite[Theorem 4]{JingLuLi2002} to the setting of unital C$^*$-algebras. \begin{corollary}\label{c bl triple derivable at zero with T1=0} Suppose $A$ is a unital C$^*$-algebra. Let $T:A\to A$ be a bounded linear map which is a triple derivation at zero with $T(1)=0.$ Then $T$ is a $^*$-derivation, and hence a triple derivation. 
\end{corollary} \begin{proof} Since $T(1)=0$, the previous Theorem \ref{t continuous triple der at zero are g der} assures that $T$ is a derivation. We shall next show that $T$ is a symmetric mapping. It is well known that the bitransposed $T^{**}: A^{**}\to B^{**}$ is a derivation too (see for example \cite[Lemma 4.1.4]{S}). To avoid confusion with the natural involution on $A$, we shall denote $T^{**}$ by $T$. Fix $b\in A_{sa}$ with range projection $r(b) \in A^{**}$. Applying the same arguments given in the proof of $(e)\Rightarrow (f)$ in page \pageref{eq reformulations of g der}, we can find sequences $(p_n)\subseteq A^{**}$ and $(b_n)\in A_b \cong C_0(\sigma(b))$ such that $\|b_n-b\|\to 0$, $p_n$ is a closed projection in $A^{**}$ for every $n$, $b_n p_n = b_n$, and for each natural $n$, there exists a bounded net $(z_\lambda)$ in $((1-p_n) A^{**} (1-p_n))\cap A_{sa}$ converging to $1-p_n$ in the strong$^*$-topology (and hence in the weak$^*$-topology) of $A^{**}$. By hypothesis $$0 = \{T(z_\lambda), b_n, 1\} + \{ z_{\lambda}, T(b_n),1\}, \hbox{ for all } \lambda.$$ Taking weak$^*$-limits in the above equality we get from the weak$^*$-continuity of $T\equiv T^{**}$ that $$0 = \{T (1-p_n), b_n, 1\} + \{ 1-p_n, T(b_n),1\}, \hbox{ for all } n,$$ which implies, via norm continuity, that $$0 = \{T (1-r(b)), b, 1\} + \{ 1-r(b), T(b),1\}= T (1-r(b)) \circ b + (1-r(b)) \circ T(b)^*,$$ or equivalently $$ T (r(b)) \circ b = (1-r(b)) \circ T(b)^*. $$ Since the range projection of every power $b^m$ with $m\in \mathbb{N}$ coincides with the $r(b)$ we can apply the above argument to deduce that $$T (r(b)) \circ b^m = (1-r(b)) \circ T(b^m)^*, \hbox{ for all natural } m,$$ and by linearity and norm continuity of the product we have \begin{equation}\label{eq 2 Jun2} T (r(b)) \circ z = (1-r(b)) \circ T(z)^*, \hbox{ for all } z = z^*\in A_b. 
\end{equation} A standard argument involving weak$^*$-continuity of $T^{**}\equiv T$ gives \begin{equation}\label{eq 1 June2} T (r(b)) \circ r(b) = (1-r(b)) \circ T(r(b))^*. \end{equation} Combining that $T^{**} \equiv T$ is a derivation with \eqref{eq 1 June2} we get $$T(r(b)) = T (r(b)) r(b) + r(b) T (r(b)) = (1-r(b)) T(r(b))^* + T(r(b))^* (1-r(b)). $$ By \cite[Proposition 3.7]{EssaPeRa16b}, we know that $r(b) T(r(b)) r(b)=0 = r(b) T(r(b))^* r(b)$, and by the equivalence $(f)\Leftrightarrow (a)$ in page \pageref{eq reformulations of g der} we have $$(1-r(b)) T(r(b))^* (1-r(b)) =0= (1-r(b)) T(r(b)) (1-r(b)),$$ and thus $$r(b) T(r(b)) = r(b) T(r(b))^* (1-r(b))$$ and $$(1-r(b)) T(r(b)) = (1-r(b)) T(r(b))^* .$$ Adding the last two identities we derive at $$T(r(b))^*= T(r(b)).$$ We have proved that $T(r)^* =T(r)$ for every range projection $r$ of a hermitian element in $A$ We return to $A_b \cong C_0(\sigma(b))$. We observe that every projection of the form $p=\chi_{_{([-\|b\|, -\delta)\cup (\delta,\|b\|])\cap \sigma(b)}}\in C_0(\sigma(b))^{**}$, with $0<\delta<\|b\|$, is the range projection of an function in $C_0(\sigma(b))$. Furthermore, every projection of the form $q=\chi_{_{([-\theta, -\delta)\cup (\delta,\theta])\cap \sigma(b)}}\in C_0(\sigma(b))^{**}$ with $0<\delta<\theta<\|b\|$ can be written as the difference of two projections of the previous type. We have shown in the previous paragraph that $T(p)^*=T(p)$ for every projection $p$ of the first type, and consequently for every projection of the second type. Since $b$ can be approximated in norm by finite linear combinations of mutually orthogonal projections $q_j$ of the second type, and $T$ is continuous, we conclude that $T(b)^* = T(b)$, which finishes the proof. \end{proof} The conclusion after Corollary \ref{c bl triple derivable at zero with T1=0} is now clear. \begin{corollary}\label{c T1 skew} Let $T: A\to A$ be a bounded linear map on a unital C$^*$-algebra $A$. 
Suppose $T$ is triple derivable at zero and $T(1)^*=-T(1)$. Then $T$ is a triple derivation. \end{corollary} \begin{proof} As in the proof of Corollary \ref{c derivable at 1 are triple derivations}. Since $T(1)^* =-T(1),$ the mapping $\delta(T(1),1):A \to A,$ $\delta(T(1),1) (x) = \{T(1),1,x\}- \{1,T(1),x\}$ is a triple derivation. Since the the linear combination of linear maps which are triple derivations at zero is a triple derivation at zero, the mapping $\tilde{T}=T-\frac{1}{2}\delta(T(1),1)$ is a triple derivation at zero, and $\tilde{T} (1) = 0$. Applying Corollary \ref{c bl triple derivable at zero with T1=0}, we derive that $\tilde{T}$ is a $^*$-derivation, and hence a triple derivation. Therefore $T= \tilde{T} +\frac{1}{2}\delta(T(1),1)$ is a triple derivation. \end{proof} The above results, are somehow optimal, in the sense that there exists bounded linear maps which are triple derivations at zero but they are not triple derivations. For example, let $Z$ be the center of a unital C$^*$-algebra $B$, and let us pick $b_0\in B$ with $0\neq b_0 \neq - b_0^*$. We define a bounded linear mapping $T: Z\to B$ by $T(x) = b_0 x$. Clearly, $T(1) = b_0\neq - b_0^*$ implies that $T$ is not a triple derivation (cf. Lemma \ref{lemma33}\cite[proof of Lemma 1]{HoMarPeRu}, \cite[Lemma 3.4]{HoPeRu} or \cite[Lemma 2.1]{KuOikPeRu14}). Suppose $\{ a,b,c \} =0$ in $Z$. Since $Z$ is the center of $B$, $b_0$ commutes with every element in $B$, and then we have $$\{T(a),b,c\} +\{a,T(b),c\} + \{a,b,T(c)\}= \{b_0 a,b,c\} +\{a,b_0 b,c\} + \{a,b,b_0 c\}$$ $$ =2 b_0 \{a,b,c\} + b_0^* \{a,b,c\} =0= T(0) , $$ witnessing that $T$ is a triple derivation at zero. With the help of \cite[Theorem 2.8 and Proposition 2.4]{EssaPeRa16} we can now throw some new light about the automatic continuity of generalized derivations and linear maps which are derivable at zero on a von Neumann algebra. 
\begin{theorem}\label{t automatic cont gen der and triple der at zero on von Neumann} Let $T: M\to M$ be a linear mapping on a von Neumann algebra. Suppose that for each $a,b,c$ in a commutative von Neumann subalgebra $\mathcal{B}$ with $a b = bc=0$ we have $a T(b) c= 0$. Then $T$ is continuous. \end{theorem} \begin{proof} We can mimic the ideas in the proof of \cite[Theorem 2.8]{EssaPeRa16}. Assume that $T$ satisfies hypothesis $(a)$ (respectively $(b)$). Let $\mathcal{B}$ be a commutative von Neumann subalgebra of $M$ containing the unit of $M$. Fix $a,b\in \mathcal{B}$ with $a b=0$, and define a linear mapping $L_{a,b} : \mathcal{B}\to M$, by $L_{a,b} (x) = aT(bx)$. Let $c,d$ be elements in $\mathcal{B}$ with $cd=0$, then, by hypothesis $L_{a,b} (c) d = aT(bc)d =0$ (because $a(bc) d=0$). This proves that $L_{a,b}$ is a linear left-annihilator preserving. Proposition 2.4 in \cite{EssaPeRa16} assures that $L_{a,b}$ is continuous and a left-multiplier, that is, $L_{a,b} (x) = L_{a,b} (1) x,$ for every $x\in \mathcal{B}$. This property assures that, for each $x\in \mathcal{B}$ the mapping $R_x : \mathcal{B}\to M$, $R_{x} (z) = T(xz)-T(z)x$ satisfies $a R_{x} (b) = 0,$ for every $ab=0$ in $\mathcal{B}$. Consequently, $R_{x}$ is a linear right-annihilator preserving, and Proposition 2.4 in \cite{EssaPeRa16} proves that $R_{x}$ is a continuous right multiplier. We have shown that $$T(y x) -T(y) x = R_x (y) = y R_x (1) = y T(x),$$ for every $x,y\in \mathcal{B}$, or equivalently, $T|_{\mathcal{B}}: \mathcal{B}\to M$ is a derivation. Theorem 2 in \cite{Ringrose72} assures that $T|_{\mathcal{B}}$ is a bounded linear map, and this holds for every abelian von Neumann subalgebra $\mathcal{B}$ of $M$ containing the unit of $M$. The continuity of $T$ follows as a consequence of \cite[Theorem 2.5]{Ringrose74}. 
\end{proof} Every generalized derivation on a von Neumann algebras satisfies the hypothesis of the above Theorem \ref{t automatic cont gen der and triple der at zero on von Neumann}. Surprisingly, the linear maps on a von Neumann algebra which are triple derivable at zero also satisfy the same hypothesis. \begin{corollary}\label{c automatic cont gen der and triple der at zero on von Neumann} Every generalized derivation on a von Neumann algebra is continuous. Every linear map on a von Neumann algebra which is a derivation {\rm(}respectively, a triple derivation{\rm)} at zero is continuous. \end{corollary} \begin{proof} The first statement is clear. The statement concerning (associative) derivations at zero is a consequence of Lemma \ref{lemma1.2 associative der}. In order to prove the remaining statement, we assume that $T:M\to M$ is a linear map on a von Neumann algebra which is triple derivable at zero. Let $\mathcal{B}$ be a commutative von Neumann subalgebra of $M$ containing the unit, and let us take $a,b,c\in \mathcal{B}$ with $a b= bc=0$. By the commutativity of $\mathcal{B}$ we have $ b\perp a, a^*$ and $b\perp c,c^*$. By Lemma \ref{lemma1.2} we have $0=\{ a^*, T(b), c^*\} = \frac12(a^* T(b)^* c^* + c^* T(b)^* a^* ),$ and then $a T(b) c + c T(b) a=0$. Since $\mathcal{B}$ is a commutative von Neumann algebra, the every element in $\mathcal{B}$ is normal and its left and right range projections coincide and belong to $\mathcal{B}$. Let $r(b)\in \mathcal{B}$ be the range projection of $b$. Since $0=b (1-r(b)) = (1-r(b))b$, we deduce from what is proved in the first paragraph that $(1-r(b)) T(b) (1-r(b)) =0$. Finally, since $a = a(1-r(b)) $ and $c= c(1-r(b))$, we have $a T(b) c = a (1-r(b)) T(b) (1-r(b)) c=0$. This shows that $T$ satisfies the hypothesis in Theorem \ref{t automatic cont gen der and triple der at zero on von Neumann}, and thus $T$ is continuous. 
\end{proof} Combining Corollary \ref{c automatic cont gen der and triple der at zero on von Neumann} with Theorem \ref{t continuous triple der at zero are g der} and Corollaries \ref{c bl triple derivable at zero with T1=0} and \ref{c T1 skew} we establish the following. \begin{corollary}\label{c final for vN algebras} Let $M$ be a von Neumann algebra. Suppose $T: M\to M$ is a linear map which is a triple derivation at zero. Then the following statements hold: \begin{enumerate}[$(a)$]\item $T$ is a continuous generalized derivation; \item If $T(1)=0$ then $T$ is a {\rm(}continuous{\rm)} $^*$-derivation and a triple derivation; \item If $T(1)^*=-T(1)$ then $T$ is a {\rm(}continuous{\rm)} triple derivation. $ \Box$ \end{enumerate} \end{corollary} We can also apply the above results to relax some of the hypothesis in previous papers. We begin with a version of the results reviewed in page \pageref{eq reformulations of g der} for non-necessarily continuous linear maps. \begin{corollary}\label{c non continuous generalized derivation} Let $T: M\to M$ be a linear map on a von Neumann algebra. The the following statements are equivalent: \begin{enumerate}[$(a)$] \item $T$ is a generalized derivation; \item $a T(b) c = 0$, whenever $ab=bc=0$ in $M$; \item $a T(b) c = 0$, whenever $ab=bc=0$ in $M_{sa}$; \item[$(d)$] $a T(b) c + c T(b) a = 0$, whenever $ab=bc=0$ in $M_{sa}$; \item[$(e)$] $a T(b) a = 0$, whenever $ab=0$ in $M_{sa}$. \item[$(f)$] For each $b$ in $M_{sa}$ we have $(1-r(b)) T(b) (1-r(b))=0$, where $r(b)$ denotes the range projection of $b$ in $M$. \end{enumerate} \end{corollary} \begin{proof} The implication $(a)\Rightarrow (b)$ follows from Lemma \ref{lemma1.2 associative der}, while the implications $(b)\Rightarrow (c)\Rightarrow (d) \Rightarrow (e)$ are clear. If we show that any linear mapping $T:M\to M$ satisfying $(e)$ or $(f)$ is continuous then the remaining implications will follow from the arguments given in page \pageref{eq reformulations of g der}. 
If $T$ satisfies $(e)$ then $T$ also satisfies $(f)$ because $r(b)\in M_{sa}$ for every $b\in M_{sa}$ and $(1-r(b)) b=0$. Let $\mathcal{B}$ be a commutative von Neumann subalgebra of $M$, and let us take $x,y,z\in \mathcal{B}$ with $x y = y z=0$. We can write $x = x_1 + i x_2,$ $y = y_1 + i y_2$, and $z = z_1 + i z_2$ with $x_j,y_j, z_j\in M_{sa}$. Suppose $T$ satisfies $(e)$. Since $\mathcal{B}$ is commutative $x_j y_k = y_k x_j=0$ and $z_j y_k = y_k z_j=0$ for all $j,k=1,2$. Clearly $x_j = x_j (1-r(y_k))$ and $y_j = y_j (1-r(y_k))$, for all $j,k=1,2$, which assures that $x = x (1-r(y_k))$ and $y = y (1-r(y_k))$ for all $k=1,2$. Therefore, by assumptions, we obtain $$ x T(y) z= x T(y_1) z + i x T(y_2) z = x (1-r(y_1)) T(y_1) (1-r(y_1)) z $$ $$+ i x (1-r(y_2)) T(y_2) (1-r(y_2)) z =0,$$ which finishes the proof. \end{proof} In \cite[Theorem 4]{JingLuLi2002} W. Jing, S.J. Lu, and P.T. Li prove that a continuous linear map $\delta$ on a von Neumann algebra is a generalized derivation whenever it is a derivation at zero. If additionally $\delta (1)=0$, then $\delta$ is a derivation. Corollary \ref{c automatic cont gen der and triple der at zero on von Neumann} assures that the hypothesis concerning the continuity of $\delta$ can be relaxed. \begin{corollary}\label{c JingLuLi without continuity} Let $\delta$ be a linear map on a von Neumann algebra. Suppose $\delta$ is a derivation at zero. Then $\delta$ is a (continuous) generalized derivation. If additionally $\delta (1)=0$, then $\delta$ is a derivation.$ \Box$ \end{corollary} \section{Triple homomorphisms at a fixed point}\label{sec: triple hom at a point} Let $A$ and $B$ be C$^*$-algebras. 
According to the notation in \cite{BurCabSanPe2016}, a linear map $T : A \to B$ is said to be a \emph{$^*$-homomorphism at $z\in A$} if $$\hbox{$a b^* =z\Rightarrow T(a) T(b)^*=T(z),$ and $c^* d =z\Rightarrow T(c)^* T(d)=T(z)$.}$$ In \cite[Theorem 2.5]{BurCabSanPe2016} it is shown that when $A$ is unital, a linear map $T : A \to B$ which is a $^*$-homomorphism at $1$ is continuous and a Jordan $^*$-homomorphism. The same conclusion hold if there exists a non-zero projection $p\in A$ such that $T$ is a $^*$-homomorphism at $p$ and at $1-p$ \cite[Corollary 2.12]{BurCabSanPe2016}. Furthermore, in the above setting, $T$ is a $^*$-homomorphism if and only if $T$ is a $^*$-homomorphism at $0$ and at $1$ \cite[Corollary 2.11]{BurCabSanPe2016}. If $A$ is assumed to be simple and infinite, then a linear map $T: A\to B$ is a $^*$-homomorphism if and only if $T$ is a $^*$-homomorphism at the unit of $A$ (cf. \cite[Theorem 2.8]{BurCabSanPe2016}). In the just quoted paper, it also studied when a continuous linear map which is a $^*$-homomorphism at a unitary element is a Jordan $^*$-homomorphism. We recall some terminology needed in forthcoming results. For each partial isometry $e$ in a C$^*$-algebra $A$ (i.e., $ee^*e =e$), we can decompose $A$ as a direct sum of the form $$A= (ee^* A e^*e) \oplus \left((1-ee^*) A e^*e \oplus ee^* A (1-e^*e)\right)\oplus (1-ee^*) A (1-e^*e).$$ The above decomposition is called the \emph{Peirce decomposition} of $A$ associated with $e$. The subsets $A_2(e) = ee^* A e^*e$, $A_1(e) = (1-ee^*) A e^*e \oplus ee^* A (1-e^*e),$ and $A_0(e) = (1-ee^*) A (1-e^*e)$ are called the \emph{Peirce subspaces} associated with $e$. \subsection{Triple homomorphisms at the unit element}\ We explore first the behavior on the projections of a linear map which is a triple homomorphism at the unit. \begin{lemma}\label{lemma31} Let $T:A\to B$ be a linear map between C$^*$-algebras, where $A$ is unital. 
Suppose $T$ is a triple homomorphism at the unit of $A.$ Then the following statements hold:\begin{enumerate}[$(a)$] \item $T(1)$ is a partial isometry; \item The identity $\{T(p),T(1),T(p)\}=\{T(p),T(1),T(1)\}$ holds for every projection $p\in A$. \end{enumerate} \end{lemma} \begin{proof} $(a)$ The identity $\{1,1,1\}=1$ and the hypothesis imply $$T(1)=T(\{1,1,1\})=\{T(1),T(1),T(1)\}=T(1)T(1)^*T(1).$$ $(b)$ Let $p\in A$ be a projection. We know that $\{(1-2p),1,(1-2p)\}=1$. Thus $$T(1)=T(\{(1-2p),1,(1-2p)\})=\{T(1-2p),T(1),T(1-2p)\}$$ $$= \{T(1),T(1),T(1)\} -4 \{T(p),T(1),T(1)\} + 4 \{T(p),T(1),T(p)\},$$ which combined with $(a)$ gives the desired statement. \end{proof} For the next result we shall assume continuity of our linear map. \begin{proposition}\label{p symmetry} Let $T:A\to B$ be a continuous linear map between C$^*$-algebras, where $A$ is unital. Suppose $T$ is a triple homomorphism at the unit of $A.$ Then the identity $$\{T(x^*),T(1),T(1)\}=\{T(1),T(x),T(1)\},$$ holds for all $x\in A$. Consequently, $T(1)$ is a partial isometry and $T(A)\subseteq B_2 (T(1))\oplus B_0(T(1))$. \end{proposition} \begin{proof} Let us take $a\in A_{sa}$ and $t\in {\mathbb{R}}.$ Since $e^{ita}$ is a unitary in $A$, we have $1=\{e^{iat},e^{iat},1\},$ and by the assumptions we get $T(1)=\{T(e^{iat}),T(e^{iat}),1)\}.$ By taking derivative in $t=0$, we obtain $$0=\{T(a),T(1),T(1)\}-\{T(1),T(a),T(1)\}.$$ Since for $x\in A,$ we can write $x=a+ib$ with $a,\; b\in A_{sa},$ it follows from the above that $$\{T(x^*),T(1),T(1)\}=\{T(a),T(1),T(1)\}-i\{T(b),T(1),T(1)\}$$ $$=\{T(1),T(a),T(1)\}-i\{T(1),T(b),T(1)\}=\{T(1),T(x),T(1)\}.$$ It follows from Lemma \ref{lemma31} that $e=T(1)$ is a partial isometry. 
For the final statement we observe that for each $a\in A_{sa}$ we have $$ee^* T(a) e^*e + \frac12 ( (1-ee^*) T(a) e^*e \oplus ee^* T(a) (1-e^*e) ) = \{T(a),T(1),T(1)\}$$ $$=\{T(1),T(a),T(1)\} = e T(a)^* e = ee^* (e T(a)^* e) e^* e, $$ which shows that $(1-ee^*) T(a) e^*e \oplus ee^* T(a) (1-e^*e) = 0,$ and hence $$T(a) = ee^* T(a) e^*e + (1-ee^*) T(a) (1-e^*e)\in B_2(e) \oplus B_0(e).$$ \end{proof} For the next result we explore new arguments with higher derivatives. \begin{proposition}\label{p zero annihilating the zero part} Let $T:A\to B$ be a continuous linear map between C$^*$-algebras, where $A$ is unital. Suppose $T$ is a triple homomorphism at the unit of $A.$ Then $T(1)$ is a partial isometry and $T(A)\subseteq B_2 (T(1))$. \end{proposition} \begin{proof} As in previous cases, we fix $a\in A_{sa}$. Since $1=\{e^{iat},e^{2 iat},e^{iat}\},$ for all $t\in {\mathbb{R}},$ by hypothesis we get $T(1)=\{T(e^{iat}),T(e^{2iat}),T(e^{iat})\}.$ By taking a first derivative in $t$, we obtain $$0=2 \{T( a e^{iat}),T(e^{2iat}),T(e^{iat})\} -2 \{T(e^{iat}),T(a e^{2iat}),T(e^{iat})\},$$ for all $t\in \mathbb{R}$. 
By taking subsequent derivatives at $t$ we get \begin{equation}\label{eq second derivative} 0=2 \{T( a^2 e^{iat}),T(e^{2iat}),T(e^{iat})\} - 8 \{T( a e^{iat}),T(a e^{2iat}),T(e^{iat})\} \end{equation} $$ + 2 \{T( a e^{iat}),T(e^{2iat}),T(a e^{iat})\}+4 \{T(e^{iat}),T(a^2 e^{2iat}),T(e^{iat})\}, $$ and $$0=2 \{T( a^3 e^{iat}),T(e^{2iat}),T(e^{iat})\} - 4 \{T( a^2 e^{iat}),T(a e^{2iat}),T(e^{iat})\}$$ $$+ 2 \{T( a^2 e^{iat}),T(e^{2iat}),T(a e^{iat})\} - 8 \{T( a^2 e^{iat}),T(a e^{2iat}),T(e^{iat})\}$$ $$ +16 \{T( a e^{iat}),T(a^2 e^{2iat}),T(e^{iat})\} - 8 \{T( a e^{iat}),T(a e^{2iat}),T(a e^{iat})\}$$ $$+ 2 \{T( a^2 e^{iat}),T(e^{2iat}),T(a e^{iat})\}-4 \{T( a e^{iat}),T(a e^{2iat}),T(a e^{iat})\}$$ $$+ 2 \{T( a e^{iat}),T(e^{2iat}),T(a^2 e^{iat})\} +4 \{T(a e^{iat}),T(a^2 e^{2iat}),T(e^{iat})\}$$ $$-8 \{T(e^{iat}),T(a^3 e^{2iat}),T(e^{iat})\} +4 \{T(e^{iat}),T(a^2 e^{2iat}),T(a e^{iat})\}.$$ By replacing $t$ with $0$ we get \begin{equation}\label{eq TaTaTa} 0=2 \{T( a^3),T(1),T(1)\} - 12 \{T( a^2 ),T(a ),T(1)\}+ 6 \{T( a^2 ),T(1),T(a )\} \end{equation} $$-8 \{T(1),T(a^3 ),T(1)\} +24 \{T( a),T(a^2 ),T(1)\} -12 \{T( a),T(a),T(a )\}.$$ Now, by Proposition \ref{p symmetry}, we write $T(a) = x_2 + x_0$, $T(a^2) =y_2+y_0$ and $T(a^3) = z_2 +z_0$, where $x_j,y_j,z_j\in B_j(T(1))$ for all $j=2,0$. It is not hard to check that $\{T( a^3),T(1),T(1)\},$ $\{T( a^2 ),T(a ),T(1)\},$ $\{T( a^2 ),T(1),T(a )\},$ $\{T(1),T(a^3 ),T(1)\}$, and $\{T( a),T(a^2 ),T(1)\}$ all lie in $B_2(T(1)),$ while\linebreak $\{T( a),T(a),T(a )\} = \{x_2,x_2,x_2\}+ \{x_0,x_0,x_0\}$ with $\{x_2,x_2,x_2\}\in B_2(T(1))$ and $\{x_0,x_0,x_0\}\in B_0(T(1))$. It follows from \eqref{eq TaTaTa} that $\{x_0,x_0,x_0\}= x_0x_0^* x_0=0$ and hence $x_0 x_0^*x_0 x_0^*=x_0=0$. We have therefore shown that $T(a)\in B_2(T(1))$ for all $a\in A_{sa}$. The desired conclusion follows from the linearity of $T$. 
\end{proof} We can now establish our main result for bounded linear maps which are triple homomorphisms at the unit element. We recall that given a partial isometry $e$ in a C$^*$-algebra $A$, the Peirce subspace $A_2(e) = ee^* A e^*e$ is a JB$^*$-algebra with Jordan product $x\circ_e y := \{ x, e, y\}= \frac12 (x e^* y + y e^* x),$ and involution $x^{\sharp_e} =\{e,x,e\} = e x^*e$. \begin{theorem}\label{theorem3131} Let $T:A\to B$ be a continuous linear map between C$^*$-algebras, where $A$ is unital. Suppose $T$ is a triple homomorphism at the unit of $A.$ Then $T$ is a triple homomorphism. Furthermore, $T(1)$ is a partial isometry and $T: A \to B_2 (T(1))$ is a Jordan $^*$-homomorphism. \end{theorem} \begin{proof} By Lemma \ref{lemma31} the element $T(1)$ is a partial isometry. Proposition \ref{p zero annihilating the zero part} proves that $T(A)\subseteq B_2 (T(1))$, and consequently, Proposition \ref{p symmetry} guarantees that $T(x^*) =\{T(x^*),T(1),T(1)\}= \{T(1), T(x), T(1)\} = T(x)^{\sharp_{T(1)}}$ for every $x\in A$. It is not hard to see from these properties that $T(a^2) =\{T( a^2 ),T(1),T(1)\} =\{T( a ),T(a ),T(1)\}=\{T( a ),T(1),T(a)\},$ for every $a\in A_{sa}$. The proof will be completed if we show that $T: A \to B_2 (T(1))$ is a Jordan $^*$-homomorphism. We shall only prove that $T$ preserves the corresponding Jordan product. Following the arguments in the proof of Proposition \ref{p zero annihilating the zero part}, and replacing $t$ with $0$ in \eqref{eq second derivative} we arrive at $$ 0=2 \{T( a^2 ),T(1),T(1)\} - 8 \{T( a ),T(a ),T(1)\} $$ $$+ 2 \{T( a ),T(1),T(a)\}+4 \{T(1),T(a^2),T(1)\}, $$ and then $$ T(a^2) = \{T( a^2 ),T(1),T(1)\} = \{T( a ),T(a ),T(1)\} $$ $$= \{T( a ),T(1),T(a )\} = T(a)\circ_{T(1)} T(a),$$ for all $a\in A_{sa}$. A standard polarization argument proves that $T$ is a Jordan $^*$-homomorphism.\end{proof} \begin{problem} Let $T:A\to B$ be a linear map between C$^*$-algebras, where $A$ is unital. 
Suppose $T$ is a triple homomorphism at the unit of $A.$ Is $T$ continuous? \end{problem} Accordingly to the structure of this note, the reader is probably interested on bounded linear maps which are triple homomorphisms at zero. It is not a big surprise that these maps are directly connected with the so-called \emph{orthogonality preserving operators} in the sense studied, for example, in \cite{Wolff94,BurFerGarMarPe08}, and subsequent papers. We recall that a linear map $T$ between C$^*$-algebras is called orthogonality preserving if the equivalence $$a\perp b \hbox{ in } A \Rightarrow T(a)\perp T(b) \hbox{ in } B.$$ It is known that elements $a,b$ in a C$^*$-algebra $A$ are orthogonal if, and only if, $\{a,a,b\}=0$ (see, for example, \cite[Lemma 1 and comments in page 221]{BurFerGarMarPe08}). The main result in \cite{BurFerGarMarPe08} establishes a complete description of those continuous linear maps between C$^*$-algebras which preserver orthogonal elements. Let $T :A \to B$ be a bounded linear map between two C$^*$-algebras, Corollary 18 in \cite{BurFerGarMarPe08} proves that $T$ is orthogonality preserving if, and only if, $T$ preserves zero-triple-products (i.e. $\{a,b,c\}=0$ in $A$ implies $\{T(a),T(b),T(c)\}=0$ in $B$), and the latter is precisely the notion of being a triple homomorphism at zero. \end{document}
\begin{document} \author{L. Bayón} \address{Departamento de Matemáticas, Universidad de Oviedo\\ Avda. Calvo Sotelo s/n, 33007 Oviedo, Spain} \author{P. Fortuny Ayuso} \address{Departamento de Matemáticas, Universidad de Oviedo\\ Avda. Calvo Sotelo s/n, 33007 Oviedo, Spain} \title{The Best-or-Worst and the Postdoc problems} \author{J.M. Grau} \address{Departamento de Matemáticas, Universidad de Oviedo\\ Avda. Calvo Sotelo s/n, 33007 Oviedo, Spain} \author{A. M. Oller-Marcén} \address{Centro Universitario de la Defensa de Zaragoza\\ Ctra. Huesca s/n, 50090 Zaragoza, Spain} \author{M.M. Ruiz} \address{Departamento de Matemáticas, Universidad de Oviedo\\ Avda. Calvo Sotelo s/n, 33007 Oviedo, Spain} \begin{abstract} We consider two variants of the secretary problem, the\emph{ Best-or-Worst} and the \emph{Postdoc} problems, which are closely related. First, we prove that both variants, in their standard form with binary payoff 1 or 0, share the same optimal stopping rule. We also consider additional cost/perquisites depending on the number of interviewed candidates. In these situations the optimal strategies are very different. Finally, we also focus on the Best-or-Worst variant with different payments depending on whether the selected candidate is the best or the worst. \end{abstract} \maketitle \keywords{Keywords: Secretary problem, Combinatorial Optimization} \subjclassname{60G40, 62L15} \section{Introduction} The \emph{secretary problem} is one of many names for a famous problem of optimal stopping theory. This problem can be stated as follows: an employer is willing to hire the best secretary out of $n$ rankable candidates. These candidates are interviewed one by one in random order. A decision about each particular candidate is to be made immediately after the interview. Once rejected, a candidate cannot be called back. During the interview, the employer can rank the candidate among all the preceding ones, but he is unaware of the quality of yet unseen candidates. 
The goal is then to determine the optimal strategy that maximizes the probability of selecting the best candidate. This problem has a very elegant solution. Dynkin \cite{48} and Lindley \cite{101} independently proved that the best strategy consists in a so-called threshold strategy. Namely, in rejecting roughly the first $n/e$ (cutoff value) interviewed candidates and then choosing the first one that is better than all the preceding ones. Following this strategy, the probability of selecting the best candidate is at least $1/e$, this being its approximate value for large values of $n$. This well-known solution was later refined by Gilbert and Mosteller \cite{gil} showing that $\left\lfloor (n-\frac{1}{2})e^{-1}+\frac{1} {2}\right\rfloor$ is a better approximation than $\lfloor n/e\rfloor$, although the difference is never greater than 1. This secretary problem has been addressed by many authors in different fields such as applied probability, statistics or decision theory. In \cite{FER}, \cite{FER2} or \cite{2009} extensive bibliographies on the topic can be found. On the other hand, different generalizations of this classical problem have been recently considered in the framework of partially ordered objects \cite{poset2,garrod,poset1} or matroids \cite{1,soto}. It is also worth mentioning the work of Bearden \cite{KK}, where the author considers a situation where the employer receives a payoff for selecting a candidate equal to the ``score'' of the candidate (in the classical problem the payoff is 1 if the candidate is really the best and 0 otherwise). In this situation, the optimal cutoff value is roughly the square root of the number of candidates. In this paper we focus on two closely related variants of the secretary problem. The so-called \emph{Best-or-Worst} and \emph{Postdoc} variants. 
In the Best-or-Worst variant, the classic secretary problem is modified so that the goal is to select either the best or the worst candidate, indifferent between the two cases. This variant can only be found on \cite{fergu} as a multicriteria problem in the perfect negative dependence case. Here we present it in greater detail. In the Postdoc variant, instead of selecting the best candidate, the goal is to select the second best candidate. This problem was proposed to Robert J. Vanderbei by Eugene Dynkin in 1980 with the following motivating story that explains the name of the problem: we are trying to hire a postdoc, since the best candidate will receive (and accept) an offer from Harvard, we are interested in hiring the second best candidate. Vanderbei himself solved the problem in 1983 using dynamic programming \cite{posdoc}. However, he never published his work because he learned that Rose had already published his own solution using different techniques \cite{rose}. Moreover, Szajowski had already solved the problem of picking the $k$-th better candidate for $2\leq k\leq 5$ \cite{aesima}. In the present paper, for these two variants, we study the standard problem (binary payoff function 1 or 0), showing that both have the same optimal cutoff rule strategy and also the problems considering payoff functions that depend on the number of performed interviews, showing that in this case they have very different optimal strategies. The paper is organized as follows: in Section 2, we present some technical results, in Section 3, we revisit the classic secretary problem and also solve two new situations with payoff functions that depend on the number of performed interviews. In Section 4 we focus on the Best-or-Worst variant, solving the problem for three different payoff functions and also presenting a variant in which the choice of the best or the worst candidate is no longer indifferent. 
In Section 5 we solve the three versions of the Postdoc variant and, finally, we compare the obtained results in Section 6. \section{Two technical results} The following result can be widely applied in different optimal stopping problems and it will be extensively used throughout the paper. For a sequence of continuous real functions $\{F_{n}\}_{n\in\mathbb{N}}$ defined on a closed interval, it determines the asymptotic behavior of the sequence $\{\mathcal{M}(n)\}_{n\in\mathbb{N}}$, where $\mathcal{M}(n)$ is the value for which the function $F_n$ reaches its maximum. \begin{prop}\label{conv} Let $\{F_{n}\}$ be a sequence of real functions with $F_n\in\mathcal{C}[0,n]$ and let $\mathcal{M}(n)$ be the value for which the function $F_n$ reaches its maximum. Assume that the sequence of functions $\{g_{n}\}_{n\in\mathbb{N}}$ given by $g_{n}(x):=F_{n}(nx)$ converges uniformly on $[0,1]$ to a function $g$ and that $\theta$ is the only global maximum of $g$ in $[0,1]$. Then, \begin{itemize} \item[i)] $\displaystyle\lim_{n} \mathcal{M}(n)/n =\theta$. \item[ii)] $\displaystyle\lim_{n} F_{n}(\mathcal{M}(n))= g(\theta)$. \item[iii)] If $\mathfrak{M}(n)\sim\mathcal{M}(n)$ then $\displaystyle\lim_{n}F_{n}(\mathfrak{M}(n))=g(\theta)$. \end{itemize} \end{prop} \begin{proof} \begin{itemize} \item[i)] Let us consider the sequence $\{\mathcal{M}(n)/n\}\subset [0,1]$ and assume that $\{\mathcal{M}(s_{n})/s_{n}\}$ is a subsequence that converges to $\alpha$. Then, $$g_{s_n}(\theta)=F_{s_n}(s_n\theta)\leq F_{s_{n}}(\mathcal{M}(s_{n}))=F_{s_{n}}\left(\frac{\mathcal{M}(s_{n})}{s_n}s_n\right)=g_{s_n}\left(\frac{\mathcal{M}(s_n)}{s_n}\right).$$ Consequently, since $g_n\to g$ uniformly on $[0,1]$, if we take limits we get $$g(\theta)=\lim_n g_{s_n}(\theta)\leq\lim_n g_{s_n}\left(\frac{\mathcal{M}(s_n)}{s_n}\right)=g(\alpha)$$ and since $\theta$ is the only global maximum of $g$, it follows that $\theta=\alpha$. 
Thus, we have proved that every convergent subsequence of $\{\mathcal{M}(n)/n\}$ converges to the same limit $\theta$. Since $\{\mathcal{M}(n)/n\}$ is defined on a compact set, this implies that $\{\mathcal{M}(n)/n\}$ itself must also converge to $\theta$. \item[ii)] It is enough to observe that $$\lim_{n} F_{n}(\mathcal{M}(n))=\lim_{n}F_{n}\left(\frac{\mathcal{M}(n)}{n} n\right)=\lim_n g_n\left(\frac{\mathcal{M}(n)}{n}\right)=g(\theta),$$ where the last equality holds because $g_n\to g$ uniformly on $[0,1]$. \item[iii)] If $\mathfrak{M}(n)\sim\mathcal{M}(n)$, then it also holds that $\displaystyle \lim_n \frac{\mathfrak{M}(n)}{n}=\theta$ and we can reason as in the previous point. \end{itemize} \end{proof} \begin{rem} The condition of uniform convergence is required to ensure, for instance, that $\displaystyle \lim_n g_{s_n}\left(\frac{\mathcal{M}(s_n)}{s_n}\right)=g(\alpha)$. In fact, it is easy to give counterexamples to Proposition \ref{conv} if convergence is not uniform. \end{rem} Observe that Proposition \ref{conv} implies that $\displaystyle \lim_n F_{n}(n \theta)=g(\theta)$. Moreover, it also implies that $\displaystyle \lim_n F_{n}(n \theta+o(n))=g(\theta)$. This means that $n\theta$ is a good estimate for $\mathcal{M}(n)$ and that, for large values of $n$, the maximum value of $F_n$ approaches $g(\theta)$. Proposition \ref{conv} admits the following two-variable version that can be proved in the same way. \begin{prop}\label{conv2} Let $\{G_{n}\}$ be a sequence of two-variable real functions with $G_{n}\in\mathcal{C}\big(\{(x,y)\in\lbrack0,n]^{2}:x\leq y\}\big)$ and let $(\mathcal{M}_{1}(n),\mathcal{M}_{2}(n))$ be a point for which $G_{n}$ reaches its maximum. Assume that the sequence $\{h_n\}_{n\in\mathbb{N}}$ given by $h_{n}(x,y):=G_{n}(nx,ny)$ converges uniformly on $T:=\{(x,y)\in\mathbb{R}^2:0\leq x\leq y\leq 1\}$ to a function $h$ and that $(\theta_1,\theta_2)$ is the only global maximum of $h$ in $T$.
Then, \begin{itemize} \item[i)] $\displaystyle\lim_{n} \mathcal{M}_{i}(n)/n =\theta_{i}$ for $i=1,2$. \item[ii)] $\displaystyle\lim_{n} G_{n}(\mathcal{M}_{1}(n),\mathcal{M}_{2}(n))= h(\theta_{1},\theta_{2}).$ \item[iii)] If $\mathfrak{M}_{i}(n)\sim\mathcal{M}_{i}(n)$ for $i=1,2$, then $\displaystyle\lim_{n} G_{n}( \mathfrak{M}_{1}(n),\mathfrak{M}_{2}(n))=h(\theta_{1},\theta_{2})$. \end{itemize} \end{prop} \section{A new look at the classic secretary problem} In the classical secretary problem, let $n$ be the number of candidates and let us consider a cutoff value $r\in(1,n)$. If $k\in (r,n]$ is an integer, the probability of successfully selecting the best candidate in the $k$-th interview is $\displaystyle P_{n,r}(k)=\frac{r}{n}\frac{1}{k-1}$. Thus, the probability function of succeeding in the classical secretary problem with $n$ candidates using $r$ as cutoff value, is given by $$F_{n}(r):=\sum_{k=r+1}^{n}P_{n,r}(k)=\frac{r}{n}\sum_{k=r+1}^{n}\frac{1}{k-1}.$$ The goal is now to determine the value of $r$ that maximizes this probability (i.e., to determine the optimal cutoff value) and to compute this maximum probability. This can be done using Proposition \ref{conv} in the following way. First, we extend $F_{n}$ to a real variable function by $$F_{n}(r)=\frac{r}{n}(\psi(n)-\psi(r)),$$ where $\psi$ is the so-called digamma function. Then, it can be seen with little effort that the sequence of functions $\{g_n\}$ defined by $g_{n}(x):=F_{n}(nx)$ converges uniformly on $[0,1]$ to the function $g(x):=-x\log(x)$ and the remaining is just some elementary calculus. \begin{rem} In \cite{FER} the following rather lax reasoning showing that $\mathcal{M}(n)/n$ tends to $1/e$ is given. If we let $n$ tend to infinity and write $x$ as the limit of $r/n$, then using $t$ for $j/n$ and $dt$ for $1/n$, the sum becomes a Riemann approximation to an integral $$F_{n}(r) \rightarrow x \int_{x}^{1} \frac{dt}{t}= - x \log(x).$$ Proposition 1 provides a more rigorous approach. 
\end{rem} We introduce a more general situation. Let $p:\mathbb{R}\to[0,+\infty)$ be a function (payoff function) and assume that a payoff of $p(k)$ is received if the $k$-th candidate is selected. In this setting, the expected payoff is $$E_n(r):=\sum_{k=r+1}^n p(k)P_{n,r}(k)=\frac{r}{n}\sum_{k=r+1}^{n}\frac{p(k)}{k-1}.$$ Note that in the classical situation \begin{equation}\label{pobin} p_B(k)=\begin{cases} 1, & \textrm{if the $k$-th candidate is the sought candidate};\\ 0, & \textrm{otherwise}.\end{cases} \end{equation} and the expected payoff coincides with the probability of successfully selecting the best candidate. Now, let us modify the classical situation considering that performing each interview has a constant cost of $1/n$. Clearly, in this situation the payoff function is given by \begin{equation}\label{pocost} p_C(k)=\begin{cases} 1-k/n, & \textrm{if the $k$-th candidate is the sought candidate};\\ 0, & \textrm{otherwise}.\end{cases} \end{equation} and the expected payoff is $$E^{C}_{n}(r):=\frac{r}{n}\sum_{k=r+1}^{n}\frac{1-\frac{k}{n}}{k-1}.$$ The following result provides the optimal cutoff value and the maximum expected payoff in this setting. In what follows, we denote by $W$ the main branch of the so-called Lambert-$W$ function, defined by $z=W(ze^z)$. \begin{prop} Given an integer $n>1$, let us consider the function $$E^{C}_{n}(r):=\frac{r}{n}\sum_{k=r+1}^{n}\frac{1-\frac{k}{n}}{k-1}$$ defined for every integer $1\leq r\leq n-1$ and let $\mathcal{M}(n)$ be the value for which the function $E^{C}_{n}$ reaches its maximum. Then, \begin{itemize} \item[i)] $\displaystyle\lim_{n} {\mathcal{M}(n)}/{n}=\rho:= -\frac{1}{2}W(-2 e^{-2}) =0.20318\dots$. \item[ii)] $\displaystyle \lim_{n}E^{C}_{n}(\mathcal{M}(n))=\displaystyle \lim_{n}E^{C}_{n}(\lfloor \rho n\rfloor)=\rho(1-\rho)= 0.16190\dots$.
\end{itemize} \end{prop} \begin{proof} First, we extend $E^{C}_{n}$ to a real variable function by $$E^C_{n}(r)=\frac{r\,\left( -n+r+\left( -1+n\right) \,\psi(n)-\left(-1+n\right) \,\psi(r)\right) }{n^{2}}.$$ Now, it can be seen that $g_{n}(x):=E^{C}_{n}(nx)$ converges uniformly on $[0,1]$ to $g(x):=x\left(-1+x-\log(x)\right)$. To conclude the proof it is enough to apply Proposition \ref{conv} together with some straightforward computations. \end{proof} This result means that the optimal strategy in this setting consists in rejecting roughly the first $\rho n$ interviewed candidates and then accepting the first candidate which is better than all the preceding ones. Following this strategy, the maximum expected payoff is asymptotically equal to $\rho(1-\rho)$. \begin{rem} The constant $\rho=-\frac{1}{2}W(-2e^{-2})=0.20318786\dots$ (A106533 in OEIS) appears in \cite{FER2} (erroneously approximated as 0.20388) in the context of the Best-Choice Duration Problem considering a payoff of $(n-k+1)/n$. Furthermore, as a noteworthy curiosity, it should be pointed out that this constant has appeared in a completely different context from the one addressed here (the Daley-Kendall model) and it is known as the \emph{rumour's constant} \cite{RUMOR,ru}. \end{rem} Now, let us consider that performing each interview has a perquisite of $1/n$. Clearly, in this situation the payoff function is given by \begin{equation}\label{popay} p_P(k)=\begin{cases} 1+k/n, & \textrm{if the $k$-th candidate is the sought candidate};\\ 0, & \textrm{otherwise}.\end{cases} \end{equation} and the expected payoff is $$E^{P}_{n}(r):=\frac{r}{n}\sum_{k=r+1}^{n}\frac{1+\frac{k}{n}}{k-1}.$$ The following result provides the optimal cutoff value and the maximum expected payoff in this setting.
\begin{prop} Given an integer $n>1$, let us consider the function $$E^{P}_{n}(r):=\frac{r}{n}\sum_{k=r+1}^{n}\frac{1+\frac{k}{n}}{k-1}$$ defined for every integer $1\leq r\leq n-1$ and let $\mathcal{M}(n)$ be the value for which the function $E^{P}_{n} $ reaches its maximum. Then, \begin{itemize} \item[i)] $\displaystyle\lim_{n} {\mathcal{M}(n)}/{n}=\mu:= \frac{1}{2}W( 2 ) =0.42630\dots$. \item[ii)] $\displaystyle \lim_{n}E^{P}_{n}(\mathcal{M}(n))=\displaystyle \lim_{n}E^{P}_{n}(\lfloor\mu n \rfloor)=\mu(1+\mu)= 0.608037\dots$. \end{itemize} \end{prop} \begin{proof} First, we extend $E^{P}_{n}$ to a real variable function by $$E^{P}_{n}(r)=\frac{r\,\left( n - r + \left( 1 + n \right) \,\psi(n) - \left( 1+ n \right) \,\psi(r) \right) }{n^{2}}.$$ Now it can be seen that $g_{n}(x):=E^{P}_{n}(nx)$ converges uniformly in $[0,1]$ to $g(x):=-x\left(-1+x+\log(x)\right)$. To conclude the proof it is enough to apply Proposition \ref{conv} together with some straightforward computations. \end{proof} This result means that the optimal strategy in this setting consists in rejecting roughly the first $\mu n$ interviewed candidates and then accepting the first candidate which is better than all the preceding ones. Following this strategy, the maximum expected payoff is asymptotically equal to $\mu^{2}+\mu$. \section{The Best-or-Worst variant} In this section we focus on the Best-or-Worst variant, as described in the introduction, in which the goal is to select either the best or the worst candidate, indifferent between the two cases. First of all we prove that, just like in the classic problem, the optimal strategy is a threshold strategy. \begin{teor}\label{BWS} For the Best-or-Worst variant, if $n$ is the number of objects, there exists $r(n)$ such that the following strategy is optimal: \begin{enumerate} \item Reject the $r(n)$ first interviewed candidates. \item After that, accept the first candidate which is either better or worse than all the preceding ones. 
\end{enumerate} \end{teor} \begin{proof} For the sake of brevity, a candidate which is either better or worse than all the preceding ones will be called a \emph{nice candidate}. Since the game under consideration is finite, there must exist an optimal strategy (in the sense that it maximizes the probability of success). Hence, we can define $P_{rej}(k)$ as the probability of success following an optimal strategy when rejecting a candidate in the $k$-th interview (regardless of its being a nice candidate or not). We can also define $P_{acc}(k)$ as the probability of success accepting a nice candidate in the $k$-th interview. Any optimal strategy will reject any non-nice candidate since the probability of being a successful choice will be $0$. The probability $P_{acc}(k)$ is $k/n$, which increases with $k$. On the other hand, the function $P_{rej}(k)$ is non-increasing because, denoting by $p$ the probability that the $(k+1)$-th candidate is a nice candidate, $$P_{rej}(k)=p\cdot\max\{P_{acc}(k+1),P_{rej}(k+1)\}+(1-p)P_{rej}(k+1)\geq P_{rej}(k+1).$$ Thus, since $P_{acc}$ is increasing and $P_{rej}$ is non-increasing and given that $P_{acc}(n)=1$ and $P_{rej}(n)=0$, there exists a natural number $r(n)$ for which: $$P_{acc}(k)<P_{rej}(k)\ \textrm{if}\ k\leq r(n),$$ $$P_{acc}(k)\geq P_{rej}(k)\ \textrm{if}\ k>r(n).$$ As a consequence of this fact, the following strategy must be optimal: for each $k$-th interview with $k\in\{1,\dots, n\}$ do the following: \begin{itemize} \item Reject the $k$-th candidate if $k\leq r(n)$ or if it is not a nice candidate. \item Accept the $k$-th candidate if $k>r(n)$ and it is a nice candidate. \end{itemize} Note that the optimality of this strategy follows from the fact that, in each interview, we are choosing the action with greatest probability of success. \end{proof} Once we have determined the optimal strategy, we focus on determining the probability of success in the $k$-th interview. To do so, let $n$ be the number of candidates and let us consider a cutoff value $r\in(1,n)$.
If $k\in (r,n]$ is an integer, the probability of successfully selecting the best or the worst candidate in the $k$-th interview is $\displaystyle P^{BW}_{n,r}(k)=\frac{2}{n}\frac{\binom{r}{2}}{\binom{k-1}{2}}$. Thus, the probability function of succeeding in the Best-or-Worst variant with $n$ candidates using $r$ as cutoff value, is given by $$F^{BW}_{n}(r):=\sum_{k=r+1}^{n}P^{BW}_{n,r}(k)=\frac{2r(r-1)}{n}\sum_{k=r+1}^{n}\frac{1}{(k-1)(k-2)}=\frac{2r(n-r)}{n(n-1)},$$ where the last equality follows using telescopic sums. \begin{rem} Note that for $n>r\in\{0,1\}$, it is straightforward to see that the probability of success is $$F^{BW}_{n}(0)=F^{BW}_{n}(1)=\frac{2}{n}.$$ \end{rem} The goal is now to determine the value of $r$ that maximizes the probability $F^{BW}_{n}$ (i.e., to determine the optimal cutoff value) and to compute this maximum probability. We do so in the following result. \begin{teor}\label{BWP} Given a positive integer $n>2$, let us consider the function $$F^{BW}_{n}(r)=\frac{2r(n-r)}{n(n-1)}$$ defined for every integer $2\leq r\leq n-1$ and let $\mathcal{M}(n)$ be the value for which the function $F^{BW}_{n}$ reaches its maximum. Then, \begin{itemize} \item[i)] $\mathcal{M}(n)=\lfloor n/2\rfloor$. \item[ii)] The maximum value of $F^{BW}_{n}$ is: $$F^{BW}_{n}(\mathcal{M}(n))=\frac{\lfloor\frac{1+n}{2}\rfloor }{2\lfloor\frac{1+n}{2}\rfloor-1}= \begin{cases} \frac{n}{2(n-1)}, & \text{if $n$ is even};\\ \frac{n+1}{2n}, & \text{if $n$ is odd}. 
\end{cases} $$ \end{itemize} \end{teor} \begin{proof} \ \begin{itemize} \item[i)] Since $F^{BW}_{n}(r)=-\frac{2}{n(n-1)}r^{2}+\frac{2}{(n-1)}r$ is the equation of a parabola in the variable $r$, it is clear that $$\mathcal{M}(n)=\min\left\{ r\in[2,n-1]:F^{BW}_{n}(r)\geq F^{BW}_{n}(r+1)\right\}.$$ Now, $$F^{BW}_{n}(r+1)-F^{BW}_{n}(r)=\frac{2}{n(n-1)}(n-2r-1)$$ so it follows that $$F^{BW}_{n}(r+1)-F^{BW}_{n}(r)\leq0\Leftrightarrow(n-2r-1)\leq0\Leftrightarrow r\geq \frac{n-1}{2}.$$ Consequently, $$\mathcal{M}(n)=\min\left\{ r\in[2,n-1]:r\geq\frac{n-1}{2}\right\} =\lfloor n/2\rfloor$$ as claimed. \item[ii)] It is enough to apply the previous result. If $n$ is even, then $n=2N$ and $$F^{BW}_{n}(\mathcal{M}(n))=F^{BW}_{n}(N)=\frac{2N(n-N)}{n(n-1)}=\frac{2N^{2}} {2N(2N-1)}=\frac{N}{2N-1}.$$ Moreover, in this case $$\left\lfloor \frac{1+n}{2}\right\rfloor =\left\lfloor \frac{1+2N} {2}\right\rfloor =N$$ so it follows that $$F^{BW}_{n}(\mathcal{M}(n))=\frac{N}{2N-1}=\frac{\left\lfloor \frac{1+n} {2}\right\rfloor }{2\left\lfloor \frac{1+n}{2}\right\rfloor -1}$$ as claimed. Otherwise, if $n$ is odd, then $n=2N+1$ and $$F^{BW}_{n}(\mathcal{M}(n))=F_{n}^{BW}(N)=\frac{2N(n-N)}{n(n-1)}=\frac{2N(2N+1-N)} {(2N+1)2N}=\frac{N+1}{2N+1}.$$ In this case $$\left\lfloor \frac{1+n}{2}\right\rfloor =\left\lfloor \frac{1+2N+1} {2}\right\rfloor =N+1$$ so we also have that $$F^{BW}_{n}(\mathcal{M}(n))=\frac{N+1}{2N+1}=\frac{\left\lfloor \frac{1+n} {2}\right\rfloor }{2\left\lfloor \frac{1+n}{2}\right\rfloor -1}$$ and the proof is complete. \end{itemize} \end{proof} This result means that, for $n>2$, optimal strategy in this setting consists in rejecting roughly the first $\lfloor\frac{n}{2}\rfloor$ interviewed candidates and then accepting the first candidate which is either better or worse than all the preceding ones. Following this strategy, the maximum probability of success is $\displaystyle\frac{\lfloor\frac{1+n}{2}\rfloor}{2\lfloor\frac{1+n}{2}\rfloor-1}$. 
In the cases $n\in\{1,2\}$, it is evident that an optimal cutoff value is $r=0$, i.e., to accept the first candidate that we consider. The probability of success is 1 in both cases according to the fact that $F^{BW}_1(0)=F^{BW}_2(0)=1$. \begin{rem} Unlike in the classic secretary problem, the probability of success in the Best-or-Worst variant is not strictly increasing in $n$. In fact, we have that $F^{BW}_{2n}(\mathcal{M}(2n))=F^{BW}_{2n-1}(\mathcal{M}(2n-1))$ for every $n$. \end{rem} We are now going to consider the Best-or-Worst variant with the payoff function $p_C$ given in (\ref{pocost}); i.e., we assume that performing each interview has a constant cost of $1/n$. Under this assumption it can be proved that the optimal strategy is the same threshold strategy given in Theorem \ref{BWS}. Moreover, in this setting, the expected payoff with $n$ candidates and cutoff value $r$ is given by $$E_n^{BW,C}(r):=\sum_{k=r+1}^n\left(1-\frac{k}{n}\right)P^{BW}_{n,r}(k)=\frac{2r(r-1)}{n^2}\sum_{k=r+1}^{n}\frac{n-k}{(k-1)(k-2)}.$$ As usual, the goal is to determine the optimal cutoff value that maximizes the expected payoff $E^{BW,C}_{n}$ and to compute this maximum expected payoff. We do so in the following result. \begin{teor} Given an integer $n>1$, let us consider the function $E^{BW,C}_n(r)$ defined above for every integer $1<r<n$ and let $\mathcal{M}(n)$ be the value for which the function $E^{BW,C}_n$ reaches its maximum. Also, let $$\theta:=-\frac{1}{2W_{-1}(-\frac{1}{2\sqrt{e}})}=e^{\frac{1}{2} + W_{-1}(\frac{-1}{2\,\sqrt{e}})}$$ be the solution to the equation $2x\log(x)=x-1$. Then, the following hold: \begin{itemize} \item[i)] $\displaystyle\lim_{n} {\mathcal{M}(n)}/{n}=\theta= 0.284668\dots$.
\item[ii)] $\displaystyle \lim_{n}E^{BW,C}_n( \mathcal{M}(n))=\displaystyle \lim_{n}E^{BW,C}_n(\lfloor n \theta \rfloor)=\theta(1-\theta)=0.2036321\dots$ \end{itemize} \end{teor} \begin{proof} First, observe that \begin{align*} E^{BW,C}_n(r)&=\frac{2r(r-1)}{n^{2}}\sum_{k=r+1}^{n}\frac{(n-k)}{(k-1)(k-2)} =\frac{2r(r-1)}{n^{2}}\left[ \frac{n-2}{r-1}-\frac{n-2}{n-1}-\sum_{i=r} ^{n-1}\frac{1}{i}\right]\\ & =2\frac{r}{n}\left( 1-\frac{2}{n}\right) -2\frac{r}{n}\left( \frac {r}{n-1}-\frac{1}{n-1}\right) -2\frac{r}{n}\left( \frac{r}{n}-\frac{1} {n}\right) \sum_{i=r}^{n-1}\frac{1}{i}. \end{align*} Now, we can extend $E^{BW,C}_n$ to a real variable function by $$E^{BW,C}_n(r)=2\frac{r}{n}\left( 1-\frac{2}{n}\right) -2\frac{r}{n}\left( \frac {r}{n-1}-\frac{1}{n-1}\right) -2\frac{r}{n}\left( \frac{r}{n}-\frac{1} {n}\right) (\psi(n)-\psi(r)).$$ Furthermore, it can be seen that the sequence of functions $g_n(x):=E^{BW,C}_n(nx)$ converges uniformly in $[0,1]$ to the function $g(x)=2x\left(1-x+x\log x\right)$. To conclude the proof it is enough to apply Proposition \ref{conv} together with some straightforward computations. \end{proof} \begin{rem} The constant $\theta=-\frac{1}{2W_{-1}(-\frac{1}{2\sqrt{e}})}=0.284668\dots$ also appears related to rumour theory \cite{RUMOR,ru} and to Gabriel's Horn (see A101314 in OEIS). \end{rem} Now, let us consider the Best-or-Worst variant with the payoff function $p_P$ given in (\ref{popay}); i.e., we assume that performing each interview has an additional payoff of $1/n$. Under this assumption, since the payoff increases with the number of interviews, it can be proved that the optimal strategy is again the same threshold strategy given in Theorem \ref{BWS}. 
Moreover, in this setting, the expected payoff with $n$ candidates and cutoff value $r$ is given by $$E_n^{BW,P}(r):=\sum_{k=r+1}^n\left(1+\frac{k}{n}\right)P^{BW}_{n,r}(k)=\frac{2r(r-1)}{n^2}\sum_{k=r+1}^{n}\frac{n+k}{(k-1)(k-2)}.$$ The optimal cutoff value that maximizes the expected payoff $E^{BW,P}_{n}$ and this maximum expected payoff are determined in following result. \begin{teor} Given an integer $n>1$, let us consider the function $E^{BW,P}_n(r)$ defined above for every integer $1<r<n$ and let $\mathcal{M}(n)$ be the value for which the function $E^{BW,P}_n$ reaches its maximum. Also let $$\vartheta:=\frac{1}{2\,W(\frac{e^{\frac{3}{2}}}{2})}=0.552001\dots$$ be the solution to the equation $1 - 3\,x - 2\,x\,\log(x)=0$. Then, the following hold: \begin{itemize} \item[i)] $\displaystyle\lim_{n} {\mathcal{M}(n)}/{n}=\vartheta$. \item[ii)] $\displaystyle \displaystyle \lim_{n}E^{BW,P}_n(\mathcal{M}(n))=\lim_{n}E^{BW,P}_n(\lfloor n\vartheta \rfloor)=\vartheta(1+\vartheta)=0.8567\dots$ \end{itemize} \end{teor} \begin{proof} First, observe that \begin{align*} E^{BW,P}_n(r) & =\frac{2r(r-1)}{n^{2}}\sum_{k=r+1}^{n}\frac{(n+k)}{(k-1)(k-2)}=\\ & =2\frac{r}{n}\left( 1+\frac{2}{n}\right) -2\frac{r}{n}\frac{r-1} {n}\left( 1+\frac{3}{n-1}\right) -2\frac{r}{n}\frac{r-1}{n}\sum_{i=r} ^{n-1}\frac{1}{i}. \end{align*} Now, we can extend $E^{BW,P}_n$ to a real variable function by $$E^{BW,P}_n(r)=2\frac{r}{n}\left( 1+\frac{2}{n}\right) -2\frac{r}{n}\frac{r-1} {n}\left( 1+\frac{3}{n-1}\right) -2\frac{r}{n}\frac{r-1}{n}(\psi(n)-\psi(r)).$$ Furthermore, it can be seen that the sequence of functions $g_{n}(x):=E^{BW,P}_n(nx)$ converges uniformly on $[0,1]$ to $g(x)=-2x\left(-1+x+x\log x\right)$. To conclude the proof it is enough to apply Proposition \ref{conv} together with some straightforward computations. \end{proof} So far, we have considered the Best-or-Worst variant in which the goal is to select either the best or the worst candidate, indifferent between the two cases. 
To finish this section we are going to further modify the Best-or-Worst variant. In particular, we are going to consider different payoffs depending on whether we select the best or the worst candidate. More precisely, we are going to consider the following payoff function, with $m<M$. \begin{equation}\label{poun} p_U(k)=\begin{cases} m, & \textrm{if the $k$-th candidate is the worst candidate};\\ M, & \textrm{if the $k$-th candidate is the best candidate};\\ 0, & \textrm{otherwise}. \end{cases} \end{equation} In this new setting the optimal strategy has two thresholds, as stated in the following result, whose proof is analogous to that of Theorem \ref{BWS}. \begin{teor} For the Best-or-Worst variant, if $n$ is the number of candidates and the payments for selecting the worst and the best candidates are, respectively, $m<M$, there exist $r(n)\leq s(n)$ such that the following strategy is optimal: \begin{enumerate} \item Reject the $r(n)$ first interviewed candidates. \item Accept the first candidate which is better than all the preceding ones until reaching the $s(n)$-th candidate. \item After that, accept the first candidate which is either better or worse than all the preceding ones. \end{enumerate} \end{teor} Now, let $n$ be the number of candidates and let us consider cutoff values $1<r<s<n$. Then, if $k\in (r,n]$ is an integer, the probability of successfully selecting the best candidate in the $k$-th interview is given by $$P^{BW,U}_{n,r,s}(k)=\begin{cases} \frac{r}{(k-1) n}, & \textrm{if $r<k<s$};\\ \frac{r}{k-1}\frac{s-1}{k-2}\frac{1}{n}, & \textrm{if $k\geq s$}. \end{cases}$$ On the other hand, if $k\in (r,n]$ is an integer, the probability of successfully selecting the worst candidate in the $k$-th interview is given by $$\overline{P}^{BW,U}_{n,r,s}(k)=\begin{cases} 0, & \textrm{if $r<k<s$};\\ \frac{r}{k-1}\frac{s-1}{k-2}\frac{1}{n}, & \textrm{if $k\geq s$}.
\end{cases}$$ This is because, according to the optimal strategy, we can only select the worst candidate if $k\geq s$. Consequently, the expected payoff with $n$ candidates and cutoff values $r<s$ is given by \begin{align*} E^{BW,U}_{n}(r,s)&:=\sum_{k=r+1}^n MP^{BW,U}_{n,r,s}(k)+m\overline{P}^{BW,U}_{n,r,s}(k)\\&=\sum_{k=r+1}^{s}\frac{M\,r}{\left(k-1\right) \,n}+\sum_{k=s+1}^{n}\left(M+m\right) \frac{r(s-1)}{(k-1)(k-2)n}. \end{align*} The following result determines the cutoff values as well as the corresponding maximum expected payoff. \begin{teor}\label{TnM} Given a positive integer $n>2$, let us consider the function $E^{BW,U}_{n}(r,s)$ defined above for every pair of integers in the set $\{(r,s)\in\mathbb{Z}^{2}:0\leq r\leq s<n\}$ and let $(\mathcal{M}_{1}(n),\mathcal{M}_{2}(n))$ be the point for which $E_n^{BW,U}$ reaches its maximum. Then, \begin{itemize} \item[i)] $\displaystyle \lim_{n}\frac{\mathcal{M}_1(n)}{n}=\frac{e^{-1+\frac{m}{M}}M}{m+M}.$ \item[ii)] $\displaystyle \lim_{n}\frac{\mathcal{M}_2(n)}{n}=\frac{M}{m+M}.$ \item[iii)] $\displaystyle \lim_{n}E^{BW,U}_{n}(\mathcal{M}_{1}(n),\mathcal{M}_{2}(n))=\frac{e^{-1+\frac{m}{M}}M^2}{m+M}.$ \end{itemize} \end{teor} \begin{proof} Let us define the sequence of functions $\{h_n\}$ by $h_n(x,y)=E^{BW,U}_{n}(nx,ny)$. Then, $$ \lim_{n}h_n(x,y)=h(x,y)= \begin{cases} (M+m)x-(M+m)xy+Mx\log(y/x), & \textrm{if $x,y\neq0$};\\ 0 & \textrm{otherwise}. \end{cases} $$ and the convergence is uniform on $T:=\{(x,y)\in\mathbb{R}^{2}:0\leq x\leq y\leq1\}$. Hence, we can apply Proposition \ref{conv2}. To do so, observe that $h$ is a concave function on the convex set $T$ with a negative definite Hessian matrix. Since $h$ has only one critical point, namely $$\left(\frac{e^{-1+\frac{m}{M}}M}{M+m},\frac{M}{M+m}\right)$$ and $$h\left(\frac{e^{-1+\frac{m}{M}}M}{M+m},\frac{M}{M+m}\right)=\frac{e^{-1+\frac{m}{M}}M^{2}}{M+m}$$ the result follows.
\end{proof} This result means that the optimal strategy in this setting consists in rejecting roughly the first $n\dfrac{e^{-1+\frac{m}{M}}M}{M+m}$ interviewed candidates, then accepting the first candidate which is better than all the preceding ones until reaching roughly the $n\dfrac{M}{M+m}$-th candidate and, finally, accepting the first candidate which is either better or worse than all the preceding ones. Following this strategy, the maximum expected payoff is asymptotically equal to $\displaystyle\frac{e^{-1+\frac{m}{M}}M^{2}}{M+m}$. \begin{rem} If $m\ll M$ the cutoff values obtained in Theorem \ref{TnM} are, approximately, $ne^{-1}$ and $n$. This means that the optimal strategy ignores the objective of obtaining the worst candidate and we recover the original secretary problem. In addition, if $m=M$, then both cutoff values coincide with $n/2$ and we recover the original Best-or-Worst variant. \end{rem} \section{The Postdoc variant} In this section we focus on the Postdoc variant, as described in the introduction, in which the goal is to select the second best candidate. First of all we have to prove that, just like in the classic problem, the optimal strategy is a threshold strategy. In this variant it is not obvious that the optimal strategy has only one threshold. This is because the candidate considered in a given interview could be selected either when it is better than all the preceding ones or when it is the second best among them, and in both cases it could end up being the second best candidate. However, we are going to see that selecting a candidate which is better than all the preceding ones is never preferable to waiting for a candidate which is the second better than all the preceding ones. Assume for a moment that we are following a threshold strategy. Let $n$ be the number of candidates and let us consider a cutoff value $r\in(1,n)$.
If $k\in (r,n]$ is an integer, the probability of successfully selecting the second best candidate in the $k$-th interview is $P^{PD}_{n,r}(k)=\frac{r}{k-1}\frac{1}{k}\frac{\binom{k}{2}}{\binom{n}{2}}$. Thus, the probability function of succeeding in the Postdoc variant with $n$ candidates using $r$ as cutoff value and provided we are following a threshold strategy for the second best candidate, is given by $$F^{PD}_{n}(r):=\sum_{k=r+1}^{n}P^{PD}_{n,r}(k)=\sum_{k=r+1}^{n}\frac{r\,{\binom{k}{2}}}{\left( -1+k\right) \,k\,{\binom{n}{2}}}.$$ Note that the following holds: \begin{align*} F^{PD}_{n}(r)&=\sum_{k=r+1}^{n}\frac{r\,{\binom{k}{2}}}{\left( -1+k\right) \,k\,{\binom {n}{2}}}=\frac{r\,{\binom{r+1}{2}}}{\left( -1+r+1\right) \,(r+1)\,{\binom {n}{2}}}+\sum_{k=r+2}^{n}\frac{r\,{\binom{k}{2}}}{\left( -1+k\right) \,k\,{\binom {n}{2}}}\\ &= \frac{ \,{\binom{r+1}{2}}}{ \,(r+1)\,{\binom {n}{2}}}+\sum_{k=r+2}^{n}\frac{(r+1)r\,{\binom{k}{2}}}{\left( -1+k\right) \,k\,{(r+1)\binom {n}{2}}}\\ &= \frac{ \,{\binom{r+1}{2}}}{ \,(r+1)\,{\binom {n}{2}}}+\frac{r}{r+1}\sum_{k=r+2}^{n}\frac{(r+1) \,{\binom{k}{2}}}{\left( -1+k\right) \,k\,{ \binom {n}{2}}}\\ &= \frac{ \,{\binom{r+1}{2}}}{ \,(r+1)\,{\binom {n}{2}}}+ \frac{r}{r+1}F^{PD}_{n}(r+1). \end{align*} On the other hand, let us denote by $T_n(r)$ the probability of success after the $r$-th interview provided we have already selected a candidate which is better than all the preceding ones. Then, the probability of finding the second best candidate in the $(r+1)$-th interview is $\frac{1}{r+1}$ and, furthermore, the probability of not finding a better candidate among all the remaining interviews is $\frac{\binom{r+1}{2}}{\binom{n}{2}}$. On the other hand, the probability of not obtaining the second best candidate in the $(r+1)$-th interview is $\frac{r}{r+1}$ and the probability of success in this case will be $T_n(r+1)$.
Hence, $$T_n (r)=\frac{1}{r+1}\frac{\binom{r+1}{2}}{ \binom{n}{2}}+\frac{r}{r+1}T_n(r+1).$$ Thus, we have seen that $T_n(r)$ and $F^{PD}_{n}(r) $ both satisfy the same recurrence relation in $r$. Moreover, it holds that $T_n(n-1)=F^{PD}_{n}( n-1)=1/n$ so, consequently, we obtain that $T_n(r)=F^{PD}_{n}(r) $ for every $r<n$. Note that this means that the optimal strategy can neglect whether a given candidate is better than all the preceding ones and focus only on whether the candidate is the second better than all the preceding ones and thus the optimal strategy has only one threshold. \begin{teor}\label{TEORPDC} For the Postdoc variant, if $n$ is the number of candidates, there exists $r(n)$ such that the following strategy is optimal: \begin{enumerate} \item Reject the $r(n)$ first interviewed candidates. \item After that, accept the first candidate which is the second best until then. \end{enumerate} \end{teor} \begin{proof} Just use the same ideas as in Theorem \ref{BWS}. \end{proof} Thus, the probability function of succeeding in the Postdoc variant with $n$ candidates using $r$ as cutoff value, is given by $$F^{PD}_{n}(r):=\sum_{k=r+1}^{n}P^{PD}_{n,r}(k)=\frac{r(n-r)}{n(n-1)}.$$ Observe that we have obtained that $F_n^{PD}(r)=\dfrac{1}{2}F_n^{BW}(r)$. Consequently, if we follow the previous strategy, the optimal cutoff value is the same as in the Best-or-Worst variant; i.e., $\lfloor \frac{n}{2}\rfloor$, and the maximum probability of success is one half of the maximum probability of success in the Best-or-Worst variant (see Theorem \ref{BWP}). We are now going to consider the Postdoc variant with the payoff function $p_C$ given in (\ref{pocost}); i.e., we assume that performing each interview has a constant cost of $1/n$. Under this assumption it can be proved that the optimal strategy has two thresholds.
\begin{teor}\label{PDP} For the Postdoc variant, if $n$ is the number of candidates and if the payoff function is given by (\ref{pocost}), there exist $r(n)\leq s(n)$, such that the following strategy is optimal: \begin{enumerate} \item Reject the $r(n)$ first interviewed candidates. \item Accept the first candidate which is better than all the preceding ones until reaching the $s(n)$-th candidate. \item After that, accept the first candidate which is either better or second better than all the preceding ones. \end{enumerate} \end{teor} \begin{proof} Proceed as in Theorem \ref{BWS} with each threshold separately. \end{proof} Under this strategy, the probability of successfully selecting the second best candidate in the $k$-th interview is given by the function $$P^{PD,C}_{n,r,s}(k)=\begin{cases} \frac{r(n-k)}{n(n-1)(k-1)}, & \textrm{if $r<k<s$};\\ \frac{r(s-1)(n-k)}{n(n-1)(k-1)(k-2)}+\frac{r(s-1)}{n(n-1)(k-2)}, & \textrm{if $k\geq s$}. \end{cases}$$ Consequently, the expected payoff with $n$ candidates and cutoff values $r<s$ is given by $$E^{PD,C}_{n}(r,s)=\sum_{k=r+1}^n \left(1-\frac{k}{n}\right)P^{PD,C}_{n,r,s}(k).$$ In the following result we determine the optimal cutoff values and the maximum expected payoff. \begin{teor} Given a positive integer $n>2$ let us consider the function $E^{PD,C}_{n}(r,s)$ defined above for every $(r,s)\in\{(r,s) \in\mathbb{Z}^{2}:0\leq r\leq s<n\}$ and let $(\mathcal{M}_{1} (n),\mathcal{M}_{2}(n))$ be the point for which $E^{PD,C}_{n}$ reaches its maximum. 
Then, \begin{itemize} \item[i)] $\displaystyle \lim_{n} {\mathcal{M}_1(n)}/{n}=0.17248\dots$ \item[ii)] $\displaystyle \lim_{n} {\mathcal{M}_2(n)}/{n}=0.39422\dots$ \item[iii)] $\displaystyle \lim_{n}E^{PD,C}_{n}(\mathcal{M}_{1}(n),\mathcal{M}_{2}(n))=0.11811\dots$ \end{itemize} \end{teor} \begin{proof} First of all, observe that \begin{align*} E^{PD,C}_{n}(r,s)&=\frac{r}{n^{2}}\left( n+\frac{n-1}{s-1}-s+\frac{\left( s-r\right) \,\left( 3-4\,n+r+s\right) }{2\,\left( n-1\right) }\right) +\\ & +\frac{r}{n^{2}}\left( \left( 1-s\right) \,\psi(-1+n)-\left( n-1\right) \,\psi(r)+\left( n-2+s\right) \,\psi(s-1)\right). \end{align*} Thus, if we define the sequence of functions $\{h_n\}$ by $h_n(x,y)=E^{PD,C}_n(nx,ny)$, it follows that $$\lim_{n}h_n(x,y)=h(x,y):=\begin{cases} \frac{x\left(2-6y+y^{2}+4x-x^{2}+2(1+y)\log y-2\,\log x\right)}{2}, & \textrm{if $x,y\neq0$};\\ 0, & \textrm{otherwise}. \end{cases}$$ and the convergence is uniform on $\{(x,y)\in\mathbb{R}^{2}:0\leq x\leq y\leq1\}$. Using elementary techniques we get that $h$ reaches its absolute maximum at the point $(\alpha,\beta)$ with $\beta:=0.39422\dots$ is the solution to $-2+\frac{1}{\beta}+\beta+\log(\beta)=0$ and $\alpha:=0.1724844\dots$ is the solution to $1-\frac{1}{\beta}-2\,\beta-\frac{\beta^{2}}{2}+4\,\alpha-\frac{3\,\alpha^{2}}{2}-\log(\alpha)=0$. The fact that $h(\alpha,\beta)=0.11811\dots$ concludes the proof. \end{proof} Finally, let us consider the Postdoc variant with the payoff function $p_P$ given in (\ref{popay}); i.e., we assume that performing each interview has an additional payoff of $1/n$. Under this assumption, it is clear that no optimal strategy will accept a candidate which is better than all the preceding ones because, if the search continues, the probability of success is the same and the payoff will be greater. 
Hence, we must only consider strategies with one threshold for the second best candidate, as in Theorem \ref{TEORPDC}, ignoring whether the interviewed candidate is better than the preceding ones. In this setting, the expected payoff with $n$ candidates and cutoff value $r$ is given by $$E_n^{PD,P}(r):=\sum_{k=r+1}^n\left(1+\frac{k}{n}\right)P^{PD}_{n,r}(k)=\frac{r(n-r)(3n+1+r)}{2n^2(n-1)}.$$ The optimal cutoff value that maximizes the expected payoff $E^{PD,P}_{n}$ and this maximum expected payoff are determined in the following result. \begin{teor} Given an integer $n>1$, let us consider the function $E_n^{PD,P}(r)$ defined above for every integer $1<r<n$ and let $\mathcal{M}(n)$ be the value for which the function $E_n^{PD,P}$ reaches its maximum. Then, the following hold: \begin{itemize} \item[i)] $\displaystyle \lim_{n}\frac{\mathcal{M}(n)}{n}=\frac{\sqrt{13}-2}{3}=0.53518\dots$ \item[ii)] $\displaystyle\lim_{n}E_n^{PD,P}(\mathcal{M}(n))=\frac{13\sqrt{13}-35}{27}=0.4397\dots$ \end{itemize} \end{teor} \begin{proof} Since $E_n^{PD,P}$ is a degree 3 polynomial, we can explicitly obtain the exact value of $\mathcal{M}(n)$ by elementary methods. Namely, $$\mathcal{M}(n)=\frac{-1-2\,n+\sqrt{1+7\,n+13\,n^{2}}}{3}.$$ The result follows immediately. \end{proof} \begin{rem} Note that we can further refine the previous result by noting that $\displaystyle \mathcal{M}(n)=\left(\frac{\sqrt{13}-2}{3}\right)n+\frac{7-2\sqrt{13}}{6\sqrt{13}}+o(1)$. In this case, $[\mathcal{M}(n)]$ is the optimal cutoff value for all $n$ up to 10000, without any exception. \end{rem} \section{Conclusions} In this paper, we have analyzed two variants of the secretary problem which happen to be closely related: the Postdoc and the Best-or-Worst variants. Both of them have the same optimal threshold strategy and the mean payoff for the second one is twice that of the first one.
We now show a comparative table of the asymptotic optimal cutoff value (ACV) given by $\displaystyle \lim_{n} \mathcal{M}(n)/n$ and the asymptotic maximum expected payoff (AMP) in the classical secretary problem, in the Best-or-Worst variant and in the Postdoc variant with payoff functions $p_B$, $p_C$ and $p_P$. In the case of the Postdoc variant with payoff function $p_C$, in the cell corresponding to $\mathcal{M}(n)/n$ we show the two thresholds related to the optimal strategy in that setting. \[ \begin{tabular} [c]{|c|c|c|c|c|c|c|}\hline {\footnotesize Payoff} & \multicolumn{2}{|c|}{Classic} & \multicolumn{2}{|c|}{Best-or-Worst} & \multicolumn{2}{|c|}{Postdoc} \\\cline{2-7} & $\text{ACV}$ & $\text{AMP}$ & $\text{ACV}$ & $\text{AMP}$ & $\text{ACV}$ & $\text{AMP}$\\\hline $p_B$ & $e^{-1}$ & $e^{-1}$ & $1/2$ & 1/2 & $1/2$ & 1/4\\\hline $p_C$ & $ \begin{array} [c]{c} \rho\simeq\\ 0.2031 \end{array} $ & $ \begin{array} [c]{c} \rho-\rho^{2}\simeq\\ 0.1619 \end{array} $ & $ \begin{array} [c]{c} \theta\simeq\\ 0.2846 \end{array} $ & $ \begin{array} [c]{c} \theta-\theta^{2}\simeq\\ 0.2036 \end{array} $ & $ \begin{array} [c]{c} 0.1724,\\ 0.3942 \end{array} $ & $0.1181$\\\hline $p_P$ & $ \begin{array} [c]{c} \eta\simeq\\ 0.4263 \end{array} $ & $ \begin{array} [c]{c} \eta^{2}+\eta\simeq\\ 0.6080 \end{array} $ & $ \begin{array} [c]{c} \vartheta\simeq\\ 0.5520 \end{array} $ & $ \begin{array} [c]{c} \vartheta^{2}+\vartheta\simeq\\ 0.8567 \end{array} $ & $ \begin{array} [c]{c} \frac{\sqrt{13}-2}{3}\simeq\\ 0.5351 \end{array} \,$ & $ \begin{array} [c]{c} \frac{13\sqrt{13}-35}{27}\\ \simeq0.4397 \end{array} $\\\hline \end{tabular} \ \ \] \end{document}
\begin{document} \begin{center} {\Large The inertialess limit of particle sedimentation modeled by the Vlasov-Stokes equations} Richard M. H\"{o}fer\footnote{University of Bonn, Institute For Applied Mathematics. Endenicher Allee 60, 53115 Bonn, Germany. \newline Email: [email protected], Phone: +49 228 735602} \today \end{center} \begin{abstract} We study the Vlasov-Stokes equations which macroscopically model the sedimentation of a cloud of particles in a fluid, where particle inertia are taken into account but fluid inertia are assumed to be negligible. We consider the limit when the inertia of the particles tends to zero, and obtain convergence of the dynamics to the solution of an associated inertialess system of equations. This system coincides with the model that can be derived as the homogenization limit of the microscopic inertialess dynamics. \end{abstract} \section{Introduction} We consider the sedimentation of a cloud of identical spherical particles suspended in a fluid subject to gravitation. It is assumed that the suspension is sufficiently dilute such that collisions of particles do not play a role. Furthermore, we neglect inertial forces of the fluid, i.e., the fluid is modeled by a Stokes equation, but particle inertia are taken into account. These assumptions are justified if the Reynolds number is much smaller than the Stokes numbers which is the case for very small particles in gases. We refer to \cite{Koc90} for the details of the microscopic model and a discussion about the regime of validity. Let a nonnegative function $f(t,x,v)$ describe the number density of particles at time $t$ and position $x \in \mathbb{R}^3$ with velocity $v \in \mathbb{R}^3$. We denote the position density and current by \begin{align} \rho(t,x) &:= \int_{\mathbb{R}^3} f(t,x,v) \, \mathrm{d} v, \label{eq:defRho}\\ j(t,x) := \rho(t,x) \bar{V}(t,x) &:= \int_{\mathbb{R}^3} f(t,x,v) v \, \mathrm{d} v. 
\label{eq:defJVbar} \end{align} Here, the mean velocity $\bar{V}$ is defined to be zero in the set $\{\rho = 0\}$. As a model for the macroscopic dynamics, we consider the so-called Vlasov-Stokes equations, a Vlasov equation for the particles coupled with Brinkman equations for the fluid, \begin{equation} \label{eq:VlasovStokes0} \begin{aligned} \partial_t f + v \cdot \nabla_x f + \lambda \operatorname{div}_v \left(\hat{g} f + \frac{9}{2} \gamma (u-v) f \right) &= 0, \qquad f(0,\cdot,\cdot) = f_0, \\ - \Delta u + \nabla p + 6 \pi \gamma \rho(u-\bar V) &= 0, \qquad \operatorname{div} u = 0. \end{aligned} \end{equation} Here, $u$ and $p$ are the fluid velocity and pressure respectively, $\hat{g} = g/|g|$ with $g$ being the gravitational acceleration, and $\lambda$ and $\gamma$ are constants that will be discussed below. The first equation expresses that the forces acting on the particles are the gravitation and the drag exerted by the fluid. The Brinkman equations are Stokes equations with a force term that arises from the same drag. A rigorous derivation of these macroscopic equations from the microscopic dynamics has not been achieved yet, a formal derivation can be found in \cite{Koc90}. In the quasi-static case, the Brinkman equations have been established in \cite{Al90a}, \cite{DGR08}. Using this, the Vlasov-Stokes equations \eqref{eq:VlasovStokes0} can be formally derived from the microscopic dynamics after non-dimensionalizing. The constants $\lambda$ and $\gamma$ are given by \begin{align} \lambda = \frac{\mu^2 }{\rho_p (\rho_p - \rho_f) \phi^2 |g| L^3}, \qquad \gamma = \frac{\phi L^2}{R^2}, \end{align} where $\mu$ is the fluid viscosity, $\rho_p$ and $\rho_f$ are the particle and fluid mass density respectively, $\phi$ is the volume fraction of the particles, $L$ is the diameter of the cloud of particles, and $R$ the radius of the particles. The constant $\gamma$ determines the interaction strength between fluid and particles.
The quantity $(\lambda \gamma)^{-1}$ is known as the Stokes number and determines the strength of the inertial forces. For definiteness, we assume $\rho_p > \rho_f$ such that $\lambda >0$. Then, the larger $\lambda$, the less important inertial effects become. For a more detailed discussion of these parameters as well as a formal derivation of the system \eqref{eq:VlasovStokes0}, we refer to \cite{Hof16}. For similar equations as \eqref{eq:VlasovStokes0}, global well-posedness has been proven in \cite{Ham98} and \cite{BDGM09}. In \cite{Jab00}, the author considers the inertialess limit of the system, where the fluid velocity $u$ in \eqref{eq:Vlasov Stokes} is replaced by a force term $F[\rho,j]$ that is given by a convolution operator which is more regular than the Stokes convolution operator. In \cite{Gou01}, similar limits are studied for a one dimensional model without gravity and including inertial forces on the fluid. In \cite{GP04}, the authors consider limits of high and low inertia of the system of a Vlasov equation without gravity and with a given random fluid velocity field. Similar systems that include Brownian motion of the particles and their limits have been studied among others in \cite{CP83}, \cite{GJV04a}, \cite{GJV04b}, \cite{CG06}, and \cite{GHMZ10}. \subsection{Main result} We are interested in the limit $\lambda \to \infty$, which corresponds to inertialess particles. For the ease of notation we drop all the other constants and consider the system \begin{equation} \label{eq:Vlasov Stokes} \begin{aligned} \partial_t f + v \cdot \nabla_x f + \lambda \operatorname{div}_v \left(g f + (u-v) f \right) &= 0, \qquad f(0,\cdot,\cdot) = f_0, \\ - \Delta u + \nabla p + \rho(u-\bar V) &= 0, \qquad \operatorname{div} u = 0. \end{aligned} \end{equation} For inertialess particles, the following macroscopic equation has been proven in \cite{Hof16} to be the homogenization limit of many small particles.
\begin{equation} \label{eq:limitEquation} \begin{aligned} \partial_t \rho_\ast + \left( g + u_\ast \right) \cdot \nabla \rho_\ast &= 0, \qquad \rho_\ast(0,\cdot) = \rho_0 := \int_{\mathbb{R}^3} f_0 \, \mathrm{d} v,\\ - \Delta u_\ast + \nabla p& = {g} \rho_\ast, \qquad \operatorname{div} u_\ast = 0 . \end{aligned} \end{equation} Moreover, well-posedness of this system has been proven in \cite{Hof16}. In these equations, particles are described by their position density $\rho_\ast$ only, because their velocity is the sum of the fluid velocity $u_\ast$ and the constant $g$ which is the direct effect due to gravitation. The main result of this paper is the following theorem. \begin{theorem} \label{th:main} Assume $f_0 \in W^{1,\infty}(\mathbb{R}^3\times\mathbb{R}^3)$ is compactly supported. Then, for $\lambda > 0$, there exists a unique solution $(f_\lambda,u_\lambda)$ to \eqref{eq:Vlasov Stokes}. Let $(\rho_\ast,u_\ast)$ be the unique solution to \eqref{eq:limitEquation}. Then, for all $T > 0$, all $t \in (0,T)$, and all $\alpha < 1$ \begin{align} \rho_\lambda &\to \rho_\ast \quad \text{in} ~ C^{0,\alpha}((0,T) \times \mathbb{R}^3), \\ u_\lambda &\to u_\ast \quad \text{in} ~ L^\infty((t,T) ; W^{1,\infty}(\mathbb{R}^3)) ~ \text{and in} ~ L^1((0,T) ; W^{1,\infty}(\mathbb{R}^3)). \end{align} \end{theorem} Formally, for large values of $\lambda$, the first equation in \eqref{eq:Vlasov Stokes} forces the particles to attain the velocity $g + u(t,x)$, i.e., the density $f(t,x,v)$ concentrates around $g + u(t,x)$. Using that and integrating the first equation in \eqref{eq:Vlasov Stokes} in $v$ leads to the first equation in \eqref{eq:limitEquation}. Moreover, $\bar{V}$ in the fluid equation in \eqref{eq:Vlasov Stokes} can formally be replaced by $g + u(t,x)$, which leads to the fluid equation in \eqref{eq:limitEquation}. Formally, the adjustment of the particle velocities described above happens in times of order $1/\lambda$.
In fact, the process is more complicated as the fluid velocity changes very fast in this time scale as well. In other words, there is a boundary layer of width $1/\lambda$ at time zero for the convergence of the fluid (and particle) velocity. This is the reason why the convergence $u_\lambda \to u_\ast$ can only hold uniformly on time intervals $(t,T)$ for $t > 0$ as stated in the theorem. The particles, however, do not move significantly in times of order $1/\lambda$. Thus, there is no boundary layer in the convergence $\rho_\lambda \to \rho_\ast$. \subsection{Idea of the proof} We introduce the kinetic energy of the particles \[ E(t) := \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} |v|^2 f \, \mathrm{d} x \, \mathrm{d} v . \] Using the Vlasov-Stokes equations \eqref{eq:Vlasov Stokes} yields the following energy identities for the fluid velocity and the particle energy (cf. Lemma \ref{lem:WellPosednessFluid} and Lemma \ref{lem:aPrioriWellPosedness}). \begin{align} \label{eq:energyFluid} \|\nabla u\|_{L^2(\mathbb{R}^3)}^2 + \| u \|_{L^2(\rho)}^2 &= (u,j)_{L^2(\mathbb{R}^3)} \leq \|\bar{V} \|^2_{L^2_\rho} \leq E, \\ \label{eq:energyParticles} \frac{1}{2} \frac{d}{d t} E &=\lambda \left( g \cdot \int_{\mathbb{R}^3 \times \mathbb{R}^3} j \, \mathrm{d} x - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (u - v)^2 f \, \mathrm{d} x \, \mathrm{d} v - \|\nabla u \|^2_{L^2(\mathbb{R}^3)} \right). \end{align} Here and in the following, the weighted $L^p$-norm is defined by \[ \|h\|_{L^p_\rho}^p := \int_{\mathbb{R}^3} |h|^p \rho \, \mathrm{d} x. \] As expected, equation \eqref{eq:energyParticles} shows that there is loss of energy due to friction (friction between the particles and the fluid as well as friction inside of the fluid), but the gravity pumps energy into the system (if we assume $g \cdot \int_{\mathbb{R}^3 \times \mathbb{R}^3} j \, \mathrm{d} x > 0$, which at least after some time should be the case).
Note that the Vlasov-Stokes equations \eqref{eq:Vlasov Stokes} also imply that the mass of the particles $\|\rho\|_{L^1(\mathbb{R}^3)}$ is conserved. To analyze solutions to the Vlasov equation in \eqref{eq:Vlasov Stokes}, we look at the characteristic curves $(X,V,Z)(s,t,x,v)$ starting at time $t$ at position $(x,v) \in \mathbb{R}^3 \times \mathbb{R}^3$, where $Z$ denotes the value of the solution $f$ along the characteristic curve. \begin{equation} \label{eq:characteristics} \begin{aligned} \partial_s{X} &= V, \qquad &&X(t,t,x,v) = x, \\ \partial_s{V} &= \lambda (g + u(s,{X}) - V(s,t,x,v)), \qquad &&V(t,t,x,v) = v, \\ \partial_s{Z} &= 3 \lambda Z, \qquad &&Z(t,t,x,v) = f(t,x,v). \end{aligned} \end{equation} By the standard theory, any solution $f \in W^{1,\infty}((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)$ with $u \in L^\infty((0,T);W^{1,\infty}(\mathbb{R}^3))$ is of the form \begin{equation} \label{eq:fByCharacteristics} f(t,x,v) = e^{3 \lambda t} f_0(X(0,t,x,v),V(0,t,x,v)). \end{equation} Using the characteristics as well as estimates based on the energy identities \eqref{eq:energyFluid} and \eqref{eq:energyParticles} and regularity theory of Stokes equations, we prove global well-posedness of the Vlasov-Stokes equations \eqref{eq:Vlasov Stokes} for compactly supported initial data $f_0 \in W^{1,\infty}(\mathbb{R}^3 \times \mathbb{R}^3)$. A similar approach based on an analysis of the characteristics has been used to prove existence of solutions to the Vlasov-Poisson equations in \cite{BD85}, \cite{Pfa92}, and \cite{Sch91} (see also \cite{Gla96}). From the PDE point of view, the electrostatic potential appearing in the Vlasov-Poisson equation is similar to the fluid velocity in the Vlasov-Stokes equations. However, in the Vlasov-Poisson equations, the force acting on the particles is the gradient of the electrostatic potential. whereas in the Vlasov-Stokes equations, only the fluid velocity itself contributes. 
This makes it possible to prove existence (and also uniqueness) in a much simpler way for the Vlasov-Stokes equations. In order to prove the convergence in Theorem \ref{th:main}, the starting point is integrating the characteristics which yields \begin{equation} \label{eq:integrateCharacteristics} V(t,0,x,v) - V(0,0,x,v) = \lambda \left(\int_0^t u_\lambda(s,X(s,0,x,v)) + g \, \mathrm{d} s + X(0,0,x,v) - X(t,0,x,v) \right). \end{equation} Thus, \begin{equation} \label{eq:almostTransportedByU} \left| X(t,0,x,v) - x - \int_0^t u_\lambda(s,X(s,0,x,v)) + g \, \mathrm{d} s \right| \leq \frac{|V(t,0,x,v) - v|}{\lambda}. \end{equation} Therefore, provided the speed of the particles does not blow up, we see that for large values of $\lambda$ the particles are almost transported by the fluid plus the gravity. Clearly, this is also what happens for solutions to the limit inertialess equations \eqref{eq:limitEquation}. In order to show that $u_\lambda$ is close to $u_\ast$, we introduce a fluid velocity $\tilde{u}_\lambda$ which can be viewed as intermediate between $u_\lambda$ and $u_\ast$ by \begin{equation} \label{eq:uTilde} - \Delta \tilde{u}_\lambda + \nabla p_\lambda = g \rho_\lambda, \qquad \operatorname{div} \tilde{u}_\lambda = 0. \end{equation} In order to prove smallness of $u_\lambda - \tilde{u}_\lambda$, one needs estimates on $\rho_\lambda$ and $u_\lambda$ that are uniform in $\lambda$, which are more difficult to obtain than those that we use in the proof of well-posedness. Indeed, in view of the energy identity for the particles \eqref{eq:energyParticles}, any naive estimate based on that equation will blow up as $\lambda \to \infty$. However, as the first term is linear in the velocity and the other terms (which have a good sign) are quadratic, the energy $E$ cannot exceed a certain value as long as the particle density $\rho$ is not too concentrated (cf. Lemma \ref{lem:boundsU}).
In other words, if the energy is high enough, the quadratic friction terms will prevail over the linear gravitation terms and therefore prevent the energy from increasing further. However, if concentrations of the particle density occur, the particles essentially fall down like one small and heavy particle, leading to large velocities. Indeed, the terminal velocity of a spherical particle of radius $R$ in a Stokes fluid at rest is \[ V = \frac{2}{9}\frac{\rho_p - \rho_f}{\mu} g R^2. \] In order to rule out such concentration effects, we use again the representation of $f$ in \eqref{eq:fByCharacteristics} obtained from the characteristics. Indeed, computing $\rho$ by taking the integral over $v$ in \eqref{eq:fByCharacteristics}, we can show that the prefactor $e^{3 \lambda t}$ in that formula is canceled due to concentration of $f$ in velocity space in regions of size $e^{-\lambda t}$ as long as we control $\nabla u$ in a suitable way (cf. Lemma \ref{lem:biLipschitzV}). As $\nabla u$ is controlled by $E$ due to the energy identity \eqref{eq:energyFluid}, this enables us to get uniform estimates for $u$, $\nabla u$, and $\rho$ for small times. It turns out that also estimates on derivatives of $\rho$ are needed to prove smallness of $u_\lambda - \tilde{u}_\lambda$. These are provided by a more detailed analysis of the characteristics. \subsection{Plan of the paper} The rest of the paper is organized as follows. In Section 2, we prove global well-posedness of the Vlasov Stokes equations \eqref{eq:Vlasov Stokes}, based on energy estimates, analysis of the characteristics, and a fixed point argument. In Section 3, we derive a priori estimates that are uniform in $\lambda$ for small times by analyzing the characteristics more carefully. In particular we prove and use that the supports of the solutions concentrate in the space of velocities.
In Section 4.1, we use the a priori estimates proven in Section 3 to show that the fluid velocity $u_\lambda$ is close to the intermediate fluid velocity $\tilde{u}_\lambda$ defined in \eqref{eq:uTilde} as $\lambda \to \infty$. In Section 4.2, we prove the assertion of the main result, Theorem \ref{th:main}, up to times where we have uniform a priori estimates. This follows from compactness due to the a priori estimates and convergence of averages of $\rho_\lambda$ on small cubes, which we prove using again the characteristic equations. In Section 4.3, we finish the proof of the main result, Theorem \ref{th:main}, by extending the a priori estimates from Section 3 to arbitrary times. This is done by using both the a priori estimates and the convergence for small times. \section{Global well-posedness of the Vlasov-Stokes equations} In this section, we write $C$ for any constant that depends only on the initial datum. Any additional dependencies are denoted by arguments of $C$, e.g. $C(\lambda t)$ is a constant that depends only on $\lambda t$ and the initial datum. We use the convention that $C$ is monotone in all its arguments. \subsection{Estimates for the fluid velocity} \begin{lemma} \label{lem:WellPosednessFluid} Let $g \in L^\infty( \mathbb{R}^3 \times \mathbb{R}^3)$ be nonnegative, and assume $Q > 0$ is such that $\operatorname{supp} g \subset B_Q(0) \subset \mathbb{R}^3\times \mathbb{R}^3$. Let \begin{align} \rho (x) &:= \int_{\mathbb{R}^3} g(x,v) \, \mathrm{d} v, \\ j(x) := \rho \bar{V} &:= \int_{\mathbb{R}^3} g(x,v) v \, \mathrm{d} v, \\ E &:= \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} g(x,v) |v|^2 \, \mathrm{d} x \, \mathrm{d} v. \end{align} Then there exists a unique weak solution $u \in W^{1,\infty}(\mathbb{R}^3)$ to the Brinkman equation \[ - \Delta u + \nabla p + \rho u = j.
\] Moreover, \begin{align} \label{eq:energyEstimateFluid} \|\nabla u\|_{L^2(\mathbb{R}^3)}^2 + \| u \|^2_{L^2_\rho(\mathbb{R}^3)} &= (u,j)_{L^2(\mathbb{R}^3)} \leq \|\bar{V} \|^2_{L^2_\rho(\mathbb{R}^3)} \leq E, \\ \label{eq:L^inftyFluid} \| u \|_{L^\infty(\mathbb{R}^3)} & \leq C(\|g\|_{L^\infty(\mathbb{R}^3)},\|g\|_{L^1(\mathbb{R}^3)},E) (1 + Q), \\ \label{eq:LipschitzFluid} \|u \|_{W^{1,\infty}(\mathbb{R}^3)} & \leq C(Q,E) \|g\|_{L^\infty(\mathbb{R}^3)}. \end{align} \end{lemma} \begin{proof} Existence and uniqueness of weak solutions in $\dot{H}^1(\mathbb{R}^3) := \{ w \in L^{6}(\mathbb{R}^3) \colon \nabla w \in L^2(\mathbb{R}^3) \}$ follows from the Lax-Milgram theorem. In the following, we write $\|\cdot\|_q$ instead of $\|\cdot\|_{L^q(\mathbb{R}^3)}$ and $\|\cdot\|_{L_\rho^q}$ instead of $\|\cdot\|_{L_\rho^q(\mathbb{R}^3)}$. Testing the Brinkman equation with $u$ itself yields \begin{equation} \label{eq:energyIdentity} \|\nabla u\|_2^2 + \| u \|^2_{L^2_\rho} = (j,u)_{L^2(\mathbb{R}^3)} \leq \|u\|_{L^2_\rho} \|\bar{V}\|_{L^2_\rho}. \end{equation} By the Cauchy-Schwarz inequality \begin{equation} \label{eq:VBarByE} \bar{V}^2 \rho = \frac{\left(\int_{\mathbb{R}^3} g(x,v) v \, \mathrm{d} v\right)^2}{\int_{\mathbb{R}^3} g(x,v) \, \mathrm{d} v} \leq \int_{\mathbb{R}^3} g(x,v) v^2 \, \mathrm{d} v. \end{equation} Hence, \[ \| u \|^2_{L^2(\rho)} \leq \|\bar{V}\|^2_{L^2(\rho)} \leq E. \] Using again \eqref{eq:energyIdentity} yields \eqref{eq:energyEstimateFluid}. Using the critical Sobolev embedding, we have \begin{equation} \label{eq:uInL^6} \|u\|_6^2 \leq C \| \nabla u \|_2^2 \leq C E. \end{equation} Moreover, we can use this Sobolev inequality in \eqref{eq:energyEstimateFluid} to get \[ \|u\|_6^2 \leq C\|u\|_6 \|j\|_{{6/5}}.
\] Using the definition of $Q$ yields $\|j\|_{6/5} \leq C(Q) \|g\|_{\infty}$ and therefore \begin{equation} \label{eq:FluidEstimatesLinear} \| \nabla u \|_{2} + \|u\|_{6} \leq C(Q) \|g\|_{\infty}. \end{equation} Standard regularity theory for the Stokes equation (see \cite{Ga11}) implies \begin{equation} \label{eq:ellipticRegularity} \| \nabla^2 u\|_{q} \leq C\|\rho u \|_q + C\|j\|_q \end{equation} for all $ 1 < q < \infty$. In order to prove \eqref{eq:LipschitzFluid}, we use \eqref{eq:ellipticRegularity} and \eqref{eq:uInL^6} to get \[ \| \nabla^2 u\|_{6} \leq C\|\rho u \|_6 + C\|j\|_6 \leq C\|\rho\|_\infty \| u \|_6 + C\|j\|_6 \leq C(E, Q) \|g\|_\infty. \] Hence, by Sobolev embedding and \eqref{eq:FluidEstimatesLinear} \[ \| \nabla u\|_{\infty} \leq C \| \nabla^2 u\|_{6} + C \| \nabla u\|_{2} \leq C(E, Q) \|g\|_\infty, \] and similarly for $\|u\|_{\infty}$. It remains to prove \eqref{eq:L^inftyFluid}. Let $R > 0$. Then, \begin{align} \rho = \int_{\mathbb{R}^3} g \, \mathrm{d} v \leq \int_{\{ |v| < R\}} g \, \mathrm{d} v + R^{-2} \int_{\{|v|>R\}} |v|^2 g \, \mathrm{d} v \leq C R^3 \|g\|_{\infty} + C R^{-2} \int_{\{|v|>R\}} |v|^2 g \, \mathrm{d} v. \end{align} We choose \[ R = \left(\int_{\mathbb{R}^3} |v|^2 g \, \mathrm{d} v\right)^{1/5} \|g\|_{\infty}^{-1/5}. \] Thus, \[ \rho \leq \|g\|_{\infty}^{2/5} \left(\int_{\mathbb{R}^3} |v|^2 g \, \mathrm{d} v\right)^{3/5}, \] and therefore, \begin{equation} \label{eq:rho5over3} \|\rho\|_{5/3} \leq \|g\|_{\infty}^{2/5} E^{\frac{3}{5}}. \end{equation} Moreover, by definition of $Q$, \eqref{eq:rho5over3} implies for all $1 \leq p \leq 5/3$, \begin{equation} \label{eq:j5over3} \|j\|_{p} \leq Q \|\rho \|_{p} \leq C(\|g\|_{\infty},\|g\|_{1},E) Q. \end{equation} Sobolev and H\"older's inequality imply \[ \| u \|_{10} \leq C \| \nabla^2 u\|_{30/23} \leq C\|\rho\|_{5/3} \|u\|_{6} + C\|j\|_{30/23} \leq C(\|g\|_{\infty},\|g\|_{1},E) (1 + Q), \] where we used \eqref{eq:uInL^6}, \eqref{eq:rho5over3}, and \eqref{eq:j5over3}.
Now, we can repeat the argument, using this improved estimate for $u$ in \eqref{eq:ellipticRegularity}. This yields \[ \|u\|_{30} \leq C(\|g\|_{\infty},\|g\|_{1},E) (1 + Q). \] Using again \eqref{eq:ellipticRegularity} yields \[ \| \nabla^2 u\|_{30/19} \leq C(\|g\|_{\infty},\|g\|_{1},E) (1 + Q). \] As $30/19 > 3/2$, we can apply Sobolev embedding to get \[ \|u\|_{\infty} \leq C\| \nabla^2 u\|_{30/19} + C \|u\|_{6} \leq C(\|g\|_{\infty},\|g\|_{1},E) (1 + Q), \] which finishes the proof of \eqref{eq:L^inftyFluid}. \end{proof} \subsection{A priori estimates for the particle density} \label{sec:energyParticles} \begin{lemma} \label{lem:aPrioriWellPosedness} Let $T > 0$ and $f_0 \in W^{1,\infty}(\mathbb{R}^3 \times \mathbb{R}^3)$ and let $Q_0 > 0$ be minimal such that $\operatorname{supp} f_0 \subset B_{Q_0}(0)$. Assume $f \in W^{1,\infty}((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)$ is a solution to \eqref{eq:Vlasov Stokes} with $u \in L^\infty((0,T);W^{1,\infty}(\mathbb{R}^3))$. Then, $f$ is compactly supported on $[0,T]\times\mathbb{R}^3 \times \mathbb{R}^3$. Let $Q(t)$ be minimal such that $\operatorname{supp} f(t,\cdot,\cdot) \subset B_{Q(t)}(0)$. Furthermore, define \begin{align} E(t) &:= \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} |v|^2 f \, \mathrm{d} x \, \mathrm{d} v. 
\end{align} Then, \begin{align} \label{eq:fL^Infty} \|f(t,\cdot,\cdot)\|_{L^\infty(\mathbb{R}^3\times\mathbb{R}^3)} &= e^{3 \lambda t}, \\ \label{eq:massConservation} \| \rho \|_1 &= 1, \\ \label{eq:kineticEnergyParticles} \partial_t E &= 2 \lambda \left( g \cdot \int_{\mathbb{R}^3} j \, \mathrm{d} x - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (u - v)^2 f \, \mathrm{d} x \, \mathrm{d} v - \|\nabla u \|^2_{L^2(\mathbb{R}^3)} \right) \\ \label{eq:kineticEnergyParticlesEstimate} &\leq 2 \lambda \bigg( C E^{\frac{1}{2}} - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (v - \bar{V})^2 f \, \mathrm{d} x \, \mathrm{d} v - \|u-\bar{V}\|_{L^2_\rho(\mathbb{R}^3)}^2 - \|\nabla u \|^2_{L^2(\mathbb{R}^3)} \bigg)\!, \hspace{1.2em}\\ \label{eq:particleEnergyEstimate} E(t) & \leq C(1+ (\lambda t)^2), \\ \label{eq:growthOfQ} Q(t) & \leq C(t,\lambda). \end{align} \end{lemma} \begin{proof} By the regularity assumptions on $f$ and $u$, the characteristics in \eqref{eq:characteristics} are well defined and \eqref{eq:fByCharacteristics} holds. This shows that the support of $f$ remains uniformly bounded on compact time intervals. The exponential growth of the $L^\infty$-norm of $f$ \eqref{eq:fL^Infty} follows from the characteristic equations as we have seen in \eqref{eq:fByCharacteristics}. Mass conservation \eqref{eq:massConservation} follows directly from integrating the Vlasov equation \eqref{eq:Vlasov Stokes}. 
We multiply the Vlasov equation by $|v|^2$ and integrate to find \begin{equation} \begin{aligned} \partial_t E & = 2 \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} v \cdot \lambda (g + u - v) f \, \mathrm{d} x \, \mathrm{d} v \\ & = 2 \lambda \left( g \cdot \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} v f \, \mathrm{d} x \, \mathrm{d} v - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (u - v)^2 f \, \mathrm{d} x \, \mathrm{d} v + \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} u \cdot (u-v) f \, \mathrm{d} x \, \mathrm{d} v \right)\\ & = 2 \lambda \left( g \cdot \int_{\mathbb{R}^3} j \, \mathrm{d} x - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (u - v)^2 f \, \mathrm{d} x \, \mathrm{d} v - \|\nabla u \|^2_{L^2(\mathbb{R}^3)} \right). \end{aligned} \end{equation} This yields the identity \eqref{eq:kineticEnergyParticles}. By the Cauchy-Schwarz inequality \begin{equation} \label{eq:jE^1/2} \int_{\mathbb{R}^3} |j| \, \mathrm{d} x \leq \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} |v| f \, \mathrm{d} v \, \mathrm{d} x \leq \|\rho\|_{L^1(\mathbb{R}^3)}^{1/2} E^{1/2}. \end{equation} Moreover, by definition of $\bar{V}$ in \eqref{eq:defJVbar} \begin{equation} \label{eq:Variance} \begin{aligned} \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (u - v)^2 f \, \mathrm{d} x \, \mathrm{d} v &=\int_{\mathbb{R}^3 \times \mathbb{R}^3} \!\! \left( (v - \bar{V})^2 + (\bar{V} - u)^2 - 2 (v - \bar{V})(\bar{V} - u) \right)f \, \mathrm{d} x \, \mathrm{d} v \\ &= \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (v - \bar{V})^2 f \, \mathrm{d} x \, \mathrm{d} v + \|u-\bar{V}\|_{L^2_\rho(\mathbb{R}^3)}^2. \end{aligned} \end{equation} Using \eqref{eq:jE^1/2} and \eqref{eq:Variance} shows \eqref{eq:kineticEnergyParticlesEstimate}. In particular \[ \partial_t E \leq C \lambda E^{1/2}. \] This proves \eqref{eq:particleEnergyEstimate} by a comparison principle for ODEs.
The characteristic equation for $V$ in \eqref{eq:characteristics} implies \begin{align*} |V(t,0,x,v)|& = \left|e^{-\lambda t} \left( v + \lambda \int_0^t e^{\lambda s} (g + u(s,X(s,0,x,v))) \, \mathrm{d} s \right)\right| \\ &\leq e^{-\lambda t} |v| + |g| + \lambda \int_0^t \|u(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} s. \end{align*} Thus, for all $ (x,v) \in \operatorname{supp} f_0$, we get by Lemma \ref{lem:WellPosednessFluid}, \eqref{eq:fL^Infty}, \eqref{eq:massConservation}, and \eqref{eq:particleEnergyEstimate} \begin{equation} \begin{aligned} \label{eq:velocityBound} |V(t,0,x,v)| & \leq Q_0 + 1 + C(\|f\|_{L^\infty((0,t) \times \mathbb{R}^3\times \mathbb{R}^3)},\|E\|_{L^\infty(0,t)}) \int_0^t (1 + Q(s)) \, \mathrm{d} s \\ & \leq C + C (\lambda t) \int_0^t (1+Q(s)) \, \mathrm{d} s. \end{aligned} \end{equation} By the equation for $X$, we get for all $ (x,v) \in \operatorname{supp} f_0$ \begin{equation} \label{eq:PositionBound} |X(t,0,x,v)| \leq Q_0 + \int_0^t |V(s,0,x,v)| \, \mathrm{d} s \leq Q_0 + t C (\lambda t) \int_0^t (1+Q(s)) \, \mathrm{d} s. \end{equation} Hence, \[ Q(t) \leq \sup_{(x,v) \in \operatorname{supp} f_0} |( X(t,0,x,v),V(t,0,x,v) )| \leq C + (1+t) C(\lambda t) \int_0^t (1+Q(s)) \, \mathrm{d} s. \] Gronwall's inequality yields \eqref{eq:growthOfQ}. \end{proof} \subsection{Well-posedness by the Banach fixed point theorem} \begin{proposition} \label{pro:WellPosedness} Let $f_0 \in W^{1,\infty}(\mathbb{R}^3 \times \mathbb{R}^3) $ with compact support. Then, for all $T>0$, there exists a unique solution $f \in W^{1,\infty}((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)$ to \eqref{eq:Vlasov Stokes} with $u \in L^{\infty}((0,T);W^{2,\infty}(\mathbb{R}^3)) \cap W^{1,\infty}((0,T)\times\mathbb{R}^3)$. \end{proposition} \begin{proof} We want to prove existence of solutions using the Banach fixed point theorem. Let $Q_1,E_1 > 0$.
We define the metric space, where we want to prove contractiveness, \begin{align} Y := \bigg\{ h \in L^{\infty}((0,T)\times \mathbb{R}^3 \times \mathbb{R}^3) \colon &h \geq 0, \|h(t,\cdot,\cdot)\|_{L^1(\mathbb{R}^3\times\mathbb{R}^3)} = \|f_0\|_{L^1(\mathbb{R}^3\times\mathbb{R}^3)}, \\ &\int_{\mathbb{R}^3\times\mathbb{R}^3} \hspace{-1em} (1 + |v|^2) h \, \mathrm{d} x \, \mathrm{d} v \leq E_1, \operatorname{supp} h \subset [0,T] \times \overline{B_{Q_1}(0)} \bigg\}. \end{align} Then, $Y$ is a complete metric space. Let $T > 0$ and $h_1, h_2 \in Y$. For $i=1,2$, we define $u_i$ to be the solution to \[ - \Delta u_i + \nabla p = \int_{\mathbb{R}^3} (v-u_i) h_i \, \mathrm{d} v. \] We define the characteristics $(X_i,V_i)(s,t,x,v)$ analogously to \eqref{eq:characteristics} by \begin{align} \partial_s (X_i,V_i)(s,t,x,v) &= (V_i(s,t,x,v),\lambda(g + u_i(s,X_i(s,t,x,v)) - V_i(s,t,x,v))), \\ (X_i,V_i)(t,t,x,v) &= (x,v). \end{align} Then, the solution to the equation \begin{equation} \partial_t f_i + v \cdot \nabla_x f_i + \lambda \operatorname{div}_v \left( g f_i + (u_i-v) f_i \right) = 0, \end{equation} with initial datum $f_0$ is given by \begin{equation} \label{eq:fByCharacteristicsContraction} f_i(t,x,v) = e^{3\lambda t} f_0 ((X_i,V_i)(0,t,x,v)), \end{equation} and $f_i \in W^{1,\infty}((0,T)\times\mathbb{R}^3\times\mathbb{R}^3)$. We estimate \begin{equation} \label{eq:difff1f2} |f_1(t,x,v) - f_2(t,x,v)| \leq e^{3\lambda t} \| \nabla f_0 \|_{L^\infty(\mathbb{R}^3\times\mathbb{R}^3)} |(X_1,V_1)(0,t,x,v) - (X_2,V_2)(0,t,x,v)|.
\end{equation} Furthermore, writing $X_i(s)$ instead of $X_i(s,t,x,v)$ and similar for $V_i$, we have for all $0 \leq s \leq t$ \begin{align} &|(X_1,V_1)(s) - (X_2,V_2)(s)| \\ &\leq \int_s^t |\left(V_1(\tau) - V_2(\tau), \lambda \left(u_1(\tau,X_1(\tau))- u_2(\tau,X_2(\tau)) - V_1(\tau) + V_2(\tau) \right)\right)| \, \mathrm{d} \tau \\ & \leq \int_s^t (1+\lambda) |V_1(\tau) - V_2(\tau)| + \lambda \|\nabla u_1(\tau,\cdot)\|_{L^\infty(\mathbb{R}^3)} |X_1(\tau) - X_2(\tau)| + \lambda \| u_1(\tau,\cdot) - u_2(\tau,\cdot)\|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} \tau \\ & \leq C(\lambda,Q_1,E_1) \int_s^t |(X_1,V_1)(\tau) - (X_2,V_2)(\tau)| \, \mathrm{d} \tau + C(\lambda,Q_1,E_1) (t-s) \|h_1 - h_2 \|_{L^\infty((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)}, \end{align} where we used Lemma \ref{lem:WellPosednessFluid}. Gronwall's inequality implies \[ |(X_1,V_1)(0) - (X_2,V_2)(0)| \leq C(\lambda,Q_1,E_1) t \|h_1 - h_2 \|_{L^\infty((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)} \exp\left(C(\lambda,Q_1,E_1) t \right). \] Inserting this in \eqref{eq:difff1f2} yields \begin{equation} \label{eq:contraction} \begin{aligned} &\|f_1 - f_2\|_{L^\infty((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)} \\ &\leq T e^{3\lambda T} C(\lambda,Q_1,E_1) \| \nabla f_0 \|_{L^\infty(\mathbb{R}^3\times\mathbb{R}^3)} \|h_1 - h_2 \|_{L^\infty((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)} \exp\left(C(\lambda,Q_1,E_1) T \right). \end{aligned} \end{equation} For $L>0$, consider $B_L(0) \subset Y$. Then, for all $L$, equation \eqref{eq:contraction} implies that there exists $T>0$ such that the mapping $h \mapsto f$ is contractive. We have to check that $h \in B_L(0)$ implies $f \in B_L(0)$. First, \begin{equation} \|f(t,\cdot,\cdot)\|_{L^1(\mathbb{R}^3\times\mathbb{R}^3)} = \|f_0\|_{L^1(\mathbb{R}^3\times\mathbb{R}^3)} \label{eq:massConservationContraction} \end{equation} follows from the equation.
Moreover, for any $L > \|f_0\|_{L^\infty( \mathbb{R}^3 \times \mathbb{R}^3)}$, equation \eqref{eq:fByCharacteristicsContraction} implies that we can choose $T$ sufficiently small such that \begin{equation} \label{eq:Linftyf} \|f\|_{L^\infty((0,T) \times \mathbb{R}^3 \times \mathbb{R}^3)} = \|f_0\|_{L^\infty(\mathbb{R}^3 \times \mathbb{R}^3)} e^{3 \lambda T} \leq L. \end{equation} Furthermore, we have \begin{align} \partial_t \int_{\mathbb{R}^3} \int_{\mathbb{R}^3} |v|^2 f \, \mathrm{d} x \, \mathrm{d} v & = 2 \lambda \int_{\mathbb{R}^3} \int_{\mathbb{R}^3} v \cdot (g + u - v) f \, \mathrm{d} x \, \mathrm{d} v \\ & \leq 2 \lambda (|g| + \|u\|_{L^\infty(\mathbb{R}^3)}) \int_{\mathbb{R}^3} \int_{\mathbb{R}^3} (1 + |v|^2) f \, \mathrm{d} x \, \mathrm{d} v. \end{align} Hence, using mass conservation, equation \eqref{eq:massConservationContraction}, \[ \partial_t \int_{\mathbb{R}^3\times\mathbb{R}^3} \hspace{-1em} (1 + |v|^2) f \, \mathrm{d} x \, \mathrm{d} v \leq 2 \lambda (|g|+\|u\|_{L^\infty(\mathbb{R}^3)}) \int_{\mathbb{R}^3\times\mathbb{R}^3} \hspace{-1em}(1 + |v|^2) f \, \mathrm{d} x \, \mathrm{d} v. \] Therefore, Lemma \ref{lem:WellPosednessFluid} and Gronwall's inequality imply \begin{equation} \int_{\mathbb{R}^3\times\mathbb{R}^3} \hspace{-1em} (1 + |v|^2) f \, \mathrm{d} x \, \mathrm{d} v \leq \int_{\mathbb{R}^3\times\mathbb{R}^3} \hspace{-1em} (1 + |v|^2) f_0 \, \mathrm{d} v \, \mathrm{d} x \exp (C(Q_1,E_1) \lambda L t). \end{equation} Thus, for any $E_1 > \int_{\mathbb{R}^3\times\mathbb{R}^3} (1 + |v|^2) f_0 \, \mathrm{d} v \, \mathrm{d} x$, we can choose $T$ small enough such that $ \int_{\mathbb{R}^3\times\mathbb{R}^3} (1 + |v|^2) f \, \mathrm{d} x \, \mathrm{d} v \leq E_1$ for all $t \leq T$. Finally, we need to control the support of $f$. To do this, we follow the same argument as in the last part of the proof of Lemma \ref{lem:aPrioriWellPosedness} to get \[ Q(t) \leq Q_0 + (1+t) \int_0^t C(L,E_1,Q_1) \, \mathrm{d} s \leq Q_0 + (1+t) t C(L,E_1,Q_1).
\] Again, for any $Q_1 > Q_0$, we can choose $T$ small enough such that $ Q(t) \leq Q_1$ for all $t \leq T$. Therefore, by the Banach fixed point theorem, we get local in time existence of solutions to \eqref{eq:Vlasov Stokes}. Global existence follows directly from the a priori estimates in Lemma \ref{lem:aPrioriWellPosedness}, since these ensure that all the relevant quantities for the fixed point argument do not blow up in finite time. Since $f \in W^{1,\infty}((0,T)\times\mathbb{R}^3\times\mathbb{R}^3)$ with uniform compact support, higher regularity of $u$ follows from taking derivatives in the Brinkman equations in \eqref{eq:Vlasov Stokes} and using regularity theory for Stokes equations similar as in the proof of Lemma \ref{lem:WellPosednessFluid}. \end{proof} \section{Uniform estimates on $\bm{\rho_\lambda}$ and $\bm{u_\lambda}$} In the following, we assume that $(f,u)$ is the solution to the Vlasov-Stokes equations \eqref{eq:Vlasov Stokes} for some $\lambda > 0$ and some compactly supported initial datum $f_0 \in W^{1,\infty}(\mathbb{R}^3\times\mathbb{R}^3)$. In this section we want to derive a priori estimates for these solutions that do not depend on $\lambda$. This is why we cannot use the a priori estimates derived in Lemma \ref{lem:aPrioriWellPosedness}. However, the drawback of the estimates that we prove in this section is that they allow for blow-up in finite time. This is also why they are not suitable in the proof of global well-posedness, that we showed in the previous section. Later, we will use the limit equation in order to show that the estimates derived here allow for uniform estimates for arbitrary times. Again, we denote by $C$ any constant, which only depends on $f_0$ and may change from line to line. 
\subsection{Estimates for the fluid velocity} In this subsection we show that the fluid velocity as well as the particle velocity is controlled by $\|\rho\|_{L^\infty(\mathbb{R}^3)}$, uniformly in $\lambda$, which means that high velocities can only occur if particles concentrate in position space. This also implies control on the particle positions and velocities. The proof is based on the energy identity from Lemma \ref{lem:aPrioriWellPosedness}, equation \eqref{eq:kineticEnergyParticles}, and the subsequent estimate \eqref{eq:kineticEnergyParticlesEstimate}. The idea is to estimate the sum of the quadratic terms in that expression, which have a negative sign, by $E(t)$ from below. The following lemma, which is a general observation on weighted $L^2$-spaces, shows why such an estimate is true if $\|\rho\|_{L^{3/2}(\mathbb{R}^3)}$ is not too large. Having shown this estimate, the quadratic terms in \eqref{eq:kineticEnergyParticlesEstimate} dominate the linear term, which has been estimated by $E(t)^{1/2}$. This leads to control of $E$ uniformly in $\lambda$. \begin{lemma} \label{lem:frictionCoercive} There exists a constant $c_0$, such that for all nonnegative $\sigma \in L^{3/2}(\mathbb{R}^3)$, $h \in L^2_\sigma(\mathbb{R}^3)$ and $w \in H^1(\mathbb{R}^3)$, \[ \| \nabla w \|_{L^2(\mathbb{R}^3)}^2 + \| w - h \|_{L^2_\sigma(\mathbb{R}^3)}^2 \geq c_0 \min \{ \|\sigma\|_{L^{3/2}(\mathbb{R}^3)}^{-1}, 1\} \|h\|_{L^2_\sigma(\mathbb{R}^3)}^2. \] \end{lemma} \begin{proof} We estimate using the critical Sobolev inequality \begin{equation} \label{eq:frictionCoerciveSobolev} \| w \|^2_{L^2_\sigma(\mathbb{R}^3)} \leq \|w\|_{L^6(\mathbb{R}^3)}^2 \|\sigma\|_{L^{3/2}(\mathbb{R}^3)} \leq C \| \nabla w \|_{L^2(\mathbb{R}^3)}^2 \|\sigma\|_{L^{3/2}(\mathbb{R}^3)}. \end{equation} We have for any $ \theta > 0 $ and any $a,b \in H$ for some Hilbert space $H$ \[ \|a - b\|^2 = \| a \|^2 + \|b\|^2 - 2(a,b) \geq (1 - \theta) \|a\|^2 + (1- \frac{1}{\theta}) \|b\|^2.
\] Applying this with $1 - \theta := - C^{-1} \|\sigma \|_{L^{3/2}(\mathbb{R}^3)}^{-1}$, where $C$ is the constant from equation \eqref{eq:frictionCoerciveSobolev}, we find \[ \| \nabla w \|_{L^2(\mathbb{R}^3)}^2 + \| w - h \|_{L^2_\sigma(\mathbb{R}^3)}^2 \geq \frac{\theta - 1}{\theta} \|h\|_{L^2_\sigma(\mathbb{R}^3)}^2. \] To conclude, we notice that \[ \frac{\theta - 1}{\theta} = \frac{C^{-1} \|\sigma \|_{L^{3/2}(\mathbb{R}^3)}^{-1}}{1+C^{-1} \|\sigma \|_{L^{3/2}(\mathbb{R}^3)}^{-1}} \geq c_0 \min \{ \|\sigma\|_{L^{3/2}(\mathbb{R}^3)}^{-1}, 1\}. \qedhere \] \end{proof} \begin{lemma} \label{lem:boundsU} There exists a constant $C$ that depends only on $f_0$ such that for all $\lambda >0$ and all $t > 0$, we have \begin{align} \label{eq:energyBound} E(t) & \leq C\sup_{s\leq t}\|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{2}{3}}, \\ \label{eq:uInfty} \| u(t,\cdot)\|_{L^{\infty}(\mathbb{R}^3)} &\leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}, \\ \label{eq:nablaU} \|\nabla u(t,\cdot)\|_{L^{\infty}(\mathbb{R}^3)} &\leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}^{2}, \\ \label{eq:VBarInfty} \| \bar{V}(t,\cdot)\|_{L^{\infty}(\mathbb{R}^3)} &\leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}, \end{align} where $\bar{V}$ is the average particle velocity defined in \eqref{eq:defJVbar}. Moreover, for all $(x,v) \in \operatorname{supp} f_0$, \begin{align} \label{eq:boundOnV} |V(t,0,x,v)| &\leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}, \\ |X(t,0,x,v)| &\leq C t \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}. 
\label{eq:boundOnX} \end{align} \end{lemma} \begin{proof} By the energy estimate \eqref{eq:kineticEnergyParticlesEstimate} from Lemma \ref{lem:aPrioriWellPosedness} and Lemma \ref{lem:frictionCoercive}, we have for the energy of the particles \begin{align} \partial_t E & \leq 2 \lambda \left( C E^{\frac{1}{2}} - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (v - \bar{V})^2 f \, \mathrm{d} x \, \mathrm{d} v - \|u-\bar{V}\|_{L^2_\rho(\mathbb{R}^3)}^2 - \|\nabla u \|^2_{L^2(\mathbb{R}^3)} \right) \\ & \leq 2 \lambda \left( C E^{\frac{1}{2}} - \int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} (v - \bar{V})^2 f \, \mathrm{d} x \, \mathrm{d} v - c_0 \min\{ \|\rho\|_{L^{3/2}(\mathbb{R}^3)}^{-1},1\} \|\bar{V}\|_{L^2_\rho(\mathbb{R}^3)}^2 \right)\\ & \leq 2 \lambda \left(C E^{\frac{1}{2}} - c_0 \min\{ \|\rho\|_{L^{3/2}(\mathbb{R}^3)}^{-1},1\} E \right). \end{align} A comparison principle for ODEs implies \begin{equation} \label{eq:energyEstimateRho} E^{\frac{1}{2}}(t) \leq E(0)^{\frac{1}{2}} e^{-2\lambda t} + \frac{C}{c_0} \sup_{s\leq t} \max \{\|\rho\|_{L^{3/2}(\mathbb{R}^3)},1 \} \leq C \sup_{s\leq t}\|\rho\|_{L^{3/2}(\mathbb{R}^3)} \leq C\sup_{s\leq t}\|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{1}{3}}, \end{equation} where we used that the $L^1$-norm of $\rho$ is constant in time by \eqref{eq:massConservation}. Note that here and in the following we also use that $C$ might depend on $f_0$ in order to get rid of lower order terms (using that if $f_0 = 0$, the solution $f$ is also trivial). This proves \eqref{eq:energyBound}. Recall from \eqref{eq:energyEstimateFluid} that $\|\bar V\|_{L^2_\rho(\mathbb{R}^3)} \leq E^{\frac{1}{2}}$. Thus, \eqref{eq:energyEstimateRho} yields \begin{equation} \label{eq:barVRho} \|\bar V(t)\|_{L^2_\rho(\mathbb{R}^3)} \leq E^{\frac{1}{2}}(t) \leq C \sup_{s\leq t}\|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{1}{3}}.
\end{equation} Using regularity theory for the Stokes equations (see \cite{Ga11}) together with \eqref{eq:energyEstimateFluid} and \eqref{eq:barVRho} yields \begin{align} \|\nabla^2 u \|_{L^2(\mathbb{R}^3)} &\leq C\|\rho u \|_{L^2(\mathbb{R}^3)} + C\|\rho \bar V\|_{L^2(\mathbb{R}^3)} \leq C\|u\|_{L^6(\mathbb{R}^3)} \|\rho\|_{L^3(\mathbb{R}^3)} + C\|\rho \bar V\|_{L^2(\mathbb{R}^3)} \\ &\leq C \|\bar V\|_{L^2_\rho(\mathbb{R}^3)} \|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{2}{3}} + \|\bar V\|_{L^2_\rho(\mathbb{R}^3)} \|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{1}{2}} \\ &\leq C \|\bar V\|_{L^2_\rho(\mathbb{R}^3)} \|\rho\|_{L^{\infty}(\mathbb{R}^3)}^{\frac{2}{3}} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}. \end{align} Sobolev inequality and \eqref{eq:energyEstimateFluid} yield \[ \|u\|_{L^\infty(\mathbb{R}^3)} \leq \|u\|_{C^{0,\frac{1}{2}}(\mathbb{R}^3)} \leq C \|u\|_{W^{1,6}(\mathbb{R}^3)} \leq C \|\nabla u \|_{W^{1,2}(\mathbb{R}^3)} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}. \] This proves \eqref{eq:uInfty}. Using the characteristic equations \eqref{eq:characteristics}, we find for all $(x,v) \in \operatorname{supp} f_0$ \begin{equation} |V(t,0,x,v)| \leq e^{-\lambda t} |v| + |g| + \sup_{s\leq t} \|u(s)\|_{L^\infty(\mathbb{R}^3)} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}, \end{equation} with a constant $C$ that depends on the support of $f_0$. This proves \eqref{eq:boundOnV}. Moreover, using the equation for $X$, \eqref{eq:boundOnV} implies \eqref{eq:boundOnX}. Furthermore, by \eqref{eq:boundOnV} \[ \|\bar{V}(t)\|_{L^\infty(\mathbb{R}^3)} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}, \] which proves \eqref{eq:VBarInfty}. This can be used again to derive a bound for $\nabla^2 u$ in $L^p(\mathbb{R}^3)$ to get \eqref{eq:nablaU}.
More precisely, \begin{align} \|\nabla^2 u \|_{L^6(\mathbb{R}^3)} &\leq \|u\|_{L^6(\mathbb{R}^3)} \|\rho\|_{L^\infty(\mathbb{R}^3)} + \|\rho \bar V\|_{L^6(\mathbb{R}^3)} \\ &\leq C \|\bar V\|_{L^2_\rho(\mathbb{R}^3)} \|\rho\|_{L^{\infty}(\mathbb{R}^3)} + \|\bar V\|^{\frac{1}{3}}_{L^2_\rho(\mathbb{R}^3)} \|\bar V\|_{L^\infty(\mathbb{R}^3)}^{\frac{2}{3}} \|\rho\|_{L^\infty}^{\frac{5}{6}} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}^2. \end{align} Thus, \[ \|\nabla u\|_{L^\infty(\mathbb{R}^3)} \leq C \sup_{s\leq t} \|\rho\|_{L^\infty(\mathbb{R}^3)}^2. \qedhere \] \end{proof} \subsection{Estimates for the particle density} In this subsection we prove estimates on $\rho$ that are uniform in $\lambda$ for $\lambda$ sufficiently large but depend on $u$. Then, we will combine these estimates with the ones from Lemma \ref{lem:boundsU} in order to get estimates on $\rho$ independent of $\lambda$ and $u$ but only for small times. We first prove a small lemma on estimates for ODEs that will be used several times when analyzing the characteristics. \begin{lemma} \label{lem:ODEEstimates} Let $T>0$ and $a,b:[0,T] \to \mathbb{R}_+$ be Lipschitz continuous. Let $\alpha \colon [0,T] \to \mathbb{R}_+$ be continuous and $\lambda \geq 4 \max\{ 1,\|\alpha\|_{L^\infty(0,T)} \}$. Let $\beta \geq 0$ be some constant and assume that on $(0,T)$ \begin{align} |\dot{a}| &\leq b, \\ \dot{b} &\leq \lambda(\alpha a - b) + \beta e^{-\lambda s}. \end{align} \begin{enumerate}[label=(\roman*)] \item \label{it:ODEEstimates1} If $a(T) = 0$, then for all $s,t \in [0,T]$ with $s \leq t$ \begin{align} a(t) &\leq \frac{2}{\lambda} b(t) + \frac{4}{\lambda^2} \beta e^{-\lambda t}, \label{eq:ODEEstimates1a} \\ b(t) & \leq \exp \left(\int_s^t -\lambda + 2 \alpha(\tau) \, \mathrm{d} \tau \right) \left( b(s) + \frac{2 \beta}{\lambda} e^{-\lambda s} \right).
\label{eq:ODEEstimates1b} \end{align} \item \label{it:ODEEstimates2} If $\beta = 0$ and $b(0) = 0$, then for all $t \in [0,T]$ \[ b(t) \leq 2 \|\alpha\|_{L^\infty(0,T)} a. \] \end{enumerate} \end{lemma} \begin{proof} We define \[ z(s) := b(s) - \frac{\lambda}{2} a(s) + \frac{2}{\lambda} \beta e^{-\lambda s}. \] Then, if $a(T) = 0$, \[ z(T) = b(T) + \frac{2}{\lambda} \beta e^{-\lambda T} \geq 0, \] and \[ \dot{z} \leq \lambda \left( \alpha a - \frac{b}{2} \right) - \beta e^{- \lambda s} = \lambda \left( \alpha a - \frac{\lambda}{4} a - \frac{z}{2} + \frac{\beta}{\lambda} e^{- \lambda s} \right) + \beta e^{- \lambda s} \leq - \frac{\lambda}{2} z. \] Hence, (applying Gronwall's inequality to $-z(T-t)$) we find $z \geq 0$ in $[0,T]$. This proves \eqref{eq:ODEEstimates1a}. Moreover, \eqref{eq:ODEEstimates1a} implies \[ \dot{b} \leq (2 \alpha - \lambda) b + \left( 1+ \frac{4}{\lambda} \right) \beta e^{-\lambda s} \leq (2 \alpha - \lambda) b + 2 \beta e^{-\lambda s}. \] Thus, using the comparison principle for ODEs yields \eqref{eq:ODEEstimates1b}. In order to prove \ref{it:ODEEstimates2}, we define $z := 2 \|\alpha\|_{L^\infty(0,T)} a - b$. Then, $b(0) = 0$ implies $z(0) \geq 0$. Using the equations for $a$ and $b$, one obtains $\dot{z} \geq - (\lambda/2) z$ as in the proof of part (i). This implies $z \geq 0$ in $[0,T]$, and the assertion follows. \end{proof} Using the previous Lemma, we are able to prove that the particle velocities concentrate in regions of size $e^{-\lambda t}$ with an error due to fluctuations of the fluid velocity. Based on this result and equation \eqref{eq:fByCharacteristics}, we also prove an estimate for $\rho$. \begin{lemma} \label{lem:biLipschitzV} Let $T > 0$ and assume $\lambda \geq 4(1+ \|\nabla u\|_{L^\infty((0,T) \times \mathbb{R}^3)})$. Then, for all $t < T$ and all $x \in \mathbb{R}^3$, the map \[ v \mapsto V(0,t,x,v) \] is bi-Lipschitz. 
In particular its inverse $W(t,x,w)$ is well defined, and \begin{equation} \label{eq:rhoByW} \rho(t,x) = \int_{\mathbb{R}^3} e^{ 3 \lambda t} f_0(X(0,t,x,W(t,x,w)),w) \det \nabla_w W(t,x,w) \, \mathrm{d} w. \end{equation} Moreover, denoting \begin{equation} \label{eq:defM} M(t) := \exp \left(\int_0^t 2 \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} s \right), \end{equation} we have \begin{align} |\nabla_v V(0,t,x,v)| &\leq M(t) e^{\lambda t}, \label{eq:LipschitzV} \\ |\nabla_w W(t,x,w)| &\leq M(t) e^{-\lambda t}, \label{eq:LipschitzW} \\ 0 \leq \det \nabla_w W(t,x,w) &\leq M(t)^3 e^{-3\lambda t}. \label{eq:JacobianW} \end{align} Furthermore, \begin{equation} \label{eq:estimateRho} \| \rho(t,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 M(t)^3, \end{equation} where the constant depends only on $f_0$. \end{lemma} \begin{proof} We fix $t$, $x$, $v_1$, and $v_2$ and define \begin{align} a(s) &= | X(s,t,x,v_1) - X(s,t,x,v_2) |,\\ b(s) &= | V(s,t,x,v_1) - V(s,t,x,v_2)|. \end{align} Then, \begin{align} |\dot{a}| & \leq b, \qquad &&a(t) = 0, \\ \dot{b} &\leq \lambda ( \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} a - b), \qquad &&b(t) = |v_1 - v_2|. \end{align} Then, with $\alpha(s) := \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)}$ and $\beta = 0$, we can apply Lemma \ref{lem:ODEEstimates}\ref{it:ODEEstimates1} to deduce \[ b(t) \leq b(0) M(t) e^{-\lambda t}, \] which implies \begin{equation} \label{eq:LipschitzInverse} b(0) \geq M(t)^{-1} e^{\lambda t} |v_1 - v_2|. \end{equation} Note that the first inequality in \eqref{eq:ODEEstimates1a} also implies \[ a(t) \leq \frac{2}{\lambda} b(t). \] Hence, \[ \dot{b} \geq\lambda ( - \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} a - b) \geq \left( - \lambda - 2\|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} \right) b. \] Thus \begin{equation} \label{eq:Lipschitz} b(0) \leq e^{\lambda t} M(t) |v_1 - v_2|. 
\end{equation} Estimates \eqref{eq:LipschitzInverse} and \eqref{eq:Lipschitz} imply that the map $v \mapsto V(0,t,x,v)$ is bi-Lipschitz and yield the bounds \eqref{eq:LipschitzV}, \eqref{eq:LipschitzW}, and \eqref{eq:JacobianW}. The Jacobian of $W$ is positive since $W(0,x,w) = w$ and the Jacobian is continuous in $t$, which follows from the definition of $V$ and regularity of $u$ proven in Proposition \ref{pro:WellPosedness}. Moreover, recalling \eqref{eq:fByCharacteristics}, these estimates imply \begin{align} \rho(t,x) = \int_{\mathbb{R}^3} f(t,x,v) \, \mathrm{d} v& = \int_{\mathbb{R}^3} e^{ 3 \lambda t} f_0(X(0,t,x,v),V(0,t,x,v)) \, \mathrm{d} v \\ &= \int_{\mathbb{R}^3} e^{ 3 \lambda t} f_0(X(0,t,x,W(t,x,w)),w) \det \nabla_w W(t,x,w) \, \mathrm{d} w \\ &\leq \int_{\mathbb{R}^3} M(t)^3 f_0(X(0,t,x,W(t,x,w)),w) \, \mathrm{d} w \\ & \leq C_0 M(t)^3, \end{align} which finishes the proof. \end{proof} We define \begin{equation} \label{eq:defT_ast} T_\ast := \sup \Big\{ t \geq 0 \colon \limsup_{\lambda \to \infty} \|\rho_\lambda\|_{L^\infty((0,t)\times\mathbb{R}^3)} < \infty \Big\}. \end{equation} In the lemma below, we prove that $T_\ast > 0$. Later we will show the convergence to the limit equation \eqref{eq:limitEquation} first only up to times $ T < T_\ast$ and finally, we will show $T_\ast = \infty$ using the convergence result for times $T < T_\ast$. \begin{lemma} \label{lem:T_astPositive} Let $T_\ast$ be defined as in \eqref{eq:defT_ast}. Then, \[ T_\ast >0. \] \end{lemma} \begin{proof} By Lemma \ref{lem:boundsU}, we have for all $t > 0$ \[ \|\nabla u_\lambda \|_{L^\infty((0,t) \times \mathbb{R}^3)} \leq C \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)}.
\] Moreover, by Lemma \ref{lem:biLipschitzV}, if $\lambda \geq 4 (\|\nabla u_\lambda \|_{L^\infty((0,t) \times \mathbb{R}^3)} + 1)$, then \[ \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 \exp\left(6 \int_0^t \|\nabla u_\lambda (s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} s \right). \] Combining these two estimates, we see that $\lambda \geq C \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)}$ implies \begin{equation} \label{eq:growRho} \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 \exp\left(C t \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)}\right). \end{equation} We define \[ T_\lambda := \sup \{t \geq 0 \colon \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq 2 C_0\}. \] Then, $T_\lambda > 0$ as $\rho_\lambda$ is continuous (and $C_0$ is chosen such that $\|\rho(0,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0$). Moreover, \eqref{eq:growRho} implies for all $\lambda \geq 4 (C C_0^2+1)$ and all $t < T_\lambda$ \[ \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 \exp( C C_0^2 t). \] As $\rho_\lambda$ is continuous, this yields for all $\lambda \geq 4 (C C_0^2+1)$ \[ T_\lambda \geq \frac{\log(2)}{C C_0^2}, \] which is independent of $\lambda$. Thus, \[ T_\ast \geq \inf_{\lambda \geq 4 (C C_0^2+1)} T_\lambda > 0. \qedhere \] \end{proof} \subsection{Higher order estimates} In this subsection, we prove estimates on $\partial_t \rho$ and $\nabla \rho$ which are uniform in $\lambda$ for times $T < T_\ast$. On the one hand, this yields compactness of $\rho_\lambda$ in H\"older spaces. On the other hand, we will also need these estimates in order to prove that the functions $\tilde{u}_\lambda$ defined in \eqref{eq:uTilde} are close to $u_\lambda$ for large values of $\lambda$. From now on, any constant $C$ might depend on $T$ but not on $\lambda$.
In particular, for $T<T_\ast$, $C$ might depend on $\limsup_{\lambda \to \infty} \|\rho_\lambda\|_{L^\infty((0,T)\times\mathbb{R}^3)}$. \begin{lemma} \label{lem:APriori} Let $T < T_\ast$. Then, there exist $\lambda_0$ and $C$ depending on $T$ and $f_0$ such that for all $\lambda \geq \lambda_0$ and all multiindices $\beta \in \mathbb{N}^3$, \begin{align} \|\rho\|_{W^{1,\infty}((0,T)\times\mathbb{R}^3)} &\leq C, \label{eq:rhoLipschitz}\\ \|u\|_{L^\infty((0,T);W^{2,\infty}(\mathbb{R}^3))} &\leq C, \label{eq:nabla^2U} \\ \|\bar V\|_{L^\infty((0,T)\times\mathbb{R}^3)} &\leq C, \label{eq:VBarL^Infty}\\ \Big\| \nabla_x \int_{\mathbb{R}^3} v^\beta f \, \mathrm{d} v \Big\|_{L^\infty((0,T)\times\mathbb{R}^3)} & \leq C. \label{eq:nablaRho} \end{align} Moreover, the support of $f$ is uniformly bounded in $\lambda$ up to time $T$. \end{lemma} \begin{proof} By definition of $T_\ast$, there is some $\lambda_0$ such that for all $\lambda \geq \lambda_0$ \begin{align} \|\rho\|_{L^\infty((0,T)\times\mathbb{R}^3)} &\leq C. \label{eq:rhoInfty} \end{align} Thus, Lemma \ref{lem:boundsU} yields \eqref{eq:VBarL^Infty} and \begin{align} \|u\|_{L^\infty((0,T);W^{1,\infty}(\mathbb{R}^3))} &\leq C. \label{eq:uInftyT_ast} \end{align} Using this, we have $M(t) \leq C$ for all $t \leq T$, where $M$ is the quantity from \eqref{eq:defM} in Lemma \ref{lem:biLipschitzV}. Moreover, we can assume that $\lambda_0$ has been chosen such that for all $\lambda \geq \lambda_0$ \begin{equation} \label{eq:lambdaLarge} \lambda \geq 4 (1+\|\nabla u \|_{L^\infty((0,T)\times\mathbb{R}^3)}). \end{equation} In the following, we only consider $\lambda \geq \lambda_0$. By Lemma \ref{lem:biLipschitzV}, $V(0,t,x,v)$ is invertible with inverse $W(t,x,w)$, and we define \begin{equation} \label{eq:defY} \begin{aligned} Y(s,t,x,w)&:= X(s,t,x,W(t,x,w)), \\ U(s,t,x,w) &:= V(s,t,x,W(t,x,w)).
\end{aligned} \end{equation} Then, \begin{equation} \begin{aligned} \partial_s Y &= U, \qquad &&Y(t,t,x,w) = x, \\ \partial_s U &= \lambda (g + u(Y,s) - U), \qquad &&U(0,t,x,w) = w, \qquad U(t,t,x,w) = W(t,x,w). \end{aligned} \end{equation} Note that by \eqref{eq:rhoByW} \[ \int_{\mathbb{R}^3} f(t,x,v) \, \mathrm{d} v = e^{3 \lambda t} \int_{\mathbb{R}^3} f_0 (Y,w) \det \nabla_w W \, \mathrm{d} w. \] We compute \[ \partial_{x_i} \det \nabla_w W = \operatorname{tr} ( \operatorname{adj} \nabla_w W \nabla_w \partial_{x_i} W) = \det \nabla_w W \operatorname{tr} ( (\nabla_w W)^{-1} \nabla_w \partial_{x_i} W). \] Thus, for any multiindex $\beta$, \begin{equation} \label{eq:derivativeThreeTermes} \begin{aligned} \partial_{x_i} \int_{\mathbb{R}^3} v^\beta f \, \mathrm{d} v &= e^{3 \lambda t} \int_{\mathbb{R}^3} \partial_{x_i} (W^\beta) f_0 (Y,w) \det \nabla_w W \, \mathrm{d} w \\ {} & + e^{3 \lambda t} \int_{\mathbb{R}^3} W^\beta \nabla_x f_0 (Y,w) \cdot \partial_{x_i} Y \det \nabla_w W \, \mathrm{d} w \\ {} & + e^{3 \lambda t} \int_{\mathbb{R}^3} W^\beta f_0 (Y,w) \det \nabla_w W \operatorname{tr} ( (\nabla_w W)^{-1} \nabla_w \partial_{x_i} W) \, \mathrm{d} w \\ & =: A_1 + A_2 + A_3. \end{aligned} \end{equation} We notice that \[ W(t,x,w) = V(t,0,X(0,t,x,W(t,x,w)),V(0,t,x,W(t,x,w))) = V(t,0,Y(0,t,x,w),w). \] Hence, for all $(Y(0,t,x,w),w) \in \operatorname{supp} f_0$, estimate \eqref{eq:boundOnV} implies \begin{equation} \label{eq:boundOnW} |W(t,x,w)| \leq C. \end{equation} Integrating the equation for $U$ yields (analogously to \eqref{eq:integrateCharacteristics}) \[ Y(s,t,x,w) = x - \int_s^t g + u(\tau,Y) \, \mathrm{d} \tau + \lambda^{-1} ( U(t,t,x,w) - U(s,t,x,w)). \] Therefore, \begin{equation} \label{eq:nabla_xYGronwall} \nabla_x Y(s,t,x,w) = \operatorname{Id} - \int_s^t \nabla u(\tau,Y) \nabla_x Y \, \mathrm{d} \tau + \lambda^{-1} ( \nabla_xU(t,t,x,w) - \nabla_x U(s,t,x,w)). 
\end{equation} We claim that \begin{equation} \label{eq:velocitiesControlledByPositions} |\nabla_x U(s,t,x,w)| \leq 2 \|\nabla u\|_{L^\infty((0,T) \times \mathbb{R}^3)} | \nabla_x Y(s,t,x,w)|. \end{equation} Indeed, with \begin{align} a(s) &:= | \nabla_x Y(s,t,x,w)|, \\ b(s) &:=|\nabla_x \partial_s Y(s,t,x,w)|, \\ \alpha(s) &:= \|\nabla u(s,\cdot)\|_{L^\infty(\mathbb{R}^3)}, \end{align} this follows from Lemma \ref{lem:ODEEstimates}\ref{it:ODEEstimates2} using \eqref{eq:lambdaLarge}. We use estimate \eqref{eq:velocitiesControlledByPositions} in equation \eqref{eq:nabla_xYGronwall} to get \[ a(s) \leq 1 + \int_s^t \alpha(\tau) a(\tau) \, \mathrm{d} \tau + \frac{2 \| \alpha \|_{L^\infty(0,T)}}{\lambda} (a(t) + a(s)). \] Since $a(t) = |{\operatorname{Id}}|$ is bounded and equation \eqref{eq:lambdaLarge} implies $4 \| \alpha \|_{L^\infty(0,T)} \leq \lambda$, we have \[ a(s) \leq C + 2 \int_s^t \alpha(\tau) a(\tau) \, \mathrm{d} \tau. \] Therefore, \eqref{eq:uInftyT_ast} implies for all $0 \leq s \leq t \leq T$ \begin{equation} \label{eq:nablaxY} | \nabla_x Y(s,t,x,w) | = a(s) \leq C. \end{equation} Moreover, by \eqref{eq:velocitiesControlledByPositions}, \eqref{eq:uInftyT_ast}, and \eqref{eq:nablaxY} \begin{equation} \label{eq:nablaxW} | \nabla_x W(t,x,w) | = |\nabla_x U(t,t,x,w)| \leq C. \end{equation} We want to estimate $\nabla_x \det \nabla_w W$. We compute \begin{equation} \label{eq:derivDet} \partial_{x_i} \det \nabla_w W = \operatorname{tr} ( \operatorname{adj} \nabla_w W \nabla_w \partial_{x_i} W) = \det \nabla_w W \operatorname{tr} ( (\nabla_w W)^{-1} \nabla_w \partial_{x_i} W). \end{equation} By \eqref{eq:LipschitzV}, we have \begin{equation} |(\nabla_w W(t,x,w))^{-1}| = |\nabla_v V(0,t,x,W(t,x,w))| \leq C e^{\lambda t}.
\end{equation} Thus, using also \eqref{eq:JacobianW}, we find \begin{equation} \label{eq:derivDetEst} |\partial_{x_i} \det \nabla_w W| \leq \det \nabla_w W | (\nabla_w W)^{-1}| | \nabla_w \partial_{x_i} W| \leq C e^{-3 \lambda t} e^{\lambda t} | \nabla_w \partial_{x_i} W|. \end{equation} In order to estimate $| \nabla_w \partial_{x_i} W|$ we further analyze the characteristics $Y$ and $U$ defined in \eqref{eq:defY}. Fix $t$, $x$, and $w$ and denote \begin{align} a(s) &:= |\nabla_w Y(s,t,x,w)|, \\ b(s) &:= |\nabla_w U(s,t,x,w)|, \\ \alpha(s) &:= \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)}. \end{align} Then, the assumptions of Lemma \ref{lem:ODEEstimates}\ref{it:ODEEstimates1} are satisfied with $\beta = 0$. Thus, \[ b(t) \leq \exp \left(\int_0^t -\lambda + 2 \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} s \right), \] and \begin{equation} \label{eq:nablawY} |\nabla_w Y(s,t,x,w)| = a(s) \leq \frac{2}{\lambda} b(s) \leq \frac{C}{\lambda} e^{-\lambda s}. \end{equation} Next, we consider the second derivative. We denote \begin{align} a(s) &:= |\nabla_w \nabla_x Y(s,t,x,w)|, \\ b(s) &:= |\nabla_w \nabla_x U(s,t,x,w)|, \\ \alpha(s) &:= \|\nabla u(s,\cdot) \|_{L^\infty(\mathbb{R}^3)}, \\ \beta & := 4 M(T)^3 \|\nabla^2 u \|_{L^\infty([0,T] \times \mathbb{R}^3)}, \end{align} with $M$ as in \eqref{eq:estimateRho}. Then, using the estimates for $|\nabla_x Y|$ and $| \nabla_w Y|$ from \eqref{eq:nablaxY} and \eqref{eq:nablawY} respectively, \begin{align} \dot{a} &\geq -b, \\ \dot{b} & \leq \lambda (\|\nabla^2 u\|_\infty |\nabla_x Y| | \nabla_w Y| + \|\nabla u\|_\infty a - b) \leq \lambda ( \alpha a - b) + \beta e^{-\lambda s} . \end{align} Hence, the assumptions of Lemma \ref{lem:ODEEstimates}\ref{it:ODEEstimates1} are satisfied. 
Since $b(0) = 0$, Lemma \ref{lem:ODEEstimates}\ref{it:ODEEstimates1} yields \begin{equation} \label{eq:nabla2W} |\nabla_w \nabla_x W(t,x,w) | = b(t) \leq C \frac{2 \beta}{\lambda} e^{- \lambda t} \leq \frac{C}{\lambda} e^{-\lambda t} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)} . \end{equation} Inserting this in \eqref{eq:derivDetEst}, we find \begin{equation} \label{eq:derivDetFinal} |\partial_{x_i} \det \nabla_w W| \leq \frac{C}{\lambda} e^{-3 \lambda t} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)}. \end{equation} We recall the definition of $A_1$, $A_2$, and $A_3$ from equation \eqref{eq:derivativeThreeTermes}. Using \eqref{eq:JacobianW} and \eqref{eq:nablaxW} yields \[ A_1 \leq C(\beta). \] Estimates \eqref{eq:JacobianW}, \eqref{eq:boundOnW}, and \eqref{eq:nablaxY} imply \[ A_2 \leq C(\beta). \] Finally, \eqref{eq:boundOnW} and \eqref{eq:derivDetFinal} yield \[ A_3 \leq \frac{C(\beta)}{\lambda} e^{-3 \lambda t} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)}. \] Inserting these bounds on $A_i$ in \eqref{eq:derivativeThreeTermes} we obtain \begin{equation} \label{eq:nablaRhoAlmost} \Big\| \nabla_x \int_{\mathbb{R}^3} v^\beta f(t,x,v) \, \mathrm{d} v \Big\|_{L^\infty(\mathbb{R}^3)} \leq C(\beta) \left( 1+ \frac{1}{\lambda} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)} \right). \end{equation} Since the support of $f$ in $x$ is controlled by \eqref{eq:boundOnX}, we also have for any $1 \leq p \leq \infty$ \begin{equation} \label{eq:nablaRhoL^p} \Big\| \nabla_x \int_{\mathbb{R}^3} v^\beta f(t,x,v) \, \mathrm{d} v \Big\|_{L^p(\mathbb{R}^3)} \leq C(\beta) (1+T)\left( 1+ \frac{1}{\lambda} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)} \right).
\end{equation} In order to control $\|\nabla^2 u \|_{L^\infty((0,T)\times\mathbb{R}^3)}$, the Brinkman equations in \eqref{eq:Vlasov Stokes} and regularity theory for the Stokes equations yield \begin{align} \| \nabla^3 u \|_{L^p(\mathbb{R}^3)} & \leq \|\nabla (\rho(u-\bar{V}))\|_{L^p(\mathbb{R}^3)} \\ & \leq \|\rho \|_{L^p(\mathbb{R}^3)} \|\nabla u \|_{L^\infty(\mathbb{R}^3)} + \|\nabla \rho \|_{L^p(\mathbb{R}^3)} \|u \|_{L^\infty(\mathbb{R}^3)} + \|\nabla (\rho \bar{V})\|_{L^p(\mathbb{R}^3)}. \end{align} Note that both $\nabla \rho$ and $\nabla (\rho \bar{V})$ are of the form of the left hand side in \eqref{eq:nablaRhoL^p}. Therefore, using also Sobolev embedding together with \eqref{eq:uInftyT_ast} and \eqref{eq:rhoInfty} yields \[ \| \nabla^2 u \|_{L^\infty(0,T;C^{0,\alpha})} \leq C \left( 1+ \frac{1}{\lambda} \|\nabla^2 u \|_{L^\infty((0,T) \times \mathbb{R}^3)} \right). \] This implies \eqref{eq:nabla^2U} for $\lambda$ sufficiently large. Inserting \eqref{eq:nabla^2U} in \eqref{eq:nablaRhoAlmost} proves \eqref{eq:nablaRho}. The missing estimate for the time-derivative in \eqref{eq:rhoLipschitz} follows from the Vlasov-Stokes equations \eqref{eq:Vlasov Stokes} and \eqref{eq:nablaRho}. \end{proof} \begin{remark} One might wonder whether the complicated splitting in \eqref{eq:derivativeThreeTermes} is really needed. Indeed, we also have \begin{align} \partial_{x_i} \int_{\mathbb{R}^3} v^\beta f \, \mathrm{d} v &= \partial_{x_i} e^{3 \lambda t} \int_{\mathbb{R}^3} v^\beta f_0(X,V) \, \mathrm{d} v \\ &= e^{3 \lambda t} \int_{\mathbb{R}^3} v^\beta \nabla_x f_0(X,V) \partial_{x_i} X + \nabla_v f_0(X,V) \partial_{x_i} V \, \mathrm{d} v, \end{align} an expression that involves only two terms and in particular does not involve any second derivatives. However, it turns out that both $\partial_{x_i} X$ and $\partial_{x_i} V$ blow up as $\lambda \to \infty$. Therefore, estimating both terms individually in the above expression cannot lead to the assertion.
\end{remark} \section{Proof of the Convergence result} \subsection{Error estimates for the particle and fluid velocities} Recall the definition of $\tilde{u}_\lambda$ from \eqref{eq:uTilde}, which can be viewed as intermediate between $u_\lambda$ and $u_\ast$ defined by \eqref{eq:Vlasov Stokes} and \eqref{eq:limitEquation} respectively. As a first step to show smallness of $u_\lambda - u_\ast$ (and also $\rho_\lambda - \rho_\ast$), we will show smallness of $u_\lambda - \tilde{u}_\lambda$. Comparing the PDEs that $u_\lambda$ and $\tilde{u}_\lambda$ fulfill, we observe that we have to prove smallness of $\rho(\bar{V} - u_\lambda - g)$. This is almost what we do in the proof of the lemma below. Indeed, it turns out that it is more convenient to consider the error term $\Phi = \bar{V} - \tilde{u}_\lambda - g$ instead of $\bar{V} - u_\lambda - g$ because we control the time derivative of $\tilde{u}$. Then, we are able to prove smallness of $u_\lambda - \tilde{u}_\lambda$ using energy identities for $\Phi$ and $u_\lambda - \tilde{u}_\lambda$ analogous to \eqref{eq:energyFluid} and \eqref{eq:energyParticles}. \begin{lemma} \label{lem:uTilde} Assume $T < T_\ast$ and let $\tilde{u}_\lambda$ be defined as in \eqref{eq:uTilde}. Then, there exists $\lambda_0$ such that for all $\lambda \geq \lambda_0$ \begin{align} \label{eq:estTildeU} \|\tilde u \|_{W^{1,\infty}((0,T)\times\mathbb{R}^3)} &\leq C, \\ \label{eq:diffUTildeU} \|\tilde u(t,\cdot) - u(t,\cdot)\|^2_{W^{1,\infty}(\mathbb{R}^3)} &\leq C \left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \end{align} \end{lemma} \begin{proof} Again, we consider only $\lambda > \lambda_0$ with $\lambda_0$ as in Lemma \ref{lem:APriori}. Then, Lemma \ref{lem:APriori} implies that we control the $L^\infty$-norms of $\rho$ and $\partial_t \rho$ and the support of $\rho$. Thus, \eqref{eq:estTildeU} follows from regularity theory for the Stokes equations. We define \begin{align} \Phi &:= \bar{V} - \tilde{u} - g, \\ Z &:= u - \tilde{u}.
\end{align} Then, \begin{align} -\Delta Z + \nabla p + (Z-\Phi)\rho = 0, \qquad \operatorname{div} Z =0. \end{align} Therefore \begin{equation} \label{eq:energyZ} \| \nabla Z \|^2_{L^2(\mathbb{R}^3)} = (Z,\Phi-Z)_{L^2_\rho(\mathbb{R}^3)}. \end{equation} We compute \begin{align} \partial_t (\rho \bar{V}) &= - \int_{\mathbb{R}^3} v \cdot \nabla_x f v \, \mathrm{d} v + \lambda \rho(g + u -\bar{V}) = - \int_{\mathbb{R}^3} v \cdot \nabla_x f v \, \mathrm{d} v + \lambda \rho(Z - \Phi), \\ \label{eq:del_tRhoPhi} \partial_t (\rho \Phi) &= \partial_t (\rho \bar{V})- \partial_t (\rho \tilde{u}) = \lambda \rho(Z - \Phi) - \int_{\mathbb{R}^3} v \cdot \nabla_x f v \, \mathrm{d} v - \partial_t (\rho \tilde{u}). \end{align} Note that \eqref{eq:estTildeU} and the bound on $\bar{V}$ from Lemma \ref{lem:APriori} imply that $\Phi(t,\cdot)$ is uniformly bounded in $L^\infty(\mathbb{R}^3)$ up to time $T$. Thus, we use \eqref{eq:del_tRhoPhi}, \eqref{eq:energyZ}, and the estimates from Lemma \ref{lem:APriori}, \eqref{eq:estTildeU}, and Lemma \ref{lem:frictionCoercive} to obtain \begin{align*} \partial_t \frac{1}{2} \| \Phi\|_{L^2_\rho(\mathbb{R}^3)}^2 &= \int_{\mathbb{R}^3} \partial_t (\rho \Phi) \cdot \Phi - \frac{1}{2} \partial_t \rho |\Phi|^2 \, \mathrm{d} x \\ &= \lambda \int_{\mathbb{R}^3} \rho \Phi \cdot ( Z - \Phi) \, \mathrm{d} x -\int_{\mathbb{R}^3 \times \mathbb{R}^3} \hspace{-1em} v \cdot \nabla_x f v \cdot \Phi \, \mathrm{d} v \, \mathrm{d} x \\ & - \int_{\mathbb{R}^3} \partial_t (\rho \tilde{u}) \cdot \Phi\, \mathrm{d} x - \frac{1}{2} \int_{\mathbb{R}^3} \partial_t \rho |\Phi|^2 \, \mathrm{d} x \\ &\leq - \lambda \|\nabla Z\|_{L^2(\mathbb{R}^3)}^2 - \lambda \|Z - \Phi\|_{L^2_\rho(\mathbb{R}^3)}^2 + C \\ &\leq - c \lambda \|\Phi\|_{L^2_\rho(\mathbb{R}^3)}^2 + C. \end{align*} Therefore, we have \[ \| \Phi\|_{L^2_\rho(\mathbb{R}^3)}^2 \leq C\left( e^{-c \lambda t} + \frac{1}{\lambda} \right).
\] By the energy identity for the Brinkman equations \eqref{eq:energyZ}, it follows \[ \|\nabla Z\|^2_{L^2 (\mathbb{R}^3)} + \| Z\|^2_{L^2_\rho(\mathbb{R}^3)} \leq C\left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \] Regularity theory for Stokes equations implies \begin{align} \| \nabla^2 Z \|^2_{L^2(\mathbb{R}^3)} \leq 2 \|\rho Z\|^2_{L^2(\mathbb{R}^3)} + 2 \|\rho \Phi \|^2_{L^2(\mathbb{R}^3)} &\leq 2 \|\rho \|^2_{L^3(\mathbb{R}^3)} \|Z\|^2_{L^6(\mathbb{R}^3)} + 2 \|\Phi\|^2_{L^2_\rho(\mathbb{R}^3)} \|\rho\|_{L^\infty(\mathbb{R}^3)} \\ &\leq C\left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \end{align} Thus, using Sobolev embedding, \begin{equation} \label{eq:ZLinfty} \| Z \|_{L^\infty(\mathbb{R}^3)}^2 \leq C\left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \end{equation} Taking $\lambda_0 \geq 1$ and using again \eqref{eq:del_tRhoPhi} yields \[ \partial_t (\rho\Phi) \leq - \rho \Phi + C( \lambda e^{-c \lambda t} + \sqrt{\lambda}). \] Thus, \[ \| \rho \Phi \|_{L^\infty(\mathbb{R}^3)}^2 \leq C\left( e^{-c \lambda t} + \frac{1}{\lambda} \right), \] which again yields smallness of $Z$ in even better norms. More precisely, for $p \geq 2$ \begin{align} \| \nabla^2 Z \|^2_{L^p(\mathbb{R}^3)} \leq C \|\rho Z\|^2_{L^p(\mathbb{R}^3)} + C \|\rho \Phi \|^2_{L^p(\mathbb{R}^3)} &\leq C \|\rho \|^2_{L^p(\mathbb{R}^3)} \|Z\|^2_{L^\infty(\mathbb{R}^3)} + C \|\rho \Phi\|^2_{L^\infty(\mathbb{R}^3)} \\ &\leq C \left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \end{align} In particular, \[ \| Z\|^2_{W^{1,\infty}} \leq C \left( e^{-c \lambda t} + \frac{1}{\lambda} \right). \] By definition of $Z$, this proves \eqref{eq:diffUTildeU}. \end{proof} \subsection{Convergence for times $\bm{T<T_\ast}$} We want to prove $\rho_\lambda \to \rho_\ast$ as $\lambda \to \infty$, where $\rho_\ast$ is the solution to \eqref{eq:limitEquation}. 
By the a priori estimate from Lemma \ref{lem:APriori}, we have that $\rho_\lambda$ is uniformly bounded in $W^{1,\infty}((0,T_0)\times \mathbb{R}^3)$ for times $T_0 < T_\ast$ defined in \eqref{eq:defT_ast}. Hence, we can extract strongly convergent subsequences in $C^{0,\alpha}((0,T_0)\times \mathbb{R}^3)$ for all $\alpha < 1$. It remains to prove that any limit of these subsequences is $\rho_\ast$. To this end we will show that $\rho_\lambda$ converges to $\rho_\ast$ in a weaker sense by using again the characteristics. We note that \begin{equation} \label{eq:rho_astByX_ast} \rho_\ast (t,x) = \rho_0 (X_\ast(0,t,x)) = \int_{\mathbb{R}^3} f_0(X_\ast(0,t,x),v) \, \mathrm{d} v, \end{equation} where $X_\ast(s,t,x)$ is defined as the solution to \begin{align} \partial_s X_\ast(s,t,x) &= g + u_\ast(s,X_\ast(s,t,x)), \\ X_\ast(t,t,x) &= x. \end{align} We have seen in \eqref{eq:almostTransportedByU} that for large values of $\lambda$, the particles are almost transported by $u_\lambda +g$. Moreover, in Lemma \ref{lem:uTilde}, we have seen that the fluid velocity $u_\lambda$ is close to $\tilde u_\lambda$, which roughly speaking is the fluid velocity corresponding to the limit equation \eqref{eq:limitEquation}. In order to compare $\rho_\lambda $ to $\rho_\ast$, we want to use the formula for $\rho_\lambda$ from Lemma \ref{lem:biLipschitzV}, \begin{equation} \label{eq:rhoByW2} \rho_\lambda(t,x) = \int_{\mathbb{R}^3} e^{ 3 \lambda t} f_0(X_\lambda(0,t,x,W_\lambda(t,x,w)),w) \det \nabla_w W_\lambda(t,x,w) \, \mathrm{d} w. \end{equation} Provided $X_\lambda(0,t,x,W_\lambda(t,x,w))$ is close to $X_\ast(0,t,x)$ independently of $w$, the right hand sides of \eqref{eq:rho_astByX_ast} and \eqref{eq:rhoByW2} look very similar. However, we lack information on the Jacobian $\det \nabla_w W_\lambda(t,x,w)$. We know that $e^{ 3 \lambda t} \det \nabla_w W_\lambda(t,x,w)$ is uniformly bounded (for small times $t$ and large values of $\lambda$, cf.
Lemma \ref{lem:biLipschitzV} and Lemma \ref{lem:APriori}), but we do not know whether it tends to $1$ in the limit $\lambda \to \infty$. To avoid dealing with this Jacobian, we also integrate over a small set in position space. To this end, let $\Psi_\lambda(t,\xi) := (X_\lambda(t,0,\xi),V_\lambda(t,0,\xi))$ with $\xi = (x,v)$. Then, using the characteristic equations \eqref{eq:characteristics}, \[ \partial_t \nabla \Psi_\lambda = \nabla \Psi_\lambda \begin{pmatrix} 0 & \mathrm{Id} \\ \lambda \nabla u & -\lambda \mathrm{Id} \end{pmatrix} . \] Hence, \[ \partial_t \det \nabla \Psi_\lambda = \det \nabla \Psi_\lambda \operatorname{tr} \left( (\nabla \Psi_\lambda)^{-1} \nabla \Psi_\lambda \begin{pmatrix} 0 & \mathrm{Id} \\ \lambda \nabla u & -\lambda \mathrm{Id} \end{pmatrix} \right) = - 3 \lambda \det \nabla \Psi_\lambda. \] Thus, \[ \det \nabla \Psi_\lambda(t,\xi) = e^{- 3 \lambda t}. \] Therefore, for $\Omega \subset \mathbb{R}^3$ measurable, \begin{equation} \label{eq:intRho_lambda} \begin{aligned} \int_{\Omega} \rho_\lambda(t,y) \, \mathrm{d} y = \int_{\Omega} \int_{\mathbb{R}^3} e^{3 \lambda t} f_0(\Psi_\lambda^{-1}(y,v)) \, \mathrm{d} v \, \mathrm{d} y = \int_{\Psi_\lambda^{-1}(\Omega \times \mathbb{R}^3)} f_0(y,v) \, \mathrm{d} v \, \mathrm{d} y. \end{aligned} \end{equation} On the other hand, since $u_\ast$ is divergence-free, we observe that \begin{equation} \label{eq:intRho_ast} \int_{\Omega} \rho_\ast(t,y) \, \mathrm{d} y = \int_{\Omega} \rho_0(X_\ast(0,t,y)) \, \mathrm{d} y = \int_{X_\ast(0,t,\Omega)} \rho_0(y) \, \mathrm{d} y = \int_{X_\ast(0,t,\Omega) \times \mathbb{R}^3} \hspace{-1em} f_0(y,v) \, \mathrm{d} y \, \mathrm{d} v . \end{equation} Now, we have to compare the right hand sides of \eqref{eq:intRho_ast} and \eqref{eq:intRho_lambda}. It is convenient to choose $\Omega$ to be a cube. We denote by $\mathcal{Q}_\delta$ the set of all cubes $Q \subset \mathbb{R}^3$ of length $\delta$.
We define \[ d_{\lambda,\delta}(t) := \sup_{Q \in \mathcal{Q}_\delta} \left| \fint_Q \rho_\lambda(t,y) - \rho_\ast(t,y) \, \mathrm{d} y \right|. \] We will show that \begin{equation} \label{eq:weakL^1Cubes} \lim_{\delta \to 0} \lim_{\lambda \to \infty} d_{\lambda,\delta}(t) = 0 \qquad \text{for all} ~ t < T_\ast. \end{equation} This implies $\rho_\lambda(t,\cdot) \to \rho_\ast(t,\cdot)$ weakly-* in $L^\infty(\mathbb{R}^3)$ because we already have uniform boundedness by Lemma \ref{lem:APriori}. For the proof of \eqref{eq:weakL^1Cubes} in Proposition \ref{pro:ConvergenceCube}, we essentially need three ingredients. First, we will show in Lemma \ref{lem:d_lambda,delta} that $d_{\lambda,\delta}$ is controlled by $|X_\lambda - X_\ast|$. Second, we will show in Lemma \ref{lem:uTildeUAst} that $\tilde{u}_\lambda- u_\ast$ is controlled by $d_{\lambda,\delta}$. Finally, we use that the particle trajectories $X_\lambda$ are almost the ones which one gets from a transport velocity $\tilde{u}_\lambda + g$. This last ingredient is due to \eqref{eq:almostTransportedByU} and Lemma \ref{lem:uTilde}. \begin{lemma} \label{lem:d_lambda,delta} Let $T_0 < T_\ast$. Then, there exist constants $C$ and $\lambda_0$ such that for all $\lambda > \lambda_0$ and all $t < T_0$ \[ d_{\lambda,\delta}(t) \leq C \left(\sup_{(x,v) \in \operatorname{supp} f_0} |X_\lambda(t,0,x,v) - X_\ast(t,0,x)| + \delta + \frac{1}{\delta \lambda} \right). \] \end{lemma} \begin{proof} Let $Q \in \mathcal{Q}_\delta$. Let $\Psi_\lambda(t,y,v) := (X_\lambda(t,0,y,v),V_\lambda(t,0,y,v))$. Recall from \eqref{eq:intRho_ast} and \eqref{eq:intRho_lambda} \begin{align} \label{eq:rho_astByf_0} \int_{Q} \rho_\ast(t,y) \, \mathrm{d} y & = \int_{X_\ast(0,t,Q)} \rho_0(y) \, \mathrm{d} y, \\ \label{eq:rho_lambdaByf_0} \int_{Q} \rho_\lambda(t,y) \, \mathrm{d} y &= \int_{\Psi_\lambda^{-1}(Q \times \mathbb{R}^3)} f_0(y,v) \, \mathrm{d} y \, \mathrm{d} v.
\end{align} We want to replace the right hand side of \eqref{eq:rho_lambdaByf_0} by an integral of $\rho_0$ to compare its value to the right hand side of \eqref{eq:rho_astByf_0}. To this end, we have to replace the set $\Psi_\lambda^{-1}(Q \times \mathbb{R}^3)$ by a set of the form $\Omega \times \mathbb{R}^3$. We define \begin{equation} \label{eq:defOmega} \begin{aligned} \Omega &:= \{ X_\lambda(0,t,z,w) \colon (z,w) \in \Psi_\lambda(\operatorname{supp} f_0) \cap (Q \times \mathbb{R}^3 ) \} \\ &= \{ y \in \mathbb{R}^3 \colon \text{there is} ~ v \in \mathbb{R}^3 ~ \text{with} ~ (y,v) \in \operatorname{supp} f_0, X_\lambda(t,0,y,v) \in Q \}. \end{aligned} \end{equation} Then, we claim \begin{equation} \label{eq:Inclusions} \Psi_\lambda^{-1}(Q \times \mathbb{R}^3) \cap \operatorname{supp} f_0 \subset \left( \Omega \times \mathbb{R}^3 \right) \cap \operatorname{supp} f_0 \subset \Psi_\lambda^{-1}(Q_{C\lambda^{-1}} \times \mathbb{R}^3), \end{equation} where $C$ is a constant independent of $\delta$ (and $\lambda$), and \[ Q_{C\lambda^{-1}} := \bigcup_{y \in Q} B_{C\lambda^{-1}}(y). \] The first inclusion in \eqref{eq:Inclusions} follows from the definition of $\Omega$. To prove the second inclusion, let $(y,v) \in \operatorname{supp} f_0 \cap ( \Omega \times \mathbb{R}^3 )$. Then, by definition of $\Omega$, there exists $\tilde{v} \in \mathbb{R}^3$ such that $(y,\tilde{v}) \in \operatorname{supp} f_0$ and $X_\lambda(t,0,y,\tilde{v}) \in Q$. From \eqref{eq:almostTransportedByU} and the fact that the support of $f_\lambda$ is uniformly bounded up to time $T_0$ by Lemma \ref{lem:APriori}, we know that \begin{align} |X_\lambda(t,0,y,v) - X_\lambda(t,0,y,\tilde{v})| &\leq \frac{C}{\lambda} + \int_0^t |u(s,X_\lambda(s,0,y,v)) - u(s,X_\lambda(s,0,y,\tilde{v}))| \, \mathrm{d} s \\ &\leq \frac{C}{\lambda} + \int_0^t \|\nabla u\|_{L^\infty} |X_\lambda(s,0,y,v) - X_\lambda(s,0,y,\tilde{v})|\, \mathrm{d} s.
\end{align} Using the estimate for $\nabla u$ from Lemma \ref{lem:APriori} yields \begin{equation} \label{eq:smallDependenceOnInitialVelocity} |X_\lambda(t,0,y,v) - X_\lambda(t,0,y,\tilde{v})| \leq \frac{C}{\lambda} e^{C t}. \end{equation} Therefore, $X_\lambda(t,0,y,v) \in Q_{C\lambda^{-1}}$ and thus $(y,v) \in \Psi_\lambda^{-1}(Q_{C\lambda^{-1}} \times \mathbb{R}^3)$. From \eqref{eq:Inclusions} it follows \begin{equation} \label{eq:cylinderRemaining} \begin{aligned} &\left| \int_{\Psi_\lambda^{-1}(Q \times \mathbb{R}^3)} f_0(y,v) \, \mathrm{d} y \, \mathrm{d} v - \int_{\Omega \times \mathbb{R}^3} f_0(y,v) \, \mathrm{d} y \, \mathrm{d} v \right| \\ &\leq \int_{\Psi_\lambda^{-1}((Q_{C\lambda^{-1}} \backslash Q )\times \mathbb{R}^3)} f_0(y,v) \, \mathrm{d} y \, \mathrm{d} v \\ & = \int_{Q_{C\lambda^{-1}} \backslash Q } \rho_\lambda (t,y) \, \mathrm{d} y \\ & \leq \|\rho_\lambda(t,\cdot)\|_{L^\infty(\mathbb{R}^3)} |Q_{C\lambda^{-1}} \backslash Q| \\ &\leq C \frac{\delta^2}{\lambda}. \end{aligned} \end{equation} Combining \eqref{eq:rho_astByf_0}, \eqref{eq:rho_lambdaByf_0}, and \eqref{eq:cylinderRemaining} yields \begin{equation} \label{eq:integralRho_0} \left| \int_{Q} \rho_\lambda(t,y) - \rho_\ast(t,y) \, \mathrm{d} y \right| \leq \left| \int_{X_\ast(0,t,Q)} \rho_0(y) \, \mathrm{d} y - \int_{\Omega} \rho_0(y) \, \mathrm{d} y \right| + C \frac{\delta^2}{\lambda}. \end{equation} To estimate the right hand side, we note that \begin{equation} \label{eq:X_astVolumePreserving} |X_\ast(0,t,Q)| = |Q| = \delta^3, \end{equation} since $\operatorname{div} u_\ast = 0$. We want to show that $|\Omega| \approx |Q|$. To this end, we define $\tilde{X}_\lambda$ to be the solution to \begin{align} \partial_s {\tilde{X}}_\lambda(s,t,x) &= g + u_\lambda(s,\tilde{X}_\lambda(s,t,x)), \\ \tilde{X}_\lambda(t,t,x) &= x.
\end{align} Then, using \eqref{eq:almostTransportedByU}, we have for all $(x,v) \in \operatorname{supp} f_0$ \begin{align} |\tilde{X}_\lambda(t,0,x) - X_\lambda(t,0,x,v)| &\leq \frac{C}{\lambda} + \int_0^t |u_\lambda(s,\tilde{X}_\lambda(s,0,x)) - u_\lambda(s,X_\lambda(s,0,x,v))| \, \mathrm{d} s \\ &\leq \frac{C}{\lambda} + \int_0^t \|\nabla u_\lambda \|_{L^\infty(\mathbb{R}^3)} | \tilde{X}_\lambda(s,0,x) - X_\lambda(s,0,x,v)| \, \mathrm{d} s. \end{align} Gronwall implies \[ |\tilde{X}_\lambda(t,0,x) - X_\lambda(t,0,x,v)| \leq \frac{C}{\lambda}. \] Thus, \[ \tilde{X}^{-1}_\lambda(t,0,I_{C \lambda^{-1}}(Q)) \subset \Omega \subset \tilde{X}^{-1}_\lambda(t,0,Q_{C\lambda^{-1}}), \] where \[ I_{C \lambda^{-1}}(Q) := \{ y \in Q \colon B_{C \lambda^{-1}}(y) \subset Q \}. \] Since $\operatorname{div} u_\lambda = 0$, we have that $\tilde{X}_\lambda$ is volume preserving as well. Therefore, using also \eqref{eq:X_astVolumePreserving} \begin{equation} \label{eq:VolumeDifference} ||\Omega| - |X_\ast(0,t,Q)|| \leq | Q_{C\lambda^{-1}} \backslash I_{C \lambda^{-1}}(Q)|\leq C \frac{\delta^2}{\lambda}. \end{equation} We observe that for any function $g \in W^{1,\infty}(\mathbb{R}^3)$ and measurable sets $E,F \subset \mathbb{R}^3$ \begin{equation} \label{eq:integralSameFunctionDifferentSets} \left|\int_E g \, \mathrm{d} x - \int_F g \, \mathrm{d} x\right| \leq ||E| - |F|| \|g\|_{L^\infty} + \min\{|E|,|F|\} \|\nabla g\|_{L^\infty} \sup \{|x-y| \colon x \in E, y \in F\}. \end{equation} Indeed, using the first term on the right hand side, we may assume without loss of generality that $E$ and $F$ are of equal measure. Approximating $E$ and $F$ by equisized cubes further reduces the situation to the estimate for two of these cubes. For these cubes, the statement obviously holds.
Applying \eqref{eq:integralSameFunctionDifferentSets} together with \eqref{eq:X_astVolumePreserving} and \eqref{eq:VolumeDifference} yields \begin{equation} \label{eq:estimateRho_0Integral} \begin{aligned} &\left| \int_{X_\ast(0,t,Q)} \rho_0(y) \, \mathrm{d} y - \int_{\Omega} \rho_0(y) \, \mathrm{d} y \right| \\ &\leq ||\Omega| - |X_\ast(0,t,Q)||\|\rho_0\|_{L^\infty} + \delta^3 \sup \{|y-z| \colon y \in \Omega, z \in X_\ast(0,t,Q) \} \|\nabla \rho_0\|_{L^\infty} \\ & \leq C \frac{\delta^2}{\lambda} + C \delta^3 \bigg( \sup_{y \in \Omega} \operatorname{dist} (y,X_\ast(0,t,Q)) + \operatorname{diam} (X_\ast(0,t,Q)) \bigg). \end{aligned} \end{equation} We need to estimate the second term on the right hand side. To this end, recall the definition of the set $\Omega$ from \eqref{eq:defOmega}. For any $y \in \Omega$, we find $(x,v) \in \operatorname{supp} f_0$ such that $p:=X_\lambda(t,0,y,v) \in Q$. Define $z = X_\ast(0,t,p) \in X_\ast(0,t,Q)$. Then \begin{equation} \label{eq:estimateDist} \begin{aligned} |z - y| &= |X_\ast(0,t,p) - X_\ast(0,t,X_\ast(t,0,y))| \\ &\leq \|\nabla X_\ast (0,t,\cdot)\|_{L^\infty(\mathbb{R}^3)} |X_\lambda(t,0,y,v) - X_\ast(t,0,y)| \\ &\leq \|\nabla X_\ast (0,t,\cdot)\|_{L^\infty(\mathbb{R}^3)} \sup_{(x,v) \in \operatorname{supp} f_0} |X_\lambda(t,0,x,v) - X_\ast(t,0,x)|. \end{aligned} \end{equation} Observe that \begin{equation} \label{eq:nablaX_ast} \|\nabla X_\ast (0,t,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq e^{\int_0^t \|\nabla u_\ast(s,\cdot)\| \, \mathrm{d} s} \leq C. \end{equation} Thus, \eqref{eq:estimateDist} and \eqref{eq:nablaX_ast} imply \begin{equation} \label{eq:estDist} \begin{aligned} \sup_{y \in \Omega} \operatorname{dist} (y,X_\ast(0,t,Q)) &\leq C \sup_{(x,v) \in \operatorname{supp} f_0} |X_\lambda(t,0,x,v) - X_\ast(t,0,x)|.
\end{aligned} \end{equation} Note that \eqref{eq:nablaX_ast} also yields \begin{equation} \label{eq:diam} \operatorname{diam} (X_\ast(0,t,Q)) \leq \delta \|\nabla X_\ast (0,t,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C \delta. \end{equation} Finally, estimates \eqref{eq:estimateRho_0Integral}, \eqref{eq:diam}, and \eqref{eq:estDist} yield \[ \left| \int_{X_\ast(0,t,Q)} \rho_0(y) \, \mathrm{d} y - \int_{\Omega} \rho_0(y) \, \mathrm{d} y \right| \leq C \frac{\delta^2}{\lambda} + C \delta^3\left(\sup_{(x,v) \in \operatorname{supp} f_0} |X_\lambda(t,0,x,v) - X_\ast(t,0,x)| + \delta\right). \] Combining this estimate with \eqref{eq:integralRho_0} finishes the proof. \end{proof} \begin{lemma} \label{lem:uTildeUAst} Let $T_0 < T_\ast$. For $u_\ast$ and $\tilde{u}_\lambda$ as in \eqref{eq:limitEquation} and \eqref{eq:uTilde}, we have for all $\delta \leq 1$ and for all $t < T_0$ \[ \|\tilde{u}_\lambda(t,\cdot) - u_\ast(t,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C (d_{\lambda,\delta}(t) + \delta). \] \end{lemma} \begin{proof} We choose disjoint cubes $(Q_j)_{j \in \mathbb{N}} \subset \mathcal{Q}_\delta$ that cover $\mathbb{R}^3$ up to a nullset. Define $I \subset \mathbb{N}$ to be the index set for those cubes that intersect with the support of either $\rho_\lambda(t,\cdot)$ or $\rho_\ast(t,\cdot)$ and let $(z_j)_{j \in I}$ be the centers of those cubes. Let $x \in \mathbb{R}^3$. Then, \begin{align} |\tilde{u}_\lambda(t,x) - u_\ast(t,x)| &= \left| \int_{\mathbb{R}^3} \Phi(x-y) (\rho_\lambda(t,y) - \rho_\ast(t,y) )\, \mathrm{d} y \right| \\ &\leq \sum_{j \in I} \left| \int_{Q_j} \Phi(x-y) (\rho_\lambda(t,y) - \rho_\ast(t,y) )\, \mathrm{d} y \right|, \end{align} where $\Phi$ is the fundamental solution of the Stokes equations, \[ \Phi(y) = \frac{1}{8 \pi} \left(\frac{\mathrm{Id}}{|y|} + \frac{y \otimes y}{|y|^3} \right). \] Let $I_1 \subset I$ be the index set of those cubes $Q_j$ which contain $x$ or are adjacent to that cube.
Then, $|I_1| \leq 27$ and for $j \in I_1$ we estimate \begin{equation} \begin{aligned} \left| \int_{Q_j} \Phi(x-y) (\rho_\lambda(t,y) - \rho_\ast(t,y) )\, \mathrm{d} y \right| &\leq (\|\rho_\lambda(t,\cdot)\|_{L^\infty(\mathbb{R}^3)} + \|\rho_\ast(t,\cdot)\|_{L^\infty(\mathbb{R}^3)}) \left| \int_{Q_j} \Phi(x-y) \, \mathrm{d} y \right|\\ &\leq C \delta^2. \end{aligned} \end{equation} Let $I_2 = I \backslash I_1$. For $h \in L^1(\mathbb{R}^n)$ and $\Omega \subset \mathbb{R}^n$ measurable, we use the notation \[ (h)_{\Omega} := \fint_\Omega h \, \mathrm{d} x := \frac{1}{|\Omega|} \int_\Omega h \, \mathrm{d} x. \] Then, for $j \in I_2$, \begin{equation} \begin{aligned} \left| \int_{Q_j} \Phi(x-y) (\rho_\lambda(t,y) - \rho_\ast(t,y) )\, \mathrm{d} y \right| &\leq |(\Phi(x-\cdot))_{Q_j}| \left| \int_{Q_j} (\rho_\lambda(t,y) - \rho_\ast(t,y) )\, \mathrm{d} y \right| \\ {} &+ \int_{Q_j} |\Phi(x-y) - (\Phi(x-\cdot))_{Q_j}| |\rho_\lambda(t,y) - \rho_\ast(t,y) |\, \mathrm{d} y \\ & \leq \frac{\delta^3}{|x-z_j|} d_{\lambda,\delta}(t) + \frac{\delta^4}{|x-z_j|^2}, \end{aligned} \end{equation} where we used that we control $\rho_\lambda(t,\cdot)$ and $\rho_\ast(t,\cdot)$ in $L^\infty(\mathbb{R}^3)$ by Lemma \ref{lem:APriori}. Summing over all $j \in I$ yields \begin{align} |\tilde{u}_\lambda(t,x) - u_\ast(t,x)| &\leq C \delta^2 + \sum_{j \in I_2} \frac{\delta^3}{|x-z_j|} d_{\lambda,\delta}(t) + \frac{\delta^4}{|x-z_j|^2} \\ &\leq C (\delta^2 + \delta + d_{\lambda,\delta}(t)), \end{align} where the constant $C$ depends on the spatial support of $\rho_\lambda$ and $\rho_\ast$ which we control uniformly up to time $T_0$ by Lemma \ref{lem:APriori}. Using $\delta \leq 1$ finishes the proof. \end{proof} \begin{proposition} \label{pro:ConvergenceCube} Let $t < T_\ast$. Then \[ \lim_{\delta \to 0} \lim_{\lambda \to \infty} d_{\lambda,\delta}(t) = 0. \] \end{proposition} \begin{proof} We define \[ \eta(t) := \sup_{(x,v) \in \operatorname{supp} f_0} |X_\lambda(t,0,x,v) - X_\ast(t,0,x)|.
\] Let $(x,v) \in \operatorname{supp} f_0$. We write again $X_\lambda(t)$ instead of $X_\lambda(t,0,x,v)$ and similar for $X_\ast$. We estimate using first \eqref{eq:almostTransportedByU} together with the fact that the support of $f_\lambda$ remains uniformly bounded up to time $T_0$, and then applying Lemma \ref{lem:uTilde}, Lemma \ref{lem:uTildeUAst}, and Lemma \ref{lem:d_lambda,delta} \begin{align*} |X_\lambda(t) - X_\ast(t)| & \leq \int_0^t |u_\lambda(s,X_\lambda(s)) - u_\ast(s,X_\ast(s))| \, \mathrm{d} s + \frac{C}{\lambda} \\ & \leq \int_0^t |\tilde{u}_\lambda(s,X_\lambda(s)) - u_\ast(s,X_\ast(s))| + |\tilde{u}_\lambda(s,X_\lambda(s)) - u_\lambda(s,X_\lambda(s))| \, \mathrm{d} s + \frac{C}{\lambda} \\ & \leq \int_0^t \|\tilde{u}_\lambda(s,\cdot) - u_\ast(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} + \|\nabla u_\ast(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} |X_\lambda(s) - X_\ast(s)| \, \mathrm{d} s + \frac{C}{\lambda} \\ & \leq C \int_0^t d_{\lambda,\delta}(s) + \delta + |X_\lambda(s) - X_\ast(s)| \, \mathrm{d} s + \frac{C}{\lambda} \\ & \leq C \int_0^t \eta(s) + \frac{1}{\delta \lambda} + \delta \, \mathrm{d} s + \frac{C}{\lambda}. \end{align*} Taking the supremum over $(x,v) \in \operatorname{supp} f_0$ yields for $\delta \leq 1$ \begin{align} \eta(t) &\leq C \int_0^t \eta(s) \, \mathrm{d} s +C \left( \frac{1}{\delta \lambda} + \delta\right). \end{align} Gronwall's inequality implies \[ \eta(t) \leq C \left( \frac{1}{\delta \lambda} + \delta\right) e^{C t}. \] Lemma \ref{lem:d_lambda,delta} yields \[ d_{\lambda,\delta}(t) \leq C \left( \frac{1}{\delta \lambda} + \delta\right) e^{C t}. \] Taking the limits $\lambda \to \infty$ followed by $\delta \to 0$ finishes the proof. \end{proof} Now, we have all the estimates needed to prove the statement of Theorem \ref{th:main} up to times $T < T_\ast$. \begin{proposition} \label{pro:strongConvergence} Let $T < T_\ast$.
Then, for all $\alpha < 1$, \begin{equation} \label{eq:ConvergenceRho} \rho_\lambda \to \rho_\ast \quad \text{in} ~ C^{0,\alpha}((0,T) \times \mathbb{R}^3). \end{equation} Moreover, for all $0 < t < T$, \begin{equation} \label{eq:CovergenceU} u_\lambda \to u_\ast \quad \text{in} ~ L^\infty((t,T) ; W^{1,\infty}(\mathbb{R}^3)) ~ \text{and in} ~ L^1((0,T) ; W^{1,\infty}(\mathbb{R}^3)). \end{equation} \end{proposition} \begin{proof} By Lemma \ref{lem:APriori}, the sequence $\rho_\lambda$ is uniformly bounded in $W^{1,\infty}((0,T) \times \mathbb{R}^3)$ for large enough $\lambda$. Therefore, for any $\alpha < 1$, $\rho_\lambda$ has a subsequence that converges in $C^{0,\alpha}((0,T) \times \mathbb{R}^3)$ to some function $\sigma$. We need to show $\sigma = \rho_\ast$. We claim that for all cubes $Q \subset \mathbb{R}^3$ and all $t < T$, \begin{equation} \label{eq:ConvergenceCubes} \int_Q \rho_\lambda(t,x) \, \mathrm{d} x \to \int_Q \rho_\ast(t,x) \, \mathrm{d} x. \end{equation} Clearly, \eqref{eq:ConvergenceCubes} implies $\sigma = \rho_\ast$. In order to prove \eqref{eq:ConvergenceCubes}, let $\varepsilon > 0$. Then, by Proposition \ref{pro:ConvergenceCube}, there exists $\delta_0 > 0$ such that for all $\delta < \delta_0$ and every cube $Q' \in \mathcal{Q}_\delta$ \begin{equation} \label{eq:smallerCubes} \limsup_{\lambda \to \infty} \left| \fint_{Q'} \rho_\lambda(t,y) - \rho_\ast(t,y) \, \mathrm{d} y \right| \leq \limsup_{\lambda \to \infty} d_{\lambda,\delta}(t) < \varepsilon. \end{equation} Up to a nullset, we can write $Q$ as the disjoint union of cubes $Q_i \in \cup_{\delta < \delta_0} \mathcal{Q}_\delta$. Thus, since $\varepsilon$ is arbitrary, \eqref{eq:ConvergenceCubes} follows. In order to prove \eqref{eq:CovergenceU}, we notice that by Lemma \ref{lem:uTilde} it suffices to prove \begin{equation} \tilde{u}_\lambda \to u_\ast \quad \text{in} ~ L^\infty((0,T) ; W^{1,\infty}(\mathbb{R}^3)).
\end{equation} However, by regularity theory of the Stokes equations \[ \|\tilde{u}_\lambda - u_\ast\|_{L^\infty((0,T) ; W^{1,\infty}(\mathbb{R}^3))} \leq C \| \rho_\lambda - \rho_\ast\|_{L^\infty((0,T)\times(\mathbb{R}^3))}, \] where we used that by Lemma \ref{lem:APriori} we have uniform control of the support of $\rho_\lambda$. \end{proof} \subsection{Convergence for arbitrary times} In view of Proposition \ref{pro:strongConvergence}, it only remains to prove $T_\ast = \infty$ to finish the proof of Theorem \ref{th:main}. The idea of the proof is the following. Due to Lemma \ref{lem:biLipschitzV}, it is sufficient to control the quantity $M_\lambda(t)$ defined in \eqref{eq:defM} uniformly in $\lambda$. Indeed, arguing similarly as in Lemma \ref{lem:T_astPositive}, $\limsup_{\lambda \to \infty} M_\lambda(t)$ has to blow up at time $T_\ast$. However, Proposition \ref{pro:strongConvergence} shows that, for large enough values of $\lambda$, $M_\lambda(t)$ is controlled by the corresponding quantity of the limit equation. This gives a contradiction. \begin{proof}[Proof of Theorem \ref{th:main}] By Proposition \ref{pro:strongConvergence}, it suffices to prove $T_\ast = \infty$. By Lemma \ref{lem:T_astPositive}, we have $T_\ast >0$. Assume $T_\ast < \infty$ and let $T < T_\ast$. By definition of $T_\ast$ and Lemma \ref{lem:boundsU}, the assumption $\lambda \geq 4 \|\nabla u_\lambda \|_{L^\infty((0,T) \times \mathbb{R}^3)}$ is satisfied for all $\lambda \geq \lambda_0(T)$. Recall the definition of $M(T)$ from Lemma \ref{lem:biLipschitzV}, which we will now denote by $M_\lambda(T)$ to emphasize the dependence on $\lambda$. Moreover, we denote by $M_\ast$ the corresponding quantity for the solution of the limit problem, i.e., \[ M_\ast (t) := \exp \left(\int_0^t 2 \|\nabla u_\ast(s,\cdot) \|_{L^\infty(\mathbb{R}^3)} \, \mathrm{d} s \right). \] By Proposition \ref{pro:strongConvergence}, we have \[ M_\lambda(T) \to M_\ast(T) \leq M_\ast(T_\ast). 
\] In particular, for all $\lambda \geq \lambda_0(T)$ (possibly enlarging $\lambda_0(T)$), \[ M_\lambda(T) \leq 2 M_\ast(T_\ast). \] Therefore, Lemma \ref{lem:biLipschitzV} implies for all $\lambda \geq \lambda_0(T)$ \[ \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)} \leq 2 M_\ast(T_\ast). \] The rest of the proof is very similar to the proof of Lemma \ref{lem:T_astPositive}. We define \[ T_\lambda := \sup \{t \geq 0 \colon \sup_{s \leq t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq 2 C_0 (2 M_\ast(T_\ast))^3\}. \] Then, $T_\lambda > T$ as $\rho_\lambda$ is continuous. Analogously as we have shown \eqref{eq:growRho} in Lemma \ref{lem:T_astPositive}, we find that for all $t>0$ and $\lambda \geq C \sup_{s \leq T + t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)}$ \begin{equation} \label{eq:growRho2} \sup_{s \leq T + t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 (2M_\ast(T_\ast))^3 \exp \bigg(C t \sup_{s \leq T + t} \|\rho_\lambda(s,\cdot)\|^2_{L^\infty(\mathbb{R}^3)}\bigg). \end{equation} This implies for all $\lambda \geq \max \{\lambda_0(T), C C_0^2 (M_\ast(T_\ast))^6\}$ and all $T + t < T_\lambda$ \[ \sup_{s \leq T + t} \|\rho_\lambda(s,\cdot)\|_{L^\infty(\mathbb{R}^3)} \leq C_0 (2M_\ast(T_\ast))^3 \exp( C C_0^2 (M_\ast(T_\ast))^6 t). \] As $\rho_\lambda$ is continuous, this yields for all $\lambda \geq \max \{ \lambda_0, C C_0^2 (M_\ast(T_\ast))^6 \}$ \[ T_\lambda \geq T + \frac{\log(2)}{C C_0^2 (M_\ast(T_\ast))^6}. \] In particular, choosing $T < T_\ast$ large enough, we deduce for all $\lambda \geq \max \{ \lambda_0, C C_0^2 (M_\ast(T_\ast))^6 \}$ \[ T_\lambda > T_\ast, \] which gives a contradiction to the definition of $T_\ast$. \end{proof} \end{document}
\begin{document} \begin{frontmatter} \begin{abstract} New methods for solving the college admissions problem with indifference are presented and characterised with a Monte Carlo simulation in a variety of simple scenarios. Based on a qualifier defined as the average rank, it is found that these methods are more efficient than the Boston and Deferred Acceptance algorithms. The improvement in efficiency is directly related to the reduced role of random tie-breakers. The strategy-proofness of the new methods is assessed as well. \end{abstract} \begin{keyword} college admission problem \sep deferred acceptance algorithm \sep Boston algorithm \sep Zeeburg algorithm \sep pairwise exchange algorithm \sep strategic behaviour \end{keyword} \end{frontmatter} \section{Introduction} After six years of primary school education pupils in the Netherlands choose a secondary school. In Amsterdam children have abundant choice, with up to a dozen different schools at each of the available school levels. Pupils will have a preference for a certain school based primarily on the distance to their home, objective and less objective public data on the quality of education, and the impression the schools make in their advertisement and `open days'. Unfortunately, the number of pupils a school can accept is not necessarily proportional to its popularity. Therefore, the local government has introduced a matching system to assign pupils to schools, similar to that used in many other cities in the world.\footnote{In Amsterdam the matching system is referred to with the Dutch word \emph{kernprocedure}.} Each pupil composes an ordered list of schools of their choice. \footnote{In the language of the field we say that students only provide their \emph{ordinal} preferences. 
Their \emph{cardinal} preferences, how much they value schools compared to one another on a more continuous scale, are unknown to the matching system.} Based on the collection of these preference lists the matching is performed. Although the pupils have clear preferences, the schools in the Amsterdam system are not allowed to differentiate between pupils. In the literature, this matching problem is also referred to as the \emph{college admissions problem with indifference}~\cite{Roth:1989,Roth:2007}. As all pupils may hand in exactly the same preference list, the system requires a method for arbitration, or \emph{tie-breaking}, at popular schools. This arbitration is performed by assigning pupils a lottery number. The matching system relies on a procedure, or \emph{algorithm}, to turn the available list of choices and lottery numbers into a final assignment of pupils to schools. Different methods were applied in the past. Up to the year 2014 the city of Amsterdam effectively used the so-called ``Boston'' algorithm. A disadvantage of the Boston method is that it is not \emph{strategy-proof}~\cite{Abdulkadiroglu:2003}, a well-known concept in game theory: Using predictions on how other pupils will vote (e.g.\ from the popularity of schools in previous years), pupils may benefit from providing a preference list that is different from their true preference list. Whether this is actually a disadvantage or not is a topic for debate~\cite{Abdulkadiroglu:2008,Abdulkadiroglu:2009}, but nevertheless, in 2015 the Deferred Acceptance (DA) algorithm~\cite{Gale:1962} was introduced, which is known to be strategy-proof~\cite{Dubins:1981}. In the implementation chosen in Amsterdam, a different random tie-breaker for each school was used. This algorithm is sometimes abbreviated as \emph{DA-MTB}~\cite{Oosterbeek:2015}, where MTB stands for multiple tie-breakers. 
Unfortunately, the DA-MTB algorithm is not what is called \emph{Pareto efficient}: two students may find that they both end up higher on their preference list if they exchange schools after the assignment. Not surprisingly this led to a public outcry from parents that had not anticipated this. As a result the system was yet again changed for the current calendar year: In 2016 Amsterdam will apply the DA algorithm with a single random tie-breaker (a.k.a. \emph{DA-STB}). The college admission problem is a well-studied problem. Yet, the pace with which algorithms are replaced in Amsterdam illustrates the lack of consensus on how to decide what is the best algorithm. The aim of this article is threefold. First, we introduce a qualifier, a single number that measures the welfare of the students, by which one can compare algorithms. Second, we introduce alternatives for the Boston and DA algorithms. One alternative, which we will call the `Zeeburg algorithm', is a matching algorithm specifically designed to minimize the number of comparisons made with the random tie-breaker. The other alternative is a method to improve on the solutions given by DA and Boston by introducing pairwise exchanges. Using simulated data for a number of different scenarios we show that these algorithms are less sensitive to the results of the lottery and better respect the preferences of the pupils. Finally, we argue that although the new algorithms are not strategy-proof, there is a compelling reason for favouring them over the algorithm that is currently applied in Amsterdam: even students that do not apply a strategy are better off. 
We label the pupils by an index $i$ and assume that the total number of pupils $N$ is smaller or equal to the total number of places, $ N \leq \sum_j N_j$. Based on personal preference, every pupil ranks the $M$ available schools in a list. We call that ranked list a \emph{preference} and denote it with the symbol $p$. For example, given four schools with labels 1, 2, 3 and 4, the list of pupil $i$ could look like \begin{equation*} p_i \; = \; ( 3,1,2,4 ) \end{equation*} such that each of the four schools appears exactly once. We label the $j$-th entry in the list by $p_{i,j}$ such that the most preferred school is $p_{i,1}$ (school $3$ in the example) and the least preferred school is $p_{i,M}$. The total set of preferences $\{ p_i \}$ of all students is the \emph{preference set}, or simply dataset. The result of the matching procedure is an assignment of pupils to schools $j$. We denote the value of $j$ for student $i$ by $a_{i}$. Every pupil is assigned to only one school and that school is $a_i$. Furthermore, every school can be assigned at most as many pupils as it has places, or, for every school $j$, \begin{equation} \sum_{\text{pupils $i$}} \delta_{a_{i},j} \; \leq \; N_j \end{equation} where $\delta_{ij}$ is $1$ for $i=j$ and $0$ otherwise. We call a set of $N$ assignments $ \{ a_{i} \} $ a \emph{solution} $S$ if it satisfies this property. We now introduce a simple qualifier to be able to rank solutions. The lower the assigned school ranks on the pupil's preference list, the less satisfied the pupil is with the assignment. For a given solution $S$ we quantify the dissatisfaction as the pupil's rank of the assigned school, or \begin{equation} r^S_i = \text{``value of $j$ for which $p_{i,j} = a^S_i$''} \end{equation} For instance, in the example above, the pupil's rank for a solution in which it would get assigned school~$1$ is two, etc. 
Our welfare qualifier for the solution $S$ is now simply the average rank \begin{equation} Q(S) \; = \; \frac{1}{N} \sum_i r^S_i . \end{equation} We define the optimal solution as the solution for which $Q$ is minimal. The optimal solution is not necessarily unique: there may be several solutions with the same value $Q$. In theory these solutions could be found by simply trying all possible assignments. Unfortunately, in any realistic scenario the number of possible combinations is far too large to try even a small fraction of them.\footnote{With $N$ pupils divided equally over $M$ schools the total number of permutations equals $N! / ((N/M)!)^{M}$. With 1000 pupils and 10 schools there are more permutations than atoms in the universe!} Therefore, in practice it is not easy to find even a single optimal solution. As we shall see below the Boston algorithm maximizes the number of students with rank one. One may wonder if the optimal solution always satisfies this property as well. However, a simple counterexample shows that it does not.\footnote{Take four schools with one student each. If the students' preference sets are $\{ (1,3,2,4), (2,1,3,4), (3,4,1,2), (2,3,1,4) \}$, the solution with the most pupils at rank one is $a = (1,2,3,4)$, which has $Q = 7/4$. The solution with the smallest average rank is $a = (1,2,4,3)$, with $Q=6/4$.} This illustrates that the solution cannot be found by first maximising the number of rank one assignments, then rank two, etc. As far as we know, there is no algorithm that can find the optimal solution to this matching problem in a reasonable amount of time. We label an algorithm \emph{efficient} if it provides the optimal solution. Lacking a truly efficient algorithm, all that we can wish for is an algorithm that is \emph{nearly} efficient. In the following, we shall call one algorithm more efficient than another algorithm if on an ensemble of similar datasets it gives a solution that on average has a smaller value for $Q$. 
\section{Characterisation of algorithms in a Monte Carlo simulation} The DA algorithm was originally developed to solve a two-sided market problem in which both sides have a strict ordinal preference to partners on the other side~\cite{Gale:1962}. The college matching in Amsterdam and many other cities is different: Schools are not allowed to rank students. To apply the traditional algorithms anyway, a sequence of random numbers, the \emph{tie-breaker}, takes the role of the ordinal preferences of the schools. The Boston and DA algorithms can use a random tie-breaker in two ways~\cite{Abdulkadiroglu:2008}: If all schools share the same tie-breaker, we talk about the \emph{single tie-breaker} (STB) variant. If every school has its own tie-breaker, we call it the \emph{multiple tie-breaker} (MTB) variant. It can be shown that, independent of the tie-breaker, the Boston algorithm is not strategy-proof, while the DA-MTB algorithm is not Pareto efficient. Using random numbers as a tie-breaker may lead to inefficient matching, because the real preferences of the students for schools compete with the random preference from schools for students~\cite{Abdulkadiroglu:2008}. To illustrate this we now compare the behaviour of these algorithms in a Monte Carlo simulation. \subsection{Description of the simulation} In~\cite{Oosterbeek:2015} the matching algorithms are compared on an actual dataset collected in the year 2015 in Amsterdam. This has the advantage that it corresponds to a real scenario, with real preferences of students including correlations. It has the disadvantage that only a single dataset can be used for the comparison: It tells little about the sensitivity to variations in the input dataset. 
One could imagine that an algorithm can be tuned to be efficient on one dataset, but behaves differently on the next.\footnote{The authors of~\cite{Oosterbeek:2015} have used their single input set to simulate multiple experiments by varying the random numbers for the tie-breakers. This helps to understand the sensitivity to the tie-breakers, but not to variations in the data, \emph{e.g.} variations in students in different years.} As an alternative we have chosen to define a set of simple scenarios that allow us to randomly generate datasets. This technique is called a Monte Carlo simulation. One generated dataset is called an \emph{experiment}. We consider a matching problem with 10 schools, labeled 1 to 10. Each school has place for 100 pupils, giving a total of 1000 pupils. Consequently, a single generated dataset consists of 1000 rankings of ten schools. We now consider four scenarios that differ in the way students \emph{on average} fill in their preference list. More specifically, in each scenario we choose how often a pupil puts a particular school as its first choice. The selected scenarios are given in table~\ref{tab:scenarios}. \begin{table}[htb] \centerline{ \small \begin{tabular}{|c||c|c|c|c c|} \hline {} & scenario A & scenario B & scenario C & \multicolumn{2}{c|}{scenario D} \\ \hline fraction of students & 100\% & 100\% & 100\% & 60\% & 40\% \\ \hline school 1 & 1 & 10 & 50 & 20 & 1 \\ school 2 & 1 & 9 & 50 & 20 & 1 \\ school 3 & 1 & 8 & 10 & 20 & 1 \\ school 4 & 1 & 7 & 10 & 20 & 1 \\ school 5 & 1 & 6 & 10 & 20 & 1 \\ school 6 & 1 & 5 & 10 & 1 & 20 \\ school 7 & 1 & 4 & 10 & 1 & 20 \\ school 8 & 1 & 3 & 10 & 1 & 20 \\ school 9 & 1 & 2 & 1 & 1 & 20 \\ school 10 & 1 & 1 & 1 & 1 & 20 \\ \hline \end{tabular}} \caption{Relative popularity of schools in the different scenarios. A value of 10 means that a school is ten times more likely to appear as a first choice than a school with a value of 1. 
In scenario D two populations of students are simulated, with a relative size of 60:40.} \label{tab:scenarios} \end{table} In scenario A all schools are equally popular. In scenario B school~1 is ten times more popular than school 10, and the rest is in between at fixed intervals. Scenario C is a variation of this, with two highly popular schools and two highly unpopular schools. We shall use this scenario to assess the effect of strategic ranking. The motivation for scenario D is given later. A single dataset is now generated as follows. Using the popularity of the schools a set of random numbers determines the order of the schools for each pupil. To determine the first school, the relative popularities are normalized to add up to one and a random number in the interval $[0,1)$ is thrown. The quantile of the random number determines the first school. A second random number determines the second school on the list, by considering the popularity normalized over the remaining schools. This procedure is repeated, until there are no schools left. It is then performed a thousand times --- once for each pupil --- to obtain the dataset for one experiment. Figure~\ref{fig:generatedschoolrank} shows for scenarios A, B and C how often a particular school appears first, and which position it takes on average on a pupil's preference list, averaged over 1000 experiments. \begin{figure} \caption{Fraction of times a school is ranked first (left) and average rank (right) as a function of school number in scenarios A, B and C, measured over 1000 experiments.} \label{fig:generatedschoolrank} \end{figure} In scenarios A, B and C the preferences of the pupils are uncorrelated: The selection of the preference for the second school is independent of which school was put first. In practice, pupil preferences are often correlated, for example because students prefer schools that are close to their neighbourhood. 
To include also a scenario with correlations we consider yet another scenario, labeled by D in the table. In this scenario there are two categories of students who have each their own popularity assignment: the first set of students strongly prefer the first five schools while the second set prefer the last five schools. If the two categories had an equal number of students, we could just factorise the matching problem, and effectively end up with scenario A. However, to simulate also the effect of an imbalance in capacity, we generate 60\% of the students in the first category and 40\% in the second category. That means that for 20\% of the students in the first category no school in their top-five can be assigned. \subsection{Results of the simulation} Given a simulated dataset we can now use the algorithms discussed above to obtain a matching. We have found that the Boston-MTB and Boston-STB are close in behaviour on all considered scenarios, and therefore we only consider Boston with a single tie-breaker. Besides DA and Boston we also test a new algorithm that we have called the Zeeburg algorithm. The details are described in appendix~\ref{app:zeeburg}. In brief, this algorithm minimizes the number of times the tie-breaker is used to compare students by making students jump to a queue of a school that appears later in their preference list if by doing so they are guaranteed to be admitted to that school. In some sense, the algorithm encodes a strategy for the students. The Zeeburg algorithm is Pareto efficient and stable\footnote{In a two-sided market problem with strict preferences on both sides a stable solution is a solution in which there is no pair of a student and a school that would prefer each other over their actual assigned partner(s). However, since the school preferences in the college problem with indifference are entirely fictitious, stability is not really relevant here.}, but not strategy-proof. 
Figure~\ref{fig:oneexperiment} shows the distribution of the assigned rank for one single experiment for each of the four scenarios. The first bin shows how many students get assigned to the school of their first choice, the second bin their second choice, etc. These distributions will look different for every experiment, because the preferences of the students differ and because the tie-breakers used in the matching algorithms differ. \begin{figure} \caption{Distribution of rank for one experiment in each of the four scenarios sketched in the text for the Boston, DA-STB and DA-MTB algorithms. Each distribution has a thousand entries, corresponding to the thousand students in the experiment.} \label{fig:oneexperiment} \end{figure} A convenient way to summarise the information in Fig.~\ref{fig:oneexperiment} for many experiments is to integrate this distribution, normalize it and average over the experiments. The result of this is shown in Fig.~\ref{fig:acceptancecurves}. The curves in this figure show which fraction of students get assigned to the school of their first choice, to their first \emph{or} second choice, etc. It is clear from these graphs that independent of the scenario, the Boston algorithm assigns most pupils to the school of their first choice. This is a property of the algorithm: it actually assigns the maximum possible number of students to their first choice. The DA-MTB scores poorly when it comes to the first choice, but it has a smaller tail. The reason for this is that with multiple tie-breakers, it is unlikely that a student is unlucky at every school. \begin{figure} \caption{Average cumulative acceptance functions for the four considered scenarios and for the Boston, DA-STB and DA-MTB algorithms, averaged over 1000 experiments. 
The vertical error bars correspond to the standard deviation of the variation between experiments.} \label{fig:acceptancecurves} \end{figure} Each of the points in Fig.~\ref{fig:acceptancecurves} has a vertical `error' bar. The size of the error reflects the variation in the integrals between the different experiments. This variation is larger for the DA-MTB algorithm than for the Boston algorithm, because the former is more sensitive to the random numbers in the tie-breaker. Another way to represent the variation between experiments is to consider the distribution of the average rank (our qualifier $Q$) in each experiment, shown in Fig.~\ref{fig:rankdistribution}. In scenario A the difference between the algorithms is small, but in all others it is substantial. In terms of the qualifier defined above, there is a clear order in the efficiency of the four algorithms, with Zeeburg having the highest efficiency and DA-MTB the lowest. For example, in scenario B students are on average assigned to their third rank school by the DA-MTB algorithm, while the average assignment of Zeeburg is between the first and second ranked school. \begin{figure} \caption{Distribution of the average rank $Q$ over 1000 experiments for the four different scenarios in the Boston, DA-STB, DA-MTB and Zeeburg algorithms.} \label{fig:rankdistribution} \end{figure} Figure~\ref{fig:rankdistribution} also indicates a large variation in the rank of an algorithm between different experiments. This variation has two sources, namely the actual differences in the datasets and the random character of the tie-breakers. To illustrate the importance of the latter we show yet another distribution. For each experiment we run the algorithms a second time, but with a different tie-breaker, a different student lottery. For each algorithm we now count how many students are the second time assigned to a different school, that is, how `deterministic' the algorithm is. 
The result is shown in Fig.~\ref{fig:schoolswaps} for all four algorithms. Comparing to Fig.~\ref{fig:rankdistribution} we note that the sensitivity to the tie-breaker is correlated with the efficiency: the less important the tie-breaker, the more efficient the algorithm. \begin{figure} \caption{Number of students that get a different assignment in two consecutive calls to the same algorithm in 1000 experiments.} \label{fig:schoolswaps} \end{figure} \subsection{The pairwise exchange method} Given a particular set of matches one can improve the average ranking using \emph{pairwise exchanges} (PE), a swap of the schools assigned to a pair of students. In principle, using pairwise exchanges one can transform any solution into any other, including the optimal solution. In practice, in order to limit the time-consumption of such an algorithm, it is necessary to limit the set of considered exchanges. In~\cite{Oosterbeek:2015} only pairwise exchanges that improved the ranking for both pupils involved in the swap were considered. If any such swaps can be found, the original solution was not Pareto efficient. However, exchanges that reduce the sum of the ranks of the two pupils improve the solution as well. Therefore, in order for the pairwise exchange method to be effective, such exchanges should be considered. Besides pairwise exchanges, one can also consider exchanges of higher order in which the average improves. Unfortunately, in our implementation in the \emph{python} programming language, the time consumption of even a triple exchange algorithm was found to be prohibitively large and we have not pursued this any further. The pairwise exchange method may reduce the number of students assigned to their first preference. 
Although this is perfectly allowed, we do build in a small bias towards rank one: Besides exchanges that decrease the average rank, we also consider exchanges that leave the average rank invariant, but for which the minimum of the rank of the two students after the exchange is smaller than before. That is, we prefer an assignment with ranks one and three to an assignment with ranks two and two, etc. Our pairwise exchange algorithm thus becomes: \begin{enumerate} \item order the pupils in decreasing rank according to the original solution; \item starting from the first pupil, labeled $i$, consider an exchange with all other pupils, labeled $j$; \item if the change in the average rank is smaller than zero, or if it is equal to zero but the minimum of the ranks of the two students becomes smaller, make the exchange; \item continue until exchanges of all pairs of pupils have been considered. \end{enumerate} We have found that it does not improve the performance of the exchange algorithm on our scenarios if the pupils are sorted again after every exchange. However, we do `restart' the loop on pupil $j$ after a successful exchange. By running the algorithm more than once we have verified that the algorithm effectively converges in one iteration. \begin{figure} \caption{Average cumulative acceptance functions for 1000 experiments in the four considered scenarios and for the DA-MTB, DA-STB, Boston and Zeeburg algorithm after the pairwise exchange (PE) algorithm. The Zeeburg algorithm without PE is included for comparison. The vertical error bars correspond to the RMS of the variation between experiments.} \label{fig:acceptancecurvesPE} \end{figure} Figure~\ref{fig:acceptancecurvesPE} shows the cumulative acceptance functions for all four algorithms after the PE algorithm is applied. It is both interesting and reassuring that the curves depend little on which algorithm was used to provide the solution that the PE starts from. 
The PE algorithm can be successfully applied to improve the efficiency of any of the tested algorithms to about the same level. This is also indicated by the average rank $Q$ shown in Tab.~\ref{tab:averagerank} for all scenarios and for all algorithms before and after the pairwise exchange. \begin{figure} \caption{Distributions for the number of students changing school (left) and the change in the average rank (right) after two consecutive calls to the algorithm in scenario C for 1000 experiments.} \label{fig:schoolswapsPE} \end{figure} \begin{table}[htb] \centerline{\small \begin{tabular}{|l|c|c|c|c|} \hline & A & B & C & D \\ \hline DA-MTB & $ 1.14 \pm 0.05 $ & $ 3.03 \pm 0.13 $ & $ 3.96 \pm 0.09 $ & $ 1.79 \pm 0.06 $ \\ DA-STB & $ 1.11 \pm 0.03 $ & $ 2.17 \pm 0.06 $ & $ 2.76 \pm 0.05 $ & $ 1.45 \pm 0.04 $ \\ Boston & $ 1.10 \pm 0.03 $ & $ 1.95 \pm 0.05 $ & $ 2.53 \pm 0.04 $ & $ 1.41 \pm 0.03 $ \\ Zeeburg & $ 1.08 \pm 0.02 $ & $ 1.51 \pm 0.04 $ & $ 2.26 \pm 0.07 $ & $ 1.21 \pm 0.03 $ \\ \hline DA-MTB-PE & $ 1.05 \pm 0.01 $ & $ 1.44 \pm 0.03 $ & $ 2.06 \pm 0.04 $ & $ 1.18 \pm 0.02 $ \\ DA-STB-PE & $ 1.04 \pm 0.01 $ & $ 1.44 \pm 0.03 $ & $ 2.06 \pm 0.04 $ & $ 1.18 \pm 0.02 $ \\ Boston-PE & $ 1.04 \pm 0.01 $ & $ 1.43 \pm 0.03 $ & $ 2.06 \pm 0.04 $ & $ 1.17 \pm 0.02 $ \\ Zeeburg-PE & $ 1.04 \pm 0.01 $ & $ 1.43 \pm 0.03 $ & $ 2.07 \pm 0.04 $ & $ 1.17 \pm 0.02 $ \\ \hline \end{tabular}} \caption{Average rank $Q$ for the scenarios and algorithms discussed in the text for 1000 experiments. The quoted error is the standard deviation of the variation between experiments.} \label{tab:averagerank} \end{table} To illustrate the stability of the result Figure~\ref{fig:schoolswapsPE} (left) shows the fraction of students changing schools for two independent sets of tie-breakers for a subset of the PE improved algorithms in scenario C. Note that there is still a large variation in the assignment. 
However, as illustrated in the right figure, the solutions are actually very close in rank. We found that most of the difference between the solutions can be attributed to pairs of students that have exchanged places such that the final change is rank neutral, simply because the students have ranked the two schools in the same way. One may wonder how close to the optimal solution the result of the pairwise exchange is. As the starting point is determined by the random tie-breaker and only a finite set of pairwise exchanges is tried, the result may correspond to a `local minimum' of the average rank. As a different local minimum is obtained with a different tie-breaker, one can try to assess the distance to the true minimum by trying different random tie-breakers. We have compared the average rank obtained with a single call to Boston plus PE to that obtained with a pick of `the best of 10'. The difference was found to be small, of the order of the variations seen on the right in Fig.~\ref{fig:schoolswapsPE}. We did not study the asymptotic behaviour in more detail but it seems that in practice the solution is close to optimal. \begin{figure} \caption{Average cumulative acceptance functions for 1000 experiments in scenarios B and C for Boston with the default pairwise exchange algorithm (PE) and for pairwise exchange with minimal variance (PEM).} \label{fig:PEalternatives} \end{figure} Finally, we have also compared the alternative option for dealing with `neutral' exchanges, namely to choose the one with the smallest maximum rank, rather than the smallest minimum rank. The former will lead to a smaller variance of the rank distribution. The comparison for Boston with PE is shown in Fig.~\ref{fig:PEalternatives}, where the alternative is labeled with the abbreviation PEM. As expected, the acceptance functions cross: the PE method gives slightly more results with rank 1, but has slightly more tail. The average rank is practically the same for both methods. 
\subsection{Tests of strategy-proofness} A matching method is strategy-proof if pupils do not benefit from specifying a preference list different from their true ordinal preference. It is not a priori clear what `benefit' means in this context, since there is always a price to pay. As we shall see below, students could apply a strategy that gives them a higher chance to get their first preference, at the expense of having a higher chance to end up with a school that ranks low on their list; or they could aim to increase the chance to get within their top three, by ranking their actual first choice lower. Therefore, one may argue that determining a strategy is just a cost-benefit analysis that individual pupils should be allowed to make. The main reason that we should worry about strategy-proofness anyway is because pupils that do not apply a strategy may be harmed by the behaviour of the strategists. This leads to a form of inequality as the background of students and parents influences their ability to understand the consequences of different strategies. In the following we test the effect of two simple selection strategies in our simulations. It should be emphasised that for a subset of students the current system in Amsterdam is already not strategy-proof for any matching algorithm. The reason is that some schools give preference to students that either have brothers or sisters at the same school, or that attended a certain type of primary school. As this preference is only given if students rank the school first, it is an incentive to put the school at the first place, even if it is not actually the first preference. To investigate strategy-proofness we can compare the efficiency of the matching algorithms for students that apply different kinds of strategies. We have found that, in practice, it is not that simple to define a popularity scenario and a ranking strategy that actually lead to a benefit for strategic students. 
After some trial and error, we have come up with scenario C: two schools that are so popular that most students will rank them as one and two, and two schools that are so unpopular that they are almost always at the bottom of the list. In this scenario strategic students can try to evade the unpopular school by putting one of the less unfavourable schools in their top two. To keep the implementation generic the actual applied strategy is that students re-order their true top three according to the known average popularity (table~\ref{tab:scenarios}). Figure~\ref{fig:strategy} shows the effect on the acceptance curves in a simulation of scenario C with 50\% of the students applying this strategy. Note that the cumulative acceptance is given as function of the \emph{true} rank, not the rank that the strategic student provided. The students applying a strategy are called `cautious', while the remaining students in the sample are `honest'. For reference also the original curves with only honest students are shown. \begin{figure} \caption{Average cumulative acceptance as function of the \emph{true} rank.} \label{fig:strategy} \end{figure} As expected, the DA algorithm with a random tie-breaker (be it STB or MTB) is indeed strategy-proof: students applying the cautious strategy are worse off than honest students, so applying a strategy makes no sense. The Boston algorithm is not strategy-proof in this scenario: although the cautious lose on their top one and two ranking, they beat their victims in the top three and beyond. The Zeeburg algorithm and any algorithm combined with pairwise-exchange optimisation are not strategy-proof either. Interestingly enough, in this scenario, they seem to be more strategy-proof than Boston, even though they are more efficient. This shows that efficiency is not directly coupled to strategy-proofness. 
In any case, it is important to note that in this scenario the victims are not worse off with any of the improved algorithms than they are with DA-STB. This can be seen by comparing the `original' curve for DA-STB with the `honest' curve in DA-STB-PE. The costs of DA's strategy-proofness are simply too high to compensate for the inefficiency caused by the lack of strategy-proofness in the other algorithms. \begin{figure} \caption{Average cumulative acceptance as function of the \emph{true} rank.} \label{fig:strategyG} \end{figure} Students could also apply a strategy that increases their chances to get assigned to their first choice by exploiting that some of the algorithms effectively give higher preferences to students that are more difficult to place at another school. This holds in particular for the Zeeburg and PE algorithm. We implement this strategy by keeping the first choice as is, but ranking the remaining schools in order of decreasing popularity. We call this the `gambling' strategy as these students give up on anything but their first choice. The result is shown for Zeeburg and DA-STB-PE in figure~\ref{fig:strategyG} for a scenario with 50\% gamblers. Clearly, the gamblers manage to profit from their strategy as the fraction of them that gets a rank one assignment is larger than for the `honest only' scenario. However, it also shows that the effect on the remaining students is small. Those students are still better off with the improved algorithm than with the original strategy-proof algorithm. 
\begin{table}[htb] \centerline{\small \begin{tabular}{|l||c|c||c|c|} \hline {} & \multicolumn{2}{c||}{cautious} & \multicolumn{2}{c|}{gambling}\\ \hline {} & strategists & honest & strategists & honest \\ \hline Boston & $ 2.50 $ & $ 2.73 $ & $ 3.68 $ & $ 1.66 $ \\ Zeeburg & $ 2.26 $ & $ 2.20 $ & $ 3.41 $ & $ 1.79 $ \\ Boston-PE & $ 2.33 $ & $ 2.00 $ & $ 3.58 $ & $ 1.61 $ \\ Zeeburg-PE & $ 2.31 $ & $ 2.03 $ & $ 3.55 $ & $ 1.63 $ \\ DA-STB-PE & $ 2.34 $ & $ 1.99 $ & $ 3.58 $ & $ 1.61 $ \\ \hline \end{tabular} } \caption{Average (true) rank $Q$ for strategic and honest pupils measured over 100 experiments in scenario C with either 50\% cautious strategists (left) or 50\% gambling strategists (right).} \label{tab:averagerankstrategising} \end{table} Table~\ref{tab:averagerankstrategising} shows the average rank obtained for strategic and honest pupils in the scenarios above. One clearly observes the price the strategists pay: as they do not provide their true preferences, their average rank is usually higher than for the honest students. Comparing to Tab.~\ref{tab:averagerank} we find that in terms of the average rank the honest do not score worse in Zeeburg and PE algorithms than they did without the strategists. In the other scenarios (A, B and D) the negative effect on honest students as a result of cautious and gambling strategies was found to be insignificant. That does not mean that there do not exist scenarios in which honest students are better off with a strategy-proof inefficient algorithm as DA-STB. However, it illustrates that in practice such scenarios may be rare. \section{Practical considerations and other discussion points} \subsection{School preferences} Some of the schools in Amsterdam may give a subset of pupils a preference over others, for example because elder siblings attend the school. In order to respect these constraints, they need to be built into the tie-breaker at the school. It is not easy to use such constraint in the Zeeburg algorithm. 
The easiest solution is to deal with this subset of students first, and use the matching algorithms only for the students that remain. \subsection{Incomplete preference lists} Above we have simulated a situation in which all pupils submit an ordered list that contains all schools. In Amsterdam, pupils do not need to hand in a full list: they may hand in a list with just one school. If a pupil cannot be assigned to that school in Boston or DA, the consequence is that the pupil needs to participate in a second round, in which only schools participate that still have places left. Clearly, this has consequences for the implementation of the algorithms. For instance, the Zeeburg and pairwise exchange cannot be applied in a fair way unless all preference lists are complete, as students may on purpose hand in lists that do not contain less popular schools. One practical solution is to complete the preference lists. They could be completed deterministically as follows: once the preference lists are available, schools are ranked by popularity. Every pupil's preference list is completed with the missing schools in order of increasing popularity. This is a clear motivation for students to hand in a long preference list. \subsection{School types} In contrast to many other countries in the world, the school system in the Netherlands differentiates the level of education directly at the start of secondary education. The level appropriate for a pupil's secondary education is determined by the teachers at the primary school based on scores on standard tests performed during the pupil's primary school career. The proposed level is called the \emph{advice}. There are roughly four `levels' of education. In theory, this just splits the matching in four independent parts. In practice, it is not that simple. First, students may be given a mixed advice. Second, many schools offer transition classes for the first or second year that combine more than one level. 
This complication is not a show-stopper, however. If the matching can be applied with Boston or DA, then the pairwise exchange algorithm can be applied as such, as long as it only exchanges students that have the same school advice. \subsection{Simplicity} An important property of a suitable matching algorithm is that it is sufficiently simple that it can both be easily explained and unambiguously described and implemented. In this respect the Zeeburg algorithm is perhaps a borderline case. However, the pairwise exchange algorithm certainly qualifies as simple. \subsection{Alternative optimisation criterion} The pairwise exchange method optimised the average rank $Q$. In the optimisation the difference between rank 8 and 9 is the same as between rank 1 and 2. However, pupils probably care less about the order in the tail than about the order of their top ranked schools. This was the main reason that we preferred the PE method with larger variance over the one with minimal variance (PEM). Still, one may wonder if alternative definitions of $Q$, for example as a power-law $Q \propto \sum_i r_i^\alpha$ with $\alpha<1$, would not lead to a solution that better reflects the cardinal preferences of the pupils. Inevitably, this will lead to a larger tail in the rank distribution. We have not further investigated this. \section{Conclusions} As was known long before it was introduced in Amsterdam, the DA algorithm is not a particularly efficient solution to the college admission problem with indifference~\cite{Abdulkadiroglu:2008,Abdulkadiroglu:2009}, as it was developed for a two-sided market problem with preferences on both sides. The Boston algorithm better respects the students' preferences. Other algorithms, such as the Zeeburg algorithm and the pairwise exchange optimisation introduced here, perform even better, in a variety of simple scenarios. 
The reason is that the sensitivity to the tie-breaker, the lottery tickets of the pupils, is significantly smaller in these alternatives. The inefficiency of the DA algorithms with random tie-breakers is the cost of strategy-proofness~\cite{Abdulkadiroglu:2008}. The more efficient algorithms are not strategy-proof. However, in the considered scenarios the costs of the lack of strategy-proofness are smaller than the costs of the inefficiency of DA: even students that do not apply a strategy are better off with the non-strategy-proof algorithms. Therefore, it seems hard to maintain strategy-proofness as a requirement of the matching algorithm. To understand whether or not these conclusions hold in more realistic scenarios, an analysis like the one in~\cite{Oosterbeek:2015} will need to be performed. However, based on the current results we strongly advise local authorities to reconsider their choice for DA in school matching. The simplest way to `fix' the algorithm is to augment it with the pairwise exchange algorithm that we described. This algorithm is simple and suffers little from the practical limitations discussed above. We believe that by applying this method, the results of the matching will be significantly more in line with the students' preferences. From personal experience we know that pupils and their parents spend a lot of effort to prioritise the schools in Amsterdam. If these efforts are taken seriously, random numbers should play a minimal role in the matching. \section{The Zeeburg algorithm} \label{app:zeeburg} The DA and Boston algorithms rely on a random tie-breaker that effectively describes the school preferences. We have implemented another algorithm in which the number of ties broken by the random tie-breaker is minimised. 
The algorithm works as follows: \begin{enumerate} \item For every school keep track of \begin{enumerate}[(i.)] \item the number of vacant places at the school; \item the rank of the current queue; \item the pupils in the queue. \end{enumerate} In addition keep track of the list of completed student-school matches. This defines the \emph{state} of the algorithm. \item Sort pupils according to a single random tie-breaker. Set the rank of the queue of every school to one and assign the number of vacant places. Line up pupils in the queue of their favourite school. This populates the queue in every school and completes the \emph{initial} state of the algorithm; \item Now run the following loop: \begin{enumerate}[(a.)] \item select a school that can entirely admit the queue of its current rank. If there is more than one such school, select the school with the queue with the smallest rank. If there is more than one such queue, select the school for which the number of places remaining after accepting all pupils in the queue is the smallest; \item for this queue, accept all pupils. Remove these pupils in every other queue that they appear. (Initially, pupils appear in only one queue, but this changes while the algorithm is running.) Reduce the number of vacant places according to the number of newly accepted students. If there are places left at the school, increase the rank of the queue, and line up all pupils that have not yet found a place and that rank the school according to the rank of the queue. (These students will now be in more than one queue); \item repeat until the condition under (3a.) can no longer be satisfied for any school. \end{enumerate} \item Apply the tie-breaker to force a decision on one of the queues: \begin{enumerate}[(a.)] \item select the queue with the smallest rank in a school that is not yet full. 
If there is more than one such queue, select the queue that has the smallest overflow (that is, for which the length of the queue minus the number of available places is minimal); \item accept pupils from the start of this queue until the school is full. Remove accepted pupils from other queues; \end{enumerate} \item Repeat steps 3 and 4 until all pupils have been accepted. \end{enumerate} \input{main.bbl} \end{document}
\begin{document} \begin{frontmatter} \title{Invited Discussion of ``A Unified Framework for De-Duplication and Population Size Estimation''} \runtitle{Invited Discussion} \begin{aug} \author{\fnms{Jared S.} \snm{Murray}\thanksref{addr1,t1,t2,m1}\ead[label=e1]{[email protected]}} \runauthor{J. S. Murray} \address[addr1]{Department of Information, Risk, and Operations Management and Department of Statistical Science. University of Texas at Austin. \printead{e1} } \thankstext{t2}{Supported by SES-1824555 The author gratefully acknowledges support from the National Science Foundation under grant number SES-1824555. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the funding agencies. } \end{aug} \end{frontmatter} I would like to congratulate the authors on a stimulating contribution to the literature on record linkage/de-duplication and population size estimation. \cite{tancredi2011hierarchical} was one of the papers that first piqued my interest in record linkage, so I am pleased to see more work along these lines (with an author population size of N+1!) My discussion below focuses on two main themes: Providing a more nuanced picture of the costs and benefits of joint models for record linkage and the ``downstream task'' (i.e. whatever we might want to do with the linked and de-duplicated files), and how we should measure performance. \section{The promise and peril of joint modeling: A partial defense of disunity} The promise of a joint model for record linkage, de-duplication, and population size estimation is likely obvious to the readership of Bayesian Analysis: We immediately obtain valid posterior inference over the population size that accounts for uncertainty about duplicates and links across files -- provided that we specify an adequate joint model. 
Which leads us predictably to the peril of joint modeling, the fact that specifying a model for any of these three tasks alone is nontrivial. Addressing them simultaneously in a single model requires specifying a joint model sufficiently rich to do well on all three tasks (linkage, de-duplication, and population size estimation) while being tractable enough to understand its properties and perform posterior inference. The model presented here necessarily makes some compromises in service of joint modeling, and I wonder about their impact. For example, assumptions about the sampling process generating the lists are essential to modeling the unknown population size and therefore must appear in any unified model. This will consequently restrict the prior distribution over the overlap between files in the record linkage/de-duplication portion of the model, despite the fact that the assumption of simple random sampling from the population -- or any sort of random sampling at all -- is otherwise irrelevant to record linkage and de-duplication. The assumptions made by the authors imply a very particular, informative prior distribution on $Z$, the partition of records into co-referent sets, and therefore on $K$, the number of distinct units captured across all lists (as reported in Table 1). This choice is consequential. Indeed, immediately prior to Section 3.1 the authors note that the induced prior distribution on $K$ is probably {\em not} well-suited to record linkage tasks in general, which makes me wonder why we should expect it to work well when doing record linkage and population size estimation simultaneously. I have to assume that either 1) we actually don't expect it to work particularly well but the joint model at hand demands it or 2) the assumptions about the sampling process are actually warranted here, at least approximately, while they may not be in general applications of record linkage. 
If the former, this seems to beg the question and ignore options beyond joint modeling. If the latter, things are more interesting. If the assumptions are in fact correct, we would expect to obtain more accurate and efficient inferences by inducing the ``true'' prior over $Z$ and $K$ using the joint model. But what happens when the sampling assumptions are violated? It is difficult to say, and it must depend on a host of factors (such as the degree and frequency of errors among co-referent records). However, it is not hard to imagine a case where relatively minor deviations from the sampling assumptions are more or less innocuous in the context of a population size model with known partition $Z$ but become influential when $Z$ is unknown and jointly modeled, due to the influence of the ``misspecified'' informative prior over $Z$. It would be interesting to try and draw this out via a simulation exercise (particularly in light of how influential \cite{steorts2016} found a similar prior to be in a pure record linkage/de-duplication context). If posterior inference is not robust to deviations from the sampling assumptions, what could we do instead? The desire to mitigate this undesirable ``feedback'' from a misspecified sub-model appears in many different settings, from Bayesian causal inference with propensity score models \citep{mccandless2010cutting,zigler2013model} to astrophysics \citep{yu2018incorporating} and beyond (see \citet{jacob2017better} for additional examples). This is a difficult problem and an active area of research. The proposed solutions often take the form of (possibly incoherent) multistage inference, in this case inferring the linkage structure in stage 1 and the population size in stage 2, propagating uncertainty from stage 1 to stage 2 without allowing any information from stage 2 to flow to stage 1. 
\citet{jacob2017better} give examples of settings where these ``posteriors'' are better than the posterior under a misspecified joint model in a decision-theoretic sense. In the context of de-duplication and population size modeling, \cite{sadinle2018} proposes a related two-stage alternative to joint modeling termed ``linkage averaging''. If (in the notation of the current paper) $h(\lambda)$ is the estimate of population size we would compute given complete data (i.e., a de-duplicated and linked set of files) then under certain conditions the posterior for $h(\lambda)$ under a record linkage/de-duplication model alone will give the same inferences as a proper Bayesian joint model for linkage, de-duplication, and population size estimation. With a single set of posterior samples one can perform inference over multiple models for the population size, again provided that they all satisfy some relatively mild conditions. These conditions do necessarily demand a degree of compatibility between the prior on $\lambda$ and the population size model. They bear a striking similarity to the conditions under which multiple imputation delivers (asymptotically) valid Bayesian inference (``congeniality'', \citep{meng1994multiple,xie2017dissecting,murray2018multiple}). This raises the interesting question of whether the compatibility conditions might be relaxed while still yielding conservative inferences, similar to the way one can obtain conservative inferences using imputations under an uncongenial imputation model, provided it is uncongenial in the ``right'' way (roughly, by making fewer assumptions during imputation than analysis). \section{Measuring and improving performance} Various sub-specialties of statistics have spawned their own de-facto benchmark datasets -- think of the iris data for clustering or the galaxy dataset for density estimation. 
Likewise, \texttt{RLdata500} and \texttt{RLdata10000} have arguably become something of a benchmark in record linkage problems due in large part to their accessibility via the popular \texttt{RecordLinkage} R package. I have used them in publications myself \citep{Murray_2015}. Benchmark datasets form a sort of lingua franca that is useful for teaching, exposition, and as a sort of sanity check (when our brilliant new method finds six distinct clusters in the iris data, it's back to the drawing board). However, we have to be careful extrapolating from these datasets to more complex settings. In the provocatively titled ``Leave the Pima Indians Alone'', \citet{chopin2017leave} make the case that an excessive focus on relatively simple binary regression problems like the Pima Indians diabetes dataset has had a distortive impact on the Bayesian computation literature. I worry a little that repeatedly going back to the \texttt{RLdata} datasets might lead the record linkage literature up the same path. In particular, the errors in these synthetic datasets are rather minimal, and the duplicate record pairs are quite well-separated from the non-duplicates. In my experience this is not representative of the datasets we see in the wild, at least not those that demand sophisticated statistical modeling. Like Britney and the Pima Indians, I think it may be time to leave \texttt{RLdata} alone. However, the primary evidence that the authors provide in favor of their model is its performance on \texttt{RLdata} datasets. Even setting aside whether this is a representative testbed, I wonder if this is much evidence at all since no alternative approaches are presented. Several are available, at least for the record linkage and de-duplication tasks, including some developed by the authors themselves (e.g. \cite{steorts2015} reports FNR and FDR of 0.02 and 0.04 on \texttt{RLdata500}, versus 0.015 and 0.08 using the model in the current paper). 
How well do existing Bayesian models perform on the linkage/de-duplication task? What about even simpler methods, like the point estimates generated by Fellegi-Sunter methods \citep{fellegi1969theory} or their generalizations \citep{sadinle2013generalized,Murray_2015}? This is important context; while the model proposed here offers richer inference, should we trust those inferences if the model does not perform relatively well on the linkage/de-duplication task? The authors actually seem to go a step further and use results on {\texttt RLdata} to inform parameter selection when modeling the Syrian casualty data. This frankly seems like a bad idea; in my own experience with similar files \citep{syria}, including expert hand-linked datasets, we observed very different patterns of distortion among co-referent records than the simple patterns one would find in {\texttt RLdata}. Given how variable performance is across parameter settings in Section 4, I would suggest that at least some sensitivity analysis might be in order for the Syria application. Rather than rely on unrepresentative benchmark datasets to measure performance and select parameters, what could we do instead? The longer I work on record linkage problems the more I am convinced of the need to include a hand-labeling exercise alongside every serious application. The synthetic datasets at our disposal are limited in the range of errors they include and are often poor representations of the problem at hand. Model-based estimates of error rates are only as good as the model, and if we're not sure about the model... However, provided that the true error rates are low, precise estimates of false match rates (false discovery rates) can be obtained via random sampling from matched record pairs. False match rates aren't everything, but they aren't nothing either. 
Sadly the authors missed an opportunity to do even a little inspection here; after finding a small number of duplicates in the Syria application, they note only that ``visual inspection of these pairs may eventually confirm their matching status''. Ideally a labeling exercise to evaluate a record linkage/de-duplication method should include matches generated by other methods (to remove potential bias toward declaring estimated matches correct), blinding (to the method(s) that declared the link), multiple review, an ``indeterminate'' or ``unsure'' option for the labelers, and should present labelers with neighboring ``near-match'' record pairs. Stellar examples of hand-labeling study designs include \cite{bailey2017well,8637549}. In \cite{posthoc} we hand-labeled a relatively small number of links to compare two competing methods, including one Bayesian model. For the Bayesian model we also used these labels to obtain the posterior distribution of false match rate adjusted estimators by computing them on each posterior sample of the linkage structure (similar to \cite{sadinle2018}'s linkage averaging). For our estimands, we only found it necessary to adjust for the false match rate and we did not grapple with simultaneous de-duplication or multiple files. But we did find that variation due to assumptions about bias from linkage error tended to swamp variation due to uncertainty about the linkage structure. Reducing or otherwise accounting for linkage error seems important in the context of the current paper as well. Observe that in Figure 3, the estimates of $K$ are worse in the blocks with higher error rates (blocks 7, 1, 10, 3) and in each case the estimate for $K$ is biased down with a rather concentrated posterior distribution. If the model cannot be improved further, perhaps we would be better off looking at the posterior distribution of linkage error adjusted estimates of the population size. 
Linkage error adjusted estimators for the population size do exist, at least for relatively simple settings (e.g. \citet{ding1994dual,di2018population,soton436665}) and perhaps could be cast in \cite{sadinle2018}'s framework of linkage averaging (although I have not checked the compatibility conditions myself). These estimators depend on false non-match rates, which are more difficult to obtain through hand labeling but often can be reasoned about based on plausible levels of duplication and overlap. This reasoning could form the basis of a computationally efficient sensitivity analysis. This seems like a promising avenue for future research, alongside further improvements in model and prior specification to minimize error rates. \end{document}
\begin{document} \title{Development of an approximate method for quantum optical models and their pseudo-Hermicity} \date{\today} \author{Ramazan Ko\c{c}} \email{[email protected]} \affiliation{Department of Physics, Faculty of Engineering University of Gaziantep, 27310 Gaziantep, Turkey} \begin{abstract} An approximate method is suggested to obtain analytical expressions for the eigenvalues and eigenfunctions of some quantum optical models. The method is based on the Lie-type transformation of the Hamiltonians. In a particular case it is demonstrated that the $E\times \varepsilon $ Jahn-Teller Hamiltonian can easily be solved within the framework of the suggested approximation. The method presented here is conceptually simple and can easily be extended to the other quantum optical models. We also show that for a purely imaginary coupling the $E\times \varepsilon $ Hamiltonian becomes non-Hermitian but $P\sigma _{0}$-symmetric. Possible generalization of this approach is outlined. \end{abstract} \pacs{03.65.Fd, 42.50.Ap} \keywords{Algebraic Methods, Quantum Optical Models, Pseudo-Hermicity} \maketitle \section{Introduction} It is well known that the rotating wave approximation (RWA) is a useful method in determination of the eigenvalues and associated eigenfunctions of the various quantum optical Hamiltonians. The approximation gives accurate results when the frequency associated with the free evolution of the system is substantially larger than the transition frequencies induced by the interaction between subsystems or with an external source. In quantum physics the application of the RWA usually leads to symmetry breaking: the representation space of the whole system is then divided into invariant subspaces, which strongly simplifies the mathematical complexity of the problem and usually provides the exact solution of the Hamiltonian. 
The simplest model which describes a two-level atom interacting with a single mode cavity field is the Jaynes-Cummings (JC) model \cite{jaynes}. Considerable attention has been devoted to the interaction of a radiation field with atoms since the paper of Dicke \cite{dicke}. Such a system is commonly termed as the Dicke model. In spite of its simplicity, the whole spectrum of the Dicke Hamiltonian cannot be obtained exactly and usually it has been treated in the framework of the RWA. Besides its solution with the RWA, in some papers an attempt is made to go beyond the RWA \cite{tur}. The continual integration methods are based on variational principles. The perturbative approach \cite{zaheer,zeng} leads to more complicated mathematical treatments and the theory converges only for a certain relationship between parameters of the Hamiltonian. In a more recent study, Klimov and his co-workers \cite{klimov} have developed a general perturbative approach to quantum optical models beyond the RWA, based on the Lie-type transformation. The Jahn-Teller (JT) interaction \cite{jahn} is one of the most fascinating phenomena in modern physics and chemistry, providing a general approach to understanding the properties of molecules and crystals and their origins. This phenomenon has inspired some of the most important recent scientific discoveries, such as the concept of high temperature superconductivity. The JT interaction is an example of electron-phonon coupling. Therefore it seems that the RWA can be applied for solving JT problems. Most of the JT Hamiltonians are more complicated than the Dicke Hamiltonian. At present, a few of them (i.e. $E\otimes \beta ,\ E\otimes \epsilon $ ) have been analyzed in the framework of quasi-exactly solvable problems \cite{koc1,koc2} or isolated exact solvability \cite{judd,longuet,reik,loorits,klenner,kus}, both providing a finite number of exact eigenvalues and eigenfunctions in closed form. 
In this paper we devise a novel method for solving JT Hamiltonians, as well as other quantum optical Hamiltonians in the framework of RWA. It will be shown that the eigenvalues and the associated eigenfunctions can be obtained in the closed form when the coupling constant is smaller than the natural frequency of the oscillator. The method described here includes a part of the motivation provided by the existence of the connection between JT Hamiltonians and Dicke Model. Here we concentrate our attention to the solution of the $E\otimes \epsilon $ \ JT\ Hamiltonian. Its solution has been treated previously by many authors \cite {judd,longuet,reik,kulak,lo,szopa}. We develop a new approximation method which is based on the similarity transformation. The method introduced here is the same as the RWA which has been usually used to solve Dicke Hamiltonian. An interesting and somewhat simpler form of the JT Hamiltonian is obtained by RWA. Other purpose of this paper is to show that for some purely imaginary couplings the $E\otimes \epsilon $ JT Hamiltonian becomes non-Hermitian but its low-lying part of the spectrum is real. It will be shown that the non-Hermitian Hamiltonian is not $PT$-invariant \cite {bender1,bender2,znojil,bagchi,ahmed} , but it is pseudo-Hermitian \cite {must1,must2,must3,bhabani,piju}. In the following section, we shall demonstrate our procedure on the $ E\otimes \epsilon $ JT Hamiltonian. We present a transformation procedure and we obtain approximate form of the $E\otimes \epsilon $ JT Hamiltonian. We show that Hamiltonian can be transformed in the form of the Dicke type Hamiltonians. We also obtain explicit expressions for the eigenstates and eigenvalues of the JT Hamiltonian. In section 3, we discuss the pseudo-Hermicity of the Hamiltonian. Finally we summarize our results. 
\section{Method and Summary of the Previous Results} The well-known form of the $E\otimes \varepsilon $ JT Hamiltonian describing a two-level fermionic subsystem coupled to two boson modes has obtained by Reik\cite{reik} is given by \begin{equation} H=\omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}+\kappa \lbrack (a_{1}+a_{2}^{+})\sigma _{+}+(a_{1}^{+}+a_{2})\sigma _{-}], \label{1} \end{equation} where $\omega _{0}$ is the level separation, $\omega $ is the frequency of the oscillator and $\kappa $ is the coupling strength. The Pauli matrices $ \sigma _{0,\pm }$ are given by \begin{equation} \sigma _{+}=\left[ \begin{array}{cc} 0 & 1 \\ 0 & 0 \end{array} \right] ,\quad \sigma _{-}=\left[ \begin{array}{cc} 0 & 0 \\ 1 & 0 \end{array} \right] ,\;\sigma _{0}=\left[ \begin{array}{cc} 1 & 0 \\ 0 & -1 \end{array} \right] . \label{2} \end{equation} The annihilation and creation operators, $a_{i}\;$and$\;a_{i}^{+},$ satisfy the usual commutation relations, \begin{equation} \lbrack a_{i}^{+},a_{j}^{+}]=[a_{i},a_{j}]=0,\quad \lbrack a_{i},a_{j}^{+}]=\delta _{ij}. \label{eq:4} \end{equation} The Hamiltonian (\ref{1}) can be solved in the framework of quasi-exactly solvable problems\cite{koc2} or by using numerical diagonalization method \cite{tur}. 
In order to obtain rotating wave approximated form of the $ E\otimes \varepsilon $ Hamiltonian, we use similarity transformation by introducing the operator \begin{equation} T=\frac{\kappa }{\omega +\omega _{0}}\left( \ \sigma _{+}a_{2}^{+}-\sigma _{-}a_{2}\right) +\frac{\kappa }{\omega -\omega _{0}}\left( \ \sigma _{-}a_{2}^{+}-\sigma _{+}a_{2}\right) , \label{3} \end{equation} and imposing the condition \ $\left| \omega \pm \omega _{0}\right| \gg \kappa ,$ which usually holds in the weak interaction, transformation of the Hamiltonian (\ref{1}), yields that \begin{eqnarray} \widetilde{H} &=&e^{T}He^{-T}\approx \omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}+\kappa \lbrack (a_{1}+a_{2})\sigma _{+}+(a_{1}^{+}+a_{2}^{+})\sigma _{-}]+ \notag \\ &&\left[ \frac{\kappa ^{2}}{\omega +\omega _{0}}\left( a_{1}^{+}a_{2}^{+}+a_{1}a_{2}\right) +\frac{\kappa ^{2}}{\omega -\omega _{0}} \left( a_{1}^{+}a_{2}+a_{1}a_{2}^{+}\right) \right] \sigma _{0}+ \notag \\ &&\frac{\omega \kappa ^{2}}{\omega ^{2}-\omega _{0}^{2}}\left( a_{2}^{+2}+a_{2}^{2}+2a_{2}^{+}a_{2}\right) \sigma _{0}+ \label{4} \\ &&\frac{\kappa ^{2}\sigma _{+}\sigma _{-}}{\omega -\omega _{0}}-\frac{\kappa ^{2}\sigma _{-}\sigma _{+}}{\omega +\omega _{0}}+O\left( \frac{\kappa ^{3}}{ \omega ^{2}-\omega _{0}^{2}}\right) . \notag \end{eqnarray} Since $\frac{\kappa ^{2}}{\omega \pm \omega _{0}}\ll 1$ is assumed to be a small parameter, neglection of the last term confirms result; \begin{equation} \widetilde{H}\approx \omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}+\kappa \lbrack (a_{1}+a_{2})\sigma _{+}+(a_{1}^{+}+a_{2}^{+})\sigma _{-}]. \label{5} \end{equation} It is analytically solvable due to the neglect of \ the counter-rotating terms, so called \ RWA. Now, we turn our attention to the solution of the Hamiltonian (\ref{5}). 
The rotation of the bosons given by the following operator \begin{equation} U=\exp \left( \frac{\pi }{4}(a_{1}^{+}a_{2}-a_{2}^{+}a_{1})\right) \label{5a} \end{equation} provides the expressions \begin{subequations} \begin{eqnarray} &&U(a_{1}+a_{2})U^{-1}=\sqrt{2}a_{1},\quad U(a_{1}^{+}+a_{2}^{+})U^{-1}= \sqrt{2}a_{1}^{+} \notag \\ &&U(a_{1}^{+}a_{1}+a_{2}^{+}a_{2})U^{-1}=a_{1}^{+}a_{1}+a_{2}^{+}a_{2} \label{5b} \end{eqnarray} Under $U,$ the Hamiltonian becomes \end{subequations} \begin{equation} \widetilde{H}\approx \omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}+\sqrt{2}\kappa \lbrack a_{1}\sigma _{+}+a_{1}^{+}\sigma _{-}]. \label{5x} \end{equation} The resultant Hamiltonian can easily be solved, because the matrix of the Hamiltonian can be decomposed into an infinite set of independent $2\times 2$ blocks on the subspaces $\left\{ \left| \uparrow ,n_{1}\right\rangle \left| \uparrow ,n_{2}\right\rangle ,\left| \downarrow ,n_{1}+1\right\rangle \left| \downarrow ,n_{2}\right\rangle \right\} $, where $n_{1}$ and $n_{2}$ are the numbers of photons. The eigenvalue problem can be written as \begin{table}[t] \begin{tabular}{|c|c|c|c|c|} \hline & \multicolumn{2}{|c|}{Ground state} & \multicolumn{2}{|c|}{First excited state} \\ \hline $\kappa ^{2}$ & $E_{rwa}$ & $E_{exact}$ & $E_{rwa}$ & $E_{exact}$ \\ \hline $0.1$ & 0.90455 & 0.90442 & 1.85982 & 1.82286 \\ \hline $0.2$ & 0.81678 & 0.81595 & 1.73508 & 1.67515 \\ \hline $0.3$ & 0.73508 & 0.73277 & 1.62159 & 1.54472 \\ \hline $0.4$ & 0.65835 & 0.65371 & 1.51676 & 1.36373 \\ \hline $0.5$ & 0.58578 & 0.57798 & 1.41886 & 1.31592 \\ \hline $0.6$ & 0.51676 & 0.50498 & 1.32667 & 1.21248 \\ \hline $0.7$ & 0.45080 & 0.43429 & 1.23931 & 1.11438 \\ \hline $0.8$ & 0.38754 & 0.36557 & 1.15609 & 1.02070 \\ \hline $0.9$ & 0.32667 & 0.29856 & 1.07646 & 0.93072 \\ \hline \end{tabular} \caption{Ground-state and first excited-state energies of the $E\otimes \protect\varepsilon $ JT Hamiltonian.} \label{tab:b} \end{table} 
\begin{equation} \widetilde{H}\left| \psi \right\rangle =E\left| \psi \right\rangle \label{6} \end{equation} where $\left| \psi \right\rangle $ is the two component \ eigenstate \begin{equation} \left| \psi \right\rangle =\left( \begin{array}{l} c_{1}\left| n_{1}\right\rangle \left| n_{2}\right\rangle \\ c_{2}\left| n_{1}+1\right\rangle \left| n_{2}\right\rangle \end{array} \right) , \label{7} \end{equation} where $c_{1}$and $c_{2\text{ }}$are normalization constant. Action of $ \widetilde{H\text{ }}$on $\psi $ yields the following expressions \begin{subequations} \begin{align} \left( c_{1}\left( \omega \left( n_{1}+n_{2}+1\right) +\omega _{0}\right) +c_{2}\sqrt{2}\kappa \sqrt{n_{1}+1}\right) \left| n_{1}\right\rangle \left| n_{2}\right\rangle & =c_{1}E\left| n_{1}\right\rangle \left| n_{2}\right\rangle \label{8a} \\ \left( c_{2}\left( \omega \left( n_{1}+n_{2}+2\right) -\omega _{0}\right) +c_{1}\sqrt{2}\kappa \sqrt{n_{1}+1}\right) \left| n_{1}+1\right\rangle \left| n_{2}\right\rangle & =c_{2}E\left| n_{1}+1\right\rangle \left| n_{2}\right\rangle . \label{8b} \end{align} Eliminating $c_{1}$ and $c_{2\text{ }}$between (\ref{8a}) and (\ref{8b}) and solving the resultant equation \ for $E$, we obtain \end{subequations} \begin{equation} E=\left( j+1\right) \omega \pm \frac{1}{2}\sqrt{8\kappa ^{2}(n+1)+\left( \omega -2\omega _{0}\right) ^{2}}. \label{9} \end{equation} where $j=n_{1}+n_{2}$ total number of bosons and $n=0,1,2,\cdots ,2j.$ The eigenstates can be easily written by using boson operators, acting on a vacuum state$\left| 0\right\rangle ;$ \begin{equation} \left| \psi \right\rangle =\left[ c_{1}a_{2}^{j-n}a_{1}^{+n}\left| 0\right\rangle ,c_{2}a_{2}^{j-n}a_{1}^{+n+1}\left| 0\right\rangle \right] ^{T}. \label{10} \end{equation} We conclude that in weak coupling limit the oscillators does not coupled to each other and each of them oscillates with their own frequencies. 
We have proven that when the interaction between the $E$ ion and the $\varepsilon $-modes is weak, the $E\otimes \varepsilon $ JT Hamiltonian can be reduced to the JC model. Our formalism provides a solution of the problem which allows us to discuss the JT effects in the Dicke model. The accuracy of the approximate eigenvalues can be checked by means of the (quasi) exact solution of the $E\otimes \varepsilon $ JT Hamiltonian. The material parameters are chosen to be $\omega =1$ and $\omega _{0}=0$. The results are tabulated in Table~\ref{tab:b}. The results of our study show that the eigenvalues and eigenstates of the $E\otimes \varepsilon $ JT\ Hamiltonian can be approximately described when the frequency $\omega $ of the oscillator is larger than the interaction constant. \section{Non-Hermitian interaction} It will be shown that for a purely imaginary coupling constant $\kappa ,$ the low-lying part of the spectrum of the $E\otimes \varepsilon $ JT Hamiltonian is real, although the Hamiltonian is non-Hermitian. Let us consider the Hamiltonian (\ref{5x}) with the imaginary coupling $\kappa =i\gamma :$ \begin{equation} h=\omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}+i\sqrt{2}\gamma \lbrack a_{1}\sigma _{+}+a_{1}^{+}\sigma _{-}]. \label{11} \end{equation} This Hamiltonian is not Hermitian, as \begin{equation} h^{\dagger }=\omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) +\omega _{0}\sigma _{0}-i\sqrt{2}\gamma \lbrack a_{1}\sigma _{+}+a_{1}^{+}\sigma _{-}]\neq h. \label{12} \end{equation} Under the parity transformation, the Pauli matrices remain invariant but both the creation and annihilation operators change sign. The time reversal operator for this Hamiltonian is $T=-i\sigma _{y}K$ where $K$ is the complex conjugation operator. The time reversal operator changes the sign of the Pauli matrices and the boson operators. 
It is easy to see that the Hamiltonian (\ref{11}) is not $PT$-symmetric \begin{equation} (PT)h(PT)^{-1}=\omega \left( a_{1}^{+}a_{1}+a_{2}^{+}a_{2}+1\right) -\omega _{0}\sigma _{0}+i\sqrt{2}\gamma \lbrack a_{1}\sigma _{+}+a_{1}^{+}\sigma _{-}]\neq h. \label{13} \end{equation} The Hamiltonian is not $PT$-symmetric but it has a real spectrum. Mostafazadeh \cite{must1,must2,must3} has shown that the reality of the spectrum of a non-Hermitian Hamiltonian is due to the pseudo-Hermiticity properties of the Hamiltonian. A Hamiltonian is called $\eta $-pseudo-Hermitian if it satisfies the following relation \begin{equation} \eta h\eta ^{-1}=h^{\dagger }, \end{equation} where $\eta $ is a linear Hermitian operator. The Hamiltonian $h$ and its adjoint $h^{\dagger }$ can be related to each other by the operator $\sigma _{0}$ and using the relation $\sigma _{0}\sigma _{\pm }\sigma _{0}^{-1}=-\sigma _{\pm }:$ \begin{equation} \sigma _{0}h\sigma _{0}^{-1}=h^{\dagger }. \end{equation} Then the Hamiltonian (\ref{11}) is $\sigma _{0}$-pseudo-Hermitian. Our Hamiltonian is also pseudo-Hermitian with respect to the parity operator. It is shown in \cite{must1} that if a Hamiltonian is pseudo-Hermitian under two different operators, $\eta _{1},$ $\eta _{2},$ then the system is symmetric under the transformation generated by $\eta _{1}\eta _{2}^{-1}.$ Therefore our Hamiltonian is invariant under the symmetry generated by the combined operator, $P\sigma _{0}:$ \begin{equation} \left[ h,P\sigma _{0}\right] =0. \end{equation} \section{Conclusion} The aim of this paper was to illustrate how the $E\otimes \varepsilon $ JT Hamiltonians can be solved by developing a transformation procedure. An approximate form of the $E\otimes \varepsilon $ JT Hamiltonian has been found in the framework of the RWA. The resultant Hamiltonian can be solved analytically and its eigenvalues can be obtained in closed form. 
We have shown that in the weak coupling limit the JT models may be recognized as the Dicke model. We have shown that when the coupling constant is imaginary the Hamiltonian is non-Hermitian but $P\sigma _{0}$-symmetric. We also hope to extend the method to the other JT and quantum optical systems. \end{document}
\begin{document} \begin{center} {\LARGE Gears, Pregears and Related Domains} \end{center} \begin{center} \today \end{center} \begin{center} Philip R. Brown\footnote{Partially supported by CONACyT grant 166183} \\ R. Michael Porter\footnotemark[1] \end{center} \noindent Abstract. We study conformal mappings from the unit disk to one-toothed gear-shaped planar domains from the point of view of the Schwarzian derivative. Gear-shaped (or ``gearlike'') domains fit into a more general category of domains we call ``pregears'' (images of gears under M\"obius transformations), which aid in the study of the conformal mappings for gears and which we also describe in detail. Such domains being bounded by arcs of circles, the Schwarzian derivative of the Riemann mapping is known to be a rational function of a specific form. One accessory parameter of these mappings is naturally related to the conformal modulus of the gear (or pregear) and we prove several qualitative results relating it to the principal remaining accessory parameter. The corresponding region of univalence (parameters for which the rational function is the Schwarzian derivative of a conformal mapping) is determined precisely. \noindent Keywords: conformal mapping, accessory parameter, Schwarzian derivative, gearlike domain, conformal modulus, topological quadrilateral. \noindent AMS Subject Classification: Primary 30C30; Secondary 30C20, 33E05. \section{Introduction \label{sec:intro}} A special case of a circular quadrilateral is a \textit{gear domain} with one tooth: a starlike open set in the complex plane bounded by arcs of two circles centered at the origin and segments of two lines passing through the origin. A related family of domains which we call \textit{pregear domains}, are those which are M\"obius transformations of gear domains. 
In \cite{Goo}, the Riemann mapping of the unit disk onto a gear domain, fixing the origin, is expressed as the solution of a first order linear differential equation (see (\ref{eq:goodman}) below) which is derived by making use of the starlike property in the unit disk and the boundary behavior of the mapping. An alternative approach, which we follow here, is based on the more general construction of conformal maps onto circular polygons, expressing the mapping as a solution of the equation which prescribes its Schwarzian derivative, as expounded in \cite{BG,Br2,BrP}, \cite[p.\ 70]{DT}, \cite{Hi,KP2}, \cite[p.\ 198]{Neh}, and \cite{Phoro} for circular quadrilaterals. In the present case of gear domains with one tooth, after normalizing the location of the prevertices, the Schwarzian derivative $R_{t,\lambda}(z)$ (see (\ref{eq:Rtlambda})--(\ref{eq:psi0tpsi1t}) below) contains the two unknown parameters $t$ (which determines the prevertices) and $\lambda$ (an auxiliary parameter), which in turn determine via the conformal mapping the two natural geometric quantities which specify the gear domain (Figure~\ref{fig:onetooth}); namely, the ratio of the radii of the outermost and the innermost circle, which we call the \emph{gear ratio} $\beta$, and half the angle between the rays that form the straight boundary segments, which we call the \emph{gear angle} $\gamma$. We describe one-tooth gear domains, summarize the relevant theory from \cite{Goo}, and work out the formula for $R_{t,\lambda}(z)$ in Section~\ref{sec:quads}. The functional relationship between the parameters $t$ and $\lambda$ and the corresponding gear domain is analyzed in Section~\ref{modules}, where for convenience we work with the conformal module $M(t)$ of the gear domain. We prove that when $t$ is fixed, for each $\gamma$ there is at most one value of $\lambda$ for which a solution of (\ref{eq:SfRtlambda0}) is a gear. 
We also prove that when $\gamma$ is fixed, $\beta$ is a monotonic function of $M(t)$ in the full range $0<t<\pi/2$. On the other hand, when $\beta$ is fixed, $M(t)\to 0$ as $\gamma\to 0$ or $\gamma \to\pi$, which leads to our conjecture that there are exactly two gears (corresponding to two different values of $\gamma$) with module $M(t)$, provided $t$ is below a threshold value $t_\beta$; for $t=t_\beta$ there is only one gear with module $M(t)$ and when $t$ is above the threshold there are no gears at all with this module. A basic property of the Schwarzian derivative is its invariance under M\"obius transformations. Consequently, the solutions $f$ obtained by solving (\ref{eq:sdeq}) or (\ref{eq:SfRtlambda0}) with the classical normalizations \mbox{$f(0)\!=\!0$} and \mbox{$f'(0)\!=\!1$} are, in general, M\"obius transforms of one-tooth gear domains. In Section~\ref{sec:gearregion} we give a full geometric description of these ``pregear'' domains and regard them as being of independent interest from the point of view of conformal mapping. The family of pregear domains is seen to be bounded by ``degenerate pregear domains'', which are M\"obius transformations of certain unbounded rectilinear quadrilaterals. From this observation we are able to determine precisely the boundary of the region in the $(t,\lambda)$-plane for which $R_{t,\lambda}$ is the Schwarzian derivative of a univalent function (i.e., a conformal mapping to a gear). \section{Schwarzian derivative and accessory\\ parameters \label{sec:quads}} \subsection{Gear domains} In this study a \textit{gearlike domain} (or gear domain) is a starlike open set $G$ in the complex plane $\mathbb{C}$ bounded by arcs of circles centered at the origin and segments of lines passing through the origin. Occasionally for reasons of normalization of mappings we may use the same term for a translate of a gear domain, in which case we will clarify that the ``gear center'' may not be the origin. 
The fundamental study of gearlike domains was initiated in \cite{Goo} and further results have appeared in \cite{BPea,Br3,Ni,Pea}. We will consider in particular bounded gearlike domains $G$ with a single ``tooth'' as in Figure~\ref{fig:onetooth}. The interior angles are defined by $\pi\alpha_i$ for $i=1,2,3,4$, where $\alpha_1=\alpha_4=1/2$ and $\alpha_2=\alpha_3=3/2$. We will assume that $G$ is symmetric in the real axis, and that the corresponding prevertices of the conformal mapping are of the form \[ z_1=e^{it_1},\ z_2=e^{it_2},\ z_3=e^{-it_2},\ z_4=e^{-it_1}, \] $0<t_1<t_2<\pi$, as can always be obtained by a preliminary transformation of $\mathbb{D}$. The straight edges of $G$ will be referred to as the \emph{tooth edges}, which when prolonged meet at the \emph{gear center} at an angle $2\gamma$, where $\gamma$ will be termed the \emph{gear angle}. The two edges of $\partial G$ which are not tooth edges are arcs of circles centered at the gear center: the \emph{A-arc} ending at angles of $3\pi/2$ and the \emph{B-arc} ending at angles of $\pi/2$. The quotient $\beta$ of the radius of the B-arc to that of the A-arc is the \emph{gear ratio} of $G$. We write $G_{\beta,\gamma}$ for the standard gear domain with gear parameters $\beta,\gamma$ which is centered at the origin and has A-arc of radius $1$. \begin{figure} \caption{Gear parameters. } \label{fig:onetooth} \end{figure} Most of the research which has been done on gearlike domains is based on results in \cite{Goo}. We state here the following particular case. 
\begin{prop}\label{prop:goodman} A necessary and sufficient condition for $f(z)$ to be a univalent mapping of the unit disk onto a one-tooth gear domain, satisfying the normalizations $f(0)=0$ and \begin{equation}\label{derivatives} f''(0)=2f'(0)(\cos t_2-\cos t_1), \end{equation} where the prevertices $e^{\pm it_1}$ map to the vertices with interior angles $\pi/2$ and the prevertices $e^{\pm it_2}$ map to the vertices with interior angles $3\pi/2$, is that \begin{equation}\label{eq:goodman} f'(z)= \frac{1}{z}\left(\frac{z^2-2z\cos t_1+1}{z^2-2z\cos t_2+1}\right)^{1/2} f(z). \end{equation} Furthermore, the gear ratio $\beta$ and the gear angle $\gamma$ are determined by the following two integral formulas: \[ \log \beta = \int_{t_1}^{t_2}\sqrt{\frac{\cos\theta-\cos t_2} {\cos t_1 -\cos\theta }}\,d\theta, \quad \gamma = \int_0^{t_1}\sqrt{\frac{\cos\theta-\cos t_2} {\cos\theta -\cos t_1 } }\,d\theta. \] \end{prop} \par \noindent\textit{Proof. } The necessity of (\,{\rm Re}\,f{eq:goodman}) is a consequence of \cite[Theorem 2]{Goo}. Goodman also derived the formulas for $\beta$ and $\gamma$ and gave formulas for the coefficients $\{b_n\}_{n\ge2}$ of the Maclaurin series of $f(z)$ in terms of the coefficient $b_1$. In particular, he obtained (\,{\rm Re}\,f{derivatives}). The sufficiency of (\,{\rm Re}\,f{eq:goodman}) is a consequence of the geometry and symmetry of a one-tooth gear domain: divide the unit circle $\partial\mathbb{D}$ into four arcs separated by the points $z_1,z_2,z_3,z_4$. It follows from (\,{\rm Re}\,f{eq:goodman}) as in the proofs of \cite[Lemmas 1 and 2]{Goo} that as $\theta$ increases, $|f|$ is constant and $\arg f$ is increasing for $-t_1<\theta<t_1$ and $t_2<\theta<2\pi-t_2$; similarly $\arg f$ is constant and $|f|$ is decreasing for $t_1<\theta<t_2$, while $\arg f$ is constant and $|f|$ is increasing for $-t_2<\theta<-t_1$. 
As a result of the singularities in (\ref{eq:goodman}), the interior angles of $f(\partial\mathbb{D})$ at $f(e^{\pm it_1})$ must be $\pi/2$ and the interior angles at $f(e^{\pm it_2})$ must be $3\pi/2$. Now if $f$ were not univalent, then an examination of a few possibilities for which the properties above are satisfied would show that the winding number of $f(\partial\mathbb{D})$ about the origin must be greater than 1, which means that $f$ could not have a simple zero at the origin. However, this contradicts the property $\lim_{z\to0}z\,f'(z)/f(z)=1$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} While it is straightforward to solve (\ref{eq:goodman}) numerically, from these relations it is difficult to find the values of $t_1,t_2$ corresponding to a pair $\beta,\gamma$. \subsection{The Schwarzian derivative of a gear mapping} Although in principle the operator $zf'/f$ considered by Goodman is simpler than the Schwarzian derivative ${\mathcal{S}}_f=(f''/f')'-(1/2)(f''/f')^2$, we will base our study of gear domains on the latter to take advantage of the rich theory which has developed around it, as referred to in the introduction. We require some general facts about conformal mappings of circle polygon domains. Let $\mathbb{D}=\{|z|<1\}$ denote the unit disk. For a general circle polygon domain $D$ with interior angles $\pi\alpha_k$ at the vertices $w_k$, write $a_k=(1-\alpha_k^2)/2$. Let $f\colon\mathbb{D}\to D$ be a conformal mapping. 
Then the Schwarzian derivative ${\mathcal{S}}_f$ is a rational function of the form \begin{equation} \label{eq:schgen} {\mathcal{S}}_f(z) = z^{-2}\sum_{k=1}^n \bigg( \frac{a_k z_k z}{(z-z_k)^2} + i r_k\frac{z+z_k}{z-z_k} \bigg), \end{equation} where $z_k=f^{-1}(w_k)\in\partial\mathbb{D}$ ($1\le k\le n$) are the prevertices of $D$ with respect to the mapping $f$, and $r_k\in\mathbb{R}$ are additional accessory parameters which satisfy the relations \begin{equation} \label{eq:rcondgen} \sum_{k=1}^n r_k = 0, \quad \sum_{k=1}^n z_k(a_k+2ir_k) = 0, \end{equation} These relations assure that $f$ sends the boundary $\partial\mathbb{D}$ to a union of circular arcs, and further imply that the singularity of (\,{\rm Re}\,f{eq:schgen}) at the origin is removable. See \cite{Phoro} for a proof. For one-tooth gear domains this specializes as follows. \begin{theo} \label{th:ratfuncs} Let $G$ be a one-tooth gear domain and let $f\colon\mathbb{D}\to G$ be a conformal mapping. Suppose that $f$ is symmetric in the real axis. 
Then there are unique values $t_1,t_2,\lambda$ ($0<t_1<t_2<\pi$, $\lambda\in\mathbb{R}$) such that Schwarzian derivative ${\mathcal{S}}_f$ of $f$ can be expressed as \begin{equation} \label{eq:sdeq} S_f=R_{t_1,t_2,\lambda} \end{equation} where \begin{equation} \label{eq:ratfuncs} \frac{1}{2} R_{t_1,t_2,\lambda}(z)= \psi_{0,(t_1,t_2)}(z) - \lambda \psi_{1,(t_1,t_2)}(z) \end{equation} with \begin{equation}\label{eq:psi1} \psi_{1,(t_1,t_2)}(z) = \frac{4 (\cos t_2-\cos t_1) } {\left(z^2-(2\cos t_1) z +1\right) \left(z^2-(2 \cos t_2) z +1\right)} \end{equation} and \begin{equation}\label{eq:psi0} \psi_{0,(t_1,t_2)}(z) = \frac{ c_{40} z^4 + c_{30}z^3 + c_{20} z^2 + c_{10}z + c_{00} } {\left(z^2-(2\cos t_1) z +1\right)^2 \left(z^2-(2 \cos t_2) z +1\right)^2}, \end{equation} \begin{eqnarray*} c_{00} &=& c_{40} \ =\ \frac{ 3 \cos 2t_1-5 \cos 2 t_2+2}{8}, \\ c_{10} &=& c_{30} \ =\ 3 \sin ^2t_1 \cos t_2-5 \cos t_1 \sin ^2t_2 , \\[1ex] c_{20} &=& \frac{(\cos 2 t_1) (11-2 \cos 2 t_2)-13 \cos 2 t_2+4}{4} . \end{eqnarray*} \end{theo} \par \noindent\textit{Proof. } Via the assumed symmetry $f(z)=\overline{f(\overline{z})}$ we have ${\mathcal{S}}_f(z)=\overline{{\mathcal{S}}_f(\overline{z})}$. 
From this and the general form (\,{\rm Re}\,f{eq:schgen}) with $n=4$ it follows (using $z_1=\exp(it_1)=\overline{z_4}$, $z_2=\exp(it_2)=\overline{z_3}$) that $r_1=-r_4$, $r_2=-r_3$, so with the first relation of (\,{\rm Re}\,f{eq:rcondgen}) the general formula reduces to \begin{eqnarray*} z^2 {\mathcal{S}}_f(z)= && \frac{3}{8}\left( \frac{ z {z_1}}{(z-{z_1})^2}+\frac{ z\overline{z_1}}{(z-\overline{z_1})^2} \right) -\frac{5}{8}\left( \frac{ z {z_2}}{(z-{z_2})^2}+\frac{ z \overline{z_2}}{(z-\overline{z_2})^2} \right) \\ && +i {r_1} \left(\frac{z+{z_1}}{z-{z_1}}-\frac{z+\overline{z_1}}{z-\overline{z_1}}\right)+i {r_2} \left(\frac{z+{z_2}}{z-{z_2}}-\frac{z+\overline{z_2}}{z-\overline{z_2}}\right) \end{eqnarray*} We introduce a new real parameter $\lambda$, determined by \begin{equation} r_1 = \frac{ \lambda + (3/8)\cos t_1}{2\sin t_1}, \quad r_2= \frac{ \lambda - (5/8)\cos t_1}{2\sin t_2} , \end{equation} and the second relation of (\,{\rm Re}\,f{eq:rcondgen}). After substituting $\exp\pm it_1$ and $\exp\pm it_2$ for the prevertices of $f$, the result follows by algebraic manipulation (see \cite[Appendix]{Phoro} for further details). Clearly $t_1$, $t_2$ are determined by the given prevertices of $f$, and the uniqueness of $\lambda$ follows from the explicit formula. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} \noindent\emph{Symmetrization of prevertices.} We are interested in the combinations of accessory parameters $(t_1,t_2,\lambda)$ for which $f$ is a gear mapping. We have the flexibilty to relocate the prevertices $t_1$ and $t_2$, as we explain in the following paragraphs. The \textit{pullback} of a gear mapping $f$ via the self-mapping \begin{equation} \label{eq:Tq} T_q(z) = \frac{z-q}{-qz+1}. \end{equation} of $\mathbb{D}$ is by definition the Schwarzian derivative of the function $f\circ T_q$, also defined in $\mathbb{D}$. 
Since the composition is also a gear mapping, by Theorem \,{\rm Re}\,f{th:ratfuncs} and the Chain Rule we obtain the form of the pullback of ${\mathcal{S}}_f$, \begin{equation}\label{eq:pullback} R_{t_1,t_2,\lambda}(z) = R_{t_1^*,t_2^*,\lambda^*}(z^*)\, T_q'(z)^2, \end{equation} for some $\lambda^*\in\mathbb{R}$, where ${\mathcal{S}}_f=R_{t_1^*,t_2^*,\lambda^*}(z^*)$, $e^{it_1^*}=T_q(e^{it_1})$, $e^{it_2^*}=T_q(e^{it_2})$, $z^*=T_q(z)$. It is not difficult to see that the ``$\psi_0$'' and ``$\psi_1$'' parts of the Schwarzian derivative pull back independently because of the differing degrees of the polynomials in their denominators: \begin{eqnarray} \psi_{0;t_1,t_2}(z) &=& \psi_{0;t_1^*,t_2^*}(z^*)\, T_q'(z)^2, \nonumber\\ \psi_{1;t_1,t_2}(z) &=& \psi_{1;t_1^*,t_2^*}(z^*)\, T_q'(z)^2. \end{eqnarray} In fact, the following holds. \begin{prop} \label{prop:pullback} The pullback of the Schwarzian derivative ${\mathcal{S}}_f$ of a gear mapping respects the auxiliary parameters in the sense that $\lambda^*=\lambda$ in (\,{\rm Re}\,f{eq:pullback}). \end{prop} \par \noindent\textit{Proof. } This is an explicit but tedious calculation making use of the elementary formula \[ T_q(a)-T_q(b) = T_q'(a)^{1/2}T_q'(b)^{1/2}(a-b) = \frac{(1-q^2)(a-b)}{(1-qa)(1-qb)} \] which can be derived immediately from the the formula for the derivative $T_q'(z)=(1-q^2)/(-qz+1)^2$ (and choosing say $\sqrt{1-q^2}/(-qz+1)$ as the indicated square root). \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} The following result shows that by means of an appropriate $T_q$ it is possible to make the prevertices symmetric in the imaginary axis. This will be useful for our considerations of modules in Section \,{\rm Re}\,f{modules}. \begin{prop} \label{prop:Tq} Let $0<t_1<t_2<\pi$, $z_1=e^{it_1}$, $z_2=e^{it_2}$. Define \[ q = \frac{z_1+z_2-2\sqrt{\,{\rm Im}\, z_1\,\,{\rm Im}\, z_2}\sqrt{z_1z_2}}{1+z_1z_2} \] where $\sqrt{z_1z_2}=e^{i(t_1+t_2)/2}$. 
Then there is a unique $0<t<\pi/2$ such that \[ T_q(z_1)=e^{it}, \quad T_q(z_2)=e^{i(\pi-t)} . \] \end{prop} \par \noindent\textit{Proof. } First we note that $\overline{q}=q$ because of the relations $z_1\overline{z_1}=z_2\overline{z_2}=1$. It follows from the definition (\,{\rm Re}\,f{eq:Tq}) that $|T_q(z_1)|=|T_q(z_2)|=1$, and then we calculate that \begin{eqnarray*} T_q(z_1) &=& \frac{z_1-z_2+2\sqrt{\,{\rm Im}\, z_1\,\,{\rm Im}\, z_2}\sqrt{z_1z_2}}{1-z_1z_2}, \\ T_q(z_2) &=& \frac{-z_2-z_1+2\sqrt{\,{\rm Im}\, z_1\,\,{\rm Im}\, z_2}\sqrt{z_1z_2}}{1-z_1z_2}. \end{eqnarray*} Using the same reasoning we used to show $\overline{q}=q$ it follows that $T_q(z_2)=-\overline{T_q(z_1)}$, so we may take $t=\arg T_q(z_1)$. Since $T_q$ fixes $\pm 1$ and thus conserves the order of $1,e^{it_1},e^{it_2},-1$ along $\partial\mathbb{D}$, it follows that $T_1'(0)>0$, which implies that $|q|<1$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} By Proposition~\,{\rm Re}\,f{prop:pullback} we can always express $ R_{t_1,t_2,\lambda}$ as \begin{equation} \label{eq:Rt1t2lambda} R_{t_1,t_2,\lambda} = (R_{t,\pi/2-t,\lambda}\circ T_q) (T_q')^2. \end{equation} for a parameter $t$ uniquely determined by $t_1,t_2$ according to Proposition~\,{\rm Re}\,f{prop:Tq}. We thus reduce the study of gear mappings to the special case \begin{equation} \label{eq:RtlambdafromRt1t2} R_{t ,\lambda} := R_{t,\pi/2-t,\lambda}. \end{equation} Explicitly, we have a much simpler Schwarzian derivative \begin{eqnarray} \label{eq:Rtlambda} \frac{1}{2}R_{t ,\lambda}(z) &=& \psi_{0,t}(z) - \lambda \psi_{1,t}(z) \end{eqnarray} where \begin{eqnarray} \label{eq:psi0tpsi1t} \psi_{0,t}(z) &=& \frac{ (\sin^2t)(z^4-(16\cos t)z^3 + (4+2\cos2t) z^2 - (16\cos t) z + 1) } {2(z^4-(2 \cos2t)z^2+1)^2}, \nonumber \\ \psi_{1,t}(z) &=& \frac{-8\cos t}{z^4-(2\cos2t)z^2+1}. 
\label{eq:psit} \end{eqnarray} We make the precautionary observation that even though the prevertices are now symmetric in both coordinate axes, the Schwarzian derivative $R_{t ,\lambda}$ is not symmetric in the imaginary axis. \section{Conformal modules of gear domains}\label{modules} We discuss some of the relations among $t,\lambda,\beta,\gamma$. First we will treat $t$ as fixed, for the following reason. The \textit{conformal module} $M(G_{\beta,\gamma})>0$ of any gear (or pregear) with prevertices $\pm e^{\pm it}$ is by definition the conformal module of the unique conformally equivalent rectangle $(0,1,1+\tau,\tau)$ with imaginary $\tau$; i.e., $\tau=iM(t)$. Thus we can write $M(t)=M(G_{\beta,\gamma})$. Note that \begin{equation}\label{eq:limitM(t)} \lim_{t\to0}M(t)=0; \quad \lim_{t\to\pi/2}M(t)=\infty . \end{equation} \begin{defi}\rm We will say that the rational function $R_{t,\lambda}$ is \textit{gearlike} if there is a solution $f$ of \begin{equation} \label{eq:SfRtlambda0} {\mathcal{S}}_f = R_{t,\lambda} \end{equation} that is a univalent mapping onto a gear. \end{defi} \begin{lemm} \label{lemm:lambdafromgamma} Fix $t$. Then for each $\gamma$ there is at most one value of $\lambda$ for which $R_{t,\lambda}$ is gearlike. \end{lemm} \par \noindent\textit{Proof. } Different values of $\lambda$ for which $R_{t,\lambda}$ is gearlike would correspond to different values of $\beta$. If two gears have the same gear angle $\gamma$ but different values of $\beta$, one must be contained within the other. By the well-known monotonicity of conformal modules of topological quadrilaterals \cite{LV} it is not possible for these gears to have the same $M(t)$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} \begin{prop} \label{prop:limitmodules} (i) Let $0<\gamma<\pi$. Then $M(G_{\beta,\gamma})\to\infty$ as $\beta\to1$, while $M(G_{\beta,\gamma})\to0$ as $\beta\to\infty$. (ii) Let $\beta>1$. Then $M(G_{\beta,\gamma})\to0$ as $\gamma\to0$ or $\gamma\to\pi$. 
\end{prop} \par \noindent\textit{Proof. } (i) The Euclidean separation of the vertical sides of the topological quadrilateral $G_{\beta,\gamma}$ tends to 0 as $\beta\to1$ while the horizontal sides are bounded away from one another, hence the conformal module tends to $\infty$ (\cite[Lemma 4.1]{LV}). Similarly, as $\beta\to\infty$, one may rescale the quadrilateral to see that the separation of the horizontal edges tends to 0, so the conformal module tends to $0$. (ii) This limit is harder to see because although the horizontal edges in $G_{\beta,\gamma}$ become arbitrarily close, the rightmost vertical edge also degenerates (the Riemann map with $0\to0$ takes all four vertices close to 1). We can decompose the gear as $G_{\beta,\gamma}=\mathbb{D} \cup \{e^{i\theta}\colon\ |\theta|<\gamma\}\cup D_1$ where $D_1$ is the tooth. (Here $\mathbb{D}$ is not a quadrilateral, just a bigon attached to a vertical side of $D_1$ but it can be thought of as a limiting case of a quadrilateral.) By monotonicity of conformal modulus, \[ \frac{1}{M(G_{\beta,\gamma})} \ge \frac{1}{M(D_1)}. \] Since $M(D_1)\to0$ as $\gamma\to0$, also $M(G_{\beta,\gamma})\to0$. Now let $\gamma$ tend to $\pi$. The arc of $\partial\mathbb{D}$ from $\gamma$ to $2\pi-\gamma$ is a vertical edge which is very small, since there are curves of length less than $2(\pi-\gamma)$ interior to $G_{\beta,\gamma}$ joining the vertical edges (which are the tooth edges, having length $\beta-1$) to each other. On the other hand, the Euclidean area of $G_{\beta,\gamma}$ is more than $\pi$, the area of $\mathbb{D}$, independently of $\gamma$. By \cite[sec.\ 4.3]{LV}, \[ M(G_{\beta,\gamma}) < \frac{(2(\pi-\gamma))^2}{\pi} \to 0 \] as $\gamma\to\pi$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} As in Lemma \ref{lemm:lambdafromgamma}, the function $\beta\mapsto M(G_{\beta,\gamma})$ is monotone since the gear grows with $\beta$. By Proposition \ref{prop:limitmodules} we have the following.
\begin{prop}\label{prop:betafromt} Let $\gamma\in(0,\pi)$. Then for any\/ $t\in(0,\pi/2)$ there is a unique $\beta>1$ such that the gear $G_{\beta,\gamma}$ has conformal module $M(t)$. \end{prop} Given $\beta$, the existence of a value $\gamma$ for which $M(G(\beta,\gamma))$ is maximal follows likewise from Proposition \ref{prop:limitmodules}. The assertion that the conformal module is monotone for $\gamma$ above and below this value is so far confirmed only by numerical evidence which we will present in our forthcoming article \cite{BrP3}. \begin{conj} \label{conj:betafromgamma} Let $\beta>1$. Then there is a value $t_\beta\in(0,\pi/2)$ such that for each $t\in(0,t_\beta)$ there are exactly two values of $\gamma$ such that $M(G(\beta,\gamma))=M(t)$. (For $t=t_\beta$ there is exactly one, while for $t>t_\beta$ there are none.) \end{conj} We now have in hand enough information about gears to prove one of our main results. \begin{theo} \label{theo:lambdainterval} For each $t$ in $(0,\pi/2)$ there are constants $\lambda_t^-$, $\lambda_t^+$ such that $R_{t,\lambda}$ is the Schwarzian derivative of a conformal mapping from $\mathbb{D}$ to a gear domain if and only if $\lambda_t^-<\lambda<\lambda_t^+$. \end{theo} \par \noindent\textit{Proof. } By Proposition \ref{prop:limitmodules} there exist $\beta_0,\gamma_0$ such that $M(G_{\beta_0,\gamma_0})=M(t)$. By the Riemann Mapping Theorem and Theorem \ref{th:ratfuncs}, there exists $\lambda_0\in\mathbb{R}$ such that $R_{t,\lambda_0}$ is the Schwarzian derivative of a conformal mapping from $\mathbb{D}$ to $G_{\beta_0,\gamma_0}$. Since the set of $\lambda\in\mathbb{R}$ for which $R_{t,\lambda}$ is the Schwarzian derivative of a conformal mapping from $\mathbb{D}$ to a gear domain is clearly open, we may consider the maximal open interval $I_t=(\lambda_t^-,\lambda_t^+)$ which contains $\lambda_0$ and is contained in this set.
Suppose that there is a sequence $\lambda_n\to\lambda_t^+$ in $I_t$ for which the gear angle $\gamma_n$ converges to some $\gamma_0\in(0,\pi)$. The corresponding conformal mappings $f_n\colon\mathbb{D}\to G_{\beta_n,\gamma_n}$ with ${\mathcal{S}}_{f_n}=R_{t,\lambda_n}$, symmetric in $\mathbb{R}$, and with $f_n'(0)>0$, all cover $\mathbb{D}$ and thus (perhaps on a subsequence) converge to a mapping $f$ with $\mathbb{D}\subseteq f(\mathbb{D})$. Since $\lambda_t^+$ is a boundary parameter, we must have $\beta_n\to1$ or $\beta_n\to\infty$ on a subsequence. However, by Proposition \ref{prop:limitmodules} this implies $M(G_{\beta_n,\gamma_n})\to0$ or $M(G_{\beta_n,\gamma_n})\to\infty$, which is absurd since the conformal module $M(t)$ is fixed. It follows that $\gamma$ accumulates only to $0$ or $\pi$ as $\lambda\to\lambda_t^+$, and the same holds as $\lambda\to\lambda_t^-$. However, the function $\lambda\mapsto\gamma$ (for the fixed $t$ under consideration) is strictly monotone. Indeed, suppose that the Schwarzian derivatives $R_{t,\lambda_1}$ and $R_{t,\lambda_2}$ determined gears $G_{\beta_1,\gamma}$ and $G_{\beta_2,\gamma}$ with the same $\gamma$ but $\beta_1<\beta_2$. Then $G_{\beta_1,\gamma}\subseteq G_{\beta_2,\gamma}$ and by monotonicity of conformal modules, $M(G_{\beta_2,\gamma})<M(G_{\beta_1,\gamma})$, again contradicting the fact that both modules are equal to $M(t)$. We thus see that $\gamma\to\pi$ as $\lambda\to\lambda_t^-$ and $\gamma\to0$ as $\lambda\to\lambda_t^+$, or vice versa. (Numerically we will see in \cite{BrP3} that $\lambda\mapsto\gamma$ is actually decreasing.) Now we can show that $R_{t,\lambda}$ is not the Schwarzian derivative of a gear mapping for any $\lambda$ outside of the interval $I_t$. Suppose indeed that $\lambda_1$ were such a parameter. As was the case for $\lambda_0$, there is a maximal interval $I$ containing $\lambda_1$ and in which every $R_{t,\lambda}$ gives a gear mapping.
As before, the limits of the $\gamma$ values at the endpoints of $I$ must be $0$ and $\pi$, and these values thus range over the whole interval $(0,\pi)$. Therefore there exists a value $\lambda\in I$ such that $R_{t,\lambda}$ produces a gear $G_{\beta,\gamma_0}$, where $R_{t,\lambda_0}$ produces $G_{\beta_0,\gamma_0}$. Again by monotonicity of the module we have that $\beta=\beta_0$. Then by the uniqueness of conformal mappings, $R_{t,\lambda}=R_{t,\lambda_0}$ and finally $\lambda=\lambda_0$. This proves the statement. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} (The limiting rational expressions $R_{t,\lambda^\pm_t}$ correspond to maps to degenerate gears, as will be discussed now). \section{The region of gearlikeness\label{sec:gearlikeness}}\label{sec:gearregion} \subsection{Pregear domains}\label{sec:pregears} \begin{defi}\rm We will say that a domain $D$ is a \emph{pregear} when it is the image $D=T(G)$ of a one-tooth gear domain $G$ under a M\"obius transformation $T$. \end{defi} \begin{figure} \caption{Examples of pregears. The circles $C^\pm$ containing the tooth edges are solid gray; the circles containing the A- and B-arcs in dotted gray. } \label{fig:pregears1} \end{figure} The tooth edges as well as the A- and B-arcs of any pregear may be uniquely identified by the corresponding interior angles. Normally we will restrict the discussion to pregears with the symmetries of the following proposition. \begin{prop} \label{prop:pregearcondition} Let $D$ be a circular quadrilateral which is symmetric in $\mathbb{R}$, has no vertices on $\mathbb{R}$, and has two interior angles equal to $\pi/2$ and two interior angles equal to $3\pi/2$. Assume that one tooth edge of $D$ lies in the upper and the other in the lower half-plane. Then $D$ is a pregear if and only if the full circles $C^+$, $C^-$ containing the tooth edges intersect in two points. \end{prop} \par \noindent\textit{Proof. 
} Suppose that $C^+$ and $C^-$ intersect at the points $b^-$ and $b^+$, which by symmetry are necessarily in $\mathbb{R}$. We can suppose that $b^-<b^+$. If the A-arc passes between $b^-$ and $b^+$ as in diagrams (5) and (6) in Figure \ref{fig:pregears1}, then $b^+$ is interior to $D$. Otherwise, if the B-arc passes between $b^-$ and $b^+$ as in diagrams (1), (2), (3) and (4) in Figure \ref{fig:pregears1}, then $b^-$ is interior to $D$. Let $T$ be a M\"obius transformation such that $T^{-1}(z)=(z-b^-)/(z-b^+)$. Then $T^{-1}(C^+)$ and $T^{-1}(C^-)$ are straight lines which pass through the origin, so $D=T(G)$, where $G$ is a gear domain (or the image of a gear domain under $z\mapsto1/z$). For the converse, when the extended tooth edges intersect in two points, $C^+\cap C^-=\{T(0),T(\infty)\}$ where $T$ is a M\"obius transformation sending some $G_{\beta,\gamma}$ to the pregear $D$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} \begin{prop}\label{prop:gearcondition} Let $D$ be a pregear. Then $D$ is a gear if and only if its tooth edges are straight, or equivalently, if the A- and B-arcs are concentric. \end{prop} \subsection{Degenerate gears and pregears} \begin{defi}\rm A circular quadrilateral is called a \emph{degenerate} gear (or pregear) if it is not a gear (or pregear) but is arbitrarily close to one. \end{defi} Naturally a degenerate gear is also a degenerate pregear. Degenerate pregears have a very rigidly defined structure. According to Proposition~\ref{prop:pregearcondition}, each edge of a pregear is a circular arc orthogonal to the two edges adjacent to it, and the tooth edges lie in circles $C^\pm$ which intersect in two points. It follows from this that in a degenerate pregear the circles $C^\pm$ must be tangent. Further, any circle orthogonal to both of them must pass through the point of tangency. From this one may deduce the following.
\begin{prop} \label{prop:degeneratepregears} Let $D$ be a bounded degenerate pregear symmetric in $\mathbb{R}$. Then the circles $C^\pm$ containing the tooth edges are tangent, say at some point $w^*$, and are orthogonal to the A- and B-circles (the circles containing the non-tooth edges), which are also tangent at $w^*$. \end{prop} \begin{figure} \caption{The two types of degenerate pregears} \label{fig:degeneratepregears} \end{figure} The two essentially different possibilities allowed by Proposition \ref{prop:degeneratepregears} are when the point of tangency $w^*$ of the tooth edges is at what we have been calling $f(-1)$ or at $f(1)$, i.e., according to whether the circles containing the A- and B-arcs are tangent internally or externally, as shown in Figure \ref{fig:degeneratepregears}. We will denote by $D_t^-$ and $D_t^+$ respectively the two degenerate pregears with conformal module $M(t)$, $0<t<\pi/2$. For definiteness in the following discussion we normalize these domains as follows. For $D_t^-$ we apply a real M\"obius transformation to assure that the A-circle is $\{|w+1/2|=1/2\}$ and the B-circle is $\{|w|=1\}$, so the point of internal tangency of $C^\pm$ is at $w=-1\in\partial D_t^-$. This normalization does not affect the conformal module. For $D_t^+$ we will assume that the A- and B-circles are $\{|w|=1\}$ and $\{|w-2|=1\}$ respectively, with external tangency at $w=1\in\partial D_t^+$. \begin{figure} \caption{Extreme cases of degenerate pregears, for $\eta\to0$ (above) and $\eta\to\infty$ (below).} \label{fig:degeneratingdegenerate} \end{figure} Let us note the limiting behavior of degenerate pregears as $t\to0$ or $t\to\pi/2$. Let $\eta$ denote the common radius of $C^\pm$. Consider first $D_t^-$. When $\eta\to0$ it can be seen that the conformal module $M(t)$ tends to $\infty$ (for example, for each $\eta$ apply a M\"obius transformation leaving the A-circle invariant and sending $C^\pm$ to circles of radius 1).
In the other direction, when $\eta\to\infty$, the A-arc is practically all of the A-circle, and the B-arc is a small arc of the B-circle near $1$, so $M(t)\to0$ (see Figure \ref{fig:degeneratingdegenerate}). Similarly, for $D_t^+$, as $\eta\to0$ we have $M(t)\to\infty$, while as $\eta\to\infty$ we find that $M(t)\to0$. \subsection{The boundary of the region of gearlikeness}\label{bounds} Our main object of study is the following. \begin{defi}\emph{ The \textit{region of gearlikeness} is the subset of $\mathbb{R}^2$ defined by \begin{eqnarray*} \mathcal{G} &=& \{ (t,\lambda)\colon\ R_{t,\lambda} \mbox{ is the Schwarzian derivative of a gear mapping}\}. \end{eqnarray*} } \end{defi} According to Theorem \ref{theo:lambdainterval}, \[ \mathcal{G} = \{ (t,\lambda)\colon\ 0<t<\frac{\pi}{2},\ \lambda_t^- < \lambda < \lambda_t^+ \}. \] One may obtain very rough bounds on $\lambda_t^-$ and $\lambda_t^+$ by the classical estimate of Nehari which says that if $(1-|z|^2)^2|{\mathcal{S}}_f(z)|>6$ for some $z\in\mathbb{D}$, then the mapping $f$ is not even univalent. Applying $z=0$ in (\ref{eq:Rtlambda}) gives the necessary condition $|16\lambda\cos t + \sin^2 t|<6$ for univalence, from which \begin{equation} \label{eq:neharibound} \lambda_t^- \ge -\frac{13-\cos 2t}{32\cos t} , \quad \lambda_t^+ \le \frac{11+\cos 2t}{32\cos t}. \end{equation} This approach was worked out in detail for the analogous case of symmetric quadrilaterals in \cite{BrP}, where the Nehari estimate gave rather better results. Indeed we give the following exact values for $\lambda_t^-$, $\lambda_t^+$. \begin{theo} \label{theo:extremlambdas} Let $0<t<\pi/2$.
Then the extreme values of $\lambda\in\mathbb{R}$ for which the rational function $R_{t,\lambda}$ of (\ref{eq:RtlambdafromRt1t2}) is gearlike are given by \[ \lambda_t^- = -\frac{1}{4} - \frac{1}{16}\left(\cos t + \frac{1}{\cos t}\right) ,\quad \lambda_t^+ = \frac{1}{4} - \frac{1}{16}\left(\cos t + \frac{1}{\cos t}\right). \] Further, $R_{t,\lambda_t^-}$, $R_{t,\lambda_t^+}$ are the Schwarzian derivatives of conformal mappings from the disk\/ $\mathbb{D}$ onto degenerate pregears of type $D_t^-$, $D_t^+$ respectively, as depicted in Figure \ref{fig:degeneratepregears}. \end{theo} The region of gearlikeness $\mathcal{G}$ is drawn in Figure \ref{fig:gearlikewithnehari}, together with the rough bound (\ref{eq:neharibound}). \begin{figure} \caption{Region of gearlikeness (gray, bounded by solid curves, which correspond to degenerate gears).} \label{fig:gearlikewithnehari} \end{figure} \par \noindent\textit{Proof. } For fixed $t$, as $\lambda\to\lambda_t^-$ from above and $\lambda\to\lambda_t^+$ from below, it is difficult to describe immediately the behavior of the parameters of the corresponding gear domains $G_{\beta,\gamma}$. However, via appropriate M\"obius transformations we can map these gears to pregears with the A- and B-circles normalized so that the limiting domains are the degenerate pregears $D_t^\pm$ (recall Proposition \ref{prop:degeneratepregears}). Then let us apply a real M\"obius transformation sending the point of tangency $w^*$ of the extended edges of the pregear to $\infty$. Since all image edges are now straight, the result $\widetilde D_t^\pm$ is an unbounded rectilinear quadrilateral of a very simple form (Figure \ref{fig:unboundedquadrilaterals}).
The limit mappings $\mathbb{D}\to\widetilde D_t^\pm$ are given by integrals of Schwarz-Christoffel type with prevertices at $\pm e^{\pm i t}$, and also at $-1$ or $1$ which are mapped to vertices of angle $\pi$ at $\infty$ (cf.\ \cite[Sec.\ 2.1]{DT} for an explanation of this technical detail): \begin{figure} \caption{Degenerate pregears with tangency mapped to $\infty$} \label{fig:unboundedquadrilaterals} \end{figure} \begin{eqnarray*} \label{eq:sch-chr} \widetilde f_t^-(z) &=& \int_0^z \frac{1}{(z+1)^2} \sqrt{ \frac{z^2 + (2\cos t)z + 1}{z^2 - (2\cos t)z + 1 } } \,dz,\\ \widetilde f_t^+(z) &=& \int_0^z \frac{1}{(z-1)^2} \sqrt{ \frac{z^2 + (2\cos t)z + 1}{z^2 - (2\cos t)z + 1 } }\,dz. \end{eqnarray*} From this the Schwarzian derivatives of the pregear mappings $f_t^\pm(z)\colon\mathbb{D}\to D_t^\pm$ are equal to those of the Schwarz-Christoffel integrals $\widetilde f_t^\pm(z)$. These are very easily calculated, and in the notation of (\,{\rm Re}\,f{eq:Rtlambda})--(\,{\rm Re}\,f{eq:psi0tpsi1t}) we have \begin{eqnarray*} \label{eq:schwdersch-chr} {\mathcal{S}}_{\widetilde f_t^-}(z) &=& -\frac{3+8\cos t+\cos2t}{2(z^4-(2\cos2t)z^2+1)} + \psi_{0,t}(z) ,\\ {\mathcal{S}}_{\widetilde f_t^+}(z) &=& -\frac{3-8\cos t+\cos2t}{2(z^4-(2\cos2t)z^2+1)} + \psi_{0,t}(z) \end{eqnarray*} Upon comparison with \[ R_{t,\lambda}(z) = \frac{16\lambda\cos t}{z^4-(2\cos2t)z^2+1}+ \psi_{0,t}(z) \] we obtain the formulas for $\lambda_t^-$ and $\lambda_t^+$. \hskip1em\raise3.5pt\hbox{\framebox[2mm]{\ }} \section{Discussion and conclusions} We have described in detail the structure of gear and pregear domains and shown how the general theory of conformal mapping to circular polygons can be used to relate the geometry of these domains to the auxiliary parameters of the corresponding conformal mappings. In particular, the conformal module $M(t)$ is a key element for understanding the degeneration of these domains. 
In order to calculate a gear domain numerically by the approach we have presented here, given a pair of parameters $(t,\lambda)$ it is necessary to find the appropriate self-mapping $T_q$ of (\ref{eq:Tq}). This amounts to knowing the point $q\in[-1,1]$ which is sent to the gear center. Since solving the Schwarzian differential equation (\ref{eq:SfRtlambda0}) a priori produces only a pregear, it is necessary to find a M\"obius transformation which maps this to a gear. This may be approached by solving numerically for the intersection points of $C^+\cap C^-$ of the circles containing the tooth edges, or by finding the curvature of the tooth edges numerically and then adjusting the parameters to assure that this curvature is zero. Once all this is obtained, the matter of how to find the unique $(t,\lambda)$ corresponding to prescribed geometric parameters $(\beta,\gamma)$ can be addressed. These questions will be worked out in the study \cite{BrP3} of numerical aspects of conformal mappings to gear domains. \noindent Philip R. Brown\\ Department of General Academics\\ Texas A\&M University at Galveston\\ PO Box 1675, Galveston, Texas 77553-1675 \\ \texttt{ [email protected] } \noindent R. Michael Porter \\ Departamento de Matem\'aticas, CINVESTAV--I.P.N.\\ Apdo.\ Postal 1-798, Arteaga 5 \\ Santiago de Queretaro, Qro., 76000 MEXICO \\ \texttt{[email protected]} \end{document}
\begin{document} \preprint{} \title{New Quasi-Exactly Solvable Sextic Polynomial Potentials} \author{Carl~M.~Bender\footnote{Permanent address: Department of Physics, Washington University, St. Louis, MO 63130, USA.} and Maria Monou} \affiliation{Blackett Laboratory, Imperial College, London SW7 2BZ, UK} \date{\today} \begin{abstract} A Hamiltonian is said to be {\it quasi-exactly solvable} (QES) if some of the energy levels and the corresponding eigenfunctions can be calculated exactly and in closed form. An entirely new class of QES Hamiltonians having sextic polynomial potentials is constructed. These new Hamiltonians are different from the sextic QES Hamiltonians in the literature because their eigenfunctions obey $\mathcal{PT}$-symmetric rather than Hermitian boundary conditions. These new Hamiltonians present a novel problem that is not encountered when the Hamiltonian is Hermitian: It is necessary to distinguish between the parametric region of unbroken $\mathcal{PT}$ symmetry, in which all of the eigenvalues are real, and the region of broken $\mathcal{PT}$ symmetry, in which some of the eigenvalues are complex. The precise location of the boundary between these two regions is determined numerically using extrapolation techniques and analytically using WKB analysis. \end{abstract} \pacs{03.65.Sq, 02.70.Hm, 02.90.+p} \maketitle \section{SEXTIC QES HAMILTONIANS} \label{sec1} The purpose of this paper is to introduce a new class of quasi-exactly solvable (QES) Hamiltonians having sextic polynomial potentials. While these new kinds of QES Hamiltonians have positive, real eigenvalues, they have not yet been discussed in the literature because they are not Hermitian. Instead, they are $\mathcal{PT}$ symmetric. The term {\em quasi-exactly solvable} (QES) is used to describe a quantum-mechanical Hamiltonian when a finite portion of its energy spectrum and associated eigenfunctions can be found exactly and in closed form \cite{Ush}. 
Typically, QES potentials depend on a parameter $J$, and for positive integer values of $J$ one can find exactly the first $J$ eigenvalues and eigenfunctions, usually of a given parity. It has been shown that QES systems can be classified by using an algebraic approach in which the Hamiltonian is expressed in terms of the generators of a Lie algebra \cite{Tur,Tur1,ST,GKO}. Perhaps the simplest example of a QES Hamiltonian having a sextic potential is \cite{BD1,BD2} \begin{equation} H=p^2+x^6-(4J-1)x^2, \label{eq1} \end{equation} where $J$ is a positive integer. For each positive integer value of $J$, the time-independent Schr\"odinger equation for this Hamiltonian, \begin{equation} -\psi''(x)+[x^6-(4J-1)x^2]\psi(x)=E\psi(x), \label{eq2} \end{equation} has $J$ even-parity eigenfunctions in the form of an exponential times a polynomial: \begin{equation} \psi(x)=e^{-x^4/4}\sum_{k=0}^{J-1}c_k x^{2k}. \label{eq3} \end{equation} The polynomial coefficients $c_k$ ($0\leq k\leq J-1$) satisfy the recursion relation \begin{equation} 4(J-k)c_{k-1}+Ec_k+2(k+1)(2k+1)c_{k+1}=0, \label{eq4} \end{equation} where we define $c_{-1}=c_{J}=0$. The simultaneous linear equations (\ref{eq4}) have a nontrivial solution for $c_0,\,c_1,\,...,\,c_{J-1}$ if the determinant of the coefficients vanishes. This determinant is a polynomial of degree $J$ in the variable $E$. The roots of this polynomial are all real and are the $J$ quasi-exact energy eigenvalues of the Hamiltonian (\ref{eq1}). Note that all of the QES eigenfunctions (\ref{eq3}) of $H$ in (\ref{eq1}) have the form of a decaying exponential $\exp(-\frac{1}{4}x^4)$ multiplying a polynomial. This is the standard form in the literature for the eigenfunctions of any QES Hamiltonian whose potential is a polynomial. The QES Hamiltonians associated with Hermitian Hamiltonians have been examined in depth and classified exhaustively \cite{Ush}. 
However, in 1998 new kinds of Hamiltonians that have positive real energy levels were discovered \cite{A,B}. These new kinds of Hamiltonians are not Hermitian ($H\neq H^\dag$) in the usual Dirac sense, where the Dirac adjoint symbol $\dag$ represents combined transpose and complex conjugation. Instead, these Hamiltonians possess $\mathcal{PT}$ symmetry $H=H^{\mathcal{PT}}$; that is, they remain invariant under combined space and time reflection. This new class of non-Hermitian Hamiltonians has been studied heavily \cite{C,D,E} and it has been shown that when the $\mathcal{PT}$ symmetry is not broken, such Hamiltonians define unitary theories of quantum mechanics \cite{BBJ}. The key difference between Hermitian Hamiltonians and complex, non-Hermitian, $\mathcal{PT}$-symmetric Hamiltonians is that with $\mathcal{PT}$-symmetric Hamiltonians the boundary conditions on the eigenfunctions (the solutions to the time-independent Schr\"odinger equation) are imposed in wedges in the complex plane. Sometimes these wedges do not include the real axis. (A detailed discussion of the complex asymptotic behavior of solutions to eigenvalue problems may be found in Ref.~\cite{BT}.) The discovery of $\mathcal{PT}$-symmetric Hamiltonians was followed immediately by the discovery of a new class of QES models. Until 1998 it was thought that if the potential associated with a QES Hamiltonian was a polynomial, then this polynomial had to be at least sextic; its degree could not be less than six. This property is in fact true for Hamiltonians that are Hermitian. However, in 1998 it was discovered that it is possible to have a QES {\it non-Hermitian} complex Hamiltonian whose potential is {\it quartic} \cite{BeBo}: \begin{equation} H=p^2-x^4+2iax^3+(a^2-2b)x^2+2i(ab-J)x. \label{eq5} \end{equation} Here, $a$ and $b$ are real parameters and $J$ is a positive integer.
For a large region of the parameters $a$ and $b$, the energy levels of this family of quartic Hamiltonians are real, discrete, and bounded below, and the quasi-exact portion of the spectra consists of the lowest $J$ eigenvalues. Like the eigenvalues of the Hamiltonian (\ref{eq1}), the lowest $J$ eigenvalues of these potentials are the roots of a $J$th-degree polynomial \cite{KM}. The reality of the eigenvalues of $H$ in (\ref{eq5}) is ensured by the boundary conditions that its eigenfunctions are required to satisfy. The eigenfunctions are required to vanish as $|x|\to\infty$ in the complex-$x$ plane inside of two wedges called {\it Stokes wedges}. The right wedge is bounded above and below by lines at $0^\circ$ and $-60^\circ$ and the left wedge is bounded above and below by lines at $-180^\circ$ and $-120^\circ$. The leading asymptotic behavior of the wave function inside these wedges is given by \begin{eqnarray} \psi(x)\sim e^{-ix^3/3}\quad(|x|\to\infty). \label{eq6} \end{eqnarray} The new class of QES sextic Hamiltonians reported in this paper has the form \begin{equation} H=p^2+x^6+2ax^4+(4J-1+a^2)x^2, \label{eq7} \end{equation} where $J$ is a positive integer and $a$ is a real parameter. These Hamiltonians are very similar in structure to those in (\ref{eq1}) and to the other QES sextic Hamiltonians discussed in the literature \cite{Ush}, but their distinguishing characteristic is that the asymptotic behavior of their eigenfunctions in the complex-$x$ plane is different. Let us examine first the asymptotic behavior of the eigenfunction solutions to the Schr\"odinger equation (\ref{eq2}). For brevity, we call the eigenfunctions in (\ref{eq3}) the {\it good} solutions to (\ref{eq2}) because they satisfy the physical requirement of being quadratically integrable. 
These good solutions decay exponentially like $\exp(-\frac{1}{4}x^4)$ as $x\to\pm\infty$, while the corresponding linearly independent {\it bad} solutions grow exponentially like $\exp(\frac{1}{4}x^4)$ as $x\to\pm\infty$. In the complex-$x$ plane the good solutions (\ref{eq3}) decay exponentially as $|x|\to\infty$ in two Stokes wedges that are centered about the positive and the negative real-$x$ axes. These wedges have an angular opening of $45^\circ$. The bad solutions grow exponentially in these wedges. At the upper and lower edges of these wedges the good and bad solutions cease to decay and to grow exponentially and they become purely oscillatory. As we move downward past the lower edges of these wedges, we enter a new pair of Stokes wedges. These wedges also have a $45^\circ$ angular opening and are centered about the lines ${\rm arg}\,x=-45^\circ$ and ${\rm arg}\,x=-135^\circ$. In these lower wedges, the good solutions grow exponentially as $|x|\to\infty$ and thus they behave like bad solutions. In the lower pair of wedges we can find solutions to the new class of Hamiltonians in (\ref{eq7}) that behave like good solutions. These new $\mathcal{PT}$-symmetric eigenfunctions have the general form of the exponential $\exp(\frac{1}{4}x^4+\frac{1}{2}ax^2)$ multiplied by a polynomial \cite{PARITY}: \begin{equation} \psi(x)=e^{x^4/4+ax^2/2}\sum_{k=0}^{J-1}c_k x^{2k}. \label{eq8} \end{equation} Hamiltonians having even sextic polynomial potentials are special because such Hamiltonians can be {\it either} Hermitian or $\mathcal{PT}$-symmetric depending on whether the eigenfunctions are required to vanish exponentially in the $45^\circ$ wedges containing the positive and negative real-$x$ axes or in the other pair of $45^\circ$ wedges contiguous to and lying just below these wedges in the complex-$x$ plane. The solutions for these two different boundary conditions are somewhat related.
Specifically, a good solution in one pair of wedges becomes a bad solution in the other pair of wedges. However, a bad solution in one pair of wedges does not become a good solution in the other pair of wedges, as we now explain. Given a good solution $\psi_{\rm good}(x)$ in one pair of wedges, we use the method of reduction of order \cite{BO} to find the bad solution. We seek a bad solution in the form $\psi_{\rm bad}(x)=\psi_{\rm good}(x)u(x)$, where $u(x)$ is an unknown function to be determined. Substituting the bad solution into the Schr\"odinger equation $-\psi''(x)+V(x)\psi(x)=E\psi(x)$, we get the differential equation satisfied by $u(x)$: \begin{equation} \psi_{\rm good}(x)u''(x)+2\psi_{\rm good}'(x)u'(x)=0. \label{eq9} \end{equation} We solve this equation by multiplying by the integrating factor $\psi_{\rm good}(x)$ and obtain the result \begin{equation} \psi_{\rm bad}(x)=\psi_{\rm good}(x)\left(\int^x ds\,\left[\psi_{\rm good}(s)\right]^{-2}+C\right), \label{eq10} \end{equation} where $C$ is an arbitrary constant. This bad solution always grows exponentially in the two wedges in which the good solution decays exponentially. How does this bad solution behave in the other pair of wedges in which the good solution grows exponentially? We can always choose the constant $C$ so that the bad solution vanishes as $|x|\to\infty$ in {\it one} of these two wedges. However, in the other of the two wedges, the bad solution will always grow exponentially. Thus, while the good solution becomes bad as we cross from one pair of wedges to the other, the bad solution does not become good. \section{Determination of the $\mathcal{PT}$ Boundary} \label{sec2} The difference between the Hermitian Hamiltonians in (\ref{eq1}) and the $\mathcal{PT}$-symmetric Hamiltonians in (\ref{eq7}) is that the Hermitian Hamiltonians always have real eigenvalues. 
The $\mathcal{PT}$-symmetric Hamiltonians in (\ref{eq7}) have real eigenvalues only if the $\mathcal{PT}$ symmetry is unbroken; if the $\mathcal{PT}$ symmetry is broken, some of the eigenvalues will be complex. Thus, it is crucial to determine whether the $\mathcal{PT}$ symmetry is broken. We will see that there is a range of values of the parameter $a$ in (\ref{eq7}) for which the energy levels are real and this is the region of unbroken $\mathcal{PT}$ symmetry. Outside of this region some of the eigenvalues appear as complex-conjugate pairs. Let us illustrate the difference between the regions of broken and unbroken $\mathcal{PT}$ symmetry by examining some special solutions of the Schr\"odinger equation \begin{equation} -\psi''(x)+[x^6+2ax^4+(4J-1+a^2)x^2]\psi(x)=E\psi(x), \label{eq11} \end{equation} corresponding to $H$ in (\ref{eq7}). First, consider the case $J=1$. The unique eigenfunction solution to (\ref{eq11}) of the form in (\ref{eq8}) is $\psi(x)= \exp(\frac{1}{4}x^4+\frac{1}{2}ax^2)$ and the corresponding eigenvalue is $E=-a$. Note that $E$ is real so long as $a$ is real. Thus, for $J=1$ there is no region of broken $\mathcal{PT}$ symmetry. Next, consider the case $J=2$. Now, there are two eigenfunctions. The two eigenvalues are given by \begin{equation} E=-3a\pm2\sqrt{a^2-2}. \label{eq12} \end{equation} Thus, there is now an obvious transition between real eigenvalues (unbroken $\mathcal{PT}$ symmetry) and complex eigenvalues (broken $\mathcal{PT}$ symmetry). Evidently, the eigenvalues are real if $a\geq \sqrt{2}$ or if $a\leq- \sqrt{2}$. We find that for any positive integer value of $J>1$, the eigenvalues $E$ for $H$ in (\ref{eq7}) are entirely real if $a^2$ is greater than some critical value $[a_{\rm crit}(J)]^2$ that depends on $J$. These critical values up to $J= 20$ are shown in Table \ref{t1}.
\begin{table}[!hbtp] \begin{center} \begin{tabular}{c|c|c} $J$\quad&\quad$[a_{\rm crit}(J)]^2$\quad&\quad$[a_{\rm crit}(J+1)]^2-[a_{\rm crit}(J)]^2$\quad\\ \hline 2 & 2 \\ 3 & 10.5874700363 & \raisebox{1ex}{8.5874700363}\\ 4 & 20.5515334397 & \raisebox{1ex}{9.9640634033}\\ 5 & 31.0534552654 & \raisebox{1ex}{10.5019218257}\\ 6 & 41.8519569727 & \raisebox{1ex}{10.7985017073}\\ 7 & 52.8409390328 & \raisebox{1ex}{10.9889820601}\\ 8 & 63.9636348939 & \raisebox{1ex}{11.1226958611}\\ 9 & 75.1858755649 & \raisebox{1ex}{11.2222406710}\\ 10 & 86.4853951835 & \raisebox{1ex}{11.2995196186}\\ 11 & 97.8468072286 & \raisebox{1ex}{11.3614120451}\\ 12 & 109.2590335351 & \raisebox{1ex}{11.4122263065}\\ 13 & 120.7137913596 & \raisebox{1ex}{11.4547578245}\\ 14 & 132.2047259144 & \raisebox{1ex}{11.4909345548}\\ 15 & 143.7268461067 & \raisebox{1ex}{11.5221201923}\\ 16 & 155.2761720922 & \raisebox{1ex}{11.5493064512}\\ 17 & 166.8494020446 & \raisebox{1ex}{11.5732299524}\\ 18 & 178.4439117241 & \raisebox{1ex}{11.5945096795}\\ 19 & 190.0574079492 & \raisebox{1ex}{11.6134962251}\\ 20\quad &$\quad 201.6880273595\quad$ & \raisebox{1ex}{11.6306193103}\\ \end{tabular} \caption{Critical values, $[a_{\rm crit}(J)]^2$, of the parameter $a^2$ listed as a function of $J$. When $a^2$ is greater than this critical value, the eigenvalues of the $\mathcal{PT}$-symmetric Hamiltonian $H$ in (\ref{eq7}) are all real. Thus, this is the region of unbroken $\mathcal{PT}$ symmetry. The $\mathcal{PT}$ symmetry is broken when $a^2<[a_{\rm crit}(J)]^2$. Note that the differences between successive values of $[a_{\rm crit}(J)]^2$ appear to be approaching a limit and this is indeed the case. In fact, the numerical value of this limit is exactly 12. Thus, for large $J$ the critical values have the simple asymptotic behavior $[a_{\rm crit}(J)]^2\sim12J$. \label{t1}} \end{center} \end{table} Observe from Table \ref{t1} that the critical values of $[a_{\rm crit}]^2$ grow monotonically with increasing $J$. 
We have therefore also calculated the differences between successive critical values of $a^2$. These differences also grow monotonically with increasing $J$, but they appear to be leveling off and seem to be approaching a limiting value. To see whether the differences are indeed approaching a limiting value as $J$ increases, we have plotted in Fig.~\ref{f1} these differences as a function of $1/J$. This plot suggests that the differences tend to the value $12$ as $J\to\infty$. \begin{figure} \caption{The differences $[a_{\rm crit}(J+1)]^2-[a_{\rm crit}(J)]^2$ between successive critical values of $a^2$, plotted as a function of $1/J$. The plot suggests that the differences approach the limiting value $12$ as $J\to\infty$.} \label{f1} \end{figure} To determine whether it is true that these differences really do approach the limit $12$, it is necessary to extrapolate the sequence of differences to its value at $J=\infty$. To do so we have calculated the Richardson extrapolants \cite{BO} of the sequence of differences. The \emph{first} Richardson extrapolants, $R_1(J)$, of these differences are listed in Table \ref{t2}. Observe that the sequence $R_1(J)$ rises more slowly and quite convincingly appears to be approaching the value $12$. The differences $R_1(J+1)-R_1(J)$ between successive Richardson extrapolants are also shown. 
\begin{table}[!hbtp] \begin{center} \begin{tabular}{c|c|c} $J$ & $R_1(J)$ series & $R_1(J+1)-R_1(J)$ \\ \hline 1 & 11.3406567704\\ 2 &11.5776386705 & \raisebox{1ex}{0.23698190}\\ 3 &11.6882413518 & \raisebox{1ex}{0.11060268}\\ 4 &11.7509034718 & \raisebox{1ex}{0.06266212}\\ 5 &11.7912648657 & \raisebox{1ex}{0.04036140}\\ 6 &11.8195095305 & \raisebox{1ex}{0.02824466}\\ 7 &11.8404722516 & \raisebox{1ex}{0.02096272}\\ 8 &11.8565514577 & \raisebox{1ex}{0.01607921}\\ 9 &11.8695546582 & \raisebox{1ex}{0.01300320}\\ 10 &11.8800730055 & \raisebox{1ex}{0.01051835}\\ 11 &11.8888785336 & \raisebox{1ex}{0.00880552}\\ 12 &11.8963479526 & \raisebox{1ex}{0.00746942}\\ 13 &11.9027717144 & \raisebox{1ex}{0.00642376}\\ 14 &11.9083609386 & \raisebox{1ex}{0.00558923}\\ 15 &11.9132728866 & \raisebox{1ex}{0.00491195}\\ 16 &11.9176271918 & \raisebox{1ex}{0.00435430}\\ \end{tabular} \caption{First Richardson extrapolants $R_1(J)$ of the sequence of differences $[a_{\rm crit}(J+1)]^2-[a_{\rm crit}(J)]^2$ taken from Table \ref{t1}. Notice that $R_1(J)$ rises slowly and smoothly towards its limiting value $12$. The differences between successive Richardson extrapolants are also listed. \label{t2}} \end{center} \end{table} To test further the hypothesis that $R_1(J)$ tends to the limiting value $12$ as $J\to\infty$, we have calculated successive Richardson extrapolants of the Richardson extrapolants $R_1(J)$ in Table \ref{t2}. The successive extrapolants are listed in Table \ref{t3} and they provide very strong numerical evidence that $\lim_{J\to\infty}\left([a_{\rm crit}(J+1)]^2-[a_{\rm crit}(J)]^2\right)= 12$. From this we conclude that for large $J$ the asymptotic behavior of the critical value of $a^2$ is given by \begin{equation} [a_{\rm crit}(J)]^2\sim12J\quad(J\to\infty). 
\label{eq13} \end{equation} \begin{table}[!hbt] \begin{center} \begin{tabular}{c|c|c|c|c} J & $R_1(J)$ & R of $R_1(J)$ & R of R of $R_1(J)$ & R of R of R of $R_1(J)$ \\ \hline 1 & 11.3406567704 & 11.8146205706 & 12.0042728584 & 11.9912792745 \\ 2 & 11.5776386705 & 11.9094467145 & 11.9977760665 & 11.9869646719 \\ 3 & 11.6882413518 & 11.9388898318 & 11.9941722683 & 11.9887732331 \\ 4 & 11.7509034718 & 11.9527104409 & 11.9928225095 & 11.9978459499 \\ 5 & 11.7912648657 & 11.9607328547 & 11.9938271976 & 11.9483630075 \\ 6 & 11.8195095305 & 11.9662485785 & 11.9862498326 & 12.1168065542 \\ 7 & 11.8404722516 & 11.9691059005 & 12.0049007928 & 11.8377031045 \\ 8 & 11.8565514577 & 11.9735802620 & 11.9840010818 & 12.0982460411 \\ 9 & 11.8695546582 & 11.9747381309 & 11.9966949661 & 11.9726356846 \\ 10& 11.8800730055 & 11.9769338144 & 11.9942890380 & 11.9983156753 \\ 11& 11.8888785336 & 11.9785115620 & 11.9946550959 \\ 12& 11.8963479526 & 11.9798568565 \\ 13& 11.9027717144 \\ \end{tabular} \caption{Repeated Richardson extrapolants of the sequence of Richardson extrapolants in Table \ref{t2}. This table provides strong and convincing numerical evidence that Richardson extrapolants $R_1(J)$ tend to the limiting value $12$ as $J\to\infty$. This implies that for large $J$ the critical values of $a^2$ grow linearly with $J$. See Eq.~(\ref{eq13}). \label{t3}} \end{center} \end{table} Our numerical analysis provides convincing evidence that for large $J$ the boundary between the regions of broken and unbroken $\mathcal{PT}$ symmetry is given by the asymptotic behavior in (\ref{eq13}). We will now verify this result analytically by using WKB methods \cite{BO}. From our numerical analysis we know that the first eigenvalues to become complex conjugate pairs are always the highest, and this implies that WKB is the appropriate tool for investigating the $\mathcal{PT}$ boundary for large $J$. 
For the potential $V(x)=x^6+2ax^4+(a^2+4J-1)x^2$, the leading-order WKB quantization condition, valid for large $n$, is \begin{equation} (2n+\textstyle{\frac{1}{2}})\pi\sim\int_{T_1}^{T_2}dx\sqrt{E_n-V(x)}\quad (n\to\infty), \label{eq14} \end{equation} where $T_{1,2}$ are the turning points. Note that there is a factor of $2n+ \frac{1}{2}$, rather than $n+\frac{1}{2}$, on the left side of this asymptotic relation because we are counting {\it even}-parity eigenfunctions. For large $n=J$ we approximate the integral in (\ref{eq14}) by making the asymptotic substitution $a\sim\sqrt{J}b$, where $b$ is a number to be determined. In order to verify the asymptotic behavior in (\ref{eq13}), we must show that $b=\sqrt{12}$. We then make the scaling substitutions \begin{equation} x=yJ^{1/4}\quad{\rm and}\quad E_J\sim FJ^{3/2} \label{eq15} \end{equation} because for large $J$ we can then completely eliminate all dependence on $J$ from the integral. We thus obtain the condition \begin{equation} 2\pi=\int_{y=U_1}^{U_2}dy\sqrt{F-[y^6+2by^4+(b^2+4)y^2]}, \label{eq16} \end{equation} where $U_{1,2}=T_{1,2}J^{-1/4}$ are zeros of the algebraic equation \begin{equation} y^6+2by^4+(b^2+4)y^2-F=0. \label{eq17} \end{equation} Next, following the analysis in Ref.~\cite{BD2}, we assume that in this large-$J$ limit the polynomial in (\ref{eq17}) factors: \begin{equation} (y^2-\alpha)^2(y^2-\beta)=0. \label{eq18} \end{equation} The correctness of this factorization assumption will be verified in the subsequent analysis. We then expand (\ref{eq18}); \begin{equation} y^6-y^4(\beta+2\alpha)+y^2(\alpha^2+2\alpha\beta)-\alpha^2\beta=0. \label{eq19} \end{equation} Comparing coefficients of like powers of $y$ in (\ref{eq17}) and (\ref{eq19}), we obtain the three equations \begin{equation} F=\alpha^2\beta, \label{eq20} \end{equation} \begin{equation} 2b=-2\alpha-\beta, \label{eq21} \end{equation} \begin{equation} b^2+4=\alpha^2+2\alpha\beta. \label{eq22} \end{equation} Subtracting the square of Eq. 
(\ref{eq21}) from three times (\ref{eq22}), we get $\beta-\alpha=\pm\sqrt{b^2-12}$, and solving this equation simultaneously with (\ref{eq21}), we get expressions for $\alpha$ and $\beta$: \begin{equation} 3\alpha=-2b-\sqrt{b^2-12}, \label{eq23} \end{equation} \begin{equation} 3\beta=-2b+2\sqrt{b^2-12}. \label{eq24} \end{equation} We then substitute (\ref{eq23}) and (\ref{eq24}) into (\ref{eq20}) to obtain \begin{equation} F=-\textstyle{\frac{2}{27}}(b-\sqrt{b^2-12})(2b+\sqrt{b^2-12})^2. \label{eq25} \end{equation} Finally, we calculate the value of the number $b$. Our procedure is simply to show that the special choice $b^2=12$ is consistent with the limiting WKB integral in (\ref{eq16}). With this choice we can see from (\ref{eq23}) and (\ref{eq24}) that $\alpha=\beta=4/\sqrt{3}$ and that (\ref{eq16}) reduces to \begin{equation} 2\pi=\int_{y=-\sqrt{\alpha}}^{\sqrt{\alpha}}dy\,(\alpha-y^2)^{3/2}= 2\int_{y=0}^{\sqrt{\alpha}}dy\,(\alpha-y^2)^{3/2}. \label{eq26} \end{equation} We simplify this integral by making the substitution $y=\sqrt{u\alpha}$, and obtain \begin{equation} \textstyle{\frac{3}{8}}\pi=\int_{u=0}^1 du\,u^{-1/2}(1-u)^{3/2}, \label{eq27} \end{equation} which is an exact identity. Thus, we may conclude that $b^2=12$. This verifies the asymptotic formula in (\ref{eq13}) for the location of the $\mathcal{PT}$ boundary. Furthermore, we can see that $F=\frac{64}{9}\sqrt{3}\approx12.3$. Thus, we obtain a formula for the large-$J$ asymptotic behavior of the largest QES eigenvalue at the $\mathcal{PT}$ boundary: \begin{equation} E_J\sim\textstyle{\frac{64}{9}}\sqrt{3}J^{3/2}\quad(J\to\infty). \label{eq28} \end{equation} The difference between this WKB calculation and that done in Ref.~\cite{BD2} for the Hermitian QES sextic Hamiltonian (\ref{eq1}) is that here we have a critical value, $b=\sqrt{12}$, or $a\sim\sqrt{12J}$. 
This critical value defines the boundary between the regions of broken and unbroken $\mathcal{PT}$ symmetry for the $\mathcal{PT}$-symmetric Hamiltonian in (\ref{eq7}). There is no analog of this boundary for Hermitian Hamiltonians. \begin{acknowledgments} We are grateful to Dr. H. F. Jones for giving us valuable advice with regard to our WKB approximations. CMB is grateful to the Theoretical Physics Group at Imperial College for its hospitality and he thanks the U.K. Engineering and Physical Sciences Research Council, the John Simon Guggenheim Foundation, and the U.S.~Department of Energy for financial support. MM gratefully acknowledges the financial support of ???. \end{acknowledgments} \begin{thebibliography}{99} \bibitem{Ush} See A.~G.~Ushveridze, {\sl Quasi-Exactly Solvable Models in Quantum Mechanics} (Institute of Physics, Bristol, 1993) and references therein. \bibitem{Tur} A.~V.~Turbiner, Sov.~Phys., JETP {\bf 67}, 230 (1988), Contemp.~Math.~{\bf 160}, 263 (1994), and M.~A.~Shifman, Contemp.~Math.~{\bf 160}, 237 (1994). \bibitem{Tur1} A.~V.~Turbiner, Comm. Math. Phys.~{\bf 118}, 467 (1988). \bibitem{ST} M.~A.~Shifman and A.~V.~Turbiner, Comm. Math. Phys. {\bf 126}, 347 (1989). \bibitem{GKO} A.~Gonz\'alez-L\'opez, N.~Kamran, and P.~J.~Olver, Comm. Math.~Phys.~{\bf 153}, 117 (1993) and Contemp.~Math.~{\bf 160}, 113 (1994). \bibitem{BD1} C.~M.~Bender and G.~V.~Dunne, J.~Math.~Phys.~{\bf 37}, 6 (1996). \bibitem{BD2} C.~M.~Bender, G.~V.~Dunne, and M.~Moshe, Phys.~Rev.~A {\bf 55}, 2625 (1997). \bibitem{A} C.~M.~Bender and S.~Boettcher, Phys. Rev.~Lett. {\bf 80}, 5243-5246 (1998). \bibitem{B} C. M. Bender, S. Boettcher, and P. N. Meisinger, J. Math. Phys. {\bf 40}, 2201-2229 (1999). \bibitem{C} P.~Dorey, C.~Dunning and R.~Tateo, J.~Phys.~A {\bf 34} L391 (2001); {\em ibid}. {\bf 34}, 5679 (2001). \bibitem{D} G.~L\'evai and M.~Znojil, J.~Phys. A{\bf 33}, 7165 (2000); B.~Bagchi and C.~Quesne, Phys.~Lett. A{\bf 300}, 18 (2002); Z. Ahmed, Phys. Lett. 
A{\bf 294}, 287 (2002); G.~S.~Japaridze, J.~Phys.~A{\bf 35}, 1709 (2002); A.~Mostafazadeh, J.~Math.~Phys. {\bf 43}, 205 (2002); {\em ibid}; {\bf 43}, 2814 (2002); D.~T.~Trinh, PhD Thesis, University of Nice-Sophia Antipolis (2002), and references therein. \bibitem{E} An excellent summary of the current status and the background of non-Hermitian and $\mathcal{PT}$-symmetric Hamiltonians may be found in F.~Kleefeld, hep-th/0408028 and hep-th/0408097. \bibitem{BBJ} C. M. Bender, D. C. Brody, and H. F. Jones, Phys. Rev. Lett. {\bf 89}, 270401 (2002) and Am. J. Phys. {\bf 71}, 1095 (2003). \bibitem{BT} C.~M.~Bender and A.~Turbiner, Phys.~Lett.~A {\bf 173}, 442 (1993). \bibitem{BeBo} C. M. Bender and S. Boettcher, J. Phys. A: Math. Gen. {\bf 31}, L273 (1998). \bibitem{KM} For a nonpolynomial QES $\mathcal{PT}$-symmetric Hamiltonian see A.~Khare and B.~P.~Mandal, Phys.~Lett.~A {\bf 272}, 53 (2000). \bibitem{PARITY} Notice that $\psi(x)$ in (\ref{eq8}) is a function of $x^2$ and thus all of the QES wave functions are symmetric under parity reflection ($x\to- x$). In general, $\mathcal{PT}$-symmetric Hamiltonians, such as $H=p^2-x^4$ are not symmetric under parity reflection because the parity operator $\mathcal{P}$ changes the complex domain of the Hamiltonian operator. As a consequence, the expectation value of the $x$ operator is nonvanishing. [See C.~M.~Bender, P.~N.~Meisinger, and H.~Yang, Phys.~Rev.~D 63, 45001 (2001).] Nevertheless, the special QES eigenfunctions in (\ref{eq8}) {\it are} parity-symmetric. We believe that the parity operator may therefore be used to distinguish between the QES and the non-QES portions of the Hilbert space. \bibitem{BO} C.~M.~Bender and S.~A.~Orszag, {\it Advanced Mathematical Methods for Scientists and Engineers}, (McGraw-Hill, New York, 1978), Chap.~10. \end{thebibliography} \end{document}
\begin{document} \title{A fresh look at ignorability for likelihood inference} \begin{abstract} When data are incomplete, a random vector $Y$ for the data process, together with a binary random vector $R$ for the process that causes missing data, are modelled jointly. We review conditions under which $R$ can be ignored for drawing likelihood inferences about the distribution for~$Y$. The standard approach of Rubin\;(1976) and Seaman\;et.\,al.\,(2013),\,\textit{Statist.\,Sci.},\,\textbf{28}:2\;pp.\,257--268 emulates complete-data methods exactly, and directs an investigator to choose a full model in which missing at random (MAR) and distinctness of parameters hold if the goal is not to use a full model. Another interpretation of ignorability lurking in the literature considers ignorable likelihood estimation independently of any model for the conditional distribution $R$ given~$Y$. We discuss shortcomings of the standard approach, and argue that the alternative gives the `right' conditions for ignorability because it treats the problem on its merits, rather than emulating methodology developed for when the investigator is in possession of all of the data. \vspace*{2mm} \noindent \textit{Key words and phrases:} incomplete data, missing data, ignorable, ignorability, missing at random, distinctness of parameters, likelihood theory. \end{abstract} \section{Introduction} \label{Sect:Intro} Missing data are a common problem in empirical research, and particularly so in medical and epidemiological studies. A central feature of the statistical methods for dealing with incomplete data pertains to conditions under which the random vector for the process causing the missing data need not be modelled. The modern framework was introduced by Rubin\;(1976). 
If $Y$ is a random vector representing the data generation process, Rubin\;(1976) introduced the concept of modelling missingness through a corresponding binary random vector $R$ of the same dimension as~$Y$, together with a joint probability distribution for $(Y, R)$. The realisations of $Y$ comprise complete data, both observed and unobserved, and realisations of $R$ determine which values of $Y$ are observed and which are missing. The conditional distribution for $R$ given~$Y$ represents the process that causes missing data, hereafter called the \textbf{missingness process}. Given a model of joint densities for $(Y, R)$, Rubin (1976) identified conditions on the model for which the same inferences result whether the full model is used or the model for the missingness process is discarded. Rubin considered direct likelihood, Bayesian and sampling distribution paradigms, but not frequentist likelihood inference specifically. Seaman\;et.\,al.\,(2013) reviewed use of the ignorability conditions in the literature to promote unity amongst writers, and adapted Rubin's conditions specifically for frequentist likelihood inference. We refer to the conditions derived in these works as the \textbf{standard conditions} for ignorability. Seaman\;et.\,al.\,(2013,\,p.\,266) identified an alternative interpretation of ignorability lurking in the literature. This approach treats ignorable likelihood as an estimation process in its own right, independently of any model for the missingness process. Our aim is to review the two approaches, to explain some shortcomings of the standard approach, and to argue that the alternative interpretation gives the `right' conditions for ignorability because it treats the problem on its merits, rather than adopting methodology developed for when the investigator is in possession of all of the data. 
\section{Ignorability for direct likelihood inference (Rubin,\,1976)} \label{Sect:Rubin} We retain the notation $Y$\;and\;$R$ from Section~\ref{Sect:Intro}. The starting point for ignorability in the sense of Rubin (1976) is a (full) model of joint densities for $(Y, R)$: \begin{equation} \mathcal{M}_g \,=\, \{\, f_\theta(\mathbf{y})\,g_\psi(\mathbf{r} |\, \mathbf{y}) : (\theta, \psi)\in\Delta \,\}. \label{Eq:MgModel} \end{equation} If $\Theta$ and $\Psi$ are the images of the projections $(\theta,\psi)\mapsto\theta$ and $(\theta,\psi)\mapsto\psi$, respectively, then the \textbf{data model} of $\mathcal{M}_g$ is \begin{equation} \mathcal{M}_s \,=\, \{\, f_\theta(\mathbf{y}) : \theta\in\Theta \,\} \label{Eq:MsModel} \end{equation} and the \textbf{missingness model} is $\{\,g_\psi(\mathbf{r}|\,\mathbf{y})\,:\,\psi\in\Psi\,\}$. We call each density in the missingness model a \textbf{missingness mechanism}. Recall from Section~\ref{Sect:Intro} that the two conditions under which the missingness model could be discarded and identical direct likelihood inferences drawn from a model for the observed data derived from\;(\ref{Eq:MsModel}) were called MAR and distinctness of parameters. We consider these in turn. Given a realisation $(\mathbf{y}, \mathbf{r})$ of the random vector~$(Y, R)$, let~$\mathbf{y}^{ob(\mathbf{r})}$ and~$\mathbf{y}^{mi(\mathbf{r})}$ denote the vectors of observed and missing components of~$\mathbf{y}$, respectively. 
Analysis of the observed data $(\mathbf{y}^{ob(\mathbf{r})}, \mathbf{r})$ then proceeds by restriction of the random vector $(Y, R)$ to the event \begin{equation} \{\, (\mathbf{y}_*, \mathbf{r}_*) \,:\, \mathbf{r}_* = \mathbf{r} \text{ and } \mathbf{y}_*^{ob(\mathbf{r})} = \mathbf{y}^{ob(\mathbf{r})} \,\} \label{Eq:ODE} \end{equation} comprising all datasets $\mathbf{y}_*$ (together with~$\mathbf{r}$) which correspond to~$(\mathbf{y}, \mathbf{r})$ on the observed data values~$\mathbf{y}^{ob(\mathbf{r})}$ but may differ on the unobserved values~$\mathbf{y}^{mi(\mathbf{r})}$. A missingness mechanism $g_\psi(\mathbf{r} |\, \mathbf{y})$ is called \textbf{missing at random} (MAR) with respect to~$(\mathbf{y}, \mathbf{r})$ if $g_\psi(\mathbf{r} |\, \mathbf{y})$ is a constant function on the set~(\ref{Eq:ODE}), where $g$ is considered to be a function of $\mathbf{y}$ with $\mathbf{r}$ held fixed. Rubin (1976) defined missing at random to be a property of the full model (\ref{Eq:MgModel}) by requiring each density in the model to be MAR with respect to~$(\mathbf{y}, \mathbf{r})$. The terminology \textbf{realised MAR} was introduced in Seaman\;et.\,al.\,(2013) to distinguish this weaker form of MAR from a stronger form more suited to frequentist likelihood inference: a missingness mechanism is called \textbf{everywhere MAR} if it is realised MAR with respect to all possible data vectors and response patterns, $(\mathbf{y}, \mathbf{r})$, not just the realised pair representing the observed and missing data. 
If every missingness mechanism in (\ref{Eq:MgModel}) is realised MAR with respect to the realised data vector~$\mathbf{y}$ and response pattern~$\mathbf{r}$, then the likelihood function for that part of (\ref{Eq:MgModel}) pertaining to the observable data factorizes as \begin{equation} L_g(\theta, \psi) \,=\, \int f_\theta(\mathbf{y}) \, g_\psi(\mathbf{r} |\, \mathbf{y}) \, \text{d}\mathbf{y}^{mi(\mathbf{r})} \,=\, g_\psi(\mathbf{r} |\, \mathbf{y}) \, \int f_\theta(\mathbf{y}) \, \text{d}\mathbf{y}^{mi(\mathbf{r})}\,. \label{Eq:MgLikelihood} \end{equation} If, in addition, distinctness of parameters holds for (\ref{Eq:MgModel}), then the (maximal) domain of each mapping $\theta\mapsto L_g(\theta,\psi)$ is the same for every value of~$\psi$, and likelihood estimates for $\theta$ can be obtained by maximising the simpler function \begin{equation} L_s(\theta) \,=\, \int f_\theta(\mathbf{y})\, \text{d}\mathbf{y}^{mt(\mathbf{r})}\,. \label{Eq:MsLikelihood} \end{equation} over its full domain. We refer the reader to Rubin (1976), Seaman\;et.\,al.\,(2013) and Mealli and Rubin (2015) for additional details. \vspace*{3mm} \textbf{An aside.} In (\ref{Eq:MsLikelihood}) we have used $\mathbf{y}^{mt(\mathbf{r})}$ instead of $\mathbf{y}^{mi(\mathbf{r})}$ to denote the unobserved variables because overlaying the response pattern $\mathbf{r}$ onto the marginal distribution for $Y$ involves a different relationship between $R$ and $Y$ compared to $(Y, R)$. The former does not respect the stochastic relationship encoded in the random vector~$(Y, R)$ because it involves holding $R$ fixed and allowing the marginal distribution for~$Y$ to vary. The `t' in $\mathbf{y}^{mt(\mathbf{r})}$ can be interpreted to mean `these are the variables of the marginal distribution for $Y$ that were missing this time.' 
Note also that we do \textbf{not} do the same on the right hand side of (\ref{Eq:MgLikelihood}) because the set being integrated over is all of $R\times Y,$ whereas in (\ref{Eq:MsLikelihood}) it is only~$Y$. See Galati (2019) for further details. $\qedsymbol$ \vspace*{3mm} It is helpful to view the two ignorability conditions, MAR and distinctness of parameters, in a hierarchy as follows: \begin{itemize} \item[(\textbf{a})] Does the investigator wish to enforce a relationship $\Delta\subsetneq\Theta\times\Psi$ between $\theta$ and $\psi$ for models (\ref{Eq:MgModel}) when estimating~$\theta$? If so, the analyst has no option but to consider only full models for which distinctness of parameters \textbf{does not} hold, irrespective of whether or not densities in the model are realised MAR. \item[(\textbf{b})] If the answer to (a) is no, then is every missingness mechanism in the model (\ref{Eq:MgModel}) realised MAR? If so, the analyst can discard the missingness mechanisms from the model. \end{itemize} Viewed in this way, ignorability for direct likelihood inferences is seen to be comprised of two components, the distinctness of parameters criterion, which really is just a statement that the investigator has no relationship of the form $\Delta\subsetneq\Theta\times\Psi$ to enforce when estimating~$\theta$, and the MAR component, which establishes the relationship between (\ref{Eq:MgLikelihood}) and (\ref{Eq:MsLikelihood}) for fixed~$\psi$. Note also this does \textbf{not} mean that the MAR condition is of no use when a relationship of the form $\Delta\subsetneq\Theta\times\Psi$ \textbf{is} enforced (that is, when distinctness of parameters does \textbf{not} hold). In this case, the MAR condition allows a likelihood $L(\psi)=g_\psi(\mathbf{r}|\,\mathbf{y})$ for the missingness model to be maximised independently of~$\theta$, and then $\Delta$ can be used to determine an appropriate restriction on the domain of (\ref{Eq:MsLikelihood}) for estimating~$\theta$. 
We will call full models (\ref{Eq:MgModel}) satisfying the distinctness of parameters and MAR criteria \textbf{ignorable models}, and we emphasise that Rubin's (1976) ignorability theory for direct likelihood inference identifies a subset of models of the form (\ref{Eq:MgModel}), the ignorable models, from which an investigator can choose their model if they wish to draw inferences for $\theta$ free from the inconvenience of needing to model the missingness process explicitly. \section{Limitations caused by missing data} \label{Sect:Limitations} Ignorability is often presented as having something to do with drawing valid inferences. For example, Rubin\;(1976,\;Summary) states that the ignorability conditions are ``\textit{the weakest general conditions under which ignoring the process that causes missing data always leads to correct inferences.''} `Correct inferences' in this instance seems to mean that inferences will be drawn from the correct likelihood given the chosen model. It has nothing to do with whether or not the choice of model is valid for the given data, or whether or not valid conclusions will be drawn from the data. In the model-based paradigms, validity in the latter sense mentioned above is a subjective assessment of the goodness of fit of the model to the data. If the model fits poorly, then in some sense the inferences are not justifiable, and if the model fits too well, then the model becomes more a description of the specific realised dataset rather than a description of the process which generated the data. When data are incomplete, the philosophy of the model-based likelihood paradigm breaks down in two essential ways. Firstly, it is impossible to validate the investigator's choice of missingness model against the data because the data required for this are missing (Molenberghs\;et.\,al.\,(2008)). 
So consideration of a missingness model becomes hypothetical in a manner analogous to the frequentist paradigm's hypothetical assumptions about $(Y, R)$. In the literature, this feature of incomplete data methods typically is referred to as `untestable assumptions.' The second way in which the paradigm breaks down is that it becomes impossible to validate even a model for the observed data against the observed data. The reason for this is a little more subtle. If the possible missingness patterns realisable from $R$ are $\mathbf{r}_1, \mathbf{r}_2, \ldots, \mathbf{r}_k$, and these occur with marginal probabilities $p_1, p_2, \ldots, p_k$, then a density $f(\mathbf{y})$ for the marginal distribution for $Y$ can be written as the mixture \begin{equation} f(\mathbf{y}) \;=\; \sum_{i=1}^k\;p_i\,p(\mathbf{y}|\,\mathbf{r}_i) \label{Eq:PatternMixture} \end{equation} where $p(\mathbf{y}|\,\mathbf{r}_i)$ is the conditional density for $Y$ given $R=\mathbf{r}_i$. If $\mathbf{r}_1=(1,1,\ldots,1)$ is the pattern for a complete case, then $p(\mathbf{y}|\,\mathbf{r}_1)$ gives the distribution for the complete cases, which may differ from the marginal distribution for $Y$ given by~$f(\mathbf{y})$. And in general, for the $i^{th}$ missingness pattern~$\mathbf{r}_i$, the distribution of the $\mathbf{y}$ values realised with $\mathbf{r}_i$ is $p(\mathbf{y}|\,\mathbf{r}_i)$ and \textbf{not} $f(\mathbf{y})$. Additionally, the distribution for the \textit{observed} values realised with $\mathbf{r}_i$ is given by the marginal density $\int\,p(\mathbf{y}|\,\mathbf{r}_i)\,d\mathbf{y}^{mi(\mathbf{r}_i)}$. This marginalisation stratifies the distribution for $Y$ into pieces of different shapes such that the complete data underlying each shape typically is not distributed according to~$f(\mathbf{y})$, and the differing shapes make it impossible to mix them back together to recover $f(\mathbf{y})$ via~(\ref{Eq:PatternMixture}). 
The result is that the observed data comprise a collection of subsamples from different distributions, no one of which can be used to assess the fit of the data model, and the irregular shapes prevent the subsamples from being pooled together. To overcome the difficulties associated with checking the fit of the data model to the observed data, we note that imputation-based methods combined with posterior predictive checks have been considered, but we do not elaborate on these techniques. The points we wished to make are summarised below: \begin{itemize} \item[(\textbf{c})] When carrying over a model-based philosophy to the case of incomplete data, the types of hypothetical considerations typically rejected by these philosophies become inescapable due to the impossibility of validating the fit of the missingness model to the observed data. \item[(\textbf{d})] Validating the fit of the data model to the observed data becomes substantially more complicated when the data are incomplete. \end{itemize} \section{The middle road: When $R$ is MAR} \label{Sect:RisMAR} With any data analysis, the intention typically is to model the data to answer some substantive question under investigation. But incompleteness of the data impedes analysis in two ways: the dataset has an irregular shape, and the underlying missingness process can distort the distribution of the data that the investigator can observe. Methods for taking these factors into account differ in their difficulty and inconvenience, and as a matter of practicality, often methods that are less inconvenient are accorded priority ahead of more difficult and inconvenient methods. Methods like multiple imputation overcome the irregular shape of the data by filling in missing values with plausible values, and adjusting the precision of estimates accordingly (Molenberghs\;et.\,al.\,2015). 
A first step in creating imputations is often to consider a model of the form (\ref{Eq:MsModel}) to model jointly the variables in the dataset. To estimate~$\theta$, likelihood estimation might be employed. However, this estimation is further impeded by the potential distorting effect of the missingness process on the distribution of the observable data. The choice the analyst has at hand is to use ignorable likelihood estimation based on (\ref{Eq:MsLikelihood}) anyway, or to model the missingness process and base estimation of $\theta$ on the left hand side of~(\ref{Eq:MgLikelihood}). Apart from the class of ignorable models, the latter adds a substantial layer of complexity and inconvenience, and understanding conditions under which this can be avoided is important. In the situation just described, the primary concern is not in understanding the conditions under which modelling the missingness process would be unnecessary, as Rubin (1976) considered. Rather, the primary concern is simply to obtain an estimate for~$\theta$, and to understand the conditions under which this can be done without the need to consider models for the missingness process at all. This question cannot be answered using the approach in Rubin\;(1976), reviewed in Section~\ref{Sect:Rubin}, because no model for the missingness process is posited against which to compare the estimates from the ignorable likelihood estimation. Seaman\;et.\,al.\,(2013, p\,266) note that it is this question that some writers seem to have taken as their interpretation of ignorability. While these writers were considering frequentist properties of estimation, the same ideas apply to direct likelihood inferences. We elaborate on the details. 
Suppose that \begin{equation} h(\mathbf{y}, \mathbf{r}) = f(\mathbf{y})g(\mathbf{r}|\,\mathbf{y}) \label{Eq:FullDensity} \end{equation} is a joint density for the random vector~$(Y, R)$, and consider the model \begin{equation} \mathcal{M}_t \,=\, \{\, f_\theta(\mathbf{y})\,g(\mathbf{r} |\, \mathbf{y}) : \theta\in\Theta \,\}. \label{Eq:MtModel} \end{equation} By definition (\ref{Eq:MtModel}) is correctly specified for the missingness process. We can ask under what conditions can the likelihood for the observable data for this model, \begin{equation} L_t(\theta) \,=\, \int f_\theta(\mathbf{y}) \, g(\mathbf{r} |\, \mathbf{y}) \, \text{d}\mathbf{y}^{mi(\mathbf{r})} \label{Eq:MtLikelihoodNotMAR} \end{equation} be maximised without needing to evaluate the unknown density function $g(\mathbf{r}|\,\mathbf{y})$? If $g(\mathbf{r} |\, \mathbf{y})$ is MAR with respect to the realised values~$(\mathbf{y}, \mathbf{r})$, then~(\ref{Eq:MtLikelihoodNotMAR}) factorises in the usual way \begin{equation} L_t(\theta) \,=\, g(\mathbf{r} |\, \mathbf{y}) \, \int f_\theta(\mathbf{y})\, \text{d}\mathbf{y}^{mi(\mathbf{r})} \label{Eq:MtLikelihoodMAR} \end{equation} and~(\ref{Eq:MtLikelihoodMAR}) can be maximised without needing to evaluate~$g(\mathbf{r} |\, \mathbf{y})$. Therefore, under this MAR assumption about~$g(\mathbf{r} |\, \mathbf{y})$, maximising (\ref{Eq:MtLikelihoodMAR}) is equivalent to maximising~(\ref{Eq:MsLikelihood}). Hence, if the investigator would have no reason to impose a relationship $\Delta\subsetneq\Theta\times\Psi$ on the parameters of a model~(\ref{Eq:MgModel}), if such a model were to be considered, and if the investigator is happy to assert that the missingness process itself is realised MAR, then direct likelihood inferences for $\theta$ can be obtained by ignorable likelihood estimation without the need to consider a model for the missingness process at all. 
In particular, considering some hypothetical model (\ref{Eq:MgModel}) and asserting distinctness of parameters (that is, choosing an ignorable model as the starting point) is simply unnecessary. We record this formally. \begin{theorem}[Missingness-model-free Ignorability] \label{Thm:MMFIgnorabilty} If the investigator would have no reason to impose a relationship $\Delta\subsetneq\Theta\times\Psi$ on the parameters of a model~(\ref{Eq:MgModel}), and if the distribution for the random vector $R$ conditional on~$Y$ is realised MAR, then there is no need to consider models of the form~(\ref{Eq:MgModel}) at all. In this case, direct likelihood inferences for $\theta$ can be obtained by ignorable likelihood estimation, and this equates to the investigator using the (unknown) conditional distribution for $R$ given $Y$ directly in the analysis. $\qedsymbol$ \end{theorem} When interpreting ignorability in this `model-free' sense, writers typically have gone further and adopted a frequentist view in which ignorable likelihood estimation retains the asymptotic properties of likelihood theory (Seaman\;et.\,al.\,2013,\;p.\,266). For completeness, we review the conditions that would be needed for ignorability in this sense. Recall from Section~\ref{Sect:Rubin} that ignorability for likelihood inferences in the sense of Rubin (1976) has two facets, with one being whether or not the investigator wishes to impose a relationship $\Delta\subsetneq\Theta\times\Psi$ onto the estimation of~$\theta$. This consideration applies irrespective of the mode of likelihood inference, and we retain consideration of non-distinctness of parameters as the first step in the decision making process. When this would be of no interest to the investigator, a correctly specified ignorable model together with Theorem~\ref{Thm:MMFIgnorabilty} implies the investigator can dispense with consideration of the ignorable model, and instead assert directly that $R$ given $Y$ is MAR. 
For reasons explained in Seaman\;et.\,al.\,(2013), to consider frequentist properties of likelihood theory, we strengthen our assumption to $R$ given $Y$ being everywhere MAR. This is then sufficient for ignorable likelihood estimation to be valid in the frequentist sense of likelihood theory provided the additional hypotheses of likelihood theory are satisfied. These requirements are summarised below. The model for the observable data is obtained by removing from each vector in $Y\times R$ the coordinates pertaining to the missing values. This creates an irregularly-shaped set. The probability measure on this irregularly-shaped set is obtained by pulling back events in this set to unions of events of the form (\ref{Eq:ODE}) in~$Y\times R$, and integrating the densities in (\ref{Eq:MtModel}) over these corresponding events for~$(Y, R)$ (by applying iterated integrals as per Fubini's Theorem (Ash and Dol\'{e}ans-Dade 2000,\,p.\,101)). In this way, the functions on the right hand side of~(\ref{Eq:MtLikelihoodNotMAR}) are seen to give a model of densities for the observable data. By construction, $\mathcal{M}_t$ is correctly specified if, and only~if, $f(\mathbf{y})\in\mathcal{M}_s$. The integration in~(\ref{Eq:MtLikelihoodNotMAR}) sets up a mapping from $\mathcal{M}_t$ to the observable data model. The observable data model therefore will be correctly specified if, and only if, $\mathcal{M}_t$ is, and identifiable provided $\mathcal{M}_s$ is identifiable (different values of $\theta$ correspond to different density functions $f_\theta$) and the mapping to it from $\mathcal{M}_t$ is one-to-one. A sufficient condition for the latter to be true is that the missingness process assigns non-zero probability to complete cases for all values of~$\mathbf{y}$, since then densities~(\ref{Eq:MtLikelihoodMAR}) corresponding to different values of $\theta$ will disagree for at least one $\mathbf{y}$ value on that part of the model pertaining to the complete cases. 
Finally, the appropriate regularity conditions must be satisfied by $L_t(\theta)$, $\Theta$ and the value $\theta_0\in\Theta$ for which $f_{\theta_0}=f$. We summarize this formally as follows. \begin{theorem}[Ignorability for frequentist likelihood inference] Sufficient conditions for ignoring the missingness process when drawing frequentist likelihood inferences are: \begin{enumerate} \item there is no relationship $\Delta\subsetneq\Theta\times\Psi$ (see (\ref{Eq:MgModel})) to be imposed on the analysis, \item \label{Item:MAR} the distribution of $R$ given $Y$ is everywhere MAR, \item the additional requirements of likelihood theory (summarised above) are satisfied. \end{enumerate} Moreover, when condition\;\ref{Item:MAR} holds, ignorable likelihood estimation equates to using the (unknown) distribution for $R$ given $Y$ directly in the analysis. In these circumstances, ignoring the missingness process is preferable to modelling it explicitly. $\qedsymbol$ \end{theorem} \section{Discussion} \label{Sect:Discussion} The foundations for ignorability of the process that causes missing data were put in place by Rubin (1976). With the exception of a stronger form of MAR framed in Seaman\;et.\,al.\,(2013) for frequentist likelihood theory, these foundations have been accepted essentially unaltered for more than four decades. Despite this, the conditions for ignorability seem to be more confusing than they should be. One reason for this might be that Rubin (1976) presented distinctness of parameters as a mathematical requirement of ignorable likelihood estimation. We suggest that this criterion is better understood from a statistical perspective, namely, whether or not the investigators wish to impose on the analysis a relationship $\Delta\subsetneq\Theta\times\Psi$ between the parameters for the data densities and the missingness mechanisms. 
From this perspective, non-distinctness of parameters is a choice of restriction incorporated in the analysis by the investigator, not a mathematical requirement that makes ignorable estimation `work.' Moreover, the situations in which imposing such a restriction could be considered reasonable would seem to be rare. In many cases, problems similar to defining statistical significance to be~`$p<0.05$' would arise. We suggest that the condition should be expressed as non-distinctness of parameters, and that it should serve more as a footnote to the theory, rather than being given such a prominent place. Another factor which may be contributing to confusion about the concept is the linking of ignorability with notions of `valid' inferences. For example, Rubin (1976, Summary) communicates the implications of these conditions as ``\textit{always leads to correct inferences},'' while Little and Rubin (2002,\;p\,120) refer to inferences being ``\textit{valid from the frequency perspective},'' and Seaman\;et.\,al.\,(2013, Abstract) simply refer to ``\textit{valid inference}.'' In these cases, what the terms mean is left undefined. However, linking ignorability with validity of inferences at all would seem to be highly misleading because ignorability is silent on whether or not a particular choice of missingness model is a `valid' choice for the data at hand, and it is equally silent on whether the chosen data model is `valid' for the data at hand. So it is difficult to see that there can be any meaningful sense in which satisfaction of the ignorability conditions implies that inferences drawn from the model will be valid. We suggest, however, that a primary source of confusion surrounding ignorability is likely to be because, as framed, it is derived by emulating complete-data methods without modification. 
Specifically, a full model for $(Y, R)$ is taken as the starting point for the model-based paradigms, and correct specification of the full model is added for frequentist likelihood inference. These paradigms are predicated on the investigator being in possession of the data to enable validation of the model against the data, and this is not the case when data are incomplete. As discussed in Section~\ref{Sect:Limitations}, these complete-data paradigms do not carry over completely to incomplete data because the model for the missingness process cannot be validated against the data. This feature of the incomplete data setting undermines the rationale for considering a model for the missingness process as the starting point for an analysis. Additionally, by framing ignorability in terms of properties of the model for the missingness process, instead of in terms of the missingness process itself, the usual causal link between choice of model and properties of estimation is partially severed. Changes to the missingness model can be made without altering the ignorable likelihood estimator in any way at all. While it is true that swapping one ignorable missingness model for another merely results in a proportional change in the likelihood, this forces a user of the tools to be in possession of unnecessary detail about the relationship between the estimation process and some `hypothetical' set of models for the missingness process. Possibly the strongest argument against the standard conditions is the convoluted nature of the way the question posed is answered. Specifically, for an investigator to choose \textbf{not to use} a full model, the investigator is directed \textbf{to use} a full model with specific properties, when making any choice of full model at all is unnecessary. The alternative interpretation reviewed and fleshed out in Section~\ref{Sect:RisMAR} avoids these issues by making an assumption directly about the conditional distribution $R$ given~$Y$. 
This has no analogue in the corresponding complete data methods (which do not make direct assumptions about the data random vector~$Y$). However, doing so is the most direct and natural way to answer the ignorability question: the two scenarios under which a full model is required are (i) $R$ given $Y$ is not MAR, or (ii) the investigator has prior information about a relationship between the data distribution and missingness process to incorporate into the estimation of~$\theta$. Otherwise, ignorable estimation is appropriate because it equates to (the unknown) $R$ given $Y$ being used directly in the analysis. Proponents of model-based paradigms might argue that the properties of $(Y, R)$ can never be known in reality. While it is true that the MAR assumption is `hypothetical' (untestable), ascribing it to a model for $R$ given~$Y$ has no advantages over ascribing it directly to $R$ given~$Y$ because it is impossible to validate this property for the missingness model against the data. In short, choosing an untestable model is not an improvement on making an untestable assumption, and comes with the disadvantages discussed above. In relation to the ignorable models identified by Rubin (1976), the primary difference between the standard conditions and the alternative interpretation of ignorability reviewed in Section~\ref{Sect:RisMAR} can be summed up as follows: in the former case, the ignorable models are the full models that an investigator would choose from in order to not use a full model; in the latter case, the ignorable models are the full models that can be ignored because the need for an investigator to contemplate ever choosing one never arises. \end{document}
\begin{document} \title{Some progress in the Dixmier Conjecture} \author [G. Han] {Gang Han} \address [G. Han]{School of Mathematics \\ Zhejiang University \\ Hangzhou, 310027, China} \email{[email protected]} \author[B. Tan]{Bowen Tan } \address [B. Tan]{School of Mathematics \\ Zhejiang University\\Hangzhou, 310027, China }\email{[email protected] } \date{Sept. 28, 2022 } \subjclass[2010]{16S32, 16W20} \keywords{ Weyl algebra, Dixmier Conjecture, Newton polygon} \begin{abstract} Let $p$ and $q$, where $pq-qp=1$, be the standard generators of the first Weyl algebra $A_1$ over a field of characteristic zero. Then the spectrum of the inner derivation $ad(pq)$ on $A_1$ is exactly the set of integers. The algebra $A_1$ is a $\mathbb{Z}$-graded algebra with each $i$-component being the $i$-eigenspace of $ad(pq)$, where $i\in \mathbb{Z}$. The Dixmier Conjecture says that if some elements $z$ and $w$ of $A_1$ satisfy $zw-wz=1$, then they generate $A_1$. We show that if either $z$ or $w$ possesses no component belonging to the negative spectrum of $ad(pq)$, then the Dixmier Conjecture holds. We give some generalization of this result, and some other useful criteria for $z$ and $w$ to generate $A_1$. An important tool in our proof is the Newton polygon for elements in $A_1$. \end{abstract} \thanks{Bowen Tan is the corresponding author.} \maketitle \section{Introduction} \setcounter{equation}{0}\setcounter{theorem}{0} Let $K$ be a field of characteristic 0. Let $A_1$ be the first Weyl algebra over $K$ generated by $p,q$ with \[[p,q]=pq-qp=1.\] In the influential paper \cite{d}, Dixmier studied the structure of $A_1$ in great detail, and in the end he posed six problems. Joseph gave some characterization for semisimple and nilpotent elements of $A_1$ and solved two problems in \cite{jo}. 
The first problem of \cite{d} asks if every endomorphism of the Weyl algebra $A_1$ is an automorphism, i.e., given elements $z,w$ of $A_1$ such that $[z,w] = 1$, do they generate the algebra $A_1$? A similar problem for the $n$-th Weyl algebra is called the Dixmier Conjecture. It is open for all $n\ge 1$. In this paper, the Dixmier Conjecture for $A_1$ will be abbreviated by DC. Much progress has been made towards DC. In \cite{mv}, the authors showed that DC holds for a particular type of endomorphism of $A_1$. By taking the same idea further, Moskowicz gave some equivalent formulations of DC \cite{mo}. The spectrum of the inner derivation $ad(pq)$ on $A_1$ is exactly the set of integers. $A_1$ is a $\mathbb{Z}$-graded algebra with each $i$-component $D_i$ being the $i$-eigenspace of $ad(pq)$, $i\in \mathbb{Z}$. An element $z\in D_i$ is said to be homogeneous of degree $i$. One has \[A_1=\oplus_{i\in \mathbb{Z}}D_i,\ \text{and}\ D_i\cdot D_j\subseteq D_{i+j}, \forall i,j\in \mathbb{Z}.\] It follows that $D_{\ge 0}=\oplus_{i\ge 0}D_i$ is a subalgebra of $A_1$. In \cite{b}, Bavula showed that the eigenvector algebra of $zw$, i.e., the subalgebra of $A_1$ generated by all the eigenvectors of $ad(zw)$ in $A_1$, is equal to the subalgebra $K\langle z,w\rangle$ of $A_1$ generated by $z$ and $w$. In \cite{ggv}, the authors showed that if $z,w\in A_1$ satisfy $[z,w] = 1$, then the centralizer $C(z)$ of $z$ in $A_1$ equals $K[z]$. If $z$ and $w$ do not generate $A_1$, the authors in \cite{vgg} showed that the greatest common divisor of the total degrees of $z$ and $w$ is $>15$, where $z$ and $w$ are written as polynomials in $p$ and $q$. In \cite{bl} the authors prove that DC holds if $z$ and $w$ are sums of no more than two homogeneous elements of $A_1$. In this paper we show that if $z$ and $w$ satisfy certain conditions then they generate $A_1$. In particular we prove the following main result in Theorem \ref{33}. 
\begin{theorem} Assume that $z,w\in A_1,$ $[z,w]=1$, and either $z$ or $w$ is in $D_{\ge 0}$. Then $z$ and $w$ generate $A_1$. \end{theorem} This result is generalized in Corollary \ref{98}. Proposition \ref{93} and Theorem \ref{56} give some other criteria for $z$ and $w$ to generate $A_1$. As a corollary, the main result in \cite{bl} is generalized in Theorem \ref{97}. Our proof is partially motivated by the idea in \cite{jo} of Joseph. The Newton polygon for any element in $A_1$ is defined and plays an important role in the proof. Our definition of Newton polygon is slightly different from that defined in \cite{ml2}. Some important observations of Newton polygons of elements in $A_1$ can be found in \cite{gz}. \\[3mm] Here are some of the conventions of the paper. $X,Y$ denote indeterminates; $i,j,k,l,m,n,r,s$ denote integers; For positive integers $m$ and $n$, $\gcd(m,n)$ is the positive greatest common divisor of $m$ and $n$; $K$ is a field of characteristic 0; $\alpha,\beta,\gamma,\lambda,\mu$ denote elements in $K$; $K^*=K\setminus\{0\}$; For $z\in A_1$, $C(z)=\{w\in A_1|zw=wz\}$ is the centralizer of $z$ in $A_1$; $K[z]=\{f(z)|f\in K[X]\}$; $\mathbb{R}^2$ denotes the Euclidean plane with the Cartesian coordinate system $Oxy$; $(\mathbb{R}_{\ge 0})^2=\{(x,y)\in \mathbb{R}^2|x\ge 0,y\ge 0\}$; $V_+=\{(x,y)\in (\mathbb{R}_{\ge 0})^2|y\ge x\}, V_-=\{(x,y)\in (\mathbb{R}_{\ge 0})^2|y\le x\}$; For a nonempty set $S\subseteq \mathbb{R}^2$,\\[1mm] \centerline{$Convex(S)$ is the convex hull of $S$ in $\mathbb{R}^2$;} \[Cone(S)=\{t(x,y)| t\in \mathbb{R}_{\ge 0}, (x,y)\in S\}.\] \section{Some preliminaries} \setcounter{equation}{0}\setcounter{theorem}{0} \noindent\textbf{2.1.} Recall that $K$ is a field of characteristic 0. 
For $f,g\in K[X,Y]$, define their Poisson product \begin{equation} \label{7} \{f,g\}=\left | \begin{array}{ll} {\partial f}/{\partial X} & {\partial f}/{\partial Y} \\ {\partial g}/{\partial X} & {\partial g}/{\partial Y} \end{array} \right |. \end{equation} In particular, one has \begin{equation} \label{54} \{X^iY^j,X^k Y^l\}=\left | \begin{array}{ll} i & j \\ k & l \end{array} \right | X^{i+k-1}Y^{j+l-1}. \end{equation} For $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$ and $\tau\in \mathbb{R}$, if $f\in K[X,Y]$ satisfies \[\rho X\frac{\partial f}{\partial X}+\sigma Y\frac{\partial f}{\partial Y}=\tau f,\] then $f$ is said to be $(\rho,\sigma)$-\textbf{homogeneous of degree} $\tau$, and is denoted by $\mathbf{v}_{\rho,\sigma}(f)=\tau$. Note that if $f$ is $(\rho,\sigma)$-homogeneous of degree $\tau$, then $f$ is $(\iota\rho,\iota\sigma)$-homogeneous of degree $\iota\tau$, for any $\iota\in \mathbb{R}\setminus \{0\}$. If $(r,s)\in \mathbb{Z}^2$, then for $X^iY^j\in K[X,Y]$, \[\mathbf{v}_{r,s}(X^iY^j)=ri+sj\in \mathbb{Z}.\] By this definition, 0 is $(\rho,\sigma)$-homogeneous of degree $\tau$, for any $\tau$ in $\mathbb{R}$; a nonzero constant polynomial is always $(\rho,\sigma)$-homogeneous of degree 0. For $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$ and $\tau\in \mathbb{R}$, let \[ V_{\rho,\sigma}(\tau)=\{f\in K[X,Y]|\rho X\frac{\partial f}{\partial X}+\sigma Y\frac{\partial f}{\partial Y}=\tau f\}. \] Then $V_{\rho,\sigma}(\tau)$ is a linear subspace of $K[X,Y]$. \begin{prop}\label{78}(Proposition 2.1 of \cite{jo}) If $f\in V_{\rho,\sigma}(u), g\in V_{\rho,\sigma}(v)$ with $u,v\in \mathbb{R}$, then \[fg\in V_{\rho,\sigma}(u+v),\ \text{and}\ \{f,g\}\in V_{\rho,\sigma}(u+v-\rho-\sigma).\]\\[1mm] \end{prop} For $f\in K[X,Y]\setminus\{0\}$, assume $f=\sum \alpha_{ij} X^iY^j$. 
Define \[E(f)=\{(i,j)\in \mathbb{Z}^2| \alpha_{ij}\ne 0\}.\] Let $Convex(E(f))$ be the convex hull of $E(f)$ in $\mathbb{R}^2$. It is usually a solid polygon. For $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$, we also set \[\mathbf{v}_{\rho,\sigma}(f)=\sup\{\rho i+\sigma j|(i,j)\in E(f)\},\] which is usually called the $(\rho,\sigma)$-degree of $f$. Note that if $f$ is a nonzero $(\rho,\sigma)$-homogeneous polynomial then $\mathbf{v}_{\rho,\sigma}(f)$ agrees with the previous definition. Let \[E(f;\rho,\sigma)=\{(i,j)\in E(f)|\rho i+\sigma j=\mathbf{v}_{\rho,\sigma}(f)\}.\] Let \[\mathbf{f}_{\rho,\sigma}(f)=\sum_{(i,j)\in E(f;\rho,\sigma)} \alpha_{ij} X^iY^j. \] Assume that $f\in K[X,Y]\setminus \{0\}$, and $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$; then there is a unique decomposition \begin{equation}\label{77}f=\sum_{i=0}^k f_i\end{equation} with $f_i\ne 0$ being $(\rho,\sigma)$-homogeneous for all $i$ and $\mathbf{v}_{\rho,\sigma}(f_i)>\mathbf{v}_{\rho,\sigma}(f_{i+1})$ for $i=0,\cdots,k-1$. We will refer to \eqref{77} as the $(\rho,\sigma)$-\textbf{homogeneous decomposition} of $f$, and $\mathbf{f}_{\rho,\sigma}(f)=f_0$ is the $(\rho,\sigma)$-\textbf{leading component}. Note that for $\tau>0$, one has $\mathbf{f}_{\tau\rho,\tau\sigma}(f)=\mathbf{f}_{\rho,\sigma}(f)$. The set $Convex(E(\mathbf{f}_{\rho,\sigma}(f)))$ is either a vertex or an edge of $Convex(E(f))$. \begin{prop}\label{24} Let $f,g\in K[X,Y]\setminus \{0\}$, $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$, $\mathbf{f}_{\rho,\sigma}(f)=f_0$, and $\mathbf{f}_{\rho,\sigma}(g)=g_0$. Then (1) $\mathbf{f}_{\rho,\sigma}(fg)=f_0g_0$; (2) If $\{f_0,g_0\}\ne 0$ then $\mathbf{f}_{\rho,\sigma}(\{f,g\})=\{f_0,g_0\}$. 
\end{prop} \begin{proof} Assume that $f=\sum_{i=0}^k f_i$ and $g=\sum_{j=0}^l g_j$ are the respective $(\rho,\sigma)$-homogeneous decompositions of $f$ and $g$, where $\mathbf{v}_{\rho,\sigma}(f_i)=\tau_i$, and $\tau_i>\tau_{i+1}$ for $i=0,\cdots,k-1$; $\mathbf{v}_{\rho,\sigma}(g_j)=\nu_j$, and $\nu_j>\nu_{j+1}$ for all $j=0,\cdots,l-1$. (1) One has \[fg=f_0 g_0+\sum_{i+j\ge 1}f_i g_j,\] where $f_ig_j$ are $(\rho,\sigma)$-homogeneous of degree $\tau_i+\nu_j$, for all $i,j$. As $f_0 g_0\ne 0$, and for any $i,j$ with $i+j\ge 1$, \[\tau_0+\nu_0=\mathbf{v}_{\rho,\sigma}(f_0g_0)>\mathbf{v}_{\rho,\sigma}(f_ig_j)=\tau_i+\nu_j,\] one has $\mathbf{f}_{\rho,\sigma}(fg)=f_0g_0$. (2) One has \[\{f,g\}=\{f_0,g_0\}+\sum_{i+j\ge 1}\{f_i,g_j\}, \] where $\{f_i,g_j\}$ are $(\rho,\sigma)$-homogeneous of degree $\tau_i+\nu_j-(\rho+\sigma)$, for all $i,j$. By assumption, $\{f_0,g_0\}\ne 0$. Since for any $i,j$ with $i+j\ge 1$, \[\tau_0+\nu_0-(\rho+\sigma)=\mathbf{v}_{\rho,\sigma}(\{f_0,g_0\})>\mathbf{v}_{\rho,\sigma}(\{f_i,g_j\})=\tau_i+\nu_j-(\rho+\sigma),\] one has $\mathbf{f}_{\rho,\sigma}(\{f,g\})=\{f_0,g_0\}$. 
\end{proof} \noindent \textbf{2.2.} Assume that $f,g\in K[X,Y]$ are $(\rho,\sigma)$-homogeneous of degree $v,u$ respectively; one has \begin{equation}\label{20}\rho X\frac{\partial f}{\partial X}+\sigma Y\frac{\partial f}{\partial Y}=v f\end{equation} and \begin{equation}\label{211}\rho X\frac{\partial g}{\partial X}+\sigma Y\frac{\partial g}{\partial Y}=u g.\end{equation} Subtracting $\frac{\partial g}{\partial X}$ times \eqref{20} from $\frac{\partial f}{\partial X}$ times \eqref{211}, one has \begin{equation}\label{22}\sigma Y(\frac{\partial f}{\partial X}\frac{\partial g}{\partial Y}-\frac{\partial f}{\partial Y}\frac{\partial g}{\partial X})=ug\frac{\partial f}{\partial X}-vf\frac{\partial g}{\partial X}.\end{equation} Subtracting $\frac{\partial g}{\partial Y}$ times \eqref{20} from $\frac{\partial f}{\partial Y}$ times \eqref{211}, one has \begin{equation}\label{23} -\rho X(\frac{\partial f}{\partial X}\frac{\partial g}{\partial Y}-\frac{\partial f}{\partial Y}\frac{\partial g}{\partial X})=ug\frac{\partial f}{\partial Y}-vf\frac{\partial g}{\partial Y}.\end{equation} If $v,u$ are integers (which may be 0), then the above two equations become \begin{equation}\label{12}\sigma Y\{f,g\}=f^{-u+1}g^{v+1}\frac{\partial}{\partial X}(g^{-v}f^u);\end{equation} \begin{equation}\label{13}-\rho X\{f,g\}=f^{-u+1}g^{v+1}\frac{\partial}{\partial Y}(g^{-v}f^u).\end{equation} The above argument is due to Dixmier; see Lemma 1.4 of \cite{d}. \begin{lemma}\label{80} Assume that $f,g\in K[X,Y]\setminus K$, and $(\rho,\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$. Assume that $f,g$ are $(\rho,\sigma)$-homogeneous of degree $v,u$ respectively. Then $v,u$ are integers and \[\{f,g\}=0\ \text{if and only if}\ f^u=\lambda g^v\ \text{for some}\ \lambda\in K^*.\] \end{lemma} \begin{proof} Assume that $\{f,g\}=0$. 
Noticing that $f^{-u+1}g^{v+1}\ne 0$, by \eqref{12} and \eqref{13} one has \[\frac{\partial}{\partial X}(g^{-v}f^u)=0\ \text{and}\ \frac{\partial}{\partial Y}(g^{-v}f^u)=0,\] which implies $f^u=\lambda g^v$ for some $\lambda\in K$. As $f\ne 0$, $\lambda\in K^*$. Assume that $f^u=\lambda g^v$ for some $\lambda\in K^*$. Then by \eqref{12} and \eqref{13} one has \[\sigma Y\{f,g\}=\rho X\{f,g\}=0.\] As at least one of $\sigma$ and $\rho$ is nonzero, one has $\{f,g\}=0$. \end{proof} \begin{prop}\label{27} Assume that $f,g\in K[X,Y]\setminus K$, and $(\rho,\sigma)\in \mathbb{R}^2\setminus\{(0,0)\}$. Assume that $f,g$ are $(\rho,\sigma)$-homogeneous of degree $v,u$ respectively, and $\{f,g\}=0$. (1) If $\rho,\sigma$ are linearly independent over $\mathbb{Q}$, then $f,g$ are monomials, and $E(f), E(g)$ are on the same ray through $O$. (2) If $\rho,\sigma$ are linearly dependent over $\mathbb{Q}$, then one can replace $(\rho,\sigma)$ with some suitable multiple of it so that $(\rho,\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$ and $v,u\in \mathbb{Z}_{\ge 0}$. One has that for some $\lambda\in K^*$, \begin{equation}\label{26}f^u=\lambda g^v,\end{equation} and $f,g$ are in one of the following cases: (2.1) $v=u=0$. In this case $Convex(E(f))$ and $Convex(E(g))$ are on the ray through $O$ orthogonal to $(\rho,\sigma)$; (2.2) $v,u>0$. Assume that $d=\gcd(v,u)$ and ${v}_0=v/d, u_0=u/d $. Then there exists some polynomial $h\in K[X,Y]$ such that $f=\gamma h^{{v}_0}, g=\beta h^{u_0}$, $\gamma\beta\ne 0$. In this case $Convex(E(f))$ and $Convex(E(g))$ are homothetic with respect to $O$. In all the 3 cases, one has that $Cone(Convex(E(f)))=Cone(Convex(E(g)))$. \end{prop} \begin{proof} (1) Assume that $\rho,\sigma$ are linearly independent over $\mathbb{Q}$. Then $f,g$ are monomials. It follows from \eqref{54} that $E(f), E(g)$ are on the same ray through $O$. It is clear that $Cone(Convex(E(f)))=Cone(Convex(E(g)))$. 
(2) Assume that $\rho,\sigma$ are linearly dependent over $\mathbb{Q}$. After replacing $(\rho,\sigma)$ with $(\tau\rho,\tau\sigma)$ for some suitable $\tau\ne 0$ one has $(\tau\rho,\tau\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$, $v,u\in \mathbb{Z}$ and at least one of $v,u$ is $\ge 0$. As $\{f,g\}=0$, \eqref{12} and \eqref{13} are just \[ \frac{\partial}{\partial X}(g^{-v}f^u)=0,\ \ \frac{\partial}{\partial Y}(g^{-v}f^u)=0, \] then $g^{-v}f^u=\lambda\in K$. As $f,g$ are both nonzero polynomials, $\lambda\ne 0$. So \[f^u=\lambda g^v, \lambda\ne 0.\] It is clear that if $u=0$ then $v=0$, and vice versa. So there are 2 cases: (2.1) $v=u=0$. In this case $Convex(E(f))$ and $Convex(E(g))$ are both on the line through $O$ orthogonal to $(\rho,\sigma)$. One has that $Cone(Convex(E(f)))=Cone(Convex(E(g)))$. (2.2) $v\ne 0, u\ne 0$. As $g^{-v}f^{u}$ is a constant, if $u>0>v$ then both $f$ and $g$ are constant polynomials, which contradicts the assumption. Similarly the case $v>0>u$ cannot happen. So one must have $v>0,u>0$. Let $d=\gcd(v,u)$ and $v_0=v/d, u_0=u/d $. As $K[X,Y]$ is a unique factorization domain, one has \[f=\gamma h_1^{s_1}\cdots h_k^{s_k}, g=\beta h_1^{t_1}\cdots h_k^{t_k}, s_i>0,t_i>0,i=1,\cdots, k;\] where $h_i$'s are irreducible monic polynomials. By \eqref{26}, \[ (\gamma h_1^{s_1}\cdots h_k^{s_k})^{u_0 d}= \lambda (\beta h_1^{t_1}\cdots h_k^{t_k})^{v_0 d}. \] Then $h_i^{s_iu_0d}=h_i^{t_iv_0d}$ and $s_iu_0=t_iv_0,$ for $i=1,\cdots, k$. As $\gcd( v_0,u_0)=1$, $v_0$ divides $s_i$. Let $s_i/v_0=m_i, i=1,\cdots, k$. Then $t_i/u_0=m_i, i=1,\cdots, k.$ So \[ s_i=v_0 m_i, t_i=u_0 m_i; i=1,\cdots, k. \] Let $h= h_1^{m_1}\cdots h_k^{m_k}$. Then $f=\gamma h^{v_0}, g=\beta h^{u_0}$, $\gamma\beta\ne 0$. So $Convex(E(f))$ and $Convex(E(g))$ are homothetic with respect to $O$, thus $Cone(Convex(E(f)))=Cone(Convex(E(g)))$. 
It is verified that in all the 3 cases $Cone(Convex(E(f)))=Cone(Convex(E(g)))$. \end{proof} For $f\in K[X,Y]$, let \begin{equation}\label{91}C(f)=\{g\in K[X,Y]|\{f,g\}=0\}.\end{equation} It is easy to verify that $C(f)\supseteq K[f]$. \begin{lemma}\label{79} Assume that $f\in K[X,Y]\setminus K$, and that $f$ is $(\rho,\sigma)$-homogeneous of degree $v$ for some $(\rho,\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$. Let $g=\sum_{i=0}^k g_i$ be the $(\rho,\sigma)$-homogeneous decomposition of $g\in K[X,Y]\setminus \{0\}$, with $\mathbf{v}_{\rho,\sigma}(g_i)=u_i$ for all $i$. Then $g\in C(f)$ if and only if $g_i\in C(f)$ for all $i$. \end{lemma} \begin{proof} By Proposition \ref{78}, $\{f,g_i\}\in V_{\rho,\sigma}(v+u_i-\rho-\sigma)$ for all $i$. One has $\{f,g\}=\sum_{i=0}^k \{f,g_i\}$. Then $\{f,g\}=0$ if and only if $\{f,g_i\}=0$ for all $i$. \end{proof} For any $f\in K[X,Y]\setminus K$, there exists a unique decomposition \begin{equation}\label{73}f=\lambda h^m,\end{equation} such that (1) $\lambda\ne 0, h\in K[X,Y]$ is monic, and $m\ge 1$; (2) $h$ is not a proper power of some polynomial in $K[X,Y]$, i.e. $h$ cannot be written as $h= g^n$ with $ g\in K[X,Y]$ and $n>1$. We will refer to such a decomposition \eqref{73} as the \textbf{power decomposition} of $f$. \begin{coro}\label{90} Assume that $f\in K[X,Y]\setminus K$, and that $f$ is $(\rho,\sigma)$-homogeneous of degree $v\ne 0$ for some $(\rho,\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$. Assume $f=\mu h^m$ is the power decomposition of $f$. Then (1) $h$ is also $(\rho,\sigma)$-homogeneous and $C(f)=K[h]$. (2) $C(f)=K[f]$ if and only if $m=1$. \end{coro} \begin{proof} (1) By $f=\mu h^m$ (with $\mu\ne 0$), $h$ must also be $(\rho,\sigma)$-homogeneous. Assume that $g\in C(f)\setminus K$ is $(\rho,\sigma)$-homogeneous of degree $u$. 
Then as we have seen in Proposition \ref{27}, \[f^u=\lambda g^v,\ \text{for some}\ \lambda\ne 0.\] As $f=\mu h^m$, one has \[\mu^u h^{mu}=(\mu h^m)^u=\lambda g^v.\] By the uniqueness of the power decomposition of $g$, one knows that $g=\gamma h^k$ for some $k\ge 0,\gamma\ne 0$. Thus $g\in K[h]$, and $C(f)\subseteq K[h]$ by Lemma \ref{79}. As it is clear that $K[h]\subseteq C(f)$, one has $C(f)=K[h]$. (2) follows from (1). \end{proof} \begin{coro}\label{102} Assume that $f\in K[X,Y]\setminus K$, and that $f$ is $(\rho,\sigma)$-homogeneous of degree $v\ne 0$ for some $(\rho,\sigma)\in \mathbb{Z}^2\setminus\{(0,0)\}$. Assume that $g\in C(f)$. Then (1) $E(g)\subseteq Cone(Convex(E(f)))$; (2) If $E(f)\subseteq V_+$ (resp. $E(f)\subseteq V_-$) then $E(g)\subseteq V_+$ (resp. $E(g)\subseteq V_-$). \end{coro} \begin{proof} (1) Assume $f=\mu h^m$ ($\mu\ne 0$) is the power decomposition of $f$. By (1) of Corollary \ref{90}, $h$ is also $(\rho,\sigma)$-homogeneous and $C(f)=K[h]$. As \[Cone(Convex(E(h)))=Cone(Convex(E(f)))\] and $g\in K[h]$, one has $E(g)\subseteq Cone(Convex(E(f)))$. (2) follows from (1). \end{proof} \noindent \textbf{2.3.} Recall that $A_1$ is the first Weyl algebra over $K$ generated by $p,q$ with \[[p,q]=1.\] One knows that $A_1$ is a simple Noetherian domain of Gelfand-Kirillov dimension 2. One knows that $ \{p^i q^j| i,j\in \mathbb{Z}_{\ge 0}\} $ is a basis of $A_1$. Identify $A_1$ with $K[X,Y]$ by \[K[X,Y]\rightarrow A_1, \sum \alpha_{ij}X^iY^j\mapsto \sum\alpha_{ij}p^iq^j.\] Denote its inverse by \begin{equation}\label{94}\Phi: A_1\rightarrow K[X,Y], p^iq^j\mapsto X^iY^j.\end{equation} We will say that $z\in A_1$ is a monomial (resp. without constant term, etc.) if so is $\Phi(z)$. 
For $z\in A_1\setminus\{0\}$ define \[E(z)=E(\Phi(z))\ \text{and}\ Convex(E(z))=Convex(E(\Phi(z))).\] For $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}, \rho+\sigma\ge 0$, let \[\mathbf{v}_{\rho,\sigma}(z)=\mathbf{v}_{\rho,\sigma}(\Phi(z))\] and \[\mathbf{f}_{\rho,\sigma}(z)=\mathbf{f}_{\rho,\sigma}(\Phi(z)).\] The polynomial $\mathbf{f}_{\rho,\sigma}(z)$ is called the polynomial $(\rho,\sigma)$-associated with $z$. By $[p,q]=1$ one obtains the following result (Lemma 2.1 of \cite{d}) \[q^i p^s=p^s q^i +\sum_{j=1}^{\min\{i,s\}}(-1)^j j! \binom{i}{j}\binom{s}{j} p^{s-j}q^{i-j}.\] Note that the coefficient of $ p^{s-j}q^{i-j}$ is 0 if $j>i$ or $j>s$. Then it follows that \begin{equation} \begin{aligned}p^{s_1}q^{i_1}\cdot p^{s_2}q^{i_2}&=\sum_{j=0}^{\min\{i_1,s_2\}}(-1)^j j! \binom{i_1}{j} \binom{s_2}{j}p^{s_1+s_2-j}q^{i_1+i_2-j}\\ &=p^{s_1+s_2}q^{i_1+i_2}-\binom{i_1}{1}\binom{s_2}{1} p^{s_1+s_2-1}q^{i_1+i_2-1}+\cdots \end{aligned}\end{equation} Set $\min\{i_1,s_2\}=l, \min\{i_2,s_1\}=m$. Then \begin{equation*} \begin{aligned} [p^{s_1}q^{i_1}, p^{s_2}q^{i_2}] &=\sum_{j=1}^{\max{\{l,m\}}}(-1)^j j!\left[\binom{i_1}{j}\binom{s_2}{j}-\binom{i_2}{j}\binom{s_1}{j}\right] p^{s_1+s_2-j}q^{i_1+i_2-j}\\ &=\left |\begin{array}{l} s_1\ i_1\\ s_2\ i_2 \end{array}\right | p^{s_1+s_2-1}q^{i_1+i_2-1}-(1/2!)\left |\begin{array}{l} s_1(s_1-1),\ i_1(i_1-1)\\ s_2(s_2-1),\ i_2(i_2-1) \end{array}\right | p^{s_1+s_2-2}q^{i_1+i_2-2}+\cdots \end{aligned}\end{equation*} The following useful result is due to Dixmier. \begin{theorem} \label{31}(Lemma 2.7 of \cite{d}, Proposition 3.2 of \cite{jo})\\ Let $ z,w \in A_1\setminus \{0\}$. Assume that $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$ with $\rho+\sigma> 0$. Set $f=\mathbf{f}_{\rho,\sigma}(z)$ and $g=\mathbf{f}_{\rho,\sigma}(w)$. 
Then (1) $\mathbf{f}_{\rho,\sigma}(zw)=fg$; (2) If $\{f,g\}\ne 0$ then \[\mathbf{f}_{\rho,\sigma}([z,w])=\{f,g\}\quad \text{and}\quad \mathbf{v}_{\rho,\sigma}([z,w])=\mathbf{v}_{\rho,\sigma}(z)+\mathbf{v}_{\rho,\sigma}(w)-(\rho+\sigma);\] (3) If $\{f,g\}= 0$, then $ \mathbf{v}_{\rho,\sigma}([z,w])<\mathbf{v}_{\rho,\sigma}(z)+\mathbf{v}_{\rho,\sigma}(w)-(\rho+\sigma)$. \end{theorem} \noindent \textbf{2.4.} Now we recall the $\mathbb{Z}$-grading on $A_1$. For any $z\in A_1$, let $\operatorname{ad}(z)$ be the derivation \[\operatorname{ad}(z):A_1\rightarrow A_1, \quad w\mapsto [z,w]=zw-wz.\] Then $(\operatorname{ad}(pq))(p^i q^s )=[pq, p^i q^s ]=(s-i)p^i q^s$. Let \[ D_t=Span\{p^i q^s \mid s-i=t\},\quad t\in \mathbb{Z}.\] One knows that $D_0=K[pq]$. If $z\in D_k$, then it is said to be \textbf{homogeneous} of degree $k$. For $i\in \mathbb{Z}$, set \[D_{\ge i}:=\bigoplus_{i\le t\in \mathbb{Z}} D_t,\quad D_{>i}:=\bigoplus_{i< t\in \mathbb{Z}} D_t.\] $D_{\le i}$ and $D_{<i}$ are defined analogously. As $D_t\cdot D_s\subseteq D_{t+s}$ for any $t,s\in \mathbb{Z}$, it is clear that $D_{\ge 0}$ and $D_{\le 0}$ are subalgebras of $A_1$. \begin{lemma}\label{32} One has \[ [D_{\ge 0},D_{\ge 0}]=D_{>0}\quad \text{and}\quad [D_{\le 0},D_{\le 0}]=D_{<0}.\] \end{lemma} \begin{proof} By $[D_i,D_j]\subseteq D_{i+j}$ for all $i,j\in \mathbb{Z}_{\ge 0}$, and $[D_0,D_0]=0$, one has $[D_{\ge 0},D_{\ge 0}]\subseteq D_{>0}$. Assume $k>0$. For any $i\ge 0$, as $[pq, p^iq^{i+k}]=kp^iq^{i+k}$, one has $D_k\subseteq [D_{\ge 0},D_{\ge 0}]$. Therefore $[D_{\ge 0},D_{\ge 0}]$ contains all $D_k$ with $k>0$, thus $[D_{\ge 0},D_{\ge 0}]=D_{>0}$. The equation $[D_{\le 0},D_{\le 0}]=D_{<0}$ is proved similarly. \end{proof} It is clear that for any homogeneous element $z$ in $A_1$, $C(z)$ is a graded subalgebra of $A_1$. \begin{theorem}[Theorem 2.2 and Remark 2.6 of \cite{ggv}]\label{101} Assume that $z\in D_i\setminus K$, $i\in \mathbb{Z}$. (1) If $i=0$ then $C(z)=D_0$; if $i\ne 0$ then $\dim(C(z)\cap D_j)\le 1$ for any $j\in \mathbb{Z}$. 
(2) If $i> 0$ and $\dim(C(z)\cap D_j)= 1$, then $j\ge 0$; if $i<0$ and $\dim(C(z)\cap D_j)= 1$, then $j\le 0$. \end{theorem} \begin{coro}\label{51} Assume that $z\in D_i\setminus\{0\}$. If $i=\pm 1$ then $C(z)=K[z]$. \end{coro} \begin{proof} We will prove it in the case $i=1$. Note that $C(z)$ and $K[z]$ are both graded subalgebras of $D_{\ge 0}$ with $K[z]\subseteq C(z)$. As $\dim(K[z]\cap D_j)= 1$ for any $j\ge 0$, and $\dim(C(z)\cap D_j)\le 1$ for any $j\ge 0$, one has $K[z]\cap D_j=C(z)\cap D_j$ for any $j\ge 0$, thus $C(z)=K[z]$. \end{proof} For any $z\in A_1\setminus\{0\}$, $z=z_0+\cdots+z_m$ with $m\ge 0$ satisfying $z_i\in D_{k_i}\setminus\{0\}$ for $i=0,\cdots,m$ and $k_i>k_{i+1}$ for $i=0,\cdots,m-1$, is called the \textbf{homogeneous decomposition} of $z$ and $z_0$ is called the \textbf{leading component} of $z$. The following result is clear. \begin{lemma}\label{103} Assume that $z, w\in A_1\setminus\{0\}$ and \[z=z_0+\cdots+z_m, \quad w=w_0+\cdots+w_n \] are the respective homogeneous decompositions of $z,w$. Then (1) $z_0w_0$ is the leading component of $zw$. (2) If $[z_0,w_0]\ne 0$, then $[z_0,w_0]$ is the leading component of $[z,w]$. \end{lemma} \noindent \textbf{2.5.} Now we describe the automorphism groups $\text{Aut}(A_1)$ and $\text{Aut}(K[X,Y])$ of the $K$-algebras $A_1$ and $K[X,Y]$ respectively. 
The automorphism group $\text{Aut}(K[X,Y])$ is generated by linear automorphisms \begin{equation}\label{9}X \rightarrow aX+bY,\ \ Y\rightarrow cX+dY\ \ \Bigl(\left | \begin{array}{ll} a & b \\ c & d \end{array} \right |\ne 0\Bigr)\end{equation} and triangular automorphisms \begin{equation}\label{10}X\rightarrow X+f(Y),\ \ Y\rightarrow Y\ \ (f(Y)\in K[Y]).\end{equation} Recall that for any $\phi\in \text{Aut}(K[X,Y])$, its Jacobian determinant is \[J(\phi)=\left | \begin{array}{ll} \partial \phi(X)/\partial X & \partial \phi(X)/\partial Y \\ \partial \phi(Y)/\partial X & \partial \phi(Y)/\partial Y \end{array} \right |.\] The group $\text{Aut}_n (K[X,Y])$ consists of those automorphisms $\phi$ of $K[X,Y]$ with $J(\phi)=1$; it is a normal subgroup of $\text{Aut} (K[X,Y])$. It is generated by the automorphisms in \eqref{9} with $\left | \begin{array}{ll} a & b \\ c & d \end{array} \right |=1$ and the automorphisms in \eqref{10}. The polynomial algebra $K[X,Y]$ over $K$ with the Poisson product defined as in \eqref{7} is a Poisson algebra. It is usually called the 1st symplectic Poisson algebra, and is denoted by $S_1(K)$. Note that the $C(f)$ defined in \eqref{91} for $f\in K[X,Y]$ is the centralizer of $f$ in $S_1(K)$. Let $\text{Aut}(S_1(K))$ denote the group of automorphisms of $S_1(K)$. An automorphism $\phi$ of the polynomial algebra $K[X,Y]$ is an automorphism of $S_1(K)$ if and only if it satisfies \[\{\phi(f),\phi(g)\}=\{f,g\}, \quad \forall f,g\in K[X,Y]. 
\] It follows that \[\text{Aut}(S_1(K))\cong \text{Aut}_n(K[X,Y]).\] \begin {equation}gin {theorem} (\cite{d} \cite{ml}) One has \[\text{Aut}(A_1)\cong \text{Aut}_n(K[X,Y]).\] \end {theorem} If $z_1,z_2\in A_1$ and there exists some $\psi\in \text{Aut}(A_1)$, such that $\psi(z_1)=z_2$, then we say that $z_1$ is conjugate to $z_2$.\\[3mm] For $\lambda\in K^*$, let $\psi_\lambda\in \text{Aut}(A_1)$ be defined such that \[\psi_\lambda(p)=\lambda p, \psi_\lambda(q)=\lambda^{-1} q.\] Note that $E(\psi_\lambda(z))=E(z)$ for any $z\in A_1$. Let $\psi_0\in \text{Aut}(A_1)$ be defined such that \begin {equation}\lambdabel{70}\psi_0(p)=q, \psi_0(q)=-p.\end {equation} Then \[\psi_0^2(p)=-p, \psi_0^2(q)=-q, \] and $\psi_0$ has order 4 in $\text{Aut}(A_1)$. Under the linear identification \[\Phi:A_1\rightarrow S_1(K), \quad (see \eqref{94}) \] $\psi_\lambda$ corresponds to $\tilde{\psi}_\lambda\in \text{Aut}(S_1(K))$ such that \[\tilde{\psi}_\lambda(X)=\lambda X, \tilde{\psi}_\lambda(Y)=\lambda^{-1} Y; \] $\psi_0$ corresponds to $\tilde{\psi}_0\in \text{Aut}(S_1(K))$ such that \begin {equation}\lambdabel{71}\tilde{\psi}_0(X)=Y, \tilde{\psi}_0(Y)=-X.\end {equation} Denote the subgroup of $\text{Aut}(A_1)$ consisting of all the $\psi_\lambda$ with $\lambda\in K^*$ by $G_0$. It is clear that $\psi_0$ normalize $G_0$. Call the subgroup of $\text{Aut}(A_1)$ generated by $G_0$ and $\psi_0$ by $G_1$. Then $G_1$ is the disjoint union of $G_0$ and $G_0\psi_0$. Similarly, denote the subgroup of $\text{Aut}(S_1(K))$ consisting all the $\tilde{\psi}_\lambda$ with $\lambda\in K^*$ by $\widetilde{G}_0$. One has that $\tilde{\psi}_0$ normalize $\widetilde{G}_0$. Call the subgroup of $\text{Aut}(S_1(K))$ generated by $\widetilde{G}_0$ and $\tilde{\psi}_0$ by $\widetilde{G}_1$. 
It is clear that there is a unique isomorphism \begin {equation}\lambdabel{96}G_1\rightarrow \widetilde{G}_1, \psi\mapsto \tilde{\psi}\end {equation} that maps $\psi_\lambda\rightarrow \tilde{\psi}_\lambda, \psi_0\rightarrow \tilde{\psi}_0$, and \[\tilde{\psi}(\Phi(z))=\Phi(\psi(z)), \forall z\in A_1, \forall \psi\in G_1. \] The following result is clear. \begin {equation}gin {lemma} One has that \[ \psi_0(D_i)=D_{-i}, i\in \mathbb{Z}.\] And it follows that \[ \psi_0(D_{\ge 0})=D_{\le 0}, \psi_0(D_{\le 0})=D_{\ge 0}.\]\end {lemma} \noindent \textbf{2.6.} Next we define the Newton Polygon for elements in $A_1$. If $z\in A_1\setminus \{0\}$, let \[ NTP(z)=\{(x,y)\in (\mathbb{R}_{\ge 0})^2|\exists t\in \mathbb{R}_{\ge 0}, (x,y)+t(1,1)\in Convex(E(z)) \},\] which is called the (solid) \textbf{Newton polygon} of $z$. If $z=0$, then $E(z)$ and $NTP(z)$ are both defined to be the empty set. Let $(\rho,\sigma)$ runs continuously from $(1,-1)$ to $(-1,1)$ on the path defined by \[ (\rho(t), \sigma(t))=\left\{ \begin {equation}gin{array}{ll} (1, t), & t\in [-1,1]; \\ (2-t, 1), & t\in [1,3]. \end{array} \right. \] The set \[Roof(z)=\bigcup_{-1<t<3} Convex(E(\mathbf{f}_{\rho(t),\sigma(t)}(z))) \] is called the roof of $NTP(z)$, which is always concave. Note that the set $Convex(E(\mathbf{f}_{\rho(t),\sigma(t)}(z)))$ is either a vertex or an edge of $NTP(z)$. If $|E(\mathbf{f}_{\rho(t),\sigma(t)}(z))|=1$, then $Convex(E(\mathbf{f}_{\rho(t),\sigma(t)}(z)))$ is a vertex. If $|E(\mathbf{f}_{\rho(t),\sigma(t)}(z))|\ge 2$, then $Convex(E(\mathbf{f}_{\rho,\sigma}(z)))$ is an edge. Note that \[ NTP(z)=\{(x,y)\in (\mathbb{R}_{\ge 0})^2|\exists t\in \mathbb{R}_{\ge 0}, (x,y)+t(1,1)\in Roof(z)\},\] so $NTP(z)$ is determined by $Roof(z)$. Let $z=\alpha_1 p+\alpha_2 p^2q^3+\alpha_3 p^3q+\alpha_4 p^4q^2+\alpha_5p^5$ with $\alpha_i\ne 0, i=1,\cdots, 5$. Then $NTP(z)$ is the pentagon $OP_1P_2P_3P_4$ and $Roof(z)$ is the polygonal chain: $(P_1,P_2,P_3)$. See Figure 1. 
\begin {equation}gin{figure} [htp] \centering \begin {equation}gin{tikzpicture}[thick,>=stealth] \draw [->] (0,0) -- (6,0) node [below] {$x$}; \draw [->] (0,0) -- (0,4) node [left] {$y$}; \coordinate[label=above:$P_1$] (p1) at (5,0); \coordinate[label=above right:$P_2$] (p2) at (4,2); \coordinate[label=above:$P_3$] (p3) at (2,3); \coordinate[label=left:$P_4$] (p4) at (0,1); \draw (p1) -- (p2) -- (p3) -- (p4); \foreach \j in {1,2,3,4} { \fill (p\j) circle (1.5pt); } \foreach \p in {(0,0),(1,0),(3,1)} { \fill \p circle (1.5pt); } \node [left] at (3,2) {$z$}; \node [below] at (1,0) {$1$}; \node [below] at (5,0) {$5$}; \node [below] at (0,0) {$O$}; \end{tikzpicture} \caption*{Figure 1} \end{figure} Let $a=min\{j-i|(i,j)\in E(f) \}, b=max\{j-i|(i,j)\in E(f) \}$. Then $Roof(z)$ and $NTP(z)$ are both in the region $\{(x,y)\in (\mathbb{R}_{\ge 0})^2|a\le y-x\le b \}.$ There are 2 cases of $Roof(z)$ with $z\ne 0$. (1) $\mathbf{f}_{\rho(t), \sigma(t)}(z)$ is a monomial, for any $t\in (-1,3).$ Now $z$ is in some single homogeneous space $D_k$. One has \[Roof(z)=E(\mathbf{f}_{1,1}(z)),\] which is just a point. Otherwise, it is in next case. (2) There exists some $n\ge 1$ and $t_1,t_2,\cdots,t_n$, where $-1=t_0<t_1<t_2<\cdots<t_n<t_{n+1}=3$, are all the $t\in (-1,3)$ such that $\mathbf{f}_{\rho(t_i), \sigma(t_i)}(z)$ is not a monomial. One has \[Roof(z)=\bigcup_{i=1}^n Convex(E(\mathbf{f}_{\rho(t_i),\sigma(t_i)}(z))), \] which consists of $n$ edges. The following observation is clear and we omit its proof. \begin {equation}gin {lemma}\lambdabel{60} The following statements are equivalent: (1) $z\in D_{\ge 0}$; (2) $E(z)\subseteq V^+$; (3) $Roof(z)\subseteq V^+$; (4) $Cone(Roof(z))\subseteq V_+$; \\(5) $NTP(z)\subseteq V_+$. 
\end{lemma} An analogous result for $z\in D_{\le 0}$ also holds. \section{Proof of the main result} \setcounter{equation}{0}\setcounter{theorem}{0} Let \[{\Gamma}=\{(z,w)\in A_1^2\mid [z,w]=1\}.\] For any $\psi\in \text{Aut}(A_1)$ and $(z,w)\in \Gamma$, $[\psi(z), \psi(w)]=1$ thus $(\psi(z), \psi(w))\in {\Gamma}.$ So $\text{Aut}(A_1)$ acts on ${\Gamma}$ (faithfully) by \[ \psi\cdot (z,w)= (\psi(z), \psi(w)).\] If $(z,w)\in \Gamma$ then $(w,-z)\in \Gamma$. Let \[ \eta:\Gamma\rightarrow \Gamma, \quad (z,w)\mapsto (w,-z). \] Let $U$ be the group of transformations of $\Gamma$ generated by $G_1$ (defined in \textbf{2.5}) and $\eta$. It is directly verified that \[ {\eta}{\psi}= {\psi}{\eta}, \quad \forall {\psi}\in G_1.\] Let $\Omega$ be the collection of $(f,g)$ in $K[X,Y]^2$ such that (1) $\{f,g\}=1$; (2) $f,g$ are $(\rho,\sigma)$-homogeneous for some $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$. For $(f,g)\in \Omega$ and $\tilde{\psi}\in \widetilde{G}_1$ (defined in \textbf{2.5}), one has $(\tilde{\psi}(f), \tilde{\psi}(g))\in \Omega.$ So $\widetilde{G}_1$ acts on $\Omega$ (faithfully) by \[ \tilde{\psi}\cdot (f,g)= (\tilde{\psi}(f), \tilde{\psi}(g)).\] If $(f,g)\in \Omega$ then $(g,-f)\in \Omega$. Let \[\tilde{\eta}:\Omega\rightarrow \Omega, \quad (f,g)\mapsto (g,-f). \] Let $\widetilde{U}$ be the group of transformations of $ \Omega$ generated by $\widetilde{G}_1$ and $\tilde{\eta}$. It is directly verified that \[ \tilde{\eta}\tilde{\psi}= \tilde{\psi}\tilde{\eta}, \quad \forall \tilde{\psi}\in \widetilde{G}_1.\] \begin{prop}\label{30} Assume $(f,g)\in \Omega$. Then up to the action of $\widetilde{U}$, $(f,g)$ will be in one and only one of the following cases: (1) $(X,Y)$; (2) $(\alpha X+\beta Y, \gamma X+\delta Y), \alpha,\delta,\beta,\gamma\in K, \alpha\delta-\beta\gamma=1, \alpha\delta\beta\gamma\ne 0$; (3) $(X+\lambda Y^n, Y), \lambda\ne 0, n\ge 1$; (4) $(X+\lambda,Y), \lambda\ne 0$. 
\end{prop} Let us introduce a terminology before the proof. Assume that $f,h\in K[X,Y]$, $f=\sum \alphapha_{ij} X^iY^j$, $h=\sum \begin {equation}ta_{ij} X^iY^j$. If whenever $\begin {equation}ta_{ij}\ne 0$, one has $\alphapha_{ij}=\begin {equation}ta_{ij}$, then we say that $f$ \textbf{contains} $h$. \begin {proof} Assume that $(f,g)\in \Omega$. Then $\{f,g\}=1$ and $f,g$ are $(\rho,\sigma)$-homogeneous for some $(\rho,\sigma)\in \mathbb{R}^2\setminus \{(0,0)\}$ with $\rho+\sigma\ge 0$. There are 2 cases: (i) Both $f$ and $g$ are monomials; (ii) $f$ or $g$ is not a monomial. In Case (ii), if the 2nd polynomial $g$ of $(f,g)$ is not a monomial, then after applying $\tilde{\eta}$ one can always assume that the 1st polynomial $f$ of $(f,g)$ is not a monomial. (i) Suppose that $f, g$ are both monomials. Assume that $f=\lambdambda X^iY^j,g=\mu X^kY^l;$ $\lambda,\mu\in K^*$. By \eqref{54}, $ \{X^iY^j,X^k Y^l\}=\left | \begin {equation}gin{array}{ll} i & j \\ k & l \end{array} \right | X^{i+k-1}Y^{j+l-1}. $ Then \[i+k=1, j+l=1, il-kj\ne 0.\] Thus $(i,j)=(1,0), (k,l)=(0,1)$ or vice verse. One has \[(f,g)=(\lambdambda X,\lambdambda^{-1}Y), or\ (f,g)=(\lambdambda Y,-\lambdambda^{-1}X); \lambdambda\in K^*.\] After applying some transformation in $\widetilde{G}_1$, $(f,g)=(X,Y)$, which is in Case (1). (ii) Assume that $f$ is not a monomial. Now $\rho,\sigma$ are linearly dependent over $\mathbb{Q}$. One can assume that $(\rho,\sigma)=(r,s)$, where $r,s$ are coprime integers and $r+s\ge 0$. Assume that $f,g$ are $(r,s)$-homogeneous of degree $t,u$ respectively. By $\{f,g\}=1$ one has $t+u-(r+s)=0$ thus \begin {equation}\lambdabel{47} t+u=r+s=\mathbf{v}_{r,s}(XY).\end {equation} There are 3 cases to be dealt with: (a) $r, s>0$; (b) $(r,s)=(0,1)$; (c) $r>0>s$. The remaining cases $(r,s)=(1,0)$ and $s>0>r$ can be transformed to Case (b) and (c) respectively after applying $\tilde{\psi}_0$ (defined in \eqref{71}).\\[1mm] (a) $r, s>0$. If $t=0$ (resp. $u=0$), then $f$ (resp. 
$g$) is a constant, which contradicts to $\{f,g\}=1$. So $t,u>0$. By \eqref{47}, one has $t=\mathbf{v}_{r,s}(f)<\mathbf{v}_{r,s}(XY)$. If $\mathbf{v}_{r,s}(X^iY^j)<\mathbf{v}_{r,s}(XY)$, then $ri+sj<r+s$, thus $i=0$ or $j=0$. So \[f=\lambdambda X+\mu Y^n, \ or,\ f=\begin {equation}ta Y+\gamma X^n; \lambdambda,\mu,\begin {equation}ta,\gamma\in K^*, n\ge 1.\] See Figure 2. \begin {equation}gin{figure}[htp] \centering \begin {equation}gin{tikzpicture}[scale=0.8] \tikzstyle{every node}=[font=\small,scale=0.8] \draw [->] (0,0) -- (5,0) node [below] {$x$}; \draw [->] (0,0) -- (0,5) node [left] {$y$}; \coordinate (w1) at (0,3); \coordinate (w2) at (1,0); \coordinate (z1) at (0,0); \coordinate (z2) at (1,1); \coordinate (x1) at (4,4); \coordinate (x2) at (1,3); \draw (w1) -- node [left] {$f$} (w2); \draw [<-] (x1) -- node [below right] {$\left(r,s\right) $} (x2); \foreach \j in {1,2} { \foreach \k in {w,z} { \fill (\k\j) circle (1.5pt); } } \fill (0,1) circle (1.5pt); \fill (x2) circle (1.5pt); \node [left] at (0,3) {$3$}; \node [below] at (1,0) {$1$}; \node [left] at (0,1) {$1$}; \node [below] at (0,0) {$O$}; \end{tikzpicture} \caption*{Figure 2} \end{figure} After applying $\tilde{\psi}_0$ if needed, one can assume that $f=\lambdambda X+\mu Y^n, \lambda\mu\ne 0$. Then $(r,s)=(n,1)$ and $t=\mathbf{v}_{r,s}(f)=n$, thus $u=r+s-n=1$. Assume that $g=\sum \gamma_{ij} X^iY^j$. Then \[\mathbf{v}_{r,s}(X^iY^j)=ri+sj=ni+j=1\] for those $i,j$ with $\gamma_{ij}\ne 0$. If $n>1$ then $i=0,j=1$ and $g=\gamma Y, \gamma\ne 0$.\[1=\{f,g\}=\{\lambdambda X+\mu Y^n,\gamma Y\}=\lambdambda \gamma. \]After applying some $\psi\in \widetilde{G}_1$, one can assume that $(f,g)=(X+\begin {equation}ta Y^n,Y), \begin {equation}ta\ne 0$. This is in Case (3). If $n=1$ then $i=0,j=1$ or $i=1,j=0$. Now $g=\gamma X+\delta Y$. By $\{f,g\}=1$, one has $\lambdambda\delta-\mu\gamma=1$. It is in Case (2) if $\delta\gamma\ne 0$; it is in Case (3) if one of $\delta$ and $\gamma$ is 0.\\[1mm] (b) $(r,s)=(0,1)$. 
Now $t+u=r+s=1, t,u\ge 0$. Then $t=1,u=0$ or $t=0,u=1$. (b.1) $t=1,u=0$. Assume that $f=F(X)Y, g=H(X)$, where $F,H\in K[X]$. By $\{f,g\}=1$, one has \[ 1=\{F(X)Y, H(X)\}= -F(X)H'(X). \] Then $F(X)$ is a nonzero constant $\alpha$ and $(f,g)=(\alpha Y,-\alpha^{-1} X+r)$. Applying $\tilde{\eta}$, $(f,g)$ becomes $(-\alpha^{-1} X+r,-\alpha Y )$. Then applying some transformation in $\widetilde{G}_0$, $(f,g)$ becomes $(X+r,Y )$, which is in Case (4). (b.2) $t=0,u=1$. Then $(g,-f)=\tilde{\eta}(f,g)$ is in the situation of (b.1).\\[1mm] (c) $r>0>s$. As $\{f,g\}=1$, $f$ and $g$ contain the monomials $\alphapha_0 X$ and $\begin {equation}ta_0 Y$ respectively, where $\alphapha_0\begin {equation}ta_0\ne 0$. After applying $\tilde{\psi}_0$ if necessary, we assume that $f$ contains $\alphapha_0 X$ and that $g$ contains $\begin {equation}ta_0 Y$. Then, as $r,s$ are coprime, one has \[f=X\sum_{i=0}^k \alphapha_i (X^{-s}Y^r)^i, k\ge 0, \alphapha_k\ne 0;\] and \[g=Y\sum_{i=0}^l \begin {equation}ta_i (X^{-s}Y^r)^i, l\ge 0, \begin {equation}ta_l\ne 0.\] See Figure 3. 
\begin {equation}gin{figure}[htb] \centering \begin {equation}gin{tikzpicture}[scale=0.7] \tikzstyle{every node}=[font=\small,scale=0.7] \draw [->] (0,0) -- (6,0) node [below] {$x$}; \draw [->] (0,0) -- (0,6) node [left] {$y$}; \coordinate (w1) at (1,0); \coordinate (w2) at (3,1); \coordinate (z1) at (0,1); \coordinate (z2) at (2,2); \coordinate (x1) at (2,3); \coordinate (x2) at (1,5); \draw (w1) -- node [below] {$g$} (w2) (z1) -- node [above] {$f$} (z2); \draw [<-] (x1) -- node [above right] {$ \left(r,s\right)$} (x2); \foreach \j in {1,2} { \foreach \k in {w,z} { \fill (\k\j) circle (1.5pt); } } \fill (0,0) circle (1.5pt); \fill (x2) circle (1.5pt); \node [left] at (0,1) {$1$}; \node [below] at (1,0) {$1$}; \node [below] at (0,0) {$O$}; \end{tikzpicture} \caption*{Figure 3} \end{figure} One has \[\mathbf{f}_{-s,r}(f)= \alphapha_k X^{-sk+1}Y^{rk}, \mathbf{f}_{-s,r}(g)= \begin {equation}ta_l X^{-sl}Y^{rl+1}.\] By $r>0>s$, it is clear that \[\{\mathbf{f}_{-s,r}(f), \mathbf{f}_{-s,r}(g)\}=(rl-sk+1)\alpha_k\begin {equation}ta_l X^{-s(k+l)}Y^{r(k+l)}\ne 0,\] thus by Proposition\ \ref{24}, \[\{\mathbf{f}_{-s,r}(f), \mathbf{f}_{-s,r}(g)\}=\mathbf{f}_{-s,r}(\{f,g\})=1,\] which implies $k=l=0$. So $(f,g)$ is still in Case (1). Note that for any $\psi\in \widetilde{G}_1$ and $h\in K[X,Y]$, $|E(\psi(h))|=|E(h)|$. We have shown that $(f,g)$ will be in one of the 4 cases up to the action of $\widetilde{U}$. Since $(|E(f)|,|E(g)|)$ is respectively $(1,1), (2,2),(2,1), (2,1)$ in the 4 cases, we only need to show that the transformations in $\widetilde{U}$ will not take the $(f,g)$ in Case (3) into Case (4) but this is clear as there does not exist $\tilde{\psi}\in \widetilde{G}_1$ with $\tilde{\psi}(X+\lambda Y^n)=X+\alpha$, where $n\ge 1$. \end {proof} The following observation follows easily from the above proposition. \begin {equation}gin {coro}\lambdabel{100} Assume $(f,g)\in \Omega$. Then $f$ contains the term $\alpha X$ or $\alpha Y$ for some $\alpha\ne 0$. 
If $f$ contains the term $\alpha X$, then either $f=\alpha X$, or $f=\alpha X+\lambda Y^n$ with $\lambda\ne 0$ and $n\ge 0$. \end{coro} \begin{prop}\label{3} Assume that $z,w\in A_1$ and $[z,w]=1$. If $\mathbf{v}_{0,1}(z)\le 1$, then $z$ and $w$ generate $A_1$. Specifically, (1) If $\mathbf{v}_{0,1}(z)=1$, i.e., $z=f(p)q+g(p)$ for some $f,g\in K[X]$ and $f\ne 0$, then \begin{equation}\label{45}z=\alpha q+g(p), \quad w=\gamma-\alpha^{-1}p+h(z); \quad \alpha\ne 0, \ h\in K[X].\end{equation} (2) If $\mathbf{v}_{0,1}(z)=0$, i.e., $z=f(p)$ for some $f\in K[X]$, then \begin{equation}\label{46}z=\alpha p+\beta, \quad w={\alpha}^{-1}q+g(p), \quad \alpha\ne 0, \ g\in K[X].\end{equation} \end{prop} \begin{proof} (1) Assume that $\mathbf{v}_{0,1}(w)=j$ and $\mathbf{f}_{0,1}(w)=g(X)Y^j$. Note that $\mathbf{f}_{0,1}(z)=f(X)Y$. If $\{f(X)Y,g(X)Y^j\}\ne 0$ then \[j=\mathbf{v}_{0,1}([z,w])=\mathbf{v}_{0,1}(1)=0.\] So, if $j>0$ then $\{f(X)Y,g(X)Y^j\}=0$. By Lemma \ref{80}, \[g(X)Y^j= \mu (f(X)Y)^j=\mu f(X)^j Y^j\quad \text{for some } \mu\in K^*.\] Then $g(X)=\mu f(X)^j$. Let $w_1=w-\mu z^j$. One has $[z,w_1]=1$ and $0\le \mathbf{v}_{0,1}(w_1)<j$. If $\mathbf{v}_{0,1}(w_1)>0$ then repeat the above process until we get some polynomial $h(X)$ such that the element $w'=w-h(z)$ satisfies $\mathbf{v}_{0,1}(w')=0$ and $[z,w']=1$. If $j=0$ then let $w'=w$. Assume $w'=l(p)$ for some $l\in K[X]$. As \[[z,w']=[f(p)q+g(p), l(p)]=f(p)[q, l(p)]=-f(p)l'(p)=1,\] one has $f(p)=\alpha\ne 0, l'(p)=-\alpha^{-1}$. So $z=\alpha q+g(p), w'=l(p)=-\alpha^{-1} p+\gamma$, from which one gets \eqref{45}. It is clear that $z,w$ generate $A_1$. (2) Assume that $\mathbf{v}_{0,1}(w)=j$. The assumption that $[z,w]=1$ implies that $j>0$. One has \[0=\mathbf{v}_{0,1}(1)=\mathbf{v}_{0,1}([z,w])=j-1.\] So $j=1$. Assume that $ w=l(p)q+h(p)$ for some $l,h\in K[X]$. Then by $[z,w]=1$ and a similar computation as in (1), one gets \eqref{46}. It is clear that $z,w$ generate $A_1$. 
\end{proof} \begin{lemma}\label{1} Assume that $z\in D_{\le 0}$ or $z\in D_{\ge 0}$, and that $(i,i)$ with $i\ge 1$ is a vertex of $NTP(z)$, i.e., $\mathbf{f}_{\rho,\sigma}(z)=\lambda X^iY^i$ for some $(\rho,\sigma)$ with $\rho+\sigma>0$ and $\lambda\ne 0$. Then for any $w\in A_1$, $[z,w]\ne 1$. \end{lemma} \begin{proof} We will prove it in the case $z\in D_{\le 0}$. Then there exists some $\epsilon\in \mathbb{R}$ with $0<\epsilon<1$ such that $\mathbf{f}_{-1+\epsilon,1}(z)=\lambda X^iY^i$. Suppose that for some $w\in A_1$, $[z,w]=1$. Assume that $\mathbf{f}_{-1+\epsilon,1}(w)=g(X,Y)$. If $w\notin D_{\le 0}$, then $E(g)\cap \{(x,y)\in \mathbb{R}^2\mid y>x\ge 0\}\ne \varnothing$ and \[\{\mathbf{f}_{-1+\epsilon,1}(z),\mathbf{f}_{-1+\epsilon,1}(w)\}=\{\lambda X^iY^i, g(X,Y)\},\] which cannot be 1 by Proposition \ref{30}, and cannot be 0 by Proposition \ref{27}, thus $[z,w]\ne 1$. If $w\in D_{\le 0}$ then $[z,w]\in D_{<0}$ by Lemma \ref{32}, thus $[z,w]\ne 1$. \end{proof} Let $\widetilde{A_1}$ be the $K$-algebra generated by $p,q,q^{-1}$ subject to the relations \[q q^{-1}=q^{-1} q=1, \quad [p,q]=1. \] It is easy to verify that $\{p^iq^k\mid i\in \mathbb{Z}_{\ge 0}, k\in \mathbb{Z}\}$ is a basis of $\widetilde{A_1}$. Then $A_1$ is the subalgebra of $\widetilde{A_1}$ generated by $p$ and $q$. It is easy to verify that \begin{equation}\label{99} q^k \cdot pq=(pq-k) q^k\end{equation} for any $k\in \mathbb{Z}$ as in Section 3.2 of \cite{d}, and it follows that \begin{equation}\label{40}q^k f(pq)=f(pq-k)q^k\end{equation} for any polynomial $f\in K[X]$ and any $k\in \mathbb{Z}$. \begin{lemma}\label{2} Assume that $z,w\in A_1$ and $[z,w]=1$. 
If $z$ is contained in some homogeneous space $D_k$ with $k\in \mathbb{Z}$, then \begin{equation}\label{42}z=\lambda q, \quad w=\mu p+l(q), \quad \lambda\mu=-1, \ l\in K[X];\end{equation} or \begin{equation}\label{43}z=\lambda p, \quad w=\mu q+l(p), \quad \lambda\mu=1, \ l\in K[X].\end{equation} \end{lemma} \begin{proof} By Lemma \ref{1}, if $z$ is contained in $D_0$ then $[z,w]\ne 1$ for any $w\in A_1$. Assume that $z\in D_k$ with $k>0$. Then there exist $f,g\in K[X]\setminus \{0\}$ such that \[z=f(pq)q^k, \quad \text{and}\quad w=w_0 +w_1, \quad w_0=g(pq)q^{-k}, \quad w_1\in \bigoplus_{i\ne -k} D_i.\] Then \begin{equation}\label{41}[z,w_0]=1, \quad \text{and}\quad [z,w_1]=0.\end{equation} Let $h(X)=f(X)g(X-k)$. By \eqref{99} one has \begin{equation*}\begin{split} 1&=[f(pq)q^k , g(pq)q^{-k} ]\\&=f(pq) (q^k g(pq)) q^{-k} - g(pq)(q^{-k}f(pq))q^k\\& = f(pq)g(pq-k) q^k\cdot q^{-k}-g(pq)f(pq+k) q^{-k}\cdot q^k\ \ \text{(by \eqref{40})}\\&=f(pq)g(pq-k)-f(pq+k)g(pq)\\&=h(pq)-h(pq+k).\end{split}\end{equation*} Then $h(X)-h(X+k)=1$. So $\deg(h(X))=\deg(f(X))+\deg(g(X))=1$. As $k>0$, if $\deg(g(X))=0$ then $w_0=\lambda q^{-k}$ (for some $\lambda\ne 0$) is not in $A_1$. So $\deg(g(X))=1$ and $\deg(f(X))=0$. Then $z=\lambda q^k$ and $w_0=(\mu pq+\gamma)q^{-k}$ with $\lambda,\mu\ne 0$. As $w_0\in A_1$, $k=1$ and $\gamma=0$. Then $z=\lambda q, w_0=\mu p, \lambda\mu=-1$. By \eqref{41}, $w_1\in C(\lambda q)=K[q]$. This is the case \eqref{42}. If $z\in D_k$ with $k<0$, then it will be in the case \eqref{43}. \end{proof} \begin{lemma}\label{44} Assume that (1) $z , w\in A_1\setminus K$, and $z,w$ are both without constant term; (2) for any $(\rho,\sigma)\in \mathbb{R}^2$ with $\rho+\sigma> 0$, $\{\mathbf{f}_{\rho,\sigma}(z),\mathbf{f}_{\rho,\sigma}(w)\}=0$. 
Then \[Cone(Roof(z))=Cone(Roof(w)).\] \end {lemma} \begin {proof} One has \[ Roof(z)=\bigcup_{\tiny{\begin {equation}gin{array}{c} (\rho,\sigma)\\ \rho+\sigma>0\end{array}}}\ Convex(E(\mathbf{f}_{\rho,\sigma}(z))).\] By the assumption $\{\mathbf{f}_{\rho,\sigma}(z),\mathbf{f}_{\rho,\sigma}(w)\}=0$ for any $(\rho,\sigma)$ with $\rho+\sigma> 0$, applying Proposition \ref{27} one has, \begin {equation}e\begin {equation}gin{aligned} Cone(Roof(z))&=\bigcup_{\tiny{\begin {equation}gin{array}{c} (\rho,\sigma)\\ \rho+\sigma>0\end{array}}}\ Cone(Convex(E(\mathbf{f}_{\rho,\sigma}(z))))\\ &=\bigcup_{\tiny{\begin {equation}gin{array}{c} (\rho,\sigma)\\ \rho+\sigma>0\end{array}}}\ Cone(Convex(E(\mathbf{f}_{\rho,\sigma}(w))))\\ &=Cone(Roof(w)). \end{aligned}\end {equation}e \end {proof} We are ready to prove the following main result. \begin {equation}gin {theorem}\lambdabel{33} Assume that $z,w\in A_1,$ $[z,w]=1$, and $z$ is conjugate to some element in $D_{\ge 0}$ (or in $D_{\le 0}$). Then $z$ and $w$ generate $A_1$. \end {theorem} \begin {proof} As any element in $D_{\ge 0}$ is conjugate to some element in $D_{\le 0}$ by $\psi_0$ in \eqref{70}, without loss of generality, we assume that $z\in D_{\le 0}$. If $z',w'\in A_1$, $z'-z\in K,w'-w\in K$, then it is clear that $[z',w']=1$ and $z, w$ generate $A_1$ if and only if $z', w'$ generate $A_1$. So we assume that--- \textbf{both $z$ and $w$ have constant term 0.}\\[2mm] (i) If $z$ is a monomial, then it follows from Lemma \ref{2} that \[ z=\lambdambda p, w=\lambdambda^{-1} q+f(p), \ \lambda\ne 0, \ \ f\in K[X].\] Then $z$ and $w$ generate $A_1$. If $w$ is a monomial, then one can prove that $z$ and $w$ generate $A_1$ similarly. (ii) Next we deal with the case that $z$ is not a monomial. (We will show that $[z,w]\ne 1$.) The case that $w$ is not a monomial can be treated similarly. As $z\in D_{\le 0}$, $Roof(z)\subseteq V_-, NTP(z)\subseteq V_-$. 
As by assumption $[z,w]=1$, for any $(\rho,\sigma) $ with $\rho+\sigma> 0$, $\{\mathbf{f}_{\rho,\sigma}(z), \mathbf{f}_{\rho,\sigma}(w)\}=0$ or 1, by Theorem \ref{31}. If for any $(\rho,\sigma) $ with $\rho+\sigma> 0$, $\{\mathbf{f}_{\rho,\sigma}(z), \mathbf{f}_{\rho,\sigma}(w)\}=0$, then by Lemma \ref{44}, $Cone(Roof(w))=Cone(Roof(z))\subseteq V_-$, thus $Roof(w)\subseteq V_-$. By Lemma \ref{60}, $NTP(w) \subseteq V_-$, thus $w\in A_1^{-}$. Then by Lemma \ref{32}, $[z,w]\in D_{<0}$ and $[z,w]\ne 1$. In the rest of the proof we assume that for some $(\rho,\sigma) $ with $\rho+\sigma> 0$, $\{\mathbf{f}_{\rho,\sigma}(z), \mathbf{f}_{\rho,\sigma}(w)\}=1$. Set $f=\mathbf{f}_{\rho,\sigma}(z), g= \mathbf{f}_{\rho,\sigma}(w) $. As $z\in D_{\le 0}$, $f$ contains some $\alpha X$ with $\alpha\ne 0$, and any $(i,i)$ with $i\ge 1$ is not in $E(z)$ by Lemma \ref{1}. Since $z$ has constant term 0, \[E(z)\subseteq \{(x,y)\in\mathbb{R}^2| x-y\ge 1, y\ge 0 \}, (1,0)\in E(z).\] As $z$ is not a monomial and $z\in D_{<0}$, there are 2 cases: (a.1) There exists some unique $(r,s) $ up to a positive constant such that $r\le 0, r+s> 0$, $\mathbf{f}_{r,s}(z)$ contains $\alphapha X$, and $\mathbf{f}_{r,s}(z)$ is not a monomial. See Figure 4. 
\begin {equation}gin{figure}[htp] \centering \begin {equation}gin{tikzpicture}[scale=0.8] \tikzstyle{every node}=[font=\small,scale=0.8] \draw [->] (0,0) -- (5,0) node [below] {$x$}; \draw [->] (0,0) -- (0,5) node [left] {$y$}; \coordinate[label=above right:$w$] (w1) at (0,1); \coordinate (w2) at (4,0); \coordinate (z1) at (4,2); \coordinate (z2) at (1,0); \coordinate (x1) at (0,0); \coordinate (x2) at (4,4); \draw (w1) -- (w2) (z1) -- (z2); \draw [dashed] (x1) -- (x2); \foreach \j in {1,2} { \foreach \k in {w,z} { \fill (\k\j) circle (1.5pt); } } \fill (x1) circle (1.5pt); \node [below ] at (0,0) {$O$}; \node [above left=0.5em] at (4,1) {$z$}; \node [left] at (0,1) {$1$}; \node [below] at (1,0) {$1$}; \end{tikzpicture} \caption*{Figure 4} \end{figure} (a.2) $\mathbf{f}_{-1,1}(z)$ contains $\alphapha X$ and is not a monomial. We first deal with (a.1). Let $\mathbf{f}_{r,s}(z)=f$ and $\mathbf{f}_{r,s}(w)=g$. By the assumption $[z,w]=1$, one has $E(g)\nsubseteq V_-$. While $E(f)\subseteq V_-$, $\{f, g\}\ne 0$ by Corollary \ref{102}. One has $\{f, g\}\ne 1$ by Corollary \ref{100}. So $[z,w]\ne 1$ and this case cannot happen. Then we deal with (a.2). Write $z=z_0+z_1, w=w_0+w_1$, where $z_0$, $w_0$ are the respective leading component of $z,w$. Assume that $w_0\in D_i$. As by assumption $[z,w]=1$, one has $i>0$. Then $[z_0,w_0]\ne 0$ by Theorem \ref{101}, thus $[z_0,w_0]$ is the leading component of $[z,w]$ and $[z_0,w_0]=1$. By \eqref{43} of Lemma \ref{2}, one has $z_0=\alpha p$ with $\alpha\ne 0$, which contradicts to the assumption that $\mathbf{f}_{-1,1}(z)$ (which equals $\mathbf{f}_{-1,1}(z_0)=\alpha X$) is not a monomial. Thus $[z,w]\ne 1$. So we have shown that if $z\in D_{\le 0}$ and $[z,w]=1$ then either $z$ or $w$ is a monomial, which is in the situation of (i) and $z,w$ generate $A_1$. \end {proof} \begin {equation}gin{rem}\lambdabel{75} Assume that $z\in D_{\le 0}$. 
If there exists some $w\in A_1$ with $[z,w]=1$, then from the above proof one knows that \[ z=\lambda p+\gamma, \quad w=\lambda^{-1} q+f(p), \quad \lambda\ne 0, \ f\in K[X].\] \end{rem} The following result gives an equivalent formulation of DC. \begin{prop} DC holds if and only if for any $z,w\in A_1$ with $[z,w]=1$, there exists some $\psi\in \text{Aut}(A_1)$ such that $\psi(z)\in D_{\le 0}$. \end{prop} \begin{proof} The `if' part is obvious. So we only need to prove the `only if' part. Assume that DC holds. Let $z,w\in A_1$ be such that $[z,w]=1$. Then $z$ and $w$ generate $A_1$. There exists a unique $K$-algebra homomorphism $\psi:A_1\rightarrow A_1$ with $\psi(z)=p,\psi(w)=q$. It is clear that $\psi$ is surjective. The kernel of $\psi$ is an ideal of the simple algebra $A_1$, thus must be the zero ideal. So $\psi\in \text{Aut}(A_1)$ and $\psi(z)\in D_{\le 0}$. \end{proof} If $z,w\in A_1$ with $[z,w]=1$, then $z,w$ are both nilpotent in the sense of \cite{d}. It is known that a nilpotent element may not be conjugate to some element in $D_{\ge 0}$ by an automorphism of $A_1$ even when $K$ is algebraically closed; see the remark after Theorem 4.2 of \cite{jo}. But it is not known if any $z\in A_1$ satisfying $[z,w]=1$ for some $w\in A_1$ is conjugate to some element in $D_{\ge 0}$. \begin{coro} Assume that $z,w\in A_1$ with $[z,w]=1$. If $z\in D_{\ge -s}, s>0$, $z=z_{-s}+z'$ with $z_{-s}\in D_{-s}\setminus\{0\}$, $z'\in D_{> -s}$, and $C(z_{-s})=K[z_{-s}]$, then $z$ and $w$ generate $A_1$. \end{coro} \begin{proof} If $w\in D_{\ge 0}$, then the result holds by Theorem \ref{33}. So assume that $w\notin D_{\ge 0}$. Let $w=w_{-k}+w'$ with $k>0$, $w_{-k}\in D_{-k}\setminus\{0\}$ and $w'\in D_{> -k}$. By $[z,w]=1$ one has $[z_{-s}, w_{-k}]=0$, thus $w_{-k}\in C(z_{-s})=K[z_{-s}]$. Then $s\mid k$ and $w_{-k}=\alpha z_{-s}^d$, where $d=k/s$ and $\alpha\ne 0$. Let $w^{(1)}=w-\alpha z^d$ and $\mathbf{v}_{-1,1}(w^{(1)})=m$. 
Then $[z,w^{(1)}]=1$ and $m>-k$. If $m<0$, we continue this procedure, until we get some $w^{(n)}=w-f(z)\in D_{\ge 0}$, where $f\in K[X]$ and $n\ge 1$. It is clear that $[z,w^{(n)}]=1$. As $w^{(n)}\in D_{\ge 0}$ and $[z,w^{(n)}]=1$, $z$ and $w^{(n)}$ generate $A_1$ by Theorem \ref{33}. So $z$ and $w$ also generate $A_1$. \end{proof} \begin{coro}\label{98} Assume that $z,w\in A_1$ with $[z,w]=1$. If $z\in D_{\ge -1}$, then $z$ and $w$ generate $A_1$. \end{coro} \begin{proof} By Theorem \ref{33}, we only need to prove it in the case \[z=z_{-1}+z',\ z_{-1}\in D_{-1}\setminus\{0\},\ z'\in D_{\ge 0}.\] But now $C(z_{-1})=K[z_{-1}]$ by Corollary \ref{51}, so the result follows from the above corollary. \end{proof} \begin{prop}\label{93} Assume that $z,w\in A_1$, $[z,w]=1$, and $\{\mathbf{f}_{\rho,\sigma}(z), \mathbf{f}_{\rho,\sigma}(w)\}=1$ for some $(\rho,\sigma)\in \mathbb{R}^2$ with $\rho+\sigma>0$. Then $z$ and $w$ generate $A_1$. \end{prop} \begin{proof} Let \[G_1\rightarrow \widetilde{G}_1, \psi\mapsto \tilde{\psi}\] be the isomorphism in \eqref{96}. Set \[f=\mathbf{f}_{\rho,\sigma}(z), g=\mathbf{f}_{\rho,\sigma}(w).\] Then $\{f,g\}=1$. Assume that $(z^*,w^*)=(\psi(z),\psi(w))$ for some $\psi\in G_1$. Then $[z^*,w^*]=1$. Let \[f^*= \tilde{\psi}(f), g^*= \tilde{\psi}(g). \] Then $\{f^*,g^*\}=1$. Let \[(\rho^*,\sigma^*)=\left\{ \begin{array}{ll} (\rho,\sigma) & \text { if } \psi\in G_0; \\ (\sigma,\rho) & \text { if } \psi\in G_0\psi_0. \end{array} \right.\] Then \[f^*=\mathbf{f}_{\rho^*,\sigma^*}(z^*), g^*=\mathbf{f}_{\rho^*,\sigma^*}(w^*).\] One has the following commutative diagram: \[\begin{CD} (z,w) @>\mathbf{f}_{\rho,\sigma} >> (f,g)\\ @V \psi VV @V \tilde{\psi} V V\\ (z^*,w^*) @>\mathbf{f}_{\rho^*,\sigma^*}>> (f^*,g^*). \end{CD}\] Recall that $\eta(z,w)=(w,-z)$.
By $\{\mathbf{f}_{\rho,\sigma}(z), \mathbf{f}_{\rho,\sigma}(w)\}=1$, one has $\{\mathbf{f}_{\rho,\sigma}(w), \mathbf{f}_{\rho,\sigma}(-z)\}=1$. It is clear that $z, w$ generate $A_1$ if and only if $z^*, w^*$ generate $A_1$, if and only if $w,-z$ generate $A_1$. As $U$ is generated by $G_1$ and $\eta$, we only need to prove the result for a suitable representative in the $U$-orbit of $(z,w)$, which corresponds to a suitable representative in the $\widetilde{U}$-orbit of $(f,g)$. By Proposition \ref{30}, up to the action of $\widetilde{U}$, $(f,g)$ will be in one of the following 4 cases. (1) $(f,g)=(X,Y)$. Assume $\rho\ge 0,\sigma\ge 0, \rho+\sigma>0$. If $\rho\le \sigma$ then $\mathbf{v}_{0,1}(z)\le 1$; if $\sigma\le \rho$ then $\mathbf{v}_{1,0}(w)\le 1$. By Proposition \ref{3} $z, w$ generate $A_1$. Assume $\rho<0,\rho+\sigma>0$. Then $z\in D_{\le 0}$, and $z, w$ generate $A_1$ by Theorem \ref{33}. The case $\sigma<0,\rho+\sigma>0$ is treated similarly. (2) $(f,g)=(\alpha X+\beta Y, \gamma X+\delta Y), \alpha,\delta,\beta,\gamma\in K^*, \alpha\delta-\beta\gamma=1$. Now $(\rho,\sigma)=(1,1)$, $z=\alpha p+\beta q+\lambda, w=\gamma p+\delta q+\mu$ and $z, w$ generate $A_1$. (3) $(f,g)=(X+\lambda Y^n, Y)$, $\lambda\ne 0$, and $n\ge 1$. Now $(\rho,\sigma)=(n,1)$, $z=p+\lambda q^n+h(q)$ for some $h\in K[X]$ with $\deg(h)<n$, and $w=q+\mu$. Then $z, w$ generate $A_1$. (4) $(f,g)=(X+\alpha,Y), \alpha\ne 0$. Now $(\rho,\sigma)=(0,1)$, $z= p+\alpha, w=q+h(p)$ for some $h\in K[X]$, and $z, w$ generate $A_1$. \end{proof} \begin{theorem}\label{56} Assume that $z\in A_1\setminus\{0\}$ and $f=\mathbf{f}_{\rho,\sigma}(z)$ for some $(\rho,\sigma)\in \mathbb{R}^2$ with $\rho+\sigma>0$. If $C(f)=K[f]$ and $[z,w]=1$ for some $w\in A_1$, then $z$ and $w$ generate $A_1$.
\end{theorem} \begin{proof} As $C(f)=K[f]$, by Corollary \ref{90}, $f$ is not a nonzero multiple of some proper power of a nonconstant polynomial. If $\rho,\sigma$ are linearly dependent over $\mathbb{Q}$, then replace $(\rho,\sigma)$ by some positive multiple of itself so that $(\rho,\sigma) \in \mathbb{Z}^2$ with $\rho+\sigma>0$, and $\mathbf{f}_{\rho,\sigma}(z)$ is still $f$. If $\rho,\sigma$ are linearly independent over $\mathbb{Q}$, then $f$ is a monomial. We can find some $(\rho',\sigma')$ in a sufficiently small neighbourhood of $(\rho,\sigma)$ in $\mathbb{R}^2$ such that $\rho',\sigma'$ are linearly dependent over $\mathbb{Q}$, $\mathbf{f}_{\rho',\sigma'}(z)=f$, and $\rho'+\sigma'>0$. Replace $(\rho,\sigma)$ by a suitable positive multiple of $(\rho',\sigma')$ so that $(\rho,\sigma) \in \mathbb{Z}^2$ with $\rho+\sigma>0$, and $\mathbf{f}_{\rho,\sigma}(z)$ is still $f$. After the adjustment as above (if necessary), one can always assume that $(\rho,\sigma)\in \mathbb{Z}^2$ with $\rho+\sigma>0$, $\mathbf{f}_{\rho,\sigma}(z)=f$ is not a nonzero multiple of some proper power of a nonconstant polynomial. By assumption, $[z,w]=1$ for some $w\in A_1$. Set $ a=\mathbf{v}_{\rho,\sigma}(z)\in \mathbb{Z}$. We now prove that if $a\le 0$ then $z$ and $w$ generate $A_1$. Assume that $a\le 0$. If $\rho,\sigma>0$, then $z$ is a constant, which contradicts $[z,w]=1$; if $\rho=0,\sigma>0$, then $\mathbf{v}_{0,\sigma}(z)=0$ and $z, w$ generate $A_1$ by Proposition \ref{3}; if $\rho<0,\rho+\sigma>0$, then $a\le 0$ implies that $z\in D_{\le 0}$, thus $z$ and $w$ generate $A_1$ by Theorem \ref{33}. The proof of the remaining cases is similar to the above cases and is omitted. We will assume that $a>0$ from now on. Set $g=\mathbf{f}_{\rho,\sigma}(w)$ and $b=\mathbf{v}_{\rho,\sigma}(w)$. As $[z,w]=1$, $\{f,g\}=0$ or 1. We start the following process.\\[2mm] Step 1: Check whether $b\le 0$.
If $b\le 0$ then for the same reason as above $z$ and $w$ generate $A_1$, and stop the process; if $b>0$, go to Step 2.\\[2mm] Step 2: Check whether $\{f,g\}=1$. If $\{f,g\}=1$, then by Proposition \ref{93}, $z$ and $w$ generate $A_1$. Stop the process. If $\{f,g\}=0$, go to the next step.\\[2mm] Step 3: As $\{f,g\}=0$, one has $f^b=\lambda g^a, \lambda\ne 0$. Let $d=\gcd(a,b), a_0=a/d, b_0=b/d$. By Proposition \ref{27}, there exists some $h\in K[X,Y], f=\gamma h^{a_0}, g=\mu h^{b_0}$, $\gamma\mu\ne 0$. By hypothesis, one must have $a_0=1$, thus $g=\beta f^{b_0}$ for some $\beta\ne 0$. Let $w_1=w-\beta z^{b_0}$. Then $[z,w_1]=1$, and $z,w$ generate $A_1$ if and only if $z,w_1$ generate $A_1$. It is clear that $\mathbf{v}_{\rho,\sigma}(w_1)<\mathbf{v}_{\rho,\sigma}(w)$. Then go back to Step 1 and repeat the process for $w_1$. If the process does not stop at Step 1 and Step 2, then we get some $w_2$ at Step 3. Repeat this process and we get $w_1, w_2,\cdots, w_i,\cdots$. As $\mathbf{v}_{\rho,\sigma}(w_{i+1})<\mathbf{v}_{\rho,\sigma}(w_i)$ for all $i$, the process will terminate at Step 1 or Step 2 after finitely many steps. So the proof is concluded. \end{proof} \begin{coro}\label{62} Assume that $z\in A_1\setminus\{0\}$ and $f=\mathbf{f}_{\rho,\sigma}(z)$ for some $(\rho,\sigma)\in \mathbb{R}^2$ with $\rho+\sigma>0$. (1) If $|E(f)|=2$, then $z$ and $w$ generate $A_1$. (2) Assume that $|E(f)|=1$ and $f=\lambda X^iY^j, \lambda\ne 0$. If $i\ge 1, j\ge 1$, and $\gcd(i,j)=1$, then $z$ and $w$ generate $A_1$. \end{coro} \begin{proof} (1) We show that if $|E(f)|=2$, then $f$ is not a nonzero multiple of some proper power of a polynomial, thus $C(f)=K[f]$ and the result follows from the above theorem. Assume the contrary that $f=\lambda h^m$, where $\lambda\ne 0, h\in K[X,Y],$ and $m$ is an integer $\ge 2$.
If $h$ is a monomial then $|E(f)|=1$; if $h$ has at least 2 terms, then $h^m$ with $m\ge 2$ will have at least 3 terms, which also contradicts $|E(f)|=2$. (2) It is clear that in this case $f$ cannot be written as $\lambda h^m$, where $\lambda\ne 0, h\in K[X,Y],$ and $m$ is an integer $>1$. So the result follows from the above theorem. \end{proof} The main result of \cite{bl} says that if both $z$ and $w$ are sums of not more than 2 homogeneous elements, then $z$ and $w$ generate $A_1$. One can generalize it as follows. \begin{theorem}\label{97} Assume that $z,w\in A_1$ and $[z,w]=1$. If $z$ is a sum of not more than 2 homogeneous elements of $A_1$, then $z$ and $w$ generate $A_1$. \end{theorem} \begin{proof} If $z$ is a homogeneous element, then the result follows from Lemma \ref{2}. Then we only need to consider the case that $z$ is a sum of 2 homogeneous elements. Write $z=z_1+z_2$ with $z_1,z_2$ homogeneous. It is clear that there exists some unique $(r,s)\in \mathbb{Z}^2$ with $r+s>0$ and $\gcd(r,s)=1$, such that $f=\mathbf{f}_{r,s}(z)=\mathbf{f}_{r,s}(z_1)+\mathbf{f}_{r,s}(z_2)$ satisfies $|E(f)|=2$. See Figure 5. So the result follows from Corollary \ref{62} (1).
\end{proof} \begin{figure} [hb] \centering \begin{tikzpicture} [scale=0.7] \tikzstyle{every node}=[scale=0.7] \draw [->] (0,0) -- (6,0) node [below] {$x$}; \draw [->] (0,0) -- (0,6) node [left] {$y$}; \coordinate (w1) at (3,0); \coordinate (w2) at (4,1); \coordinate (z1) at (0,1); \coordinate (z2) at (1,2); \coordinate (x1) at (4,5); \coordinate (x2) at (3,2); \draw (w1) -- node [below] {$z_2$} (w2) (z1) -- node [ above] {$z_1$} (z2); \draw [<-] (x1) -- node [above left] {$ \left(r,s\right)$ } (x2); \foreach \j in {1,2} { \foreach \k in {w,z} { \fill (\k\j) circle (1.5pt); } } \fill (x2) circle (1.5pt); \fill (0,0) circle (1.5pt); \draw [dashed] (0,2.33)--(7,0); \node [below] at (0,0) {$O$}; \end{tikzpicture} \caption*{Figure 5} \end{figure} \end{document}
\begin{document} \title[Regenerative permutations]{Regenerative random permutations of integers} \author[Jim Pitman]{{Jim} Pitman} \address{Statistics department, University of California, Berkeley. Email: } \email{[email protected]} \author[Wenpin Tang]{{Wenpin} Tang} \address{Statistics department, University of California, Berkeley. Email: } \email{[email protected]} \date{\today} \begin{abstract} Motivated by recent studies of large Mallows$(q)$ permutations, we propose a class of random permutations of $\mathbb{N}_{+}$ and of $\mathbb{Z}$, called {\em regenerative permutations}. Many previous results of the limiting Mallows$(q)$ permutations are recovered and extended. Three special examples: blocked permutations, $p$-shifted permutations and $p$-biased permutations are studied. \end{abstract} \maketitle \textit{Key words :} Bernoulli sieve, cycle structure, indecomposable permutations, Mallows permutations, regenerative processes, renewal processes, size biasing. \textit{AMS 2010 Mathematics Subject Classification: } 05A05, 60C05, 60K05. \setcounter{tocdepth}{1} \tableofcontents \setstcolor{red} \section{Introduction and main results} Random permutations have been extensively studied in combinatorics and probability theory. They have a variety of applications including: \begin{itemize} \item statistical theory, e.g. Fisher-Pitman permutation test \cite{Fisher,Pitman1}, ranked data analysis \cite{Critchlow,DiaconisRank}; \item population genetics, e.g. Ewens' sampling formula \cite{Ewens72} for the distribution of allele frequencies in a population with neutral selection; \item quantum physics, e.g. spatial random permutations \cite{U08,BU09} arising from the Feynman representation of interacting Bose gas; \item computer science, e.g. data streaming algorithms \cite{Muth,HLMV}, interleaver designs for channel coding \cite{DP,BM}. 
\end{itemize} Interesting mathematical problems are $(i)$ understanding the asymptotic behavior of large random permutations, and $(ii)$ generating a sequence of consistent random permutations. Over the past few decades, considerable progress has been made in these two directions: \begin{enumerate}[$(i)$] \item Shepp and Lloyd \cite{SL}, Vershik and Shmidt \cite{VS1,VS2} studied the distribution of cycles in a large uniform random permutation. The study was extended by Diaconis, McGrath and Pitman \cite{DMP}, Lalley \cite{Lalley} for a class of large non-uniform permutations. Hammersley \cite{Hammersley} first considered the longest increasing subsequences in a large uniform random permutation. The constant in the law of large numbers was proved by Logan and Shepp \cite{LS}, Kerov and Vershik \cite{KV} via representation theory, and by Aldous and Diaconis \cite{AD}, Sepp\"al\"ainen \cite{Sepp} using probabilistic arguments. The Tracy-Widom limit was proved by Baik, Deift and Johansson \cite{BDJ}. See also Romik \cite{Romik}. Recently, limit theorems for large Mallows permutations have been considered by Mueller and Starr \cite{MS}, Bhatnagar and Peled \cite{BP}, Basu and Bhatnagar \cite{BB}, Gladkich and Peled \cite{GP}. \item Pitman \cite{Pitman95,Pitmanbook} provided a sequential construction of random permutations of $[n]$ with consistent cycle structures. This is known as the {\em Chinese restaurant process}, or {\em virtual permutations} \cite{KOV93,KOV} in the Russian literature. A description of the Chinese restaurant process in terms of records was given by Kerov \cite{Kerov}, Kerov and Tsilevich \cite{KS95}. See also Pitman \cite{Pitman17}. Various families of consistent random permutations have been devised by Gnedin and Olshanski \cite{GO06,GO09,GO12}, Gnedin \cite{G11}, Gnedin and Gorin \cite{GG15,GG16} in a sequential way, and by Fichtner \cite{Fichtner}, Betz and Ueltschi \cite{BU09}, Biskup and Richthammer \cite{BR} in a Gibbsian way. 
\end{enumerate} The inspiration for this article is a series of recent studies of random permutations of countably infinite sets by Gnedin and Olshanski \cite{GO09,GO12}, Basu and Bhatnagar \cite{BB}, Gladkich and Peled \cite{GP}. Here a permutation of a countably infinite set is a bijection of that set. Typically, these models are obtained as limits in distribution, as $n \to \infty$, of some sequence of random permutations $\Pi^{[n]}$, with some given distributions $Q_n$ on the set $\mathfrak{S}_n$ of permutations of the finite set $[n]:= \{1, \ldots, n\}$. The distribution of a limiting bijection $\Pi: \mathbb{N}_{+} \to \mathbb{N}_{+}$ is then defined by \begin{equation} \label{limperm} \mathbb{P}( \Pi_i = n_i, 1 \le i \le k ) := \lim_{n \to \infty} \mathbb{P} ( \Pi^{[n]}_i = n_i, 1 \le i \le k), \end{equation} for every sequence of $k$ distinct values $n_i \in \mathbb{N}_{+}:=\{1,2,\ldots\}$, provided these limits exist and sum to $1$ over all choices of $(n_i, 1 \le i \le k ) \in \mathbb{N}_{+}^k$. It is easy to see that for $Q_n = U_n$ the uniform distribution on $\mathfrak{S}_n$, the limits in \eqref{limperm} are identically equal to $0$, so this program fails to produce a limiting permutation of $\mathbb{N}_{+}$. 
However, it was shown by Gnedin and Olshanski \cite[Proposition A.1]{GO09} that for every $0 < q < 1$ this program is successful for $Q_n = M_{n,q}$, the {\em Mallows$(q)$ distribution on $\mathfrak{S}_n$} \cite{Mallows}, which assigns each permutation $\pi$ of $[n]$ probability \begin{equation} \label{mallowsn} \mathbb{P}( \Pi^{[n]} = \pi) = M_{n,q}(\pi):= Z_{n,q}^{-1} \,\, q^{\inv(\pi)} \qquad \mbox{for}~ \pi \in \mathfrak{S}_n, \end{equation} where $\inv (\pi):= \{ (i,j): 1 \le i < j \le n, \pi(i) > \pi(j) \}$ is the {\em number of inversions} of $\pi$, and the normalization constant $Z_{n,q}$ is well known to be the {\em $q$-factorial function} \begin{equation} \label{qfac} Z_{n,q} = \prod_{j=1}^n \sum_{i = 1}^j q^{i-1} = (1-q)^{-n} \prod_{j = 1 }^n ( 1 - q^j) \qquad \mbox{for}~0 < q <1. \end{equation} See Diaconis and Ram \cite[Section 2.e]{DR} for algebraic properties of Mallows($q$) distributions, and additional references. Note that it is possible to define the projective limit for both $Q_n = U_n$ and $Q_n = M_{n,q}$: \begin{itemize} \item For $Q_n = U_n$, the consistency of the family $(U_n;~n \geq 1)$ with respect to the projection is closely related to the {\em Fisher-Yates-Durstenfeld-Knuth shuffle} \cite[Section 3.4.2]{Knuth2}. The projective limit is the Chinese restaurant process with $\theta = 1$. \item For $Q_n = M_{n,q}$, the fact that $(M_{n,q};~n \geq 1)$ are consistent relative to the projection is a consequence of the {\em Lehmer code} \cite[Section 5.1.1]{Knuth3}. Moreover, Gnedin and Olshanski \cite[Proposition A.6]{GO09} proved that the projective limit coincides with the limit in distribution \eqref{limperm}. \end{itemize} Gnedin and Olshanski \cite{GO09} gave a number of other characterizations of the limiting distribution of $\Pi$ so obtained for each $0 < q < 1$. 
They continued in \cite{GO12} to show that there exists a two-sided random permutation $\Pi^*$ of $\mathbb{Z}$, which is a similar limit in distribution of Mallows$(q)$ permutations of $[n]$, shifted to act on intervals of integers $[1 - a_n, n - a_n ]$, for any sequence of integers $a_n$ with both $a_n \to \infty$ and $n - a_n \to \infty$ as $n \to \infty$. They also showed that for each $0 < q < 1$ the process $\Pi^*$ is {\em stationary}, meaning that the {\em process of displacements} $(D^{*}_z:=\Pi^*_z-z;~z \in \mathbb{Z})$ is a stationary process: \begin{equation} (D^*_z;~z \in \mathbb{Z}) \stackrel{(d)}{=} (D^*_{a+z};~z \in \mathbb{Z}) \qquad \mbox{for}~a \in \mathbb{Z}. \end{equation} These results were further extended by Basu and Bhatnagar \cite{BB}, Gladkich and Peled \cite{GP}, who established a number of properties of the limiting Mallows$(q)$ permutations of $\mathbb{N}_{+}$ and of $\mathbb{Z}$, as well as provided many finer asymptotic results regarding the behavior of various functionals of Mallows$(q)$ permutations of $[n]$, including cycle counts, and longest increasing subsequences, in various finer limit regimes with $q$ approaching either $0$ or $1$ as $n \to \infty$. The analysis of limiting Mallows$(q)$ permutations $\Pi$ of $\mathbb{N_{+}}$ by these authors relies on a key regenerative property of these permutations, which is generalized in this paper to provide companion results for a much larger class of random permutations of $\mathbb{N}_{+}$ and of $\mathbb{Z}$. For a permutation $\Pi$ of a countably infinite set $I$, however it may be constructed, there is the basic question: \begin{equation} \parbox{30em}{ $\bullet$ {\em \mbox{ is every orbit of } $\Pi$ \mbox{ finite ?}} } \end{equation} If so, say {\em $\Pi$ has only finite cycles}. 
For $I= \mathbb{N}_{+}$ or $\mathbb{Z}$, one way to show $\Pi$ has only finite cycles, and to gain some control on the distribution of cycle lengths, is to establish the stronger property: \begin{equation} \parbox{30em}{ $\bullet$ {\em \mbox{ every component of } $\Pi$ \mbox{ has finite length.} } } \end{equation} Here we need some vocabulary. Let $I \subseteq \mathbb{Z}$ be an interval of integers, and $\Pi : I \rightarrow I$ be a permutation of $I$. Call $n \in I$ a {\em splitting time} of $\Pi$, or say that $\Pi$ {\em splits at }$n$, if $\Pi$ maps $(-\infty, n]$ red onto itself, or equivalently, $\Pi$ maps $I \cap [n+1 , \infty)$ onto itself. The set of splitting times of $\Pi$, called the {\em connectivity set} by Stanley \cite{Stanley05}, is the collection of finite right endpoints of some finite or infinite family of {\em components of $\Pi$}, say $\{ I_j \}$. So $\Pi$ acts on each of its components $I_j$ as an {\em indecomposable permutation} of $I_j$, meaning that $\Pi$ does not act as a permutation on any proper subinterval of $I_j$. These components $I_j$ form a partition of $I$, which is coarser than the partition by cycles of $\Pi$. For example, the permutation $\pi = (1)(2,4)(3) \in \mathfrak{S}_4$ induces the partition by components $[1] [2,3,4]$. A block of $\Pi$ is a component of $\Pi$, or a union of adjacent components of $\Pi$. For any {\em block} $J$ of $\Pi$ with $\#J = n$ (resp. $\# J = \infty$), the {\em reduced block of $\Pi$ on $J$} is the permutation of $[n]$ (resp. $\mathbb{N}_{+}$) defined via conjugation of $\Pi$ by the shift from $J$ to $[n]$ (resp. $\mathbb{N}_{+}$). For any permutation $\Pi$ of $\mathbb{N}_{+}$, there are two ways to express the event $\{\Pi \mbox{ splits at } n\}$ as an intersection of $n$ events: $$ \{ \Pi \mbox{ splits at } n \} = \bigcap_{i=1}^n \{\Pi_i \le n \} = \bigcap_{i=1}^n \{\Pi^{-1}_i \le n \}. 
$$ An alternative way of writing this event is: $$ \{ \Pi \mbox{ splits at } n \} = \bigcap_{i=1}^n \left\{ \Pi^{-1}_i < \min_{j > n} \Pi^{-1}_j \right\} . $$ For if $\Pi$ splits at $n$, then $\Pi^{-1}_i < n+1 = \min_{j > n} \Pi^{-1}_j$ for every $1 \le i \le n$. Conversely, if $\min_{j > n} \Pi^{-1}_j = m+1$ say, and $\Pi^{-1}_i < m+1$ for every $1 \le i \le n$, then the image of $[n]$ via $\Pi^{-1}$ is equal to $[m]$, so $m = n$ and $\Pi^{-1}_i \le n$ for every $1 \le i \le n$. Let \begin{equation} \label{Ani} A_{n,i}:= \left\{ \min_{j > n} \Pi^{-1}_j < \Pi^{-1}_i \right\}, \end{equation} be the complement of the $i^{th}$ event in the above intersection. Then by the principle of inclusion-exclusion \begin{equation} \label{ie} \mathbb{P}( \Pi \mbox{ splits at } n ) = 1 + \sum_{j = 1}^n (-1)^j \Sigma_{n,j}, \end{equation} where \begin{equation} \label{defsigma} \Sigma_{n,j}: = \sum_{1 \leq i_1 < \cdots <i_j \leq n} \mathbb{P}\left(\bigcap_{k = 1}^j A_{n,i_k}\right). \end{equation} So there are the Bonferroni bounds \begin{equation*} \mathbb{P}( \Pi \mbox{ splits at } n ) \ge 1 - \Sigma_{n,1}, \quad \mathbb{P}( \Pi \mbox{ splits at } n ) \le 1 - \Sigma_{n,1} + \Sigma_{n,2}, \end{equation*} and so on. Moreover, each of the intersections of the $A_{n,i}$ is an event of the form $$ F_{B,C}:= \left\{\min_{j \in B} \Pi^{-1}_j < \min_{h \in C} \Pi^{-1}_h \right\}, $$ for instance $A_{n,i} A_{n,j} = F_{B,C}$ for $B = \{n+1,n+2, \ldots \}$ and $C = \{i,j\}$. An approach to the problem of whether $\Pi$ has almost surely finite component lengths for a number of interesting models, including the limiting Mallows$(q)$ model, is provided by the following structure.
Let $$\mathbb{N}_{+}: = \{1,2, \ldots\} \quad \mbox{and} \quad \mathbb{N}_0:= \{0,1, 2, \ldots \}.$$ If a permutation $\Pi$ of $\mathbb{N}_{+}$ splits at $n$, let $\Pi^n$ be the {\em residual permutation} of $\mathbb{N}_{+}$ defined by conjugating the action of $\Pi$ on $\mathbb{N}_{+} \setminus [n]$ by a shift back to $\mathbb{N}_{+}$: $$\Pi^n_i:= \Pi_{n+i} - n \qquad \mbox{for}~i \in \mathbb{N}_{+}.$$ Call $(T_n;~n \geq 0)$ a {\em delayed renewal process} if $$T_n := T_0 + Y_1 + \cdots + Y_n,$$ with $T_0 \in \mathbb{N}_0, Y_1, Y_2, \ldots \in \mathbb{N}_{+} \cup \{\infty\}$ independent, and the $Y_i$ identically distributed, allowing also the transient case with $\mathbb{P}(Y_1 < \infty) < 1$. When $T_0: = 0$, call $(T_n;~n \geq 0)$ a {\em renewal process with zero delay}. For $n > 0$, let $$R_n:= \sum_{k=0}^\infty 1(T_k = n),$$ be the {\em renewal indicator} at time $n$. The definition below is tailored to the general theory of regenerative processes presented by Asmussen \cite[Chapter VI]{Asmussen}. \begin{definition} \label{regendef} {\em ~ \begin{enumerate}[(1)] \item Call a random permutation of $\Pi$ of $\mathbb{N}_{+}$ {\em regenerative with respect to the delayed renewal process $(T_n; \, n \ge 0)$} if every $T_i$ is a splitting time of $\Pi$, and for each $n >0$ such that $\mathbb{P}(R_n=1) > 0$, conditionally given a renewal at $n$, \begin{enumerate}[$(i).$] \item there is the equality in distribution $$ ( \Pi^n, R_{n+1}, R_{n+2}, \ldots) \stackrel{(d)}{=} (\Pi^0, R_{1}^0, R_{2}^0, \ldots) $$ between the joint distribution of $\Pi^n$ with the residual renewal indicators $(R_{n+1},$\\ $ R_{n+2}, \ldots)$, and the joint distribution of some random permutation $\Pi^0$ of $\mathbb{N}_{+}$ with renewal indicators $(R_{1}^0, R_{2}^0, \ldots)$ with zero delay; \item the initial segment $(R_0, R_1, \ldots R_n)$ of the delayed renewal process is independent of $( \Pi^n, R_{n+1}, R_{n+2}, \ldots)$. 
\end{enumerate} \item Call a random permutation of $\Pi$ of $\mathbb{N}_{+}$ {\em regenerative} if $\Pi$ is regenerative with respect to some renewal process $(T_n; \, n \ge 0)$; \item Call a random permutation of $\Pi$ of $\mathbb{N}_{+}$ {\em strictly regenerative} if $\Pi$ is regenerative with respect to its own splitting times. \end{enumerate} } \end{definition} In Definition \ref{regendef}, we do not require the independence of the pre-renewal and the post-renewal permutations. This is called the {\em wide-sense regeneration} \cite[Chapter 10]{Thbook}, while assuming further independence refers to the {\em regeneration in the classical sense}. The formulation of Definition \ref{regendef} was motivated by its application to three particular models of random permutations of $\mathbb{N}_{+}$, introduced in the next three definitions. Each of these models is parameterized by a discrete probability distribution on $\mathbb{N}_{+}$, say $p = (p_1,p_2, \ldots)$. These models are close in spirit to the similarly parameterized models of {\em $p$-mappings} and {\em $p$-trees} studied in \cite{AP02, AMP}. See also \cite{GP05,GHP,Gnedin10} for closely related ideas of regeneration in random combinatorial structures. General properties of a regenerative random permutation $\Pi$ of $\mathbb{N}_{+}$ with zero delay can be read from the standard theory of regenerative processes \cite[Chapter XIII]{Feller}. Let $u_0:=0$, and \begin{align} u_n & := \mathbb{P}( \Pi \mbox{ regenerates at } n ), \\ f_n & := \mathbb{P}( \Pi \mbox{ regenerates for the first time at } n ). \end{align} If a random permutation $\Pi$ of $\mathbb{N}_{+}$ is regenerative but not strictly regenerative, the renewal process $(T_n; \, n \ge 0)$ is not uniquely associated with $\Pi$. So the sequences $u_n$ and $f_n$ are not necessarily intrinsic to $\Pi$ but to $(T_n; \, n \ge 0)$. 
Each of these sequences determines the other by the recursion \begin{equation} \label{ufrecursion} u_n = f_1 u_{n-1} + f_2 u_{n-2} + \cdots +f_n u_0 \quad \mbox{for all } n > 0, \end{equation} which may be expressed in terms of the generating functions $U(z) : = \sum_{n=0}^{\infty} u_n z^n$ and $F(z):=\sum_{n=1}^{\infty} f_n z^n$ as \begin{equation} \label{UFrelation} U(z) = (1-F(z))^{-1}. \end{equation} According to the discrete renewal theorem, either \begin{enumerate}[$(i)$] \item ({\em transient case}) $\sum_{n=1}^\infty u_n < \infty$, when $\mathbb{P}(Y_1 < \infty ) < 1$, and $\Pi$ has only finitely many regenerations with probability one, or \item ({\em recurrent case}) $\sum_{n=1}^\infty u_n = \infty$, when $\mathbb{P}(Y_1 < \infty) = 1$, and with probability one $\Pi$ has infinitely many regenerations, hence only finite components, and only finite cycles. \end{enumerate} Here is a simple way of constructing regenerative random permutations of $\mathbb{N}_{+}$ in the classical sense: \begin{definition} \label{regenblock} {\em For a probability distribution $p:= (p_1, p_2, \ldots)$ on $\mathbb{N}_{+}$, and $Q_n$ for each $n \in \mathbb{N}_{+}$ a probability distribution on $\mathfrak{S}_n$, call a random permutation $\Pi$ of $\mathbb{N}_{+}$ {\em recurrent regenerative with block length distribution $p$ and blocks governed by $(Q_n;~n \ge 1)$}, if $\Pi$ is a concatenation of an infinite sequence {\tt Block}$_i$, $i \geq 0$ such that \begin{enumerate}[$(i)$] \item the lengths $Y_i$ of {\tt Block}$_i$, $i \geq 1$ are independent and identically distributed (i.i.d.) with common distribution $p$, and are independent of the length $T_0$ of {\tt Block}$_0$ which is finite almost surely; \item conditionally given the block lengths, say $Y_i= n_i$ for $i = 1, 2, \ldots$, the reduced blocks of $\Pi$ are independent random permutations of $[n_i]$ with distributions $Q_{n_i}$. 
\end{enumerate} A transient regenerative permutation $\Pi$ of $\mathbb{N}_{+}$ can be constructed as above up to time $T_N$ for $T_n$ the sum of $n$ i.i.d. variables $Y_i$ with distribution $\mathbb{P}(Y_1 = n ) = p_n/\sum_{k} p_k$, and $N$ has geometric $(1 - \sum_{k} p_k)$ distribution on $\mathbb{N}_0$, independent of the sequence $(Y_i; \, i \ge 1)$. Then $T_N$ is the time of the last finite split point of $\Pi$, and given $T_N = n$ and the restriction of $\Pi$ on $[n]$ so created, the restriction of $\Pi$ on $[n,\infty)$, shifted back to be a permutation of $\mathbb{N}_{+}$ can be constructed according to any fixed probability distribution on the set of all permutations of $\mathbb{N}_{+}$ with no splitting times. } \end{definition} The main focus here is the {\em positive recurrent case}, with mean block length $\mu:= \mathbb{E}(Y_1) < \infty$, and an aperiodic distribution of $Y_1$, which according to the discrete renewal theorem \cite[Chapter XIII, Theorem 3]{Feller} makes \begin{equation} \label{descren} \lim_{n\to \infty} u_n = 1/\mu >0 . \end{equation} Then numerous asymptotic properties of the recurrent regenerative permutation $\Pi$ with this distribution of block lengths can be read from standard results in renewal theory, as discussed further in Section \ref{s3}. In particular, starting from any positive recurrent random permutation $\Pi$ of $\mathbb{N}_{+}$, renewal theory gives an explicit construction of a stationary, two-sided version $\Pi^*$ of $\Pi$, acting as a random permutation of $\mathbb{Z}$, along with ergodic theorems indicating the existence of limiting frequencies for various counts of cycles and components, for both the one-sided and two-sided versions. This greatly simplifies the construction of stationary versions of the limiting Mallows$(q)$ permutations in \cite{GO12,GP}. 
Observe that for every recurrent, strictly regenerative permutation of $\mathbb{N}_{+}$, the support of $Q_n$ is necessarily contained in the set $\mathfrak{S}^{\dagger}_n$ of indecomposable permutations of $[n]$. As will be seen in Section \ref{s4}, even the uniform distribution on $\mathfrak{S}^{\dagger}_n$ has a nasty denominator for which there is no very simple formula. The difficulty motivates the study of other constructions of random permutations of $\mathbb{N}_{+}$, such as the following: \begin{definition} \label{pshifted} {\em For $p$ a probability distribution on $\mathbb{N}_{+}$ with $p_1 >0$, call a random permutation $\Pi$ of $\mathbb{N}_{+}$ a {\em $p$-shifted permutation} of $\mathbb{N}_{+}$, if $\Pi$ has the distribution defined by the following construction from an i.i.d. sample $(X_j;~ j \ge 1)$ from $p$. Inductively, let \begin{itemize} \item $\Pi_1 := X_1$, \item for $i \ge 2$, let $\Pi_i := \psi(X_i)$ where $\psi$ is the increasing bijection from $\mathbb{N}_{+}$ to \\ $\mathbb{N}_{+} \setminus \{ \Pi_1, \Pi_2, \cdots, \Pi_{i-1} \}.$ \end{itemize} For example, if $X_1 = 2$, $X_2 = 1$, $X_3 = 2$, $X_4 = 3$, $X_5 = 4$, $X_6 = 1 \ldots$, then the associated permutation is $(2,1,4,6,8,3,\ldots)$. } \end{definition} The procedure described in Definition \ref{pshifted} is a version of sampling without replacement, or {\em absorption sampling} \cite{Raw, Kemp}. Gnedin and Olshanski \cite{GO09} introduced this construction of $p$-shifted permutations of $\mathbb{N}_{+}$ for $p$ the geometric$(1-q)$ distribution. They proved that the limiting Mallows$(q)$ permutations of $[n]$ is the geometric$(1-q)$-shifted permutation of $\mathbb{N}_{+}$. The regenerative feature of geometric$(1-q)$-shifted permutations was pointed out and exploited in \cite{BB,GP}. This regenerative feature is in fact a property of $p$-shifted permutations of $\mathbb{N}_{+}$ for any $p$ with $p_1>0$. 
This observation allows a number of previous results for limiting Mallows$(q)$ permutations to be extended as follows. \begin{proposition} \label{pshiftedprop} For each fixed probability distribution $p$ on $\mathbb{N}_{+}$ with $p_1 >0$, and $\Pi$ a $p$-shifted random permutation of $\mathbb{N}_{+}$: \begin{enumerate}[$(i)$] \item The joint distribution of the random injection $(\Pi_1, \ldots, \Pi_n): [n] \to \mathbb{N}_{+}$ is given by the formula \begin{equation} \label{inject} \mathbb{P}( \Pi_i = \pi_i, 1 \le i \le n) = \prod_{j=1}^n p \left( \pi_j - \sum_{1 \le i < j} 1 (\pi_i < \pi_j) \right), \end{equation} for every fixed injection $(\pi_i, 1 \le i \le n): [n] \to \mathbb{N}_{+}$, and $p(k):= p_k$. \item The probability that $\Pi$ maps $[n]$ to $[n]$ is \begin{equation} \label{un} u_{n} := \mathbb{P}( [n] \mbox{ is a block of } \Pi ) = \prod_{j = 1}^n \sum_{i=1}^j p_i . \end{equation} \item The random permutation $\Pi$ is strictly regenerative, with regeneration at every $n$ such that $[n]$ is a block of $\mathbb{N}_{+}$, and the renewal sequence $(u_n;~n \geq 1)$ as above. \item The distribution of component lengths $f_n:= \mathbb{P}(Y_1 = n)$ where $Y_1$ is the length of the first component of $\Pi$ is given by the probability generating function \begin{equation} \label{updag} \mathbb{E} z^{Y_1} = \sum_{n=1}^\infty f_n z^n = 1 - \frac{1}{U(z)} \quad \mbox{ where } U(z) := 1 + \sum_{n=1}^ \infty u_{n} z^n . 
\end{equation} \item If $\mathbb{E}X_1 = m:= \sum_{i} i p_i < \infty$, then $\mu:= \mathbb{E}(Y_1) < \infty$, so $\Pi$ is positive recurrent, with limiting renewal probability \begin{equation} \label{limprod} u_\infty:= \lim_{n \to \infty} u_n = \mu^{-1} = \prod_{j=1}^\infty (1 - \mathbb{P}(X_1 >j) ), \end{equation} Then $\Pi$ has cycle counts with limit frequencies detailed later in \eqref{limfreqs}, and there is a stationary version $\Pi^*$ of $\Pi$ acting on $\mathbb{Z}$, call it a {\em $p$-shifted random permutation of $\mathbb{Z}$.} \item If $ m = \infty$ then $\Pi$ is either null recurrent or transient, according to whether $U(1)$ is infinite or finite, and there is no stationary version of $\Pi$ acting on $\mathbb{Z}$. \end{enumerate} \end{proposition} Even for the extensively studied limiting Mallows$(q)$ model, Proposition \ref{pshiftedprop} contains some new formulas and characterizations of the distribution, which are discussed in Section \ref{s5}. An interesting byproduct of this proposition for a general $p$-shifted permutation is the following classical result of Kaluza \cite{Kaluza}: \begin{corollary} \cite{Kaluza} Every sequence $(u_n;~ n \ge 0)$ with \begin{equation} \label{kaluza} 0 < u_n \le u_0 = 1 \quad \mbox{and} \quad u_n^2 \le u_{n-1} u_{n+1} \mbox{ for all } n \ge 1, \end{equation} is a renewal sequence. The sequence $(u_n;~n \ge 0)$ satisfying \eqref{kaluza} is called a Kaluza sequence. 
The renewal process associated with a Kaluza sequence is generated by the random sequence of times $n$ at which $[n]$ is a block of $\Pi$, for $\Pi$ a $p$-shifted permutation of $\mathbb{N}_{+}$, with $$p_1: = u_1 \quad \mbox{and} \quad p_n:= \frac{u_n}{u_{n-1}} - \frac{u_{n-1}}{u_{n-2}} \mbox{ for } n \geq 2.$$ If $p_\infty:= 1 - \sum_{i=1}^\infty p_i >0$, and $X_1, X_2, \ldots$ is the sequence of independent choices from this distribution on $\{1,2, \ldots, \infty\}$ used to drive the construction of $\Pi$, then the construction is terminated by assigning some arbitrarily distributed infinite component on $[n+1, \infty)$ following the last splitting time $n$ such that $X_1 + \cdots + X_n < \infty$, for instance by shifting to $[n+1,\infty)$ the deterministic permutation of $\mathbb{N}_{+}$ with no finite components $$ \cdots 6 \to 4 \to 2 \to 1 \to 3 \to 5 \to \cdots $$ \end{corollary} See also \cite{Kendall,Horn,KingmanR,Shanbhag,Liggett,Fristedt} for other derivations and interpretations of Kaluza's result, all of which now acquire some expression in terms of $p$-shifted permutations. Some further instances of regenerative permutations are provided by the following close relative of the $p$-shifted permutation: \begin{definition} \label{pbiased} {\em For $p$ with $p_i > 0 $ for every $i$, call a random permutation $\Pi$ of $\mathbb{N}_{+}$ a {\em $p$-biased permutation of $\mathbb{N}_{+}$} if the random sequence $(p_{\Pi_1}, p_{\Pi_2}, \ldots )$ is what is commonly called a {\em size-biased random permutation} of $(p_1, p_2, \ldots)$. That is to say, $(\Pi_1, \Pi_2, \ldots )$ is the sequence of distinct values, in order of appearance, of a random sample of positive integers $(X_1, X_2, \ldots)$, which are independent and identically distributed (i.i.d.) with distribution $(p_1, p_2, \ldots)$. 
Inductively, let \begin{itemize} \item $\Pi_1 := X_1$, and $J_1 := 1$, \item for $i \ge 2$, let $\Pi_i := X_{J_i}$, where $J_i$ is the least $j > J_{i-1}$ such that $$X_j \in \mathbb{N}_{+} \setminus \{\Pi_1, \Pi_2, \cdots, \Pi_{i-1} \}.$$ \end{itemize} } \end{definition} The procedure described in Definition \ref{pbiased} is an instance of sampling with replacement, or {\em successive sampling \cite{Rosen, Gordon}}. See \cite{SH,Hoppe,Donnelly,PPY,PY97} for various studies of this model of size-biased permutation, with emphasis on the annealed model, where $p$ is determined by a random discrete distribution $P:= (P_1, P_2, \ldots)$, and given $P = p$, the $X_j$ are i.i.d. with distribution $p$. In particular, the joint distribution of the random injection $(\Pi_1, \ldots, \Pi_n): [n] \rightarrow \mathbb{N}_{+}$ is \begin{equation} \label{genbias} \mathbb{P}(\Pi_i = \pi_i, \,1 \le i \le n) = \mathbb{E} \left(P_{\pi_1} \prod_{i=2}^{n} \frac{P_{\pi_i}}{1-\sum_{j=1}^{i-1} P_{\pi_j}} \right), \end{equation} for every fixed injection $(\pi_i, \, 1 \leq i \leq n): [n] \rightarrow \mathbb{N}_{+}$. A tractable model of this kind, known as a {\em residual allocation model} ({\em RAM}), has the stick-breaking representation: \begin{equation} \label{ram} P_i:= (1-W_1) \cdots (1-W_{i-1}) W_i, \end{equation} with $0 < W_i < 1$ and the $W_i$'s are independent and identically distributed. This model is of special interest for Bayesian non-parametric inference and machine learning \cite{BPJ1,BPJ2}. In those contexts, the distribution of $P$ represents a prior distribution on the underlying probability model $p$, which may be updated in response to observations such as the values in the sample $(X_1, \ldots, X_n)$, or values of $(\Pi_1, \ldots, \Pi_n)$. A model of particular interest arises when each $W_i$ has the beta$(1,\theta)$ density $\theta (1-w)^{\theta -1 }$ at $w \in (0,1)$ for some $0 < \theta < \infty$. 
This distribution of $(P_1,P_2, \ldots)$ is known as the GEM$(\theta)$ distribution, after Griffiths, Engen and McCloskey who discovered the remarkable properties of this model, including McCloskey's result that the GEM$(\theta)$ model is the only RAM that is invariant under $P$-biased permutation, meaning that there is the equality in distribution \begin{equation} \label{isbp} (P_{\Pi_1}, P_{\Pi_2}, \ldots ) \stackrel{(d)}{=} (P_1 , P_2, \ldots ) \quad \mbox{for $\Pi$ a $P$-biased permutation of $\mathbb{N}_{+}$}. \end{equation} The following result reveals the regeneration of size-biased random permutations of $\mathbb{N}_{+}$. \begin{proposition} \label{pbiasedprop} For every residual allocation model \eqref{ram} for a random discrete distribution $P$ with i.i.d. residual factors $W_i$, and $\Pi$ a $P$-biased random permutation of $\mathbb{N}_{+}$: \begin{enumerate}[$(i)$] \item The random permutation $\Pi$ is strictly regenerative, with regeneration at every $n$ such that $[n]$ is a block of $\Pi$, and the renewal sequence $(u_n;~n \geq 1)$ defined by \begin{equation} \label{PTa} u_n : = \mathbb{P}([n] \mbox{ is a block of } \Pi) = \int_0^{\infty} e^{-x} \mathbb{E} \prod_{i=1}^n \left(1 - \exp\left(-\frac{x W_i}{T_{i}} \right) \right) dx, \end{equation} where $T_{i}: = (1-W_1) \cdots (1-W_{i})$. Then $\Pi$ is positive recurrent if \begin{equation} \label{gencri} \sum_{i = 2}^{\infty} \mathbb{E} \exp\left(-\frac{x W_i}{T_{i}} \right) < \infty \quad \mbox{for some } x > 0. \end{equation} \item If each $W_i$ is the constant $1-q$ for some $0 < q < 1$, so $P$ is the geometric$(1-q)$ distribution on $\mathbb{N}_{+}$, then $\Pi$ is positive recurrent. Hence $\Pi$ has all blocks finite and limiting frequencies of cycle counts as in \eqref{limfreqs}, and there is a stationary version $\Pi^{*}$ of $\Pi$ acting on $\mathbb{Z}$, called a $p$-biased random permutation of $\mathbb{Z}$. \item If the $W_i$ are i.i.d. 
beta$(1,\theta)$ for some $\theta >0$, so $P$ has the GEM$(\theta)$ distribution, then $\Pi$ is positive recurrent, with the same further implications. \end{enumerate} \end{proposition} Propositions \ref{pshiftedprop} and \ref{pbiasedprop} expose a close affinity between $p$-shifted and $p$-biased permutations of $\mathbb{N}_{+}$, at least for some choices of $p$, which does not seem to have been previously recognized. For instance, if $p$ is such that $p_1$ is close to $1$, and subsequent terms decrease rapidly to $0$, then it is to be expected in either of these models that $\Pi$ should be close in some sense to the identity permutation on $\mathbb{N}_{+}$. This intuition is confirmed by the explicit formulas described in Section \ref{s6} both for the one parameter family of geometric$(1-q)$ distributions as $q \downarrow 0$, and for the GEM$(\theta)$ family as $\theta \downarrow 0$. This behavior is in sharp contrast to the case if $\Pi$ is a uniformly distributed permutation of $[n]$, where it is well known that the expected number of fixed points of $\Pi$ is $1$, no matter how large $n$ may be. See also Gladkich and Peled \cite{GP} for many finer asymptotic results for the Mallows$(q)$ model of permutations of $[n]$, as both $n \to \infty$ and $q \downarrow 0$. With further analysis, we derive explicit formulas for $u_{\infty}$ of the GEM$(\theta)$-biased permutations in Section \ref{s7}. But there does not seem to be any simple formula for $u_\infty$ of a $P$-biased permutation with $P$ a general RAM, and the condition \eqref{gencri} for positive recurrence is not easy to check. Nevertheless, we give a simple sufficient condition for a $P$-biased permutation of $\mathbb{N}_{+}$ with $P$ governed by a RAM to be positive recurrent. \begin{proposition} \label{simplecond} Let $\Pi$ be a $P$-biased permutation of $\mathbb{N}_{+}$ for $P$ a RAM with i.i.d. residual factors $W_i \stackrel{(d)}{=} W$. 
If the distribution of $- \log(1-W)$ is non-lattice, meaning that the distribution of $1-W$ is not concentrated on a geometric progression, and \begin{equation} \mathbb{E} [- \log W ]< \infty \quad \mbox{and} \quad \mathbb{E} [- \log(1-W)] < \infty, \end{equation} then $\Pi$ is a positive recurrent regenerative permutation. \end{proposition} {\bf Organization of the paper:} The rest of this paper is organized as follows. \begin{itemize} \item Section \ref{s2} sets the stage by recalling some basic properties of indecomposable permutations of a finite interval of integers, which are the basic building blocks of regenerative permutations. \item Section \ref{s3} indicates how the construction of a stationary random permutation of $\mathbb{Z}$ along with some limit theorems is a straightforward application of the well established theory of regenerative random processes. \item Section \ref{s4} provides an example of a regenerative permutation of $\mathbb{N}_{+}$, with uniform block distribution. Some explicit formulas are given. \item Section \ref{s5} sketches a proof of Proposition \ref{pshiftedprop} for $p$-shifted permutations, following the template provided by \cite{BB} in the particular case of the limiting Mallows$(q)$ models. \item Section \ref{s6} gives a proof of Proposition \ref{pbiasedprop} for $P$-biased permutations. This is somewhat trickier, and the results are less explicit than in the $p$-shifted case. \item Section \ref{s7} provides further analysis of regenerative $P$-biased permutations. There Proposition \ref{simplecond} is proved. We also show that the limiting renewal probability of the GEM$(1)$-biased permutation is $1/3$. \end{itemize} {\bf Acknowledgement:} We thank David Aldous, Persi Diaconis, Marek Biskup and Sasha Gnedin for various pointers to the literature. Thanks to Jean-Jil Duchamps for an insightful first proof of our earlier conjecture that $u_\infty = 1/3$. 
We also thank the anonymous referees for their careful reading and valuable suggestions. \section{Indecomposable permutations} \label{s2} This section provides references to some basic combinatorial theory of indecomposable permutations of $[n]$ which may arise as the reduced permutations of $\Pi$ on its components of finite length. For $1 \le k \le n$, let $(n,k)^{\dagger}$ be the number of permutations of $[n]$ with exactly $k$ components. In particular, $(n,1)^{\dagger}:= \# \mathfrak{S}^{\dagger}_n$ is the number of indecomposable permutations of $[n]$, as the sequence A003319 of OEIS. As shown by Lentin \cite{Lentin} and Comtet \cite{Comtet72}, the counts $((n,1)^{\dagger};~ n \geq 1)$, starting from $(1,1)^{\dagger} = 1$, are determined by the recurrence \begin{equation} \label{indecomprec} n! = \sum_{k = 1}^n (k,1)^{\dagger} (n-k)! , \end{equation} which enumerates permutations of $[n]$ according to the size $k$ of their first component. Introducing the formal power series which is the generating function of the sequence $(n!;~n \geq 0)$ $$ G(z):= \sum_{n=0}^\infty n! \, z^n, $$ the recursion \eqref{indecomprec} gives the generating function of the sequence $((n,1)^{\dagger};~ n \geq 1)$, as \begin{equation} \label{indecompn1} \sum_{n=1}^\infty (n,1)^{\dagger} z^n = 1 - \frac{1}{G(z)}, \end{equation} which implies that \begin{equation} \label{asymf} (n,1)^{\dagger} = n! \left(1-\frac{2}{n} + O\left(\frac{1}{n^2}\right) \right). \end{equation} Furthermore, it is derived from \eqref{indecompn1} that \begin{equation} \label{indecompnk} \sum_{n=k}^\infty (n,k)^{\dagger} z^n = \left( 1 - \frac{1}{G(z)} \right)^k \quad \mbox{for}~k \geq 1. \end{equation} The identity \eqref{indecompnk} determines the triangle of numbers $(n,k)^{\dagger}$ for $1 \leq k \leq n$, as displayed for $1 \leq n \leq 10$ in Comtet \cite[Exercise VI.14]{Comtet74}. See also \cite{King,Cori1,Cori2,Cori3,AP13,BR16} for various results about indecomposable permutations. 
Recall that for $I \subseteq \mathbb{Z}$ an interval of integers, and $\Pi: I \rightarrow I$ a permutation of $I$, we say $\Pi$ splits at $n \in I $, if $\Pi$ maps $I \cap (-\infty,n]$ onto itself. As observed by Stam \cite{Stam}, the splitting times of a uniform random permutation $\Pi$ of a finite interval of integers $I = [a,b]$ are {\em regenerative} in the sense that conditionally given that $\Pi$ splits at some $n \in I$ with $a \le n < b$, the restrictions of $\Pi$ to $[a,n]$ and to $[n+1,b]$ are independent uniform random permutations of these two subintervals of $I$. However, for a uniform random permutation $\Pi$ of a finite interval, the components of $\Pi$ turn out not to be very interesting. In fact, for a large finite interval of integers $I$, most permutations of $I$ have only one component. Assuming for simplicity that $I = [n]$, let $$ V_n := \sum_{k = 1}^n 1 ( \Pi \mbox{ splits at } k ), $$ be the number of interval components of $\Pi$, a uniformly distributed random permutation of $[n]$. It is easily seen from \eqref{asymf} that $\mathbb{P}(V_n = 1) \to 1$ as $n \to \infty$. By an obvious enumeration $$ \mathbb{E} V_n := \sum_{k = 1}^n \mathbb{P} ( \Pi \mbox{ splits at } k ) = \sum_{k = 1}^n \frac{ k! (n-k)! } {n!} = \Sigma_n - 1, $$ where $$ \Sigma_n:= \sum_{k=0}^n {n \choose k } ^{-1}, $$ is the sum of reciprocals of binomial coefficients. The sum $\Sigma_n$, as the sequence A046825 of OEIS, has been studied in a number of articles \cite{Rockett,Sury}, with some other interpretations of the sum given in \cite{OEIS}. The following lemma records some basic properties of the decomposition of a uniform permutation $\Pi$ of $[n]$. \begin{lemma} Let $\Pi$ be a uniformly distributed random permutation of $[n]$. 
Then: \begin{enumerate}[$(i)$] \item The number $K_n$ of components of $\Pi$ has distribution \begin{equation} \mathbb{P}(K_n = k) = \frac{(n,k)^{\dagger}}{ n!} \quad \mbox{for}~ 1 \le k \le n, \end{equation} with the counts $(n,k)^{\dagger}$ determined as above. \item Conditionally given $K_n = k$, the random composition of $n$ defined by the lengths $L_{n,1}, \ldots, L_{n,k}$ of these components has the exchangeable joint distribution \begin{equation} \mathbb{P}( L_{n,1} = n_1, \ldots, L_{n,k} = n_k \,|\, K_n = k) = \frac{1}{(n,k)^{\dagger}} \prod_{i=1}^k (n_i,1)^{\dagger}, \end{equation} for all compositions $(n_1, \ldots, n_k)$ of $n$ with $k$ parts, meaning $n_i \ge 1$ and $\sum_{i=1}^k n_i = n$. \item The unconditional distribution of the length $L_{n,1}$ of the first component of $\Pi$ is given by \begin{equation} \mathbb{P}(L_{n,1} = \ell ) = \frac{ (\ell,1)^{\dagger} (n- \ell)! } { n!} \quad \mbox{for}~1 \le \ell \le n , \end{equation} while the conditional distribution of $L_{n,1}$ given that $K_n = k$ is given by \begin{equation} \mathbb{P}(L_{n,1} = \ell \,|\, K_n = k) = \frac{(\ell,1)^{\dagger} (n-\ell,k-1)^{\dagger} } { (n,k)^{\dagger}} \quad \mbox{for} ~1 \le \ell \le n, \end{equation} with the convention that $(0,0)^{\dagger} = 1$ but otherwise $(n,k)^{\dagger} = 0$ unless $1 \le k \le n$. \item The distribution of the length $L_{n}^{*}$ of a size-biased random component of $\Pi$, such as the length of the component of $\Pi$ containing $U_n$, where $U_n$ is independent of $\Pi$ with uniform distribution on $[n]$, is given by the formula \begin{equation} \label{SBCmp} \mathbb{P}( L_n^{*} = \ell ) = \frac{ \ell \, (\ell,1)^{\dagger} } { n \cdot n! } \sum_{ k = 1} ^n k \, (n-\ell , k-1)^{\dagger}, \end{equation} with the same convention. \end{enumerate} \end{lemma} \begin{proof} The first three parts are just probabilistic expressions of the preceding combinatorial discussion. 
Then part $(iv)$ follows from the definition of the size-biased pick, using $$ \mathbb{P}( L_n^{*} = \ell ) = \sum_{k=1}^n \mathbb{P}( L_n^{*} = \ell \,|\, K_n = k) \mathbb{P}(K_n = k). $$ Given that $K_n=k$ let the lengths of these $k$ components listed from left to right be $L_{n,1}, \ldots , L_{n,k}$, \begin{align*} \mathbb{P}(L_n^{*} = \ell \,|\, K_n = k) &= \sum_{j=1}^k \mathbb{P}(\mbox{pick } L_{n,j} \mbox{ and } L_{n,j} = \ell \,|\, K_n = k) \\ &= k \mathbb{P}( \mbox{pick } L_{n,1} \mbox{ and } L_{n,1} = \ell \,|\, K_n = k ) \\ &= k \frac{ \ell }{n} \mathbb{P}( L_{n,1} = \ell \,|\, K_n = k), \end{align*} where the second equality is obtained by exchangeability. Now part (iv) follows by plugging in the formulas in previous parts. \end{proof} Table of $n\cdot n! \, \mathbb{P}(L_n^{*} = \ell)$ for $1 \le \ell \le n \le 7$: \begin{center} \begin{tabular}{ c | c c c c c c c c } \multicolumn{1}{l}{$n$} &&&&&&&\\\cline{1-1} 1 &1&&&&&&& \\ 2 &2&2&&&&&&\\ 3 &5&4&9&&&&&\\ 4 &16 &10&18&52&&&&\\ 5 &64&32&45&104&355&&&\\ 6 &312&128&144&260&710&2766&&\\ 7 &1812&624&576&832&1775&5532&24129&\\\hline \multicolumn{1}{l}{} &0&1&2&3&4&5&6 & ~$\ell$\\ \end{tabular} \end{center} \section{Regenerative and stationary permutations} \label{s3} This section elaborates on the structure of a regenerative permutation of $\mathbb{N}_{+}$, and its stationary version $\Pi^{*}$ acting on $\mathbb{Z}$. To provide some intuitive language for discussion of a permutation $\Pi$ of $I= \mathbb{N}_{+}$ or of $I = \mathbb{Z}$, it is convenient to regard $\Pi$ as describing a motion of balls labeled by $I$. Initially, for each $i \in I$, ball $i$ lies in box $i$. After the action of $\Pi$, \begin{itemize} \item ball $i$ from box $i$ is moved to box $\Pi_i$; \item box $j$ contains the ball initially in box $\Pi^{-1}_j$. \end{itemize} For $i \in I$ let $D_i:= \Pi_i - i$, the displacement of ball initially in box $i$. 
It follows easily from Definition \ref{regendef} that if $\Pi$ is a regenerative permutation of $\mathbb{N}_{+}$, then the process $(D_{n};~ n \geq 1)$ is a {\em regenerative process with embedded delayed renewal process} $(T_k;~ k \geq 0)$. This means that if $R_n:= \sum_{k=0}^\infty 1(T_k = n)$ is the $n^{th}$ renewal indicator variable, then for each $n$ such that $\mathbb{P}(R_n = 1) >0$, conditionally given the event $\{R_n=1\}$, \begin{enumerate}[$(i).$] \item there is the equality of finite dimensional joint distributions $$ ( ( D_{n+j}, R_{n+j});~ j \geq 1 ) \stackrel{(d)}{=} ( ( D^0_{j}, R^0_{j});~ j \geq 1 ), $$ where the $D^0_{j}:= \Pi^0_{j} - j$ are the displacements of the random permutation $\Pi^0$ of $\mathbb{N}_{+}$, with associated renewal indicators $R^0_1, R^0_2, \ldots$ with zero delay. \item the bivariate process $( ( D_{n+j}, R_{n+j});~ j \geq 1 )$ is independent of $(R_1, \ldots, R_n)$. \end{enumerate} This paraphrases the discrete case of the general definition of a regenerative process proposed by Asmussen \cite[Chapter VI]{Asmussen}, and leads to the following Lemma. \begin{lemma} \label{Asmlem} \cite[Chapter VI, Theorem 2.1]{Asmussen}. Let $(D_{n};~n \geq 1)$ be a regenerative process with embedded delayed renewal process, $(T_k;~k \geq 0)$, in the sense indicated above. Assume that the renewal process is positive recurrent with finite mean recurrence time $\mu:= \mathbb{E}(Y_1) < \infty$, where $Y_1:= T_1 - T_0$, and that the distribution of $Y_1$ is aperiodic. 
Then there is the convergence in total variation of distributions of infinite sequences $$ (D_n, D_{n+1}, \ldots ) \stackrel{t.v.}{\longrightarrow} (D^*_0, D^*_1, \ldots), $$ where $(D^*_z;~ z \in \mathbb{Z})$ is a two-sided stationary process, whose law is uniquely determined by the block formula \begin{equation} \label{blockformula} \mathbb{E} g( D^*_z, D^*_{z+1}, \ldots) = \frac{1}{\mu}\mathbb{E} \left( \sum_{k = 1}^\infty g(D_k, D_{k+1}, \ldots) 1 ( Y_1 \ge k) \right), \end{equation} for all $z \in \mathbb{Z}$ and all non-negative product measurable functions $g$. \end{lemma} The existence of a stationary limiting Mallows$(q)$ permutation of $\mathbb{Z}$ was established by Gnedin and Olshanski \cite{GO09}, along with various characterizations of its distribution. Their work is difficult to follow, because they did not exploit the regenerative properties of this distribution. Gladkich and Peled \cite[Section 3]{GP} provides some further information about this model, including what they call a `stitching' construction of the two-sided model from its blocks on $(-\infty,T_0]$, $(T_0,T_1)$ and $[T_1 +1, \infty)$. But their construction too is difficult to follow. In fact, the structure of the two-sided Mallows permutation of $\mathbb{Z}$ is typical of the general structure of stationary regenerative processes. This structure is spelled out in the following theorem, which follows easily from Lemma \ref{Asmlem}. \begin{theorem} \label{thmmain} Let $\Pi$ be a positive recurrent regenerative random permutation of $\mathbb{N}_{+}$, with block length distribution $p$ and family of block distributions $Q_n$ on $\mathfrak{S}_n$, and $\mu: = \sum_{n} n p_n < \infty$. 
\begin{enumerate}[$(i)$] \item There exists a unique stationary regenerative random permutation $\Pi^*$ of $\mathbb{Z}$, with associated stationary renewal process $$\{\cdots T_{-2} < T_{-1} < T_0 < T_1 < T_2 \cdots\} \subseteq \mathbb{Z},$$ with indexing defined by $T_{-1} < 0 \le T_0$, and renewal indicators $R_z^*$, with $R_z^* = 1$ implying that $\Pi^*$ splits at $z$, such that $$ \mathbb{P}( R_z^* = 1 ) = 1/ \mu \quad \mbox{for}~z \in \mathbb{Z}, $$ and given the event $\{R^*_z = 1\}$, by letting $\Pi^{*,z}_{i}: = \Pi^{*}_{z+i} - z$ for $i \in \mathbb{N}_{+}$, $$ (\Pi^{*,z}_{1}, \Pi^{*,z}_{2}, \ldots \,|\, R^{*}_z = 1) \stackrel{(d)}{=} (\Pi ^n_1, \Pi^n _2, \ldots \,|\, R_n = 1) \quad \mbox{for } z \in \mathbb{Z}, $$ for every $n$ such that $\mathbb{P}(R_n = 1) >0$, where $(R_n;~n \geq 1)$ is the sequence of renewal indicators associated with the one-sided regenerative permutation $\Pi$. \item For $\Pi^{*}$ so defined, with block lengths $Y_z:= T_{z} - T_{z-1}$ for $z \in \mathbb{Z}$, the $(Y_z;~ z \in \mathbb{Z})$ are independent, with the $Y_z, z \ne 0$ all copies of $Y_1$ with distribution $p$, while $Y_0$ has the size-biased distribution $$ \mathbb{P}(Y_0 = n) = n p_n /\mu \quad \mbox{for } n \geq 1 . $$ Conditionally given all the block lengths, the delay $T_0$ has uniform distribution on $\{0,1, \ldots,Y_0-1\}$, and conditional on all the block lengths and on $T_0$, with given block lengths $n_i$ say, the reduced permutation of $\Pi^*$ on the block of $n_i$ integers $(T_{i-1}, T_{i}]$ is distributed according to $Q_{n_i}$. \end{enumerate} Conversely, if $\Pi$ is regenerative, existence of such a stationary regenerative permutation of $\mathbb{Z}$ implies that $\Pi$ is positive recurrent. 
\end{theorem} Also note that the law of the stationary regenerative random permutation $\Pi^{*}$ is uniquely defined by the equality of joint distributions $$ (\Pi^{*}_1, \Pi^{*}_2, \ldots , T_0, T_1, T_2, \ldots \,|\, R^*_0 = 1) \stackrel{(d)}{=} (\Pi^0_1, \Pi^0_2, \ldots , T_0, T_1, T_2, \ldots ) , $$ where on the left side the $T_i$ are understood as the renewal times that are strictly positive for the stationary process $\Pi^*$, and on the right side the same notation is used for the renewal times of the regenerative random permutation $\Pi^0$ of $\mathbb{N}_{+}$ with zero delay, and on both sides $T_0 = 0$, the $Y_i:= T_i - T_{i-1}$ for $i \ge 1$ are independent random lengths with distribution $p$, and conditionally given these block lengths are equal to $n_i$, the corresponding reduced permutations of $[n_i]$ are independent and distributed according to $Q_{n_i}$. So the random permutation $\Pi^0$ of $\mathbb{N}_{+}$ is a {\em Palm version} of the stationary permutation $\Pi^{*}$ of $\mathbb{Z}$. See Thorisson \cite{Th1995,Thbook} for general background on stationary stochastic processes. Let $\Pi$ be a positive recurrent regenerative random permutation of $\mathbb{N}_{+}$, with block length distribution $p$. For $n \in \mathbb{N}_{+}$, let \begin{itemize} \item $\cyc_n$ be the length of the cycle of $\Pi$ containing $n$, \item $\cmp_n$ be the length of the component of $\Pi$ containing $n$, \item $\blk_n$ be the length of the block of $\Pi$ containing $n$. \end{itemize} Clearly, $1 \le \cyc_n \le \cmp_n \le \blk_n \le \infty$, and the structure of these statistics is of obvious interest in the analysis of $\Pi$. Assuming further that $p$ is aperiodic, it follows from Lemma \ref{Asmlem} there is a limiting joint distribution of $(\cyc_n, \cmp_n, \blk_n)$ as $n \to \infty$. However, the evaluation of this limiting joint distribution is not easy, even for the simplest regenerative models. 
\quad Suppose that a large number $M$ of blocks of $\Pi$ are formed and concatenated to make a permutation of the first $N$ integers for $N \sim M \mu$ almost surely as $M \rightarrow \infty$. Then among these $N \sim M \mu$ integers, there are about $M \ell p_{\ell}$ integers contained in regeneration blocks of length $\ell$. So for an integer $i = \lfloor U N \rfloor$ picked uniformly at random in $[N]$, the probability that this random integer falls in a regeneration block of length $\ell$ is approximately $$ \mathbb{P}( \lfloor U N \rfloor \in \mbox{regeneration block of length } \ell ) \approx \frac{M \, \ell \, p_{\ell} }{M \mu} = \frac { \ell p_{\ell} } {\mu} . $$ This is the well known size-biased limit distribution of the length of block containing a fixed point in a renewal process. Now given that $\lfloor U N \rfloor $ falls in a regeneration block of length $\ell$, the location of $\lfloor U N \rfloor $ relative to the start of this block has uniform distribution on $[\ell]$. These intuitive ideas are formalized and extended by the proposition below, which follows from Lemma \ref{Asmlem}, and the renewal reward theorem for ergodic averages \cite[Theorem 3.1]{Asmussen}. \begin{proposition} \label{cormain} Let $\Pi$ be a positive recurrent regenerative random permutation of $\mathbb{N}_{+}$, with block length distribution $p$ with finite mean $\mu$, and blocks governed by $(Q_n;~n \geq 1)$. \begin{enumerate}[$(i)$] \item Let $C_{n,j}$ be the number of cycles of $\Pi$ of length $j$ that are wholly contained in $[n]$. Then the cycle counts have limit frequencies \begin{equation} \label{limfreqs} \lim_{n \to \infty} \frac{ C_{n,j} }{n} = \frac{ \nu_j }{\mu} \quad a.s. \quad \mbox{for}~j \geq 1, \end{equation} where $\nu_j $ is the expected number of cycles of length $j$ in a generic block of $\Pi$, and $\mu = \sum_j j \nu_j$. 
The same conclusion holds with $C_{n,j}$ replaced by the larger number of cycles of $\Pi$ of length $j$ whose least element is contained in $[n]$. \item If the block length distribution $p$ is aperiodic, then $$ \lim_{n \to \infty} \mathbb{P}(\Pi \mbox{ regenerates at } n ) = 1/\mu, $$ and \begin{equation} \lim_{n \to \infty} \mathbb{P}( \cyc_n = j ) = \frac{ j \nu_j }{ \mu } \quad \mbox{for } j \in \mathbb{N}_{+}. \end{equation} Alternatively, let $L_{\ell}^*$ be a random variable with values in $[\ell]$, which is the length of a size-biased cycle of a random permutation of $[\ell]$ distributed as $Q_{\ell}$. Then \begin{equation} \lim_{n \to \infty} \mathbb{P}(\cyc_n = j, \blk_n = \ell) =\frac{\ell p_{\ell}}{\mu} \mathbb{P}( L_{\ell}^{*} = j) \quad \mbox{for } 1 \leq j \leq \ell, \end{equation} and \begin{equation} \lim_{n \to \infty} \mathbb{P}(\cyc_n = j) = \frac{1}{\mu} \sum_{\ell = 1}^\infty \ell p_{\ell} \mathbb{P}(L_{\ell}^* = j) \quad \mbox{for~} j \geq 1. \end{equation} \item Continuing to assume that $p$ is aperiodic, there is an almost sure limiting frequency $p^{\circ}_{j}$ of cycles of $\Pi$ of length $j$, relative to cycles of all lengths. These limiting frequencies are uniquely determined by \begin{equation} \label{limfreqfor1} p^{\circ}_{j}= \frac{\nu_j}{\sum_{i=1}^{\infty} \nu_i} \quad \mbox{for } j \in \mathbb{N}_{+}, \end{equation} or by the relations \begin{equation} \label{limfreqfor2} p^{\circ}_{j}= \frac{\mu^\circ}{\mu} \frac{1}{j} \sum_{\ell = 1}^\infty \ell p_{\ell} \mathbb{P}(L_{\ell}^* = j) \quad \mbox{for } j \in \mathbb{N}_{+}, \end{equation} with $\mu^\circ := \sum_{j = 1}^{\infty} j p^{\circ}_{j}$. \item The statements $(i) - (iii)$ hold with cycles replaced by components, with almost sure limiting frequencies $p^{\dagger}_j$ of components of $\Pi$ of length $j$. 
\end{enumerate} \end{proposition} \section{Uniform blocked permutations} \label{s4} In this section we study an example of regenerative permutations where it is possible to describe the limiting cycle count frequencies explicitly. The story arises from the following observation of Shepp and Lloyd \cite{SL}. \begin{lemma} \cite{SL} Let $N$ be a random variable with the geometric$(1-q)$ distribution on $\mathbb{N}_0$. That is, $$\mathbb{P}(N = n) = q^n(1 - q) \quad \mbox{for } n \geq 0.$$ Let $\Pi$ be a uniform random permutation of $[n]$ given $N = n$. Let $(N_j;~j \geq 1)$ be the cycle counts of $\Pi$, which given $N = 0$ are identically $0$, and given $N=n$ are distributed as the counts of cycles of various lengths $j$ in a uniform random permutation of $[n]$. Then $(N_j;~j \geq 1)$ are independent Poisson random variables with means $$\mathbb{E}N_j = \frac{q^j}{j} \quad \mbox{for~} j \ge 1.$$ \end{lemma} The {\em L\'evy-It\^o representation} of $N$ with the infinitely divisible geometric$(1-q)$ distribution as a weighted linear combination of independent Poisson variables, is realized as $N = \sum_{j=1}^\infty j N_j$. The possibility that $N = 0$ is annoying for concatenation of independent blocks. But this is avoided by simply conditioning a sequence of independent replicas of this construction on $N > 0$ for each replica. The obvious identity $N_j 1 (N >0 ) = N_j$ allows easy computation of \begin{equation} \label{countmean} \mathbb{E}( N_j \,|\, N > 0 ) = \frac{ \mathbb{E} N_j }{ \mathbb{P} ( N > 0 ) } = \frac{ q^{j-1} }{j} \quad \mbox{for } j \ge 1. \end{equation} Similarly, for $k = 1,2, \ldots$ \begin{equation} \mathbb{P}( N_j = k \, | \, N > 0 ) = \frac{1}{k! \, q} \left(\frac{q^j}{j}\right)^k \exp\left( - \frac{q^j}{j} \right) \quad \mbox{for } j \ge 1, \end{equation} hence by summation \begin{equation} \mathbb{P}( N_j > 0 \,|\, N > 0 ) = \frac{1}{q} \left[ 1 - \exp\left( - \frac{q^j}{j} \right) \right] \quad \mbox{for } j \ge 1. 
\end{equation} \begin{proposition} \label{unifgeo} Let $\Pi$ be the regenerative random permutation of $\mathbb{N}_{+}$, which is the concatenation of independent blocks of uniform random permutations of lengths $Y_1,Y_2,\ldots$ where each $Y_i>0$ has the geometric$(1-q)$ distribution on $\mathbb{N}_{+}$. Then: \begin{enumerate}[$(i)$] \item The limiting cycle count frequencies $\nu_j/\mu$ in \eqref{limfreqs} are determined by the formula $\mu:= \mathbb{E}(Y_1) = (1- q)^{-1}$, and \begin{equation} \nu_j = \frac{q^{j-1} }{j} \quad \mbox{for } j \in \mathbb{N}_{+}. \end{equation} \item The distribution of $\Pi_1$ is given by \begin{equation} \label{pi1} \mathbb{P}( \Pi_1 = k ) = \frac{1-q}{q} \left( \lambda_1(q) - \sum_{h=1}^{k-1} \frac{ q^h } {h} \right) \quad \mbox{for } k \in \mathbb{N}_{+}, \end{equation} where \begin{equation*} \lambda_1(q):= \sum_{h=1}^\infty \frac{q^h}{h } = - \log ( 1 - q ). \end{equation*} \item The probability of the event $\{\Pi_1 = 1, \Pi_2 = 2\}$ that both $1$ and $2$ are fixed points of $\Pi$, is \begin{equation} \label{fix2} \mathbb{P}(\Pi_1 = 1, \Pi_2 = 2) = 1-q . \end{equation} \item The regenerative random permutation $\Pi$ is not strictly regenerative. \end{enumerate} \end{proposition} \begin{proof} \noindent $(i)$ This follows readily from the formula \eqref{countmean} for the cycle counts in a generic block. \noindent $(ii)$ By conditioning on the first block length $Y_1$, since given $Y_1 = y$ the distribution of $\Pi$ is uniform on $[y]$, there is the simple computation for $k = 1,2, \ldots$ \begin{equation*} \mathbb{P}( \Pi_1 = k ) = \sum_{y = k}^\infty q ^{y-1} ( 1 - q) \frac{1}{y}, \end{equation*} which leads to \eqref{pi1}. In particular, the probability that $1$ is a fixed point of $\Pi$ is $$ \mathbb{P}( \Pi_1 = 1 ) = - \frac{1-q}{q} \log(1-q). 
$$ \noindent $(iii)$ The joint probability of the event $\{\Pi_1 = 1, \Pi_2 = 2\}$ is computed as \begin{align*} \mathbb{P}( \Pi_1 = 1, \Pi_2 = 2 ) &= \mathbb{P}(Y_1 = 1, \Pi_1 = 1, \Pi_2 = 2 ) + \mathbb{P}( Y_1 \ge 2 , \Pi_1 = 1, \Pi_2 = 2 ) \\ &= (1-q) \cdot \frac{1-q }{q} \lambda_1(q) + \sum_{y=2}^\infty q^{y-1} ( 1-q) \frac{ 1 } { y ( y - 1) }\\ &= \frac{(1-q)^2 }{q} \lambda_1(q) + \frac{ 1 - q }{q} \lambda_2(q), \end{align*} where \begin{equation} \lambda_2(q) := \sum_{h=2}^\infty \frac{ q^{h} } { h ( h - 1) } = q - ( 1-q) \lambda_1(q). \end{equation} But this simplifies, by cancellation of the two terms involving $\lambda_1(q)$, to the formula \eqref{fix2}. \noindent $(iv)$ This follows from the fact that $\mathbb{P}(\Pi_1 = 1, \Pi_2 = 2) \ne \mathbb{P}(\Pi_1 = 1)^2$. \end{proof} More generally, the probability of the event $\{\Pi_i = i, ~1 \le i \le k\}$ involves $$ \lambda_k(q):= \sum_{h=k}^\infty \frac{q^h}{ h ( h - 1) \cdots (h-k+1)} = \frac{1}{a_k} q p_{k-2}(q) + \frac{(q-1)^{k-1}}{(k-1)!} \lambda_1(q),$$ for some $a_k \in \mathbb{Z}$ and $p_{k-2} \in \mathbb{Z}_{k-2}[q]$. The sequence $(a_k;~ k \geq 1)$ appears to be the sequence A180170 of OEIS, see \cite{MMR} for related discussion. \begin{remark} {\em One referee proposed the following regenerative but not strictly regenerative permutation. Consider a stationary $M/M/c$ service system with $c>1$ servers, a single queue and the first-come-first serve policy. Labeling customers in the arrival order, the output order is a random permutation $\Pi$. When the system turns idle, we have a renewal for $\Pi$. But there is no renewal, though a split, if the first served customers are $1,2, \ldots, n$ in some output order and the $(n+1)^{th}$ customer is still in the system. } \end{remark} Proposition \ref{unifgeo} is generalized by the following one, which is a corollary of Theorem \ref{thmmain} and Proposition \ref{cormain}. 
\begin{proposition} \label{cormain2} Let $\Pi$ be a positive recurrent random permutation of $\mathbb{N}_{+}$, whose block lengths $Y_k$ are i.i.d. with distribution $p$, and whose reduced block permutations given their lengths are uniform on $\mathfrak{S}_n$ for each length $n$. \begin{enumerate}[$(i)$] \item The limiting cycle count frequencies $\nu_j/\mu$ in \eqref{limfreqs} are determined by the formula \begin{equation} \label{limfrequnif} \nu_j = j^{-1} \mathbb{P}(Y_1 \ge j) \quad \mbox{for}~j \in \mathbb{N}_{+}, \end{equation} where $\mathbb{P}(Y_1 \ge j) = \sum_{i=j}^{\infty} p_i$. So the almost sure limiting frequencies $p^{\circ}_{j}$ of cycles of $\Pi$ of length $j$ are given by \begin{equation} \label{limfreqfor3} p^\circ_j = \frac{ \sum_{i = j}^{\infty} p_i}{j \sum_{i=1}^{\infty} p_i H_i} \quad \mbox{for } j \in \mathbb{N}_{+}, \end{equation} where $H_i : = \sum_{j = 1}^i 1/j$ is the $i^{th}$ harmonic sum. \item If $p$ is aperiodic, the limit distribution of displacements $D_n: = \Pi_n - n$ as $n \to \infty$ is the common distribution of the displacement $D^*_z:=\Pi^*_z - z$ for every $z \in \mathbb{Z}$, which is symmetric about $0$, according to the formula \begin{equation} \label{limdispl} \lim_{n \to \infty} \mathbb{P}( D_n= d ) = \mathbb{P}( D^*_z = d ) = \frac{1}{\mu} \mathbb{E} \left( \frac{ (Y_1 - |d| )_{+} }{ Y_1 } \right) \quad \mbox{for~}d \in \mathbb{Z}, \end{equation} which implies \begin{equation} \lim_{n \to \infty} \mathbb{P}( \Pi_n > n ) = \mathbb{P}( D^*_z > 0 ) = \frac{1}{2} \left( 1 - \frac{1}{\mu} \right), \end{equation} and the same holds for $<$ instead of $>$. 
\item Continuing to assume that $p$ is aperiodic, there is also the convergence of absolute moments of all orders $r>0$ \begin{equation} \lim_{n \to \infty} \mathbb{E} | D_n |^r = \mathbb{E} |D^*_z |^r = \frac{2}{\mu} \mathbb{E} \delta_r(Y), \end{equation} where $$ \delta_r(n):= \sigma_r(n) - n^{-1} \sigma_{r+1}(n) \quad \mbox{with } \sigma_r(n):= \sum_{k=1}^n k^r, $$ the sum of $r^{th}$ powers of the first $n$ positive integers. In particular, for $r \geq 1$, $\delta_r(n)$ is a polynomial in $n$ of degree $r+1$, for instance $$ \delta_1(n) = \frac{1}{6} ( n^2 - 1), \quad \delta_2(n) = \frac{1}{12} n ( n^2 - 1), $$ implying that the limit distribution of displacements has a finite absolute moment of order $r$ if and only if $\mathbb{E} Y_1^{r+1} < \infty$. \end{enumerate} \end{proposition} \begin{proof} \noindent $(i).$ Recall the well known fact that for a uniform random permutation of $[n]$, for $1 \le j \le n$ the expected number of cycles of length $j$ is $\mathbb{E} C_{n,j} = 1/j$. This follows from the easier fact that the length of a size-biased pick from the cycles of a uniform permutation of $[n]$ is uniformly distributed on $[n]$, and the probability $1/n$ that the size-biased pick has length $j$ can be computed by conditioning on the cycle counts as $1/n = \mathbb{E}[ j C_{n,j}/n]$. Appealing to the uniform distribution of blocks given their lengths, given $Y_1$ the expected number of $j$-cycles in the block of length $Y_1$ is $(1/j) 1(Y_1 \ge j )$, and the conclusion follows. The limiting frequencies \eqref{limfreqfor3} are computed by injecting the formula \eqref{limfrequnif} for cycle counts into \eqref{limfreqfor1}. \noindent $(ii).$ This follows from Lemma \ref{Asmlem}, with the expression for the limit distribution of $D^*_z:= \Pi^*_z - z$ given by \begin{align} \label{dzexp} \mathbb{P} (D^*_z = d) = \frac{1}{\mu} \sum_{k = 1}^\infty \mathbb{P}( \Pi_k = k + d, Y_1 \ge k) . 
\end{align} By construction of $\Pi$, given $Y_1 = y$ for some $y \ge k$, the image of $\Pi_k$ is a uniform random pick from $[y]$, so $$ \mathbb{P}( \Pi_k = k + d, Y_1 \ge k , Y_1 = y) = 1( 1 \le k + d \le y ) y^{-1} p_y \quad \mbox{for~} y \ge k. $$ Sum this expression over $y$, then switch the order of summations over $k$ and $y$, to see that for each fixed $y \ge 1$ the coefficient of $\mu^{-1} y^{-1} p_y$ in \eqref{dzexp} is $$ \sum_{k=1}^\infty 1 ( 1 \le k + d \le y ) = ( y - |d|)_+ , $$ since if $d \ge 0$ the sum over $k$ is effectively from $1$ to $y-d$, and while if $d < 0$ it is from $1 + |d|$ to $y$, and in either case the number of non-zero terms is $y - |d|$ if $|d| < y$, and $0$ otherwise. This gives the expression for the limit on the right side of \eqref{limdispl}, from which follow the remaining assertions. \noindent $(iii).$ This follows from the formula \eqref{limdispl}, a known result of convergence of moments in the limit theorem for regenerative stochastic processes \cite[Chapter VI, Problem 1.4]{Asmussen}, and Bernoulli's formula for $\sigma_r(n)$ as a polynomial in $n$ of degree $r+1$, see e.g. Beardon \cite{Beardon}. \end{proof} Note, however, that the companion results for components of $\Pi$ seem to be complicated. For instance, there is in general no simple expression for the expected number of components of $\Pi$ of a fixed length. The limiting frequencies $p^{\dagger}_j$ of components of $\Pi$ of length $j$ are obtained by plugging \eqref{SBCmp} into \eqref{limfreqfor2}, which are determined implicitly by the relations \begin{equation} p^{\dagger}_j = \frac{\mu^{\dagger}}{\mu} (j,1)^{\dagger} \sum_{\ell = 1}^\infty \frac{p_{\ell}}{\ell!} \sum_{k = 1}^{\ell} k \, (l-j,k-1)^{\dagger} \quad \mbox{for } j \in \mathbb{N}_{+}. \end{equation} \section{$p$-shifted permutations} \label{s5} In this section we study the $p$-shifted permutations introduced in Definition \ref{pshifted}. 
It is essential that $p$ be fixed and not random to make $p$-shifted permutations regenerative. The point is that if $p$ is replaced by a random $P$, the observation of $\Pi_1, \ldots, \Pi_n$ given a split at $n$ allows some inference to be made about the $P_i$, $1 \le i \le n$. But according to the definition of the $P$-shifted permutation, these same values of $P_i$ are used to create the remaining permutation of $\mathbb{N}_{+} \setminus [n]$. Consequently, the independence condition required for regeneration at $n$ will fail for any non-degenerate random $P$. Now we give a proof of Proposition \ref{pshiftedprop}. \begin{proof}[Proof of Proposition \ref{pshiftedprop}] \noindent $(i)$ This is clear from the definition of $p$-shifted permutations. \noindent $(ii)$ This is obvious from the absorption sampling: one element of $[n]$ is sampled with probability $p_1 + \cdots + p_n$, then one element of the remaining in $[n]$ is sampled with probability $p_1 + \cdots + p_{n-1}$, and so on. Alternatively, observe that $$ u_n = \sum_{\pi \in \mathfrak{S}_n} \prod_{j=1}^n p \left(\pi_j - \sum_{1 \leq i <j} 1(\pi_i < \pi_j)\right) = \sum_{\pi \in \mathfrak{S}_n} \prod_{j=1}^n p \left(j - \sum_{1 \leq i < \pi^{-1}_j} 1(\pi_i <j)\right), $$ and the conclusion follows from the well known bijection $\mathfrak{S}_n \rightarrow [1] \times [2] \ldots \times [n]$ defined by $$ \pi \mapsto \left (j - \sum_{1 \leq i < \pi^{-1}_j} 1(\pi_i <j);~1 \le j \le n \right). $$ \noindent $(iii)$-$(iv)$ The strict regeneration is clear from the definition of $p$-shifted permutations, and the generating function \eqref{updag} follows easily from the the general theory of regenerative processes \cite[Chapter XIII]{Feller}. \noindent $(v)$-$(vi)$ The particular case of these results for $p$ the geometric$(1-q)$ distribution was given by Basu and Bhatnagar \cite[Lemmas 4.1 and 4.2]{BB}. Their argument generalizes as follows. The key observation is that for $X_1, X_2, \ldots$ the i.i.d. 
sample from $p$ which drives the construction of the $p$-shifted permutation $\Pi$, the sequence $M_n$ defined by $M_0:= 0$ and $$ M_{n}:= \max ( M_{n-1}, X_n ) - 1, $$ has the interpretation that $$ M_n = \# \left\{ i : 1 \le i \le \max_{1 \le j \le n} \Pi_j \right\} - n, $$ which can be understood as the current number of gaps in the range of $\Pi_j, 1 \le j \le n$. The event $\{$$\Pi$ regenerates at $n$$\}$ is then identical to the event $\{M_n = 0\}$. It is easily checked that $(M_n;~ n \geq 0)$ is a Markov chain with state space $\mathbb{N}_0$, and the unique invariant measure $(\mu_i;~ i \in \mathbb{N}_0)$ for the Markov chain $(M_n;~n \geq 0)$ is given by \begin{equation} \mu_0 =1 \quad \mbox{and} \quad \mu_i = \frac{\mathbb{P}(X_1>i)}{\prod_{j=1}^{i}[1 - \mathbb{P}(X_1 > j)]} \quad \mbox{for } i \geq 1. \end{equation} Moreover, it follows by standard analysis that this sequence $\mu_j$ is summable if and only if the mean $m$ of $X_1$ is finite. The conclusion follows from the well known theory of Markov chains \cite[Chapter 6]{Durrett}, and Theorem \ref{thmmain}. \end{proof} See also Alappattu and Pitman \cite[Section 3]{AP08} for a similar argument used to derive the stationary distribution of the lengths of the loop-erasure in a loop-erased random walk. For the $p$-shifted permutation, the first splitting probabilities $f_n:= \mathbb{P}( \Pi \mbox{ first splits at } n )$ are given by the explicit formulas \begin{align*} f_1 =& p_1, \\ f_2 =& p_1 p_2 ,\\ f_3 =& p_1 p_2 ^2 + p_1^2 p_3 + p_1 p_2 p_3 , \\ f_4 =&p_1 p_2^3 + 2 p_1^2 p_2 p_3 + 2 p_1 p_2^2 p_3 + p_1^2 p_3^2 + p_1 p_2 p_3^2 \\ & + p_1^3 p_4 + 2p_1^2 p_2p_4 + p_1 p_2^2 p_4 + p_1^2 p_3 p_4 + p_1 p_2 p_3 p_4. \end{align*} It is easily seen that for each $n$, $f_n(p_1,p_2, \ldots)$ is a polynomial of degree $n$ in variables $p_1, \ldots , p_n$. The polynomial so defined makes sense even for variables $p_i$ not subject to the constraints of a probability distribution. 
The polynomial can be understood as an enumerator polynomial for the vector of counts $$ R_{n,j} := \pi_j - \sum_{i = 1}^j 1 (\pi_i < \pi_j) \quad \mbox{for }1 \leq j \leq n. $$ In the polynomial for $f_n$, the choice of $\pi_1, \ldots, \pi_n$ is restricted to the set $\mathfrak{S}^\dagger_n$ of indecomposable permutations of $[n]$, and the coefficient of $p_1^{r_1} \ldots p_n^{r_n}$ is for each choice of non-negative integers $r_1, \ldots, r_n$ with $\sum_{i=1}^n r_i = n$ is the number of indecomposable permutations of $[n]$ such that $\sum_{j=1}^n 1 (R_{n,j} = i ) = r_i$ for each $1 \le i \le n$. In particular, the sum of all the integer coefficients of these monomials is $$ f_n(1,1, \ldots) =(n,1)^{\dagger}, $$ which is the number of indecomposable permutations of $[n]$ discussed in Section \ref{s2}. Properties of the limiting Mallows$(q)$ permutations of $\mathbb{N}_{+}$ and of $\mathbb{Z}$ are obtained by specializing Proposition \ref{pshiftedprop} with $p$ the geometric$(1-q)$ distribution on $\mathbb{N}_{+}$. Many results of \cite{GO09,GP} acquire simpler proofs by this approach. The following corollary also exposes a number of properties of the limiting Mallows$(q)$ models which were not mentioned in previous works. \begin{corollary} For each $0<q<1$, with $\mathbb{P}_q$ governing $\Pi$ as a geometric$(1-q)$-shifted permutation of $\mathbb{N}_{+}$, the conclusions of Proposition \ref{pshiftedprop} apply with the following reductions: \begin{enumerate}[$(i)$] \item The formula \eqref{inject} reduces to \begin{equation} \label{injectq} \mathbb{P}_q( \Pi_i = \pi_i, 1 \le i \le n) = (1-q)^n \, q^{\inv(\pi) + \delta(n,\pi)} \end{equation} where $\inv(\pi)$ is the number of inversions of $\pi$, and $\delta(n,\pi):= \sum_{i=1}^n \pi_i - \frac{1}{2} n ( n +1)$. In particular, \eqref{injectq} holds with the further simplification $\delta(n,p)=0$ if and only if $\pi$ is a permutation of $[n]$. 
\item The probability that $\Pi$ maps $[n]$ to $[n]$ is \begin{equation} \label{unq} u_{n,q} := \mathbb{P}_q( [n] \mbox{ is a block of } \Pi ) = (1-q)^n Z_{n,q}, \end{equation} where $Z_{n,q}$ is defined by \eqref{qfac}. \item The $\mathbb{P}_q$ distribution of $\Pi$ is strictly regenerative, with regeneration at every $n$ such that $[n]$ is a block of $\mathbb{N}_{+}$, and renewal sequence $(u_{n,q};~n \geq 1)$ as above. \item The $\mathbb{P}_q$ distribution of component lengths $f_{n,q} = \mathbb{P}_q(Y_1= n)$, where $Y_1$ is the length of the first component of $\Pi$, is given by the probability generating function \begin{equation} \label{pnmallows} \sum_{n = 1}^\infty f_{n,q} z^n = 1 - \frac{1}{U_q(z)} \quad \mbox{where } U_q(z) = 1 + \sum_{n=1}^\infty u_{q,n} z^n, \end{equation} as well as by the formula \begin{equation} \label{pnmallows} f_{n,q} = (1- q)^n \, Z_{n,q}^\dagger, \end{equation} where $ Z_{n,q}^\dagger := \sum_{\pi \in \mathfrak{S}^{+}_n } q^{\inv(\pi)} $ is the restricted partition function of the Mallows$(q)$ distribution $M_{n,q}$ on the set $\mathfrak{S}_n^{\dagger} $ of indecomposable permutations of $[n]$. \item Under $\mathbb{P}_q$, conditionally given the component lengths, say $Y_i = n_i$ for $i = 1,2,\ldots$, the reduced components of $\Pi$ are independent random permutations of $[n_i]$ with conditional Mallows$(q)$ distributions $M_{n_i,q}^{\dagger}$ defined by \begin{equation} M_{n_i,q}^{\dagger}(\pi):= \frac{1}{Z_{n_i,q}^\dagger}\, q^{\inv(\pi)} \quad \mbox{for } \pi \in \mathfrak{S}^{+}_{n_i}. \end{equation} \end{enumerate} \end{corollary} \section{$p$-biased permutations} \label{s6} This section provides a detailed study of $p$-biased permutations introduced in Definition \ref{pbiased}. For a $P$-biased permutation $\Pi$ of $\mathbb{N}_{+}$ with $P = (P_1, P_2, \ldots)$ a random discrete distribution, the joint distribution of $(\Pi_1, \ldots \Pi_n)$ is computed by the formula \eqref{genbias}. 
In particular, the distribution of $\Pi_1$ is given by the vector of means $(\mathbb{E}(P_1), \mathbb{E}(P_2), \ldots )$. So if $P$ is the GEM$(\theta)$ distribution, then $\Pi_1$ has the geometric$(\theta/(1+\theta))$ distribution on $\mathbb{N}_{+}$. The index $\Pi_1$ of a single size-biased pick from $(P_1,P_2, \ldots)$, and especially the random size $P_{\Pi_1}$ of this pick from $(P_1,P_2, \ldots)$ plays an important role in the theory of random discrete distributions and associated random partitions of positive integers \cite{Pitmanbook}. Features of the joint distribution of $(P_{\Pi_1}, \ldots, P_{\Pi_n})$ also play an important role in this setting \cite{Pitman95}, but we are unaware of any previous study of $(\Pi_1, \Pi_2, \ldots )$ regarded as a random permutation of $\mathbb{N}_{+}$. We start with the following construction of size-biased permutations from Perman, Pitman and Yor \cite[Lemma 4.4]{PPY}. See also Gordon \cite{Gordon} where this construction is indicated in the abstract, and Pitman and Tran \cite{Ptran} for further references to size-biased permutations. \begin{lemma} \cite{Gordon, PPY} \label{lemPPY} Let $(L_i;~ 1 \leq i \leq n)$ be a possibly random sequence such that $\sum_{i=1}^n L_i = 1$, and $(\varepsilon_i;~ 1 \leq i \leq n)$ be i.i.d. standard exponential variables, independent of the $L_i$'s. Define $$Y_i : = \frac{\varepsilon_i}{L_i} \quad \mbox{for } 1 \leq i \leq n.$$ Let $Y_{(1)} < \cdots <Y_{(n)}$ be the order statistics of the $Y_i$'s, and $L^*_1, \cdots ,L^*_n$ be the corresponding $L$ values. Then $(L^*_i;~1 \leq i \leq n)$ is a size-biased permutation of $(L_i;~ 1 \leq i \leq n)$. \end{lemma} By applying the formula \eqref{ie} and Lemma \ref{lemPPY}, we evaluate the splitting probabilities for $P$-biased permutation with $P$ a random discrete distribution. 
\begin{proposition} \label{bon} Let $\Pi$ be a $P$-biased permutation of $\mathbb{N}_{+}$ of a random discrete distribution $P = (P_1, P_2, \ldots)$, and $T_n: = 1 -\sum_{i=1}^n P_i$. Then the probability that $\Pi$ maps $[n]$ to $[n]$ is given by \eqref{ie} with \begin{equation} \Sigma_{n,j}= \sum_{1 \le i_1 < \cdots < i_j \le n } \mathbb{E} \left( \frac{T_n} {T_n + P_{i_1} + \cdots + P_{i_j} } \right). \end{equation} \end{proposition} \begin{proof} Recall the definition of $A_{n,i}$ from \eqref{Ani}. By Lemma \ref{lemPPY}, for $1\leq i_1< \cdots < i_j \leq n$, \begin{align*} \mathbb{P}\left( \bigcap_{k =1}^j A_{n,i_k} \right) & =\mathbb{P} \left( \min_{1 \leq k \leq j} \left(\frac{\varepsilon_{i_k}}{P_{i_k}}\right) > \frac{\varepsilon}{T_n} \right) \\ & = \mathbb{E} \exp\left(-\frac{\varepsilon( P_{i_1} + \cdots + P_{i_j})}{T_n}\right) \\ & = \mathbb{E} \left( \frac{T_n} {T_n + P_{i_1} + \cdots + P_{i_j} } \right), \end{align*} which leads to the desired result. \end{proof} In terms of the occupancy scheme by throwing balls independently into an infinite array of boxes indexed by $\mathbb{N}_{+}$ with random frequencies $P = (P_1, P_2,\ldots)$, the quantity $\Sigma_{n,j}$ has the following interpretation. Let $C_n$ be the count of empty boxes when the first box in $ \{n+1, n+2, \cdots\}$ is filled. Then \begin{equation} \Sigma_{n,j} = \mathbb{E} \binom{C_n}{j}. \end{equation} Further analysis of $C_n$ and $\Sigma_{n,j}$ for the GEM$(\theta)$ model will be presented in the forthcoming article \cite{DPT}. Contrary to $p$-shifted permutations, we consider $P$-biased permutations where $P$ is determined by a RAM \eqref{ram}. In the latter case, the only model with $P$ fixed is the geometric$(1-q)$-biased permutation for $0 < q < 1$. Now we give a proof of Proposition \ref{pbiasedprop}. \begin{proof}[Proof of Proposition \ref{pbiasedprop}] \noindent $(i)$ The strict regeneration follows easily from the stick breaking property of RAM models. 
By Lemma \ref{lemPPY}, the renewal probabilities $u_n$ are given by $$u_n = \mathbb{P}\left(\max_{1 \leq i \leq n} \frac{\varepsilon_i}{P_i} < \frac{\varepsilon}{T_n}\right),$$ where $P_i = W_i \prod_{j=1}^{i-1}(1-W_j)$, $T_n = \prod_{j=1}^n (1-W_j)$, and the $\varepsilon_i$'s and $\varepsilon$ are independent standard exponential variables. Note that for each $x>0$, $$ \mathbb{P}\left(\max_{1 \leq i \leq n} \frac{\varepsilon_i}{P_i} < \frac{x}{T_n}\right) = \mathbb{P} \left(\bigcap_{i=1}^n \left\{\varepsilon_i < \frac{x P_i}{T_n}\right\} \right) = \mathbb{E} \prod_{i=1}^n \left(1 - e^{-\frac{xP_i}{T_n}} \right), $$ which, by conditioning on $\varepsilon = x$, leads to \begin{equation} \label{interm} u_n = \int_0^{\infty} e^{-x} \mathbb{E} \prod_{i=1}^n \left(1 - e^{-x P_i / T_n}\right) dx. \end{equation} Since $(W_1, \ldots, W_n) \stackrel{(d)}{=} (W_n, \ldots, W_1)$ for every $n \ge 1$, the formula \eqref{interm} simplifies to \eqref{PTa}. So to prove $u_{\infty} > 0$, it suffices to prove \eqref{gencri}. \noindent $(ii)$ This is the deterministic case where $P_i = q^{i-1}(1-q)$ and $T_n = q^n$. So the formula \eqref{PTa} specializes to $$u_n = \int_0^{\infty} e^{-x} \prod_{i=1}^n \left(1 - e^{-x(1-q)/q^i}\right) dx.$$ It follows by standard analysis that $u_{\infty}: = \lim_{n \rightarrow \infty} u_n > 0$ if and only if $$\sum_{i = 1}^{\infty} e^{-x(1-q)/q^i} < \infty.$$ But this is obvious for $0 < q < 1$, which implies that $\Pi$ is positive recurrent. \noindent $(iii)$ This case corresponds to $P_i = W_i \prod_{j=1}^{i-1}(1-W_j)$ and $T_n = \prod_{j=1}^n (1-W_j)$, where $W_i$ are i.i.d. beta$(1,\theta)$ variables. Note that for each $i$, $W_i$ is independent of $T_{i-1}$. 
By conditioning on $T_{i-1}$, we get \begin{align*} \sum_{i = 2}^{\infty} \mathbb{E} \exp\left(-\frac{x W_i}{T_i} \right) & = \sum_{i = 2}^{\infty} \int_0^1 \mathbb{E}\exp \left(-\frac{xw}{T_{i-1}(1-w)} \right) \cdot \theta(1-w)^{\theta-1} dw \\ & = \int_0^1 \int_0^1 \exp \left(-\frac{xw}{t (1-w)} \right) \cdot \frac{\theta}{t} \cdot \theta(1-w)^{\theta-1} dt dw, \end{align*} where the second equality follows from Ignatov's description \cite{Ignatov} of GEM$(\theta)$ variables as a Poisson point process on $(0,1)$ with intensity $\theta (1-u)^{-1} du$, so that the partial products $T_i$ form a Poisson point process on $(0,1)$ with intensity $\theta t^{-1} dt$. Note that $$\int_0^1 \int_0^1 \exp \left(-\frac{xw}{t (1-w)} \right) t^{-1} (1-w)^{\theta-1} dt dw = \int_0^1 E_1\left(\frac{xw}{1-w}\right) (1-w)^{\theta-1}dw,$$
\end{equation} Similarly, by letting $\Pi$ be the $P$-biased permutation of $\mathbb{N}_{+}$ for $P$ the GEM$(\theta)$ distribution, and $C_{n,1,\theta}$ be the number of fixed points of $\Pi$ contained in $[n]$, \begin{equation} \label{fix2} \lim_{n \rightarrow \infty} \frac{C_{n,1,\theta}}{n} = \beta(\theta) \quad a.s. \quad \mbox{with } \lim_{\theta \downarrow 0} \beta(\theta) = 1. \end{equation} \section{Regenerative $P$-biased permutations} \label{s7} This section provides further analysis of $P$-biased permutations of $\mathbb{N}_{+}$, especially for $P$ the GEM$(1)$ distribution, with the $W_i$'s i.i.d. uniform on $(0,1)$. While the formulas provided by \eqref{ie}, \eqref{PTa}, or by summing the r.h.s. of \eqref{genbias} over all permutations $\pi \in \mathfrak{S}_k$ for the renewal probabilities $u_k$ and their limit $u_{\infty}$ are quite explicit, it is not easy to evaluate these integrals and their limit directly. For instance, even in the simplest case where the $W_i$'s are uniform on $(0,1)$, explicit evaluation of $u_k$ for $k \ge 2$ involves the values of $\zeta(j)$ of the Riemann zeta function at $j = 2, \ldots, k$, as indicated later in Proposition \ref{conju}. We start with an exact simulation of the $P$-biased permutation for any $P = (P_1,P_2, \ldots)$ with $P_i>0$ for all $i\ge 1$ involving the following construction of a process $(\mathcal{W}_k;~ k \ge 1)$ with state space the set of finite unions of open subintervals of $(0,1)$, from $P$ and a collection of i.i.d. uniform variables $U_1, U_2 ,\ldots$ on $(0,1)$ independent of $P$. \begin{itemize} \item Construct $F_j = P_1 + \cdots + P_j$ until the least $j$ such that $F_j > U_1$. Then set $$\mathcal{W}_1 = \left( \bigcup_{i = 1}^{j-1} (F_{i-1},F_i) \right) \cup ( F_j, 1),$$ with convention $F_0: = 0$. \item Assume that $\mathcal{W}_{k-1}$ has been constructed for some $k \ge 2$ as a finite union of open intervals with the rightmost interval $(F_j,1)$ for some $j \ge 1$. 
If $U_k$ lands in one of the intervals of $\mathcal{W}_{k-1}$ that is not the rightmost interval, then remove that interval from $\mathcal{W}_{k-1}$ to create $\mathcal{W}_k$. If $U_k$ hits the rightmost interval $(F_j,1)$, then construct $F_\ell = F_j + P_{j+1} + \cdots + P_\ell$ for $\ell > j$ until the least $\ell$ such that $F_\ell > U_k$. Set $$\mathcal{W}_k = (\mathcal{W}_{k-1} \cap (0,F_j)) \cup \left( \bigcup_{i = j+1}^{\ell -1} (F_{i-1}, F_i) \right) \cup (F_{\ell},1).$$ \end{itemize} It is not hard to see that a $P$-biased permutation $\Pi$ of $\mathbb{N}_{+}$ can be recovered from the process $(\mathcal{W}_k;~ k \ge 1)$ driven by $P$, with $\Pi_k$ a function of $\mathcal{W}_1, \ldots, \mathcal{W}_k$. In particular, the length $Y_1$ of the first component of $\Pi$ is $$ Y_1 = \min \{k \ge 1: \mathcal{W}_k \mbox{ is composed of a single interval } (F_\ell,1) \mbox{ for some }l\}. $$ In the sequel, the notation $\stackrel{\theta}{=}$ or $\stackrel{\theta}{\approx}$ indicates exact or approximate evaluations for the GEM$(\theta)$ model; that is the residual factors $W_i$ are i.i.d. beta$(1,\theta)$ distributed. By a simulation of the process $(\mathcal{W}_k;~ k \ge 1)$ for GEM$(1)$, we get some surprising results: \begin{equation} \label{simulation} \mathbb{E} Y_1 \stackrel{1}{\approx} 3 \quad \mbox{and} \quad Var(Y_1) \stackrel{1}{\approx} 11, \end{equation} which suggests that \begin{equation} \label{ukone} u_{\infty} = 1 / \mathbb{E}Y_1 \stackrel{1}{=} 1/3. \end{equation} These simulation results \eqref{simulation} are explained by the following lemma, which provides an alternative approach to the evaluation of $u_k$ derived from a RAM. This lemma is suggested by work of Gnedin and coauthors on the Bernoulli sieve \cite{Gnedinsieve,GIM,Gsmall}, and following work on extremes and gaps in sampling from a RAM by Pitman and Yakubovich \cite{PY17, P17}. \begin{lemma} Let $X_1, X_2, \ldots$ be a sample from the RAM \eqref{ram} with i.i.d. 
stick-breaking factors $W_i \stackrel{(d)}{=} W$ for some distribution of $W$ on $(0,1)$. For positive integers $n$ and $k = 0,1, \ldots$ let \begin{equation} Q_n^*(k):= \sum_{i=1}^n 1 (X_i > k ) \end{equation} represent the number of the first $n$ balls which land outside the first $k$ boxes. For $m = 1,2, \ldots$ let $n(k,m):= \min \{n : Q_n^*(k) = m \}$ be the first time $n$ that there are $m$ balls outside the first $k$ boxes. Then: \begin{itemize} \item For each $k$ and $m$ there is the equality of joint distributions \begin{equation} \left( Q_{n(k,m)} ^* (k-j), 0 \le j \le k \right) \stackrel{(d)}{=} ( \widehat{Q}_j, 0 \le j \le k \,|\, \widehat{Q}_0 = m ) \end{equation} where $(\widehat{Q}_0, \widehat{Q}_1, \ldots)$ with $1 \le \widehat{Q}_0 \le \widehat{Q}_1 \cdots$ is a Markov chain with state space $\mathbb{N}_{+}$ and stationary transition probability function \begin{equation} \label{hatqdef} \widehat{q}(m,n) := {n - 1\choose m - 1} \mathbb{E} W^{n-m} (1-W)^m \quad \mbox{for}~m \le n. \end{equation} So $\widehat{q}(m, \bullet)$ is the mixture of Pascal $(m,1-W)$ distributions, and the distribution of the $\widehat{Q}$ increment from state $m$ is mixed negative binomial $(m, 1-W)$. \item For each $k \ge 1$ the renewal probability $u_k$ for the $P$-biased permutation of $\mathbb{N}_{+}$ for $P$ a RAM is the probability that the Markov chain $\widehat{Q}$ started in state $1$ is strictly increasing for its first $k$ steps: \begin{equation} \label{ukform} u_k = \mathbb{P}( \widehat{Q}_0 < \widehat{Q}_1 < \cdots < \widehat{Q}_k \,|\, \widehat{Q}_0 = 1 ). \end{equation} \item The sequence $u_k$ is strictly decreasing, with limit $u_\infty \ge 0$ which is the probability that the Markov chain $\widehat{Q}$ started in state $1$ is strictly increasing forever: \begin{equation} \label{uinfform} u_\infty = \mathbb{P}( \widehat{Q}_0 < \widehat{Q}_1 < \cdots \,|\, \widehat{Q}_0 = 1 ). 
\end{equation} \end{itemize} \end{lemma} \begin{proof} For $0 < v < 1$ and $U_1, U_2, \ldots$ a sequence of i.i.d. uniform $[0,1]$ variables, let $$ N_n(v,1):= \sum_{i = 1}^n 1 ( v < U_i < 1) $$ be the number of the first $n$ values that fall in $(v,1)$, and let $$g(v,m):= \min \{n \ge 1 : N_n(v,1) = m \}$$ be the random time when $N_n(v,1)$ first reaches $m$. So $g(v,m)$ has the Pascal$(m, 1-v)$ distribution of the sum of $m$ independent random variables with geometric $(1-v)$ distribution on $\mathbb{N}_{+}$. Then there is the well known identity in distribution of Pascal counting processes \cite{F79,BR91} \begin{equation} \label{pascal} \left( N_{g(v,m)} (u,1 ) , 0 \le u \le v \right) \stackrel{(d)}{=} \left( Y_m \left( \log \left( \frac{ 1- v}{1-u} \right) \right), 0 \le u \le v \right), \end{equation} where $(Y_m(t), t \ge 0)$ is a {\em standard Yule process}; that is the pure birth process on positive integers with birth rate $k$ in state $k$, with initial state $Y_m(0) = m$. Let the sample $X_1, X_2, \ldots$ from the RAM be constructed as $X_i = j$ iff $U_i \in (F_{j-1},F_j]$ where $F_j:= 1 - \prod_{i=1}^j(1-W_i)$ for a sequence of stick-breaking factors $(W_i;~i \ge 1)$ independent of the uniform sample points $(U_i;~i \ge 1)$. Then by construction $Q^{*}_{n(k,m)} (i) = N_{g(F_k,m)} (F_i,1 )$ for each $0 \le i \le k$. The identity in distribution \eqref{pascal} yields \begin{equation} \label{pascalq} \left( Q^{*}_{n(k,m)} (i) , 0 \le i \le k \right) \stackrel{(d)}{=} \left( Y_m \left( \log \left( \frac{ 1- F_k}{1-F_i} \right) \right), 0 \le i \le k \right), \end{equation} first conditionally on $F_1, \ldots, F_k$, then also unconditionally, where on the right side it is assumed that the Yule process $Y_m$ is independent of $F_1, \ldots, F_k$. 
By a reversal of indexing, and the equality in distribution $(W_k, \ldots, W_1) \stackrel{(d)}{=} (W_1, \ldots, W_k)$, this gives \begin{equation} \label{pascalq} \left( Q^{*}_{n(k,m)} (k - j) , 0 \le j \le k \right) \stackrel{(d)}{=} \left( Y_m ( \tau_j ), 0 \le j \le k \right), \end{equation} where $\tau_j := \sum_{i=1}^j - \log(1-W_i)$, and the $W_i$ are independent of the Yule process $Y_m$. It is easily shown that the process on the right side of \eqref{pascalq} is a Markov chain with stationary transition function $\widehat{q}$ as in \eqref{hatqdef}. This gives the first part of the lemma, and the remaining parts follow easily. \end{proof} For $P$ the GEM$(\theta)$ distribution, the transition probability function $\widehat{q}$ of the $\widehat{Q}$ chain simplifies to \begin{equation} \label{gemhatq} \widehat{q}(m,n) \stackrel{\theta}{=} \frac{ (m)_{n-m} (\theta)_m }{ (1 + \theta)_{n} } \stackrel{1}{=} \frac{ m }{ n ( n+1) } \quad \mbox{for}~m \le n, \end{equation} where $$(x)_j:= x ( x+1) \cdots (x + j-1) = \frac{\Gamma(x+j)}{\Gamma(x)}. $$ The Markov chain $\widehat{Q}$ with the transition probability function \eqref{gemhatq} for $\theta = 1$ was first encountered by Erd\"os, R\'enyi and Sz\"usz in their study \cite{ERS} of {\em Engel's series} derived from $U$ with uniform $(0,1)$ distribution, that is $$ U = \frac{1}{q_1} + \frac{1}{q_1 q_2 } + \cdots + \frac{1 } { q_1 q_2 \cdots q_n} + \cdots , $$ for a sequence of random positive integers $q_j \ge 2$. They showed that $$ (q_{k+1} - 1, k \ge 0) \stackrel{(d)}{=} (\widehat{Q}_k, k \ge 0 ), $$ for $\widehat{Q}$ with transition matrix $\widehat{q}$ as in \eqref{gemhatq} for $\theta = 1$, and initial distribution \begin{equation} \label{engelinit} \mathbb{P}(\widehat{Q}_0 = m ) = \frac{ 1}{ m (m+1) } \quad \mbox{for}~ m \ge 1. 
\end{equation} R\'enyi \cite[Theorem 1]{Renyi} showed that for this Markov chain derived from Engel's series, the occupation times \begin{equation} \label{occdef} G_j:= \sum_{k=0}^\infty 1( \widehat{Q}_k = j) \quad \mbox{for}~ j \ge 1, \end{equation} are independent random variables with geometric$(j/(j+1))$ distributions on $\mathbb{N}_0$. R\'enyi deduced that with probability one the chain $\widehat{Q}$ is eventually strictly increasing, and \cite[(4.5)]{Renyi} that for the initial distribution \eqref{engelinit} of $\widehat{Q}_0$ \begin{equation} \label{qone} \mathbb{P}( \widehat{Q}_0 < \widehat{Q}_1 < \cdots) \stackrel{1}{=} \prod_{j=1}^\infty \mathbb{P}(G_j \le 1) \stackrel{1}{=} \prod_{j=1}^\infty \frac{ j(j+2) }{(j+1)^2} = \frac{1}{2}, \end{equation} by telescopic cancellation of the infinite product. A slight variation of R\'enyi's calculation gives for each possible initial state $m$ of the chain \begin{equation} \label{qonem} \mathbb{P}(\widehat{ Q}_0 < \widehat{Q}_1 < \cdots \,|\, \widehat{Q}_0 = m ) \stackrel{1}{=} \frac{ m } {m+1 } \prod_{j={m+1}}^\infty \frac{ j (j+2)}{(j+1)^2} = \frac{m}{m + 2}. \end{equation} The instance $m=1$ of this formula, combined with \eqref{uinfform}, proves the formula \eqref{ukone} for $u_\infty$ for the GEM$(1)$ model. A straightforward variation of these calculations gives the corresponding result for $P$ the GEM$(\theta)$ distribution: \begin{equation} \label{uktheta} u_\infty \stackrel{\theta}{=} \frac{1}{1+\theta} \prod_{j =2}^{\infty} \frac{j(j+2 \theta)}{(j+\theta)^2}= \frac{\Gamma(\theta+2)\Gamma(\theta+1)}{\Gamma(2 \theta+2)}. \end{equation} A key ingredient in this evaluation is the fact that in the GEM$(\theta)$ model the random occupation times $G_j$ of $\widehat{Q}$ are independent geometric variables, see \cite{PY17,P17}. For a more general RAM, the $G_j$'s may not be independent, and they may not be exactly geometric, only conditionally so given $G_j \ge 1$. 
The Yule representation \eqref{pascalq} of $\widehat{Q}$ given $\widehat{Q}(0) = 1$ as $\widehat{Q}(j) = Y_1(\tau_j)$ combined with Kendall's representation \cite[Theorem 1]{Kendall66} of $Y_1(t) = 1+ N( \varepsilon (e^t - 1) )$ for $N$ a rate 1 Poisson process and $\varepsilon$ standard exponential independent of $N$, only reduces the expression \eqref{uinfform} for $u_\infty$ back to the limit form as $n \to \infty$ of the previous expression \eqref{PTa}. So $u_\infty$ for a RAM is always an integral over $x$ of the expected value of an infinite product of random variables. See also \cite{Ik1,Ik2,PY17} for treatment of closely related problems. Now we give a proof of Proposition \ref{simplecond}. \begin{proof}[Proof of Proposition \ref{simplecond}] The result of \cite[Theorem 3.3]{Gsmall} shows that under the assumptions of the proposition, if $L_n$ is the number of empty boxes to the left of the rightmost box when $n$ balls are thrown, then $$ L_n \stackrel{(d)}{\longrightarrow} L_{\infty}:= \sum_{j=1}^\infty (G_j - 1)_{+} \quad \mbox{ as } n \to \infty, $$ where the right side is defined by the occupation counts \eqref{occdef} of the Markov chain $\widehat{Q}$ for the special entrance law \begin{equation} \label{entrancelaw} \mathbb{P}( \widehat{Q}_0 = m ) = \frac{ \mathbb{E} W^m }{ m \, \mathbb{E} \left[ - \log(1 - W) \right]} \quad \mbox{for} ~ m \ge 1, \end{equation} which is the limit distribution of $Z_n$, the number of balls in the rightmost occupied box, as $n \to \infty$, and that also $$ \mathbb{E} L_n \to \mathbb{E} L_\infty = \frac{ \mathbb{E} \left[ - \log W \right] }{ \mathbb{E} \left[- \log (1-W)\right] }, $$ which is finite by assumption. It follows that $\mathbb{P}(L_\infty < \infty ) = 1$, hence also that $\mathbb{P}_m(L_\infty < \infty ) = 1$ for every $m$, where $\mathbb{P}_m(\bullet):= \mathbb{P}(\bullet \,|\, \widehat{Q}_0 = m)$. Let $R:= \max \{j : G_j > 1 \}$ be the index of the last repeated value of the Markov chain. 
From $\mathbb{P}_1(L_\infty < \infty) = 1$ it follows that $\mathbb{P}_1(R < \infty) = 1$, hence that $\mathbb{P}_1 (R = r ) > 0$ for some positive integer $r$. But for $r = 2, 3, \ldots$, a last exit decomposition gives $$ \mathbb{P}_1(R = r) = \left( \sum_{k=1}^\infty \mathbb{P}_1( \widehat{Q}_{k-1} = \widehat{Q}_k = r ) \right) \mathbb{P}_r ( L_\infty = 0 ), $$ where both factors on the right side must be strictly positive to make $\mathbb{P}_1(R=r) >0$. Combined with a similar argument if $\mathbb{P}_1(R = 1) >0$, this implies $\mathbb{P}_r ( L_\infty = 0 ) >0$ for some $r \ge 1$, hence also $$ u_\infty = \mathbb{P}_1( L_\infty = 0 ) \ge \mathbb{P}_1( G_i \le 1 \mbox{ for } 0 \le i <r, G_r = 1) \mathbb{P}_r (L_\infty = 0) >0, $$ which is the desired conclusion. \end{proof} To conclude, we present explicit formulas for $u_k$ of a GEM$(1)$-biased permutation of $\mathbb{N}_{+}$. The proof is deferred to the forthcoming article \cite{DPT}. \begin{proposition} \cite{DPT} \label{conju} Let $\Pi$ be a GEM$(1)$-biased permutation of $\mathbb{N}_{+}$, with the renewal sequence $(u_k;~ k \geq 0)$. Then $(u_k;~k \ge 0)$ is characterized by any one of the following equivalent conditions: \begin{enumerate}[$(i).$] \item The sequence $(u_k;~ k \ge 0)$ is defined recursively by \begin{equation} \label{rec} 2 u_{k} + 3 u_{k-1} + u_{k-2} \stackrel{1}{=} 2 \zeta(k) \quad \mbox{with } u_0 = 1, \, u_1 = 1/2, \end{equation} where $\zeta(k): = \sum_{n = 1}^{\infty} 1/n^k$ is the Riemann zeta function. \item For all $k \ge 0$, \begin{equation} \label{rzs} u_{k} \stackrel{1}{=} (-1)^{k-1} \left(2 - \frac{3}{2^k} \right) + \sum_{j=2}^{k} (-1)^{k-j} \left(2 -\frac{1}{2^{k-j}} \right) \zeta(j). \end{equation} \item For all $k \ge 0$, \begin{equation} \label{positive} u_{k} \stackrel{1}{=} \sum_{j = 1}^{\infty} \frac{2}{j^k(j+1)(j+2)}. 
\end{equation} \item The generating function of $(u_{k};~ k \ge 0)$ is \begin{equation} \label{Uz} U(z) : = \sum_{k=0}^{\infty}u_k z^k \stackrel{1}{=} \frac{2}{(1+z)(2+z)} \Bigg[ 1 + \Bigg(2 - \gamma - \Psi(1-z)\Bigg) z\Bigg], \end{equation} where $\gamma: = \lim_{n \rightarrow \infty} (\sum_{k=1}^n 1/k - \ln n) \approx 0.577$ is the {\em Euler constant}, and $\Psi(z): = \Gamma'(z)/\Gamma(z)$ with $\Gamma(z): = \int_0^\infty t^{z-1} e^{-t} dt$, is the {\em digamma function}. \end{enumerate} \end{proposition} The distribution of $Y_1$, that is $f_{k}: = \mathbb{P}(Y_1 = k)$ for all $k \ge 1$, is determined by $(u_k;~ k \ge 0)$ or $U(z)$ via the relations \eqref{ufrecursion}-\eqref{UFrelation}. It is easy to see that the generating function $F(z)$ of $(f_k;~ k \ge 1)$ is real analytic on $(0,z_0)$ with $z_0 \approx 1.29$. This implies that all moments of $Y_1$ are finite. By expanding $F(z)$ into power series at $z=1$, we get: \begin{equation} \label{powerseries} F(z) \stackrel{1}{=} 1 + 3(z-1) + \frac{17}{2} (z-1)^2 + \frac{1}{2}(47 + \pi^2) (z-1)^3 + \cdots, \end{equation} which agrees with the simulation \eqref{simulation}, since $\mathbb{E}Y_1 = F'(1) \stackrel{1}{=} 3$ and $Var(Y_1) = 2 F''(1) + F'(1) - F'(1)^2 \stackrel{1}{=}11$. \end{document}
\begin{document} \begin{center} \LARGE\textbf{Multiplier Theorem for Fourier Series in continuous-discrete Sobolev orthogonal polynomials.} \large \textbf{(B.P.Osilenker, Moscow, b\[email protected])} \end{center} \setlength{\leftskip}{5em} \setlength{\rightskip}{5em} \textbf{Abstract.} In this paper we study the multipliers of Fourier series in polynomials orthogonal in continuous-discrete Sobolev’s spaces. A Multiplier Theorem for Fourier-Sobolev series is obtained. This result is based on the representation of the Fejér kernel, on the construction of the “humpbacked majorant” and weighted estimates of maximal functions. \textbf{Key words.} Orthogonal polynomials, Fourier series, multipliers, partial sums, Fejér’s averages, Dirichlet’s kernel, Fejér kernel, Sobolev’s polynomials, continuous-discrete spaces, Lebesgue’s points, continuously-discrete Sobolev spaces, multipliers of convergence \setlength{\leftskip}{0em} \setlength{\rightskip}{0em} Let $\theta(x)$ be a positive Borel measure in $[-1, 1]$ with infinitely many points of increase, and let $a_k$, $-1\leq a_k \leq 1$, $k=1,2,\ldots, m$, be its mass points. For $f$ and $g$ in $L^2_{d\theta}[-1, 1]$ such that the derivatives at $a_k$ exist, one can introduce the inner product \begin{equation} \left<f, g\right> = \int^1_{-1} f(x)g(x)d\theta(x) + \sum^m_{k=1}\sum^{N_k}_{i=0} M_{k, i} f^{(i)}(a_k)g^{(i)}(a_k), \nonumber \end{equation} where $M_{k, i}\geq 0\ (i=0,1,2,\ldots,N_k-1);\ M_{k, N_k}>0\ (k=1,2,\ldots,m)$ and $\theta(\{a_k\})=0\ (k=1,2,\ldots,m)$. A linear space with this inner product is called a «continuous-discrete Sobolev space». 
Let $\{\widehat{q}_n(x),\ n\in\mathbb{Z}_+,\ \mathbb{Z}_+=\{0,1,2,\ldots\};\ x\in[-1, 1]\}$ be the sequence of polynomials of degree $n$ with positive leading coefficients orthonormal with respect to the inner product (continuous-discrete Sobolev orthonormal polynomials) \begin{equation} \left<\widehat{q}_n, \widehat{q}_m\right> = \int^1_{-1} \widehat{q}_n(x)\widehat{q}_m(x)d\theta(x) + \sum^m_{k=1}\sum^{N_k}_{i=0} M_{k, i} \widehat{q}_n^{(i)}(a_k)\widehat{q}_m^{(i)}(a_k) = \delta_{n,m}. \nonumber \end{equation} Denote by $\mathfrak{R}_p(1\leq p <\infty)$ the set of functions \begin{equation} \mathfrak{R}_p = \begin{Bmatrix} f,\ \int_{-1}^1|f(x)|^pd\theta(x)<\infty;\ f^{(i)}(a_k)\text{ - exist}\\ i = 0,1,2,\ldots,N_k;\ -1\leq a_k\leq 1(k=1,2,\ldots,m) \end{Bmatrix}. \nonumber \end{equation} To each $f\in\mathfrak{R}_p$ we assign Fourier-Sobolev series \begin{equation} f(x) \sim \sum^\infty_{k=0}c_k(f)\widehat{q}_k(x)\ (x\in[-1, 1]), \nonumber \end{equation} with Fourier coefficients \begin{equation} c_k(f) = \left<f, \widehat{q}_k\right> = \int^1_{-1}f(x)\widehat{q}_k(x)d\theta(x) +\sum^m_{s=1}\sum^{N_s}_{i=0} M_{s,i}f^{(i)}(a_s)\widehat{q}^{(i)}_k(a_s). \nonumber \end{equation} We consider the following sequence of real numbers \begin{equation} \Phi = \{\phi_k,\ k=0,1,2,\ldots;\ \phi_0=1;\ \{\phi_k\}^\infty_{k=0}\in l^\infty\}. \nonumber \end{equation} For any function $f\in\mathfrak{R}_p$, by its Fourier-Sobolev series we introduce the linear transformation $T$ defined by relation \begin{equation} \label{eq1} T(f;x;\Phi)\sim\sum^\infty_{k=0}\phi_k c_k(f)\widehat{q}_k(x). \end{equation} Transformation $T$ is called the multiplier operator, the sequence $\{\phi_k\}^\infty_{k=0}$ is called the multiplier of convergence, and series \eqref{eq1} is called the multiplier series. We investigate some problems of pointwise and uniform multipliers of convergence for Fourier-Sobolev series. 
A Multiplier Theorem for the Fourier-Sobolev series is obtained. Many papers have been devoted to continuous-discrete Sobolev orthonormal polynomials and Fourier series (see, for example, [1]--[27]). Some results about multipliers of the Fourier series in polynomials orthonormal in continuous-discrete Sobolev spaces were announced in [20]. Let $N^*_k$ be the positive integer defined by \begin{equation} N^*_k = \begin{cases} N_k + 1,\text{ if }N_k\text{ is odd,}\\ N_k + 2,\text{ if }N_k\text{ is even,} \end{cases}\nonumber \end{equation} \begin{equation} w_N(x) = \prod^m_{k=1} (x-a_k)^{N^*_k},\ N=\sum^m_{k=1}N^*_k,\ \pi_{N+1}(x)=\int^x_{-1} w_N(t)dt. \nonumber \end{equation} Orthonormal polynomials $\widehat{q}_n(x)$ satisfy the following recurrence relation \begin{equation} \pi_{N+1}\widehat{q}_n(x) = \sum^{N+1}_{j=0} d_{n+j,j}\widehat{q}_{n+j}(x) + \sum^{N+1}_{j=1} d_{n,j}\widehat{q}_{n-j}(x)(n\in\mathbb{Z}_+;\widehat{q}_{-j}=0,\ j=1,2,\ldots;\ d_{n,s}=0,\ n=0,1,\ldots,s-1). \nonumber \end{equation} Define \begin{equation} \varepsilon_m = (-1, 1)\cup\bigcup^m_{s=1}\{a_s\}. \nonumber \end{equation} The sequence $\Phi = \{\phi_n,\ n=0,1,2,\ldots;\ \phi_0=1\}$ is called quasiconvex if \begin{equation} \sum^\infty_{k=0} (k+1)|\Delta^2\phi_k|<\infty,\nonumber \end{equation} where $\Delta\phi_k = \phi_k-\phi_{k+1}$, $\Delta^2\phi_k = \Delta(\Delta\phi_k) = \phi_k - 2\phi_{k+1} + \phi_{k+2}(k=0,1,\ldots,n)$. \textbf{Theorem 1.} Let the orthonormal polynomial system $\{\widehat{q}_k(x)\}^\infty_{k=0}$ satisfy the following condition \begin{equation}\label{eq2} |\widehat{q}_k(t)|\leq h(t)(t\in\varepsilon_m) \end{equation} and for the recurrence coefficients the estimate \begin{equation}\label{eq3} \sum^{N+1}_{j=1}j\sum^{N+1}_{l=0}\sum^\infty_{s=0}\left(|d_{s+j,j}-d_{s+j+l,j}| + |d_{s+j,l}-d_{s+j+l,l}|\right)<\infty \end{equation} holds. 
If for quasiconvex sequence $\Phi$ the relation \begin{equation}\label{eq4} \phi_k = O\left(\frac{1}{\ln k}\right)(k\rightarrow\infty) \end{equation} holds, then the following statements are valid: \begin{enumerate}[(i)] \item let for each function $f\in\mathfrak{R}_p(1\leq p<\infty)$ be fulfilled \begin{equation}\label{eq5} \int^1_{-1}\left|f(t)\right|^ph^p(t)d\theta(t)<\infty,\ \ \int^1_{-1}h^p(t)d\theta(t)<\infty, \end{equation} then at every Lebesgue's point $x\in\varepsilon_m$(and, consequently, a.e.) the series \eqref{eq1} converges \begin{equation} T(f;x;\Phi) = \sum^\infty_{k=0}\phi_kc_k(f)\widehat{q}_k(x);\nonumber \end{equation} \item in addition, suppose function $f$ is continuous in $[-1,1]$ and the measure $d\theta(x)$ is absolutely continuous and \begin{equation}\label{eq6} d\theta(x) = \omega(x)dx,\ \omega(x)\text{ is continuous in }\varepsilon_m; \end{equation} then the series \eqref{eq1} is uniformly converges on compact subsets $K\subset\varepsilon_m$. \end{enumerate} We define for $f\in\mathfrak{R}_p$ the space $W^p_\theta(F)(1\leq p<\infty)$ for subset $F\subseteq[-1, 1]$: \begin{equation} W^p_\theta(F) = \{f,\ ||f||_{W^p_{\theta}(F)}<+\infty,\ ||f||^P_{W^p_{\theta^p}(F)} = ||f||^p_{L^P_\theta(F)} +\sum^m_{k=1}\sum^{N_k}_{i=0}M_{k,i}|f^{(i)}(a_k)|^p\}. \nonumber \end{equation} The space $W^p_\theta([-1,1])(1\leq p <\infty)$ is not complete. \textbf{Theorem 2.} Let the orthonormal polynomial system $\{\widehat{q}_k(x)\}^\infty_{k=0}$ be satisfy the following condition \eqref{eq2}, \eqref{eq3}, \eqref{eq6} and \begin{equation}\label{eq7} \sup_{n\in\mathbb{Z}_+}\sum^n_{j=0}|q^{(i)}_j(a_s)|<\infty(i=0,1,\ldots,N_s;\ s=1,2,\ldots,m), \end{equation} \begin{equation}\label{eq8} ||h||_{L^p_\theta([-1,1])}<\infty,\ ||h||_{L^q_\theta([-1,1])}<\infty\left(1<p<\infty,\ \frac{1}{p}+\frac{1}{q}=1\right). 
\end{equation} If the sequence $\Phi$ is quasiconvex and satisfies \eqref{eq4}, then for $f\in W^p_\theta([-1,1])(1<p<\infty)$, satisfying \eqref{eq5}, on any compact subset $K\subseteq \varepsilon_m$ the following estimate \begin{equation} ||T(f;x;\Phi)||_{W^p_\theta(K)}\leq C_p||f||_{W^p_\theta([-1,1])},\nonumber \end{equation} holds, where the constant $C_p>0$ is independent of the function $f$ and the sequence $\Phi$. \textbf{Remark.} Symmetric Gegenbauer-Sobolev orthonormal polynomials $\left\{\widehat{B}^{(\alpha)}_n(x)\right\}$$\left(n\in\mathbb{Z}_+;\ x\in[-1,1]\right)$ orthonormal in an inner product \begin{equation} \left<f,g\right>_\alpha = \int^1_{-1} f(x)g(x)w_\alpha(x)dx + M\left[f(1)g(1) + f(-1)g(-1)\right] + N\left[f'(1)g'(1) + f'(-1)g'(-1)\right]\ \ \ (M\geq0;N\geq0),\nonumber \end{equation} where \begin{equation} w_\alpha(x) = \frac{\Gamma(2\alpha+2)}{2^{2\alpha + 1}\Gamma^2(\alpha+1)}(1-x^2)^\alpha\left(\alpha>\frac{1}{2}\right),\nonumber \end{equation} satisfy the conditions \eqref{eq2}, \eqref{eq3}, \eqref{eq6}, \eqref{eq7}, \eqref{eq8}. \begin{center} \large \textbf{REFERENCES} \end{center} \begin{enumerate}[{[}1{]}] \item H.Bavinck. Differential operators having Sobolev-type Gegenbauer polynomials as eigenfunctions, J.Comput. Appl. Math., 118(2000), 23-42. \item H.Bavinck, J.Koekoek. Differential operators having symmetric orthogonal polynomials as eigenfunctions, J.Comput. Appl. Math.,106(1999), 369-393. \item H.Bavinck, H.G.Meijer. Orthogonal polynomials with respect to a symmetric inner product involving derivatives, Appl. Anal. 33(1989), 103-117. \item H.Bavinck, H.G.Meijer. On orthogonal polynomials with respect to an inner product involving derivatives: zeros and recurrence relations, Indag. Math.(N.S.) 1(1990), 7-14. \item Ó.Ciaurri, J.Minguez, Fourier series of Gegenbauer-Sobolev polynomials. Symmetry, Integrability and Geometry: Methods and Applications 14(2018), Paper 024, 11pp. 
\item Ó.Ciaurri, J.Minguez Fourier series of Jacobi-Sobolev polynomials. Transforms Spec. Funct., 30(2019), 334-346. \item Foulquie A.Moreno,F. Marcellán, B.P. Osilenker, Estimates for polynomials orthogonal with respect to some Gegenbauer-Sobolev inner product, J.Ineq.Appl. 3(1999), 401-419. \item Abel Díaz-González, Francisco Marcellán-Español , Héctor Pijeira-Cabrera and Wilfredo Urbina-Romero. Discrete-Conti\-nuous Jacobi-Sobolev Spaces and Fourier Series. arXiv:1911, 12746v1[math.CA]28 Nov.2019. \item J. Heinonen, T.Kilpelâinen, O.Martio, Nonlinear Potential Theory of Degenerate Elliptic Equations, Oxford Science Publ., Clarendon Press, Oxford, 1993. \item T.Kilpelâinen, Weighted Sobolev spaces and capacity, Ann. Acad. Sci. Fenn. Ser.A.I.Math. 19(1994), 95-113. \item R.Koekoek. Differential equations for symmetric generalized ultraspherical polynomials//Trans.Amer.Math.Soc. 345:1 (1994), 47-72. \item A.Kufner, Weighted Sobolev Spaces, Teubner Verlagsgesellschaft, Teubner – Texte zur Mathematik (Band 31), 1980; also published by Wiley, 1985. \item A.Kufner,A.M.Sänding, Some Applicatios of Weighted Sobolev Spaces, Teubner Verlagsgesellschaft, Teubner – Texte zur Mathematik (Band 100), [39]. \item F.Marcellán, B.P.Osilenker, I.A.Rocha, On Fourier series of Jacobi- Sobolev orthogonal polynomials, J. Ineq. Appl. , 7(5) (2002), 673-699. \item F.Marcellán, B.P. Osilenker, I.A.Rocha, On Fourier series of a discrete Jacobi-Sobolev Inner Product, J. Approx. Theory, 117(2002), 1-22. \item F. Marcellán, Y.Xu, On Sobolev orthogonal polynomials, Expositiones Math., 33(2015), 308-352. \item B.P.Osilenker, Generalized trace formula and asymptotics of the averaged Turan determinant for orthogonal polynomials, J. Approx. Theory. 141(2005), 70-94. \item B.P.Osilenker, An Extremal Problem for Algebraic Polynomials in the Symmetric Discrete Gegenbauer-Sobolev Space, Math.Notes, 82:3(2007), 411-425; translation in Mathematical Notes, 82:3(2007), 366-379. 
\item B.P.Osilenker, On linear summability methods of Fourier series in polynomials orthogonal in a discrete Sobolev space, Siberian Math. Journal 56:2(2015), 420-435; translation in Siberian Math. Journal, 56:2(2015), 339-351. \item B.P.Osilenker, On multipliers for Fourier series in polynomials orthogonal in continuous-discrete Sobolev spaces, Contempo\-rary problems of mathematics and mechanics, Moscow, Max Press, 2019, 500-503 [in Russian]. \item I.A.Rocha ,F. Marcellán , L.Salto , Relative asymptotics and Fourier series of orthogonal polynomials with a discrete Sobolev inner product, J. Approx. Theory,121( 2003), 336-356. \item J.M.Rodriguez, Approximation by polynomials and smooth functions in Sobolev spaces with respect to measures, J. Approx. Theory 120(2003), 185-216. \item J.M.Rodriguez, V.Álvarez, E.Romera, D.Pestana, Generalized Weighted Sobolev Spaces and Applications to Sobolev Orthogonal Polynomials, I,Acta Appl. Math. 80(2004), 273-308. \item J.M.Rodriguez, V.Álvarez, E.Romera, D.Pestana, Generalized Weighted Sobolev Spaces and Applications to Sobolev Orthogonal Polynomials, II, Approx . Theory Appl. 18:2(2002), 1-32. \item I.I.Sharapudinov, Systems of Sobolev–orthogonal functions associated with an orthogonal systems, Izv.Ross.Akad.Nauk, Ser.Math., 82:1(2018), 225-258; translation in Izvestia Mathematics,82:1(2018), 212-244. \item I.I.Sharapudinov. Systems of functions, Systems of Sobolev-orthogonal functions and their Applications, Uspehi Math. Nauk, 74:4(448)(2019), 87-164 [in Russian]. \item H.Triebel.Interpolation Theory,Function Spaces, Differential Operators, Mir, Moscow, 1980 [in Russian]. \end{enumerate} \end{document}
\begin{document} \title[An improved bound in Wirsing's problem]{An improved bound in Wirsing's problem} \author{Dmitry Badziahin, Johannes Schleischitz} \thanks{Middle East Technical University, Northern Cyprus Campus, Kalkanli, G\"uzelyurt \\ [email protected]} \begin{abstract} We improve the lower bound for the classical exponent of approximation $w_{n}^{\ast}(\xi)$ connected to Wirsing's famous problem of approximation to real numbers by algebraic numbers of degree at most $n$. Our bound exceeds $n/\sqrt{3}\approx 0.5773n$ and thus provides a reasonable qualitative improvement to previous bounds of order $n/2+O(1)$. We further establish new relations between several classical exponents of approximation. \end{abstract} \maketitle {\footnotesize{ {\em Keywords}: Wirsing's problem, exponents of Diophantine approximation, parametric geometry of numbers\\ Math Subject Classification 2010: 11J13, 11J82, 11J83}} \section{Wirsing's problem: Introduction and main results} \label{int01} In this paper we are concerned with approximation to a transcendental real number $\xi$ by algebraic real numbers $\alpha$ of degree at most $n$. A classical setup is to relate the quality of approximation $|\xi-\alpha|$ with the naive height $H(\alpha)$ of the minimal polynomial of $\alpha$ over $\mathbb{Z}$ with coprime coefficients, that is the maximum modulus of its coefficients. In 1961 Wirsing~\cite{wirsing} defined the quantity $w_{n}^{\ast}(\xi)$ as the supremum of $w^{\ast}$ for which the estimate \[ | \xi-\alpha| < H(\alpha)^{-w^{\ast}-1} \] has infinitely many solutions in algebraic real numbers $\alpha$ of degree at most $n$. A longstanding open problem posed by Wirsing in~\cite{wirsing} is to decide whether the quantity $w_{n}^{\ast}(\xi)$ is always bounded from below by $n$. For $n=1$ this is true by Dirichlet's Theorem. 
In fact, by the theory of continued fractions, the estimate $|\alpha-\xi|< cH(\alpha)^{-2}$ has infinitely many solutions in rational numbers $\alpha=p/q$ (s.t. $H(\alpha)=\max\{|p|,|q|\}$) for any $c>\max\{1,|\xi|\}/\sqrt{5}$, see~\cite[Theorem~2F in Chapter I]{schmidt}. It was further verified for $n=2$ in a paper of Davenport and Schmidt~\cite{davsh67} from 1967, who similarly established an estimate of the form $|\alpha-\xi|< cH(\alpha)^{-3}$ with some explicit $c=c(\xi)$ for infinitely many rational or quadratic irrational numbers. In fact, the numbers $\alpha$ can be chosen quadratic irrationalities~\cite{moscj}. Furthermore, a combination of Sprind\v{z}uk's famous result~\cite{sprindzuk} with ~\cite[(7)]{wirsing} implies that almost all $\xi$ with respect to Lebesgue measure satisfy the identity $w_{n}^{\ast}(\xi)=n$ for any $n\geq 1$. The identity also holds for any algebraic number $\xi$ of degree larger than $n$ by an application of Schmidt Subspace Theorem~\cite[Theorem~2.9]{bugbuch}. Apart from that, for $n\geq 3$ and general $\xi$, Wirsing's problem remains open. It should be mentioned that a similar problem with respect to approximation by algebraic integers was answered negatively. For the case of approximation by cubic algebraic integers counterexamples were found by Roy~\cite{roy}. A bound of the form $w_{n}^{\ast}(\xi)\geq n/2+1-o(1)$ as $n\to\infty$ was established by Wirsing himself in the same paper~\cite{wirsing}. This had so far only been mildly improved by some additive constant. Bernik and Tsishchanka~\cite{ber} were first to improve the bound to an expression of order $n/2+2-o(1)$, and this was refined in follow up papers by Tsishchanka, the latest~\cite{Tsi07} contains the best currently known bound of order $n/2+3-o(1)$ (as $n$ tends to infinity). In this paper we finally go beyond the bound of order $n/2+O(1)$ by establishing the estimate $w_{n}^{\ast}(\xi)/n>1/\sqrt{3}>0.57$. 
To state our main results in a compact form let us define \[ \overline{w}^{\ast}(\xi)= \limsup_{n\to\infty} \frac{w_{n}^{\ast}(\xi)}{n}, \qquad \underline{w}^{\ast}(\xi)= \liminf_{n\to\infty} \frac{w_{n}^{\ast}(\xi)}{n}. \] Then we show \begin{theorem} \label{1B} Let $n\geq 4$ be an integer. Let $\xi$ be any transcendental real number. Then we have \[ w^{\ast}_{n}(\xi) > \frac{1}{\sqrt{3}}\cdot n= 0.5773\ldots n. \] In particular $\underline{w}^{\ast}(\xi) \geq 1/\sqrt{3}$. Moreover, \begin{equation} \label{eq:obenba} \overline{w}^{\ast}(\xi) \geq \delta, \end{equation} where $\delta=0.6408\ldots$ is given as $G(\gamma_{0})$ where \begin{equation} \label{eq:G} G(t)= \frac{4(t-t^{2})}{2t^{2}+2t-1+\sqrt{4t^{4}+24t^{3}-32t^{2}+12t+1}} \end{equation} and $\gamma_{0}$ is the root of $Q(t)=4t^4 - 12t^3 + 10t^2 - 6t + 1$ in $t\in(0,1/2)$. \end{theorem} Clearly if Wirsing's problem has a positive answer then $\underline{w}^{\ast}(\xi)\geq 1$ for any transcendental real $\xi$. However, it seems that there is no easy argument available to deduce any lower bound better than $1/2$ even for the larger quantity $\overline{w}^{\ast}(\xi)$. Theorem~\ref{1B} follows from optimization of $m$ in the following result. \begin{theorem} \label{1A} Let $n\geq 4$ be an integer and $\xi$ be a transcendental real number. Then for any $1\leq m< (n-1)/2$ one has \[ w_{n}^{\ast}(\xi) \geq \frac{4mn+6n-4m^{2}-8m}{2m+2-n+\sqrt{n^{2}+12mn+20n-12m^{2}-24m+4}}. \] \end{theorem} A slight improvement of the bound can be derived by our method. However, the resulting bound is a root of a complicated cubic polynomial and the refinement is too insignificant to improve on the factors $1/\sqrt{3}$ and $\delta$ of Theorem~\ref{1B}. See the comments below the proof for details. It is also worth noting that for $n\leq 24$, the bound by Tsishchanka~\cite{Tsi07} for $w_{n}^{\ast}(\xi)$ is better. 
The table below compares the bound of~\cite{Tsi07} with those from Theorem~\ref{1A} with suitable $m$ for some particular values of $n$. \begin{center} \begin{tabular}{ |c|c|c| } \hline n & Tsi & BS \\ \hline 3 & 2.73 & - \\ 4 & 3.45 & 2.64 \\ 5 & 4.14 & 3.34 \\ 10 & 7.06 & 6.42 \\ 20 & 12.39 & 12.16 \\ 24 & 14.46 & 14.46 \\ 25 & 14.98 & 15.04 \\ 30 & 17.55 & 17.92 \\ 50 & 27.70 & 29.46 \\ 100 & 52.84 & 58.32 \\ 1000 & 502.98 & 577.92 \\ \hline \end{tabular} \end{center} While other approaches to Wirsing's problem rely on counting algebraic numbers in small intervals, see for instance the recent preprint by Bernik, Goetze and Kalosha~\cite{bgk}, our result relies solely on relations between different exponents of Diophantine approximation defined in Section~\ref{e} below. Thereby we build on ideas of Wirsing~\cite{wirsing}, Davenport and Schmidt~\cite{davsh} and Laurent~\cite{laurent}. For variants of Wirsing's problem that have been studied, including prescribing the degree of $\alpha$ as equal to $n$ (see~\cite{buteu}) or considering algebraic integers $\alpha$ of degree $n+1$ as in~\cite{roy}, our method does not apply. The concrete obstruction is identified in Section~\ref{twor}. Nevertheless we conjecture that the claims remain true. \section{Other classical exponents of approximation} \subsection{Exponents of Diophantine approximation} \label{e} Apart from $w_{n}^{\ast}(\xi)$ itself, the most important exponents in this paper are $\widehat{\lambda}_{n}(\xi)$, defined as the supremum of $\lambda$ such that the inequalities \begin{equation} \label{eq:tierekt} 1\leq x\leq X, \qquad L(\underline{x}):=\max_{1\leq j\leq n} \vert \xi^{j}x-y_{j}\vert\leq X^{-\lambda}, \end{equation} have an integer vector solution $\underline{x}=(x,y_{1},\ldots,y_{n})$ for all large $X$. An easy application of Dirichlet's theorem implies that $\widehat{\lambda}_{n}(\xi)$ is bounded below by $1/n$. 
On the other hand, Davenport and Schmidt~\cite{davsh} verified that $\widehat{\lambda}_{n}(\xi)$ does not exceed $2/n$. Thus it may vary only up to a factor $2$. Slight improvements of the upper bound for odd $n$ by Laurent~\cite{laurent} and for even $n$ by Schleischitz~\cite{equprin, indag, js} were obtained later. See also Roy~\cite{roy3} for $n=3$. Note that it follows from Davenport and Schmidt~\cite[Lemma~1]{davsh} that any improvement of the factor $2$ separating the upper bound from the trivial lower bound $1/n$ would directly lead to an improvement of the factor $1/2$ in the Wirsing's problem (as we establish in this paper), see Section~\ref{twor}. While we are unable to provide such improvements for $\widehat{\lambda}_{n}(\xi)$, the underlying estimate of Davenport and Schmidt is a crucial ingredient in our argument. We will sporadically make reference to the ordinary exponents $\lambda_{n}(\xi)$ defined similarly, but where we impose that \eqref{eq:tierekt} has a solution for some arbitrarily large values of $X$. This weaker condition is reflected in $\lambda_{n}(\xi)\geq \widehat{\lambda}_{n}(\xi)$. We will further employ the dual linear form exponents $w_{n}(\xi), \widehat{w}_{n}(\xi)$ defined as the supremum of $w$ so that the system \[ 1\leq \max_{1\leq j\leq n} |a_{j}|\leq X, \qquad |a_{0}+\xi a_{1}+\cdots+\xi^{n}a_{n}|\leq X^{-w} \] has a solution in integers $a_{0},\ldots,a_{n}$ for arbitrarily large $X$ and all sufficiently large $X$, respectively. These exponents also satisfy $w_{n}(\xi)\geq \widehat{w}_{n}(\xi)\geq n$ by the Dirichlet box principle, and again in~\cite{davsh} Davenport and Schmidt found the upper bound $2n-1$ for the uniform exponent $\widehat{w}_{n}(\xi)$, as well as an improved bound for $n=2$ which turned out to be sharp~\cite{royjl}. Again, as for $\widehat{\lambda}_{n}(\xi)$, the upper and lower bounds roughly differ by a factor of 2 which for large $n$ has not been improved so far. 
However, refinements in the constant term were made first by Bugeaud and Schleischitz~\cite{buschlei}. The proof strategy in~\cite{buschlei}, in the light of later findings~\cite{unif, mamo}, in turn yields slightly stronger bounds, in particular $\widehat{w}_{n}(\xi)\le 2n-2$ for $n\ge 10$. See also~\cite{acta2018}, where a conjectural bound of order $(1+1/\sqrt{2})n-o(1)<1.71n$ was motivated as well. Again, while we do not improve the bounds for the exponent $\widehat{w}_{n}(\xi)$, another estimate from~\cite{buschlei} linking it with $w_{n}^{\ast}(\xi)$ is essential for this paper. \subsection{New relations between classical exponents} On the way to the main results we establish the following connections between various exponents of approximation which are of some independent interest. \begin{theorem} \label{openup2} Let $m,n,\xi$ be as in Theorem~\ref{1A} and assume \begin{equation} \label{eq:only2} \widehat{\lambda}_{n}(\xi) > \frac{1}{n-m}. \end{equation} Then we have \begin{equation} \label{eq:h11} \widehat{w}_{n-m}(\xi)\geq \frac{(n-m)\widehat{\lambda}_{n}(\xi)+n-2m-1}{1-m\widehat{\lambda}_{n}(\xi)}. \end{equation} Moreover \begin{equation} \label{eq:h21} w_{n-m}(\xi)\geq \max\left\{\frac{(n-m)\widehat{\lambda}_{n}(\xi)+n-2m-2}{1-(m+1)\widehat{\lambda}_{n}(\xi)},\; \frac{(n-m)\lambda_{n}(\xi)+n-2m-1}{1-m\lambda_{n}(\xi)}\right\}, \end{equation} and conversely \begin{equation} \label{eq:verynew} w_{n-m}(\xi) \leq \frac{n-m-1}{m+1}\cdot \frac{(n-m)\widehat{\lambda}_{n}(\xi)+n-2m-1}{(n-m)\widehat{\lambda}_{n}(\xi)-1}. \end{equation} Finally, \begin{equation} \label{eq:windag} w_{m+1}(\xi) \leq \frac{1}{\widehat{\lambda}_{n}(\xi)}<n-m. \end{equation} \end{theorem} In fact, we only require \eqref{eq:h11} for the proof of Theorems~\ref{1B} and~\ref{1A}. The bounds \eqref{eq:h11}, \eqref{eq:h21} are increasing in $\widehat{\lambda}_{n}(\xi)$ and non-trivial (i.e. exceed $n-m$). 
We remark that a very similar argument would lead to the estimate $w_{\lfloor n/2\rfloor}(\xi)\leq 1/\widehat{\lambda}_{n}(\xi)$ if $\widehat{\lambda}_{n}(\xi)>\lceil n/2\rceil^{-1}$ (note that it is an upper bound here), which leads to a contradiction, in view of the reverse estimate $w_{\lfloor n/2\rfloor}(\xi)\geq \lfloor n/2\rfloor$. This would yield an alternative proof of the bounds for $\widehat{\lambda}_{n}(\xi)$ in~\cite{laurent}. We want to state the special case $n=4, m=1$ where much cancellation occurs as a corollary. \begin{corollary} \label{4fall} Let $\xi$ be real transcendental with $\widehat{\lambda}_{4}(\xi)>1/3$. We have \[ \frac{3\widehat{\lambda}_{4}(\xi)+1}{3\widehat{\lambda}_{4}(\xi)-1}\geq w_{3}(\xi) \geq \max\left\{ \frac{3\widehat{\lambda}_{4}(\xi)}{1-2\widehat{\lambda}_{4}(\xi)}, \frac{3\lambda_{4}(\xi)+1}{1-\lambda_{4}(\xi)}\right\}, \qquad \widehat{w}_{3}(\xi)\geq \frac{3\widehat{\lambda}_{4}(\xi)+1}{1-\widehat{\lambda}_{4}(\xi)}, \] and \[ w_{2}(\xi)\leq \frac{1}{\widehat{\lambda}_{4}(\xi)}<3. \] \end{corollary} Comparing the left lower and the upper bound for $w_{3}(\xi)$ gives $\widehat{\lambda}_{4}(\xi)\leq (\sqrt{19}+2)/15=0.4239\ldots$, which is however weaker than the best known bound $0.3706\ldots$ from~\cite{js} (a weaker bound in~\cite{equprin} differs only in the fifth decimal digit). The same method can be applied to any even $n$ and $m=n/2-1$. Then Theorem~\ref{openup2} yields that in the case $\widehat{\lambda}_{n}(\xi) > \frac{2}{n+2}$ one has \[ \max\left\{\frac{(n+2)\widehat{\lambda}_{n}(\xi)}{2-n\widehat{\lambda}_{n}(\xi)},\; \frac{(n+2)\lambda_{n}(\xi)+2}{2-(n-2)\lambda_{n}(\xi)}\right\}\leq w_{\frac{n}{2}+1}(\xi) \leq \frac{(n+2)\widehat{\lambda}_{n}(\xi)+2}{(n+2)\widehat{\lambda}_{n}(\xi)-2}. \] This further implies $\widehat{\lambda}_{n}(\xi)\leq 2/n-(4/3+o(1))n^{-2}$ as $n\to\infty$, however again larger than the bound in~\cite[Theorem~4.1]{equprin} of order $2/n-(3.18\ldots+o(1))n^{-2}$. 
As for the exponent $w_{n}^{\ast}$, we define the upper limits \[ \overline{\widehat{w}}(\xi) = \limsup_{n\to\infty} \frac{\widehat{w}_{n}(\xi)}{n}, \qquad \overline{\widehat{\lambda}}(\xi) = \limsup_{n\to\infty} n\widehat{\lambda}_{n}(\xi), \] and accordingly, the lower limits $\underline{\widehat{w}}(\xi)$ and $\underline{\widehat{\lambda}}(\xi)$. These quantities all lie in the interval $[1,2]$, see Section~\ref{e}. Another consequence of Theorem~\ref{openup2} reads \begin{corollary} \label{comp} For any transcendental real number $\xi$ we have \[ \overline{\widehat{w}}(\xi) \geq \frac{1-2\cdot R\left(\overline{\widehat{\lambda}}(\xi) \right)}{1-\left(\overline{\widehat{\lambda}}(\xi)+1\right)\cdot R\left(\overline{\widehat{\lambda}}(\xi) \right) +\overline{\widehat{\lambda}}(\xi)\cdot R\left(\overline{\widehat{\lambda}}(\xi) \right)^{2}} =: S\left(\overline{\widehat{\lambda}}(\xi) \right), \] and similarly \[ \underline{\widehat{w}}(\xi) \geq \frac{1-2\cdot R\left(\underline{\widehat{\lambda}}(\xi) \right)}{1-\left(\underline{\widehat{\lambda}}(\xi)+1\right)\cdot R\left(\underline{\widehat{\lambda}}(\xi) \right) +\underline{\widehat{\lambda}}(\xi)\cdot R\left(\underline{\widehat{\lambda}}(\xi) \right) ^{2}} =: S\left(\underline{\widehat{\lambda}}(\xi) \right) \] where the function $R(t)$ is given as \[ R(t)= \frac{t-\sqrt{2t-t^{2}}}{2t}. \] \end{corollary} One can verify that the function $S$ induces an increasing bijection from the interval $[1,2]$ to itself. We compute $S(1.5)=1.0718\ldots$, $S(1.75)=1.2038\ldots$, $S(1.99)=1.7527\ldots$, $S(1.9999)=1.9721\ldots$. Corollary~\ref{comp} complements~\cite[Theorem~3.4]{equprin} where reverse estimates in form of lower bounds for $\underline{\widehat{\lambda}}(\xi), \overline{\widehat{\lambda}}(\xi)$ in terms of $\underline{\widehat{w}}(\xi), \overline{\widehat{w}}(\xi)$ respectively were established (formulated there for ordinary exponents but as stated below the theorem it is true for uniform exponents as well). 
\begin{proof} Let $\epsilon>0$ be arbitrary. Then for certain arbitrarily large $n$ we get $\lambda:= n\widehat{\lambda}_{n}(\xi) > \overline{\widehat{\lambda}}(\xi) - \epsilon$. Fix some $\alpha\in [0,1/2)$ and select $m:= \lfloor n\alpha\rfloor$. Define $c$ from the equation $\widehat{\lambda}_{n}(\xi)=c/(n-m)$. Then $c = (1-\alpha)\lambda + o(1)$ as $n\to\infty$. If $c$ exceeds one, we may apply Theorem~\ref{openup2} to get \[ \frac{\widehat{w}_{n-m}(\xi)}{n-m} \geq \frac{\widehat{\lambda}_{n}(\xi) + \frac{n-2m-1}{n-m}}{1-m\widehat{\lambda}_{n}(\xi)}= \frac{1-2\alpha}{1-(\lambda+1)\alpha+\lambda\alpha^{2}}+o(1), \qquad n\to\infty. \] In the given range of $\alpha$ the expression is maximized for \[ \alpha= \frac{1}{2}-\frac{\sqrt{2\lambda-\lambda^{2}}}{2\lambda}+o(1), \qquad n\to\infty, \] and inserting gives the first lower bound of the corollary as we may choose $\lambda$ arbitrarily close to $\overline{\widehat{\lambda}}(\xi)$. Finally we check that the prerequisite $c>1$ is equivalent to $\lambda+\sqrt{2\lambda-\lambda^{2}}>2$ for small enough $\epsilon$ and large enough $n$. This inequality is verified for $\lambda\in(1,2)$, and for $\lambda=1$ our claim holds for trivial reasons. The second lower bound follows analogously. \end{proof} \section{Preparatory concepts and the crucial lemma} \label{prepara} In this section we prepare the proof of Theorem~\ref{openup2}. \subsection{Minimal points and the key lemma} We will use the concept of minimal points as for instance used in~\cite{davsh, laurent}. Let $n\in\mathbb{N}$ and transcendental real $\xi$ be given. Consider the simultaneous approximation problem \eqref{eq:tierekt}. Then $n,\xi$ give rise to a unique (up to sign) sequence of best approximations \[ \underline{x}_{i}= \underline{x}_{i}(n,\xi)= (x_{i,0},x_{i,1},\ldots,x_{i,n}), \qquad i\geq 1, \] with the property that $L(\underline{x}_{i})$ minimizes $L(\underline{x})$ among all integer vectors $\underline{x}=(x,y_{1},\ldots,y_{n})$ with $1\leq x\leq x_{i,0}$.
They have the properties \[ x_{1,0}< x_{2,0}< \ldots, \qquad L(\underline{x}_{1}) > L(\underline{x}_{2}) > \cdots. \] The study of the sequence of minimal points is the basis of many results regarding exponents of approximation, and we will make use of this concept in the following key lemma whose proof is an adaptation of the method by Laurent~\cite{laurent}. \begin{lemma} \label{le1A} Let $n\geq 2$ be an integer and $1\leq m\leq \lceil n/2\rceil$ another integer. Let $\xi$ be a transcendental real number that satisfies \begin{equation} \label{eq:only} \widehat{\lambda}_{n}(\xi) > \frac{1}{n-m+1}. \end{equation} Then for any large $i$ the vectors \[ (x_{i,0}, \ldots, x_{i,n-m}),\; (x_{i,1},\; \ldots,\; x_{i,n-m+1}),\; \ldots,\; (x_{i,m},\; \ldots,\; x_{i,n}) \] formed from the $i$-th best approximation are linearly independent. \end{lemma} It is worth pointing out that the lemma uses a slightly relaxed restriction on $m$ compared to Theorems~\ref{1A}, \ref{openup2}. The proof of Theorem~\ref{openup2} will demonstrate that in contrast to the lemma, indeed some of its claims cannot be extended to the cases $m=\lfloor n/2\rfloor$ or $m=\lceil n/2\rceil$. \begin{proof} Fix the values $n,m$ and $\xi$ from the lemma. We follow the proof of Laurent~\cite{laurent}. Let $\lambda\in (1/(n-m+1), \widehat{\lambda}_{n}(\xi))$ be fixed for now, to be specified later. For every $i\geq 1$, let $h=h_{i}$ be the smallest integer for which the $(h+1)\times (n-h+1)$ Hankel matrix \[ V_{i}(h)=\begin{pmatrix} x_{i,0} & x_{i,1} & \cdots & x_{i,n-h}\\ x_{i,1} & x_{i,2} & \cdots & x_{i,n-h+1}\\ \vdots & \vdots & \ddots & \vdots \\ x_{i,h} & x_{i,h+1} & \cdots & x_{i,n} \end{pmatrix}, \qquad\qquad i\geq 1, \] has rank at most $h$ (i.e. not full rank). Then the vectors $\underline{z}_j:=(x_{i,j},x_{i,j+1},\ldots, x_{i,j+h-1})$, $j\in\{0,\ldots, n-h+1\}$ satisfy the recurrence relations $$ a_0\underline{z}_j + a_1\underline{z}_{j+1} + \cdots + a_h\underline{z}_{j+h} = \underline{0}.
$$ Now \cite[Lemma~1]{laurent} implies that one can choose integer coefficients $a_j$ such that $$\max\{|a_0|, \ldots, |a_h|\} \ll Z^{1/(n-2h+2)},$$ where $Z$ denotes the maximum of the absolute values of all the $h\times h$ determinants formed from any $h$ of the vectors $\underline{z}_j$. On the other hand, by subtracting the first row of such matrix, multiplied by $\xi^j$, from the $j$'th row of this matrix, we can verify that for large $i$, $Z= o\left(x_{i,0}^{1-(h-1)\lambda}\right)$. Then it is easy to check that for \[ \lambda > \frac{1}{n-h_{i}+1} \] one has $\max\{|a_0|, \ldots, |a_h|\} = o(x_{i,0}^\lambda).$ Consider the polynomial $P_i(z):= a_0+a_1z+\ldots + a_h z^h$. One notices that $$ |x_{i,0} P_i(\xi)| = |a_1(x_{i,0}\xi - x_{i,1}) + a_2(x_{i,0}\xi^2 - x_{i,2}) + \ldots + a_h(x_{i,0}\xi^h-x_{i,h})| = o(1). $$ Now consider $|a_0 x_{i-1,0} + a_1x_{i-1,1} + \cdots + a_hx_{i-1,h}|$. As before, it equals $$ |x_{i-1,0}P_i(\xi) + a_1(x_{i-1,1} - x_{i-1,0}\xi) + \cdots + a_h(x_{i-1,h} - x_{i-1,0}\xi^h)| = o(1), $$ and because it must be an integer, we have that for $i\ge i_0$ large enough, this expression equals zero. Now suppose there exists an infinite strictly increasing sequence $(i_k)_{k\in \mathbb{N}}$ of indices such that $i_1\ge i_0$ and $h_{i_k}\le m$. We obtain that there is an integer vector $\underline{a}_{k}= (a_0, a_1, \ldots, a_h)$ which annihilates both matrices $V_{i_k}(h)$ and $V_{i_k-1}(h)$, i.e. $V_{i_k}(h)\cdot \underline{a}_{k}= V_{i_k-1}(h)\cdot \underline{a}_{k}=\underline{0}$. In particular, it implies that $h_{i_k-1}\le h_{i_k}$. By applying the same arguments iteratively to $V_{i_k-1}(h)$, $V_{i_k-2}(h)$ and so on, we get that $h_i\le m$ and $h_{i-1}\le h_i$ for all $i\ge i_0$. Since $h_i$ can not be arbitrarily large, the sequence $h_i$ is ultimately constant. In other words, for large $i$ we have $h_i = h\le m$. 
We further derive that for such large $i$ there is a vector $\underline{a}$ which does not depend on $i$ and annihilates all matrices $V_i(h)$. But that means there is a linear dependence between $1,\xi, \xi^2,\ldots, \xi^h$ which contradicts the assumption that $\xi$ is transcendental. \end{proof} \begin{example} Let $n\geq 2$ and $m=1$. The lemma claims that as soon as $\widehat{\lambda}_{n}(\xi)>1/n$ the vectors $(x_{i,0}, x_{i,1}, \ldots, x_{i,n-1})$ and $(x_{i,1}, x_{i,2}, \ldots, x_{i,n})$ are linearly independent for large $i$. Notice that the condition is necessary. Indeed, any number $\xi$ with $\lambda_{1}(\xi)>2n-1$ (or equivalently $\lambda_{n}(\xi)>1$, see~\cite[Theorem~1.6]{ich}) has infinitely many $\underline{x}_{i}$ with constant ratios $x_{i,j+1}/x_{i,j}, 0\leq j\leq n-1$, and thus the claim fails, see~\cite[Lemma~2.3]{ich} and also~\cite[Lemma~1]{bu}. Lemma~\ref{le1A} thus gives a new proof that such numbers satisfy $\widehat{\lambda}_{n}(\xi)=1/n$ (this statement is already contained in~\cite[Theorem~1.6]{ich}). \end{example} We state an easy consequence of the lemma. \begin{corollary} \label{ccoo} Let $m,n,\xi$ be as in Lemma~\ref{le1A}, and assume \eqref{eq:only} holds. Then for any $\lambda< \widehat{\lambda}_{n}(\xi)$ and any large $X$ the system \begin{equation} \label{eq:dassystem} 1\leq |x|\leq X, \qquad \quad \max_{1\leq j\leq n-m} |x\xi^{j}-y_{j}| \leq X^{-\lambda} \end{equation} has $m+1$ linearly independent solutions in integer vectors $(x,y_{1},\ldots,y_{n-m})$. Similarly, for any $\lambda< \lambda_{n}(\xi)$ the system \eqref{eq:dassystem} has $m+1$ linearly independent solutions for some arbitrarily large $X$ . \end{corollary} \begin{proof} Without loss of generality assume $\xi>0$ to avoid writing absolute values. Let \[ c=\frac{1}{2\max\{1,\xi^{n}\}(1+(\xi+\xi^{-1})^{n-1})}. \] Let $\lambda< \widehat{\lambda}_{n}(\xi)$ be fixed for the moment. Let $X>0$ be arbitrary large. 
For simplicity define the auxiliary parameter $Y=X/(2\max\{1,\xi^{n}\})$. Then the system \[ 1\leq x\leq Y, \qquad \max_{1\leq j\leq n} | x\xi^{j}-y_{j}| < cY^{-\lambda} \] has a solution in integers $(x,y_{1},\ldots,y_{n})\in \mathbb{Z}^{n+1}$ which can be chosen one of the best approximation vectors $\underline{x}_{i}(n,\xi)$. For $1\leq i\leq m$ and $1\leq j\leq n-m$ we have \begin{align*} | y_{i}\xi^{j}-y_{i+j}|&= | (y_{i}\xi^{j}- x\xi^{i+j}) + (x\xi^{i+j}- y_{i+j})| \leq cY^{-\lambda}(1+\xi^j)\\ &\leq \frac{Y^{-\lambda}}{2\max\{1,\xi^{n}\}}\leq \frac{Y^{-\lambda}}{(2\max\{1,\xi^{n}\})^{\lambda}}=X^{-\lambda}. \end{align*} We conclude \[ \max_{1\leq j\leq n-m} | x\xi^{j}-y_{j}| < X^{-\lambda}, \qquad \max_{1\leq j\leq n-m} | y_{i}\xi^{j}-y_{i+j}|< X^{-\lambda} \] and \[ \max\{|x|,|y_{1}|,\ldots,|y_{n-m}|\} < 2\max\{1,\xi^{n}\} Y=X. \] This shows that the vectors $(x,y_{1},\ldots,y_{n-m}), (y_{1},y_{2}\ldots,y_{n-m+1}), \ldots$, $(y_{m},y_{m+1},\ldots,y_{n})$ satisfy the estimates~\eqref{eq:dassystem}. Moreover they are linearly independent by Lemma~\ref{le1A}. The first claim follows. The second claim on the ordinary exponents $\lambda_{n}(\xi)$ is derived very similarly by considering minimal points $(x,y_{1},\ldots,y_{n})$ as in the definition of $\lambda_{n}(\xi)$ and putting $X=x$. \end{proof} Let $N\geq 1$ be an integer. For any integer $l\in \{1, 2,\ldots, N+1\}$, define the successive minima exponents $\lambda_{N,l}(\xi)$ as the supremum of $\lambda$ so that \[ 1\leq x\leq X, \qquad \max_{1\leq i\leq N}|x\xi^{i}-y_{i}|< X^{-\lambda} \] has $l$ linearly independent solution vectors $(x,y_{1},\ldots,y_{N})$ for arbitrarily large $X$. Similarly define $\widehat{\lambda}_{N,l}(\xi)$ with the inequalities having $l$ solutions for all large $X$. Accordingly, define $w_{N,l}(\xi)$ and $\widehat{w}_{N,l}(\xi)$ for the linear form problem. 
Notice that $\lambda_{N,1}(\xi)= \lambda_{N}(\xi)$ and $\widehat{\lambda}_{N,1}(\xi)= \widehat{\lambda}_{N}(\xi)$, as well as $w_{N,1}(\xi)= w_{N}(\xi)$ and $\widehat{w}_{N,1}(\xi)= \widehat{w}_{N}(\xi)$ just recover the classical exponents. As $\lambda$ can be chosen arbitrarily close to $\widehat{\lambda}_{n}(\xi)$, the first claim of Corollary~\ref{ccoo} asserts that, if~\eqref{eq:only} is satisfied, we have \begin{equation} \label{eq:ceq} \widehat{\lambda}_{n-m, m+1}(\xi) \geq \widehat{\lambda}_{n}(\xi)>\frac{1}{n-m+1}. \end{equation} Similarly, under~\eqref{eq:only} the second claim reads \begin{equation} \label{eq:ceq2} \lambda_{n-m, m+1}(\xi) \geq \lambda_{n}(\xi)>\frac{1}{n-m+1}. \end{equation} These inequalities are important ingredients in the proof of Theorem~\ref{openup2}. \subsection{Parametric geometry of numbers} \label{pgn} We give a very brief exposition of the concept of parametric geometry of numbers due to Schmidt and Summerer~\cite{ss,ssmh}, where we only provide the necessary results for this paper and refer to the quoted papers for more details. Let $N\geq 1$ be an integer and $1\leq l\leq N+1$. Given $\xi$, define $\psi_{N,l}(Q)$ as the supremum of exponents $\mu$ for which \[ 1\leq |x|\leq Q^{1+\mu}, \qquad \max_{1\leq i\leq N} |\xi^{i}x-y_{i}|\leq Q^{-1/N+\mu} \] has $l$ linearly independent integer vector solutions $(x,y_{1},\ldots,y_{N})$. Let \[ \underline{\psi}_{N,l}= \liminf_{Q\to\infty} \psi_{N,l}(Q), \qquad \overline{\psi}_{N,l}= \limsup_{Q\to\infty} \psi_{N,l}(Q). \] Similarly, one can define the dual function $\psi_{N,l}^{\ast}(Q)$ as the supremum of exponents $\mu$ such that the system of inequalities $$ \max\{ |x_0|,|x_1|,\ldots, |x_N|\} \le Q^{1/N + \mu};\qquad |x_0+x_1\xi + \ldots + x_N\xi^N|<Q^{-1+\mu} $$ has $l$ linearly independent integer vector solutions. The values $\underline{\psi}_{N,l}^{\ast}, \overline{\psi}_{N,l}^{\ast}$ are then defined analogously to $\underline{\psi}_{N,l}$ and $\overline{\psi}_{N,l}$.
As pointed out in~\cite[Equation~(4.11)]{ss}, Mahler's Duality Theorem on Dual Convex bodies translates into \begin{equation} \label{eq:mahla} \underline{\psi}_{N,l}^{\ast}= -\overline{\psi}_{N,N+2-l}, \qquad \overline{\psi}_{N,l}^{\ast}= -\underline{\psi}_{N,N+2-l}. \end{equation} We further require the estimates from~\cite[(1.11)]{ssmh}: \begin{equation} \label{eq:sses} l\overline{\psi}_{N,l}+(N+1-l)\underline{\psi}_{N,N+1}\geq 0,\quad l\underline{\psi}_{N,l}+(N+1-l)\overline{\psi}_{N,N+1}\geq 0. \end{equation} and the relation from~~\cite[(1.15)]{ssmh}: \begin{equation} \label{eq:untob} \underline{\psi}_{N,l+1}\leq \overline{\psi}_{N,l}. \end{equation} To build a connection with Corollary~\ref{ccoo} we recall that these quantities are related to the successive minima exponents $\lambda_{N,l}, \widehat{\lambda}_{N,l}$ from the previous section by identities. For simplicity we drop the argument $\xi$ of the exponents in the following. All claims below hold for any $1\leq l\leq N+1$. By~\cite[Theorem~1.4]{ss} (for $l=1$, but the same argument also holds for larger $l$, see also~\cite{j2}) we have the identities \[ (1+\lambda_{N,l})\cdot (1+\underline{\psi}_{N,l})= \frac{N+1}{N}, \] and \begin{equation} \label{eq:fehlt2} (1+\widehat{\lambda}_{N,l})\cdot (1+\overline{\psi}_{N,l})= \frac{N+1}{N}. \end{equation} Similarly for the dual linear form problem we have \begin{equation} \label{eq:fehlt3} (1+w_{N,l})\cdot \left(\underline{\psi}_{N,l}^{\ast}+\frac{1}{N}\right)= \frac{N+1}{N}, \end{equation} and \begin{equation} \label{eq:fehlt4} (1+\widehat{w}_{N,l})\cdot \left(\overline{\psi}_{N,l}^{\ast}+\frac{1}{N}\right)= \frac{N+1}{N}. \end{equation} \section{Proof of Theorem~\ref{openup2}} With aid of the results from Section~\ref{prepara} we can prove Theorem~\ref{openup2}. \begin{proof}[Proof of Theorem~\ref{openup2}] For all quotations of formulas in Section~\ref{pgn} below we let $N=n-m$. Note that the assumption \eqref{eq:only2} is stronger than \eqref{eq:only}. 
Therefore Corollary~\ref{ccoo} can be applied. Its claim \eqref{eq:ceq}, when combined with \eqref{eq:fehlt2} for $l=m+1$, implies \[ \overline{\psi}_{n-m,m+1} \leq \frac{1-(n-m)\widehat{\lambda}_{n}(\xi)}{(n-m)(1+\widehat{\lambda}_{n}(\xi))}. \] On the other hand, since $2m<n$ by assumption, equations~\eqref{eq:mahla} and~\eqref{eq:sses} for $l=m+1$ give \[ \overline{\psi}^{\ast}_{n-m,1}= - \underline{\psi}_{n-m,n-m+1} \leq \frac{m+1}{n-m+1-(m+1)}\cdot \overline{\psi}_{n-m,m+1}= \frac{m+1}{n-2m}\cdot \overline{\psi}_{n-m,m+1}. \] After inserting the above bound for $\overline{\psi}_{n-m,m+1}$ and simplifying the expression we get $$ \overline{\psi}^{\ast}_{n-m,1} + \frac{1}{n-m} \le \frac{(n-m+1)(1-m\widehat{\lambda}_{n}(\xi))}{(n-m)(n-2m)(1+\widehat{\lambda}_{n}(\xi))}. $$ Then \eqref{eq:h11} follows from~\eqref{eq:fehlt4} with $l=1$. Similarly, \eqref{eq:untob} yields \[ \underline{\psi}_{n-m,m+2} \leq \overline{\psi}_{n-m,m+1} \leq \frac{1-(n-m)\widehat{\lambda}_{n}(\xi)}{(n-m)(1+\widehat{\lambda}_{n}(\xi))} \] and \eqref{eq:mahla}, \eqref{eq:sses} together with the assumption $2m+1<n$ again implies \begin{align*} \underline{\psi}^{\ast}_{n-m,1}&= - \overline{\psi}_{n-m,n-m+1} \leq \frac{m+2}{n-m+1-(m+2)}\cdot \underline{\psi}_{n-m,m+2} \\ &\leq \frac{m+2}{n-2m-1}\cdot\frac{1-(n-m)\widehat{\lambda}_{n}(\xi)}{(n-m)(1+\widehat{\lambda}_{n}(\xi))}. \end{align*} Then \eqref{eq:fehlt3} gives the stated left lower bound~\eqref{eq:h21} for $w_{n-m}(\xi)$. The right bound for $w_{n-m}(\xi)$ follows similarly as \eqref{eq:h11} in view of \eqref{eq:ceq2} that is equivalent to the second claim of Corollary~\ref{ccoo}. Finally, \eqref{eq:windag} follows by combining \eqref{eq:h11} with the estimate \[ \min\{ w_{m+1}(\xi), \widehat{w}_{n-m}(\xi)\} \leq \frac{1}{\widehat{\lambda}_{n}(\xi)} \] derived from~\cite[Theorem~2.1]{indag}. Indeed, we have $\widehat{\lambda}_{n}(\xi) \le 2/n<1/m$. 
Hence the assumptions~\eqref{eq:only2} and~\eqref{eq:h11} imply that $\widehat{w}_{n-m}(\xi)$ is larger than $1/\widehat{\lambda}_{n}(\xi)$, thus the left term in the minimum cannot exceed it. \end{proof} \section{Proof of Theorems~\ref{1B} and~\ref{1A}} \subsection{Two relations between Diophantine exponents} \label{twor} In this section we recall estimates linking $w_{n}^{\ast}$ with other exponents of approximation. They will be required in the proofs of the main results. Firstly, from~\cite[Theorem~2.7]{buschlei} any transcendental real $\xi$ satisfies \begin{equation} \label{eq:toll} w_{n}^{\ast}(\xi) \geq \frac{3}{2}\widehat{w}_{n}(\xi)-n+\frac{1}{2}, \qquad n\geq 1. \end{equation} We will apply \eqref{eq:toll} for the index $n-m$ in context of Theorem~\ref{1A} for its proof. We lack analogues of \eqref{eq:toll} for the modified versions of Wirsing's problem discussed at the end of Section~\ref{int01}. Therefore we cannot extend our results to these situations. Secondly, a small variation of~\cite[Lemma~1]{davsh} implies the relation \begin{equation} \label{eq:davschm} w_{n}^{\ast}(\xi) \geq \frac{1}{\widehat{\lambda}_{n}(\xi)}, \qquad n\geq 1. \end{equation} In fact, Lemma~1 from~\cite{davsh} provides a lower estimate for $w_{n+1}^{\ast}(\xi)$ instead of $w_{n}^{\ast}(\xi)$, however it is well-known to hold for the latter as well, see for example~\cite{j2} or~\cite{buteu}. For the sake of completeness, we state the related inequalities \begin{equation} \label{eq:toller} w_{n}^{\ast}(\xi) \geq \frac{w_{n}(\xi)+1}{2}, \qquad w_{n}^{\ast}(\xi) \geq w_{n}(\xi)-n+1, \qquad w_{n}^{\ast}(\xi) \geq \frac{\widehat{w}_{n}(\xi)}{\widehat{w}_{n}(\xi)-n+1} \end{equation} by Wirsing~\cite{wirsing}, Bugeaud~\cite[Lemma~1A]{bugbuch} and Bugeaud and Laurent~\cite{bula} respectively. Many of the above inequalities directly show the lower bounds \[ \overline{w}^{\ast}(\xi) \geq \underline{w}^{\ast}(\xi) \geq \frac{1}{2}. 
\] However, as indicated in the introduction, no improvement of the constant $1/2$ even for the larger quantity $\overline{w}^{\ast}(\xi)$ seems obvious from previous results. For our method, to improve $\underline{w}^{\ast}(\xi) \geq \frac{1}{2}$ it is essential to use \eqref{eq:toll}, the bounds in \eqref{eq:toller} are insufficient. On the other hand, the left estimate in \eqref{eq:toller} would imply $\overline{w}^{\ast}(\xi)>1/2$ when utilized in the framework below. \subsection{Deduction of the main results} \begin{proof}[Proof of Theorem~\ref{1A}] Let $m,n$ and $\xi$ be as in the theorem. First assume inequality \eqref{eq:only} holds. Then we apply Theorem~\ref{openup2} which together with \eqref{eq:toll} for index $n-m$ yields \begin{equation} \label{eq:kuerzer} w_{n}^{\ast}(\xi)\geq w_{n-m}^{\ast}(\xi)\geq \frac{3}{2}\cdot\frac{(n-m)\widehat{\lambda}_{n}(\xi)+n-2m-1}{1-m\widehat{\lambda}_{n}(\xi)}-(n-m)+\frac{1}{2}. \end{equation} Denote the right hand side by $\tau=\tau_{m,n}(\widehat{\lambda}_{n}(\xi))$. Regardless of whether \eqref{eq:only} holds or not, the estimates \eqref{eq:davschm} and~\eqref{eq:kuerzer} together imply \begin{equation}\label{eq:eq23} w_{n}^{\ast}(\xi)\geq \max\left\{ \tau\cdot \underline{1}_{\left(\frac{1}{n-m+1},1\right)}(\widehat{\lambda}_{n}(\xi)),\; \frac{1}{\widehat{\lambda}_{n}(\xi)}\right\}, \end{equation} where $\underline{1}_{I}(t)$ denotes the indicator function of an interval $I$. The first term in the maximum is increasing as a function of $\widehat{\lambda}_{n}(\xi)$ on $[1/n,1/m)$, while the second term is obviously decreasing. It is easy to check that for $\widehat{\lambda}_{n}(\xi)=1/(n-m+1)$ and slightly larger values the right term prevails (since then $\tau<1/\widehat{\lambda}_{n}(\xi)$), while for $\widehat{\lambda}_{n}(\xi)=1/m$ the left term becomes bigger (it actually tends monotonically to infinity). Therefore the minimum of the right hand side of~\eqref{eq:eq23} is attained when the expressions are equal.
This happens when $\widehat{\lambda}_{n}(\xi)$ solves the quadratic equation in $\lambda$: \begin{equation}\label{eq24} (2mn-2m^2 +3n-4m)\lambda^2 + (n-2m-2)\lambda -2=0. \end{equation} The reciprocal of this equilibrium value, according to \eqref{eq:eq23}, can readily be calculated as the lower bound in Theorem~~\ref{1A}. \end{proof} The lower bound in Theorem~\ref{1A} can be slightly improved if instead of~\eqref{eq:toll} one uses the stronger estimate \[ w_{n}^{\ast}(\xi) \geq \frac{w_{n}(\xi)}{2}+\widehat{w}_{n}(\xi)-n+\frac{1}{2}, \qquad n\geq 1, \] which holds as soon as $w_{n}^{\ast}(\xi)\leq n$. The last inequality can be derived by applying the proof in~\cite{buschlei}. Using the left bound in \eqref{eq:h21} we indeed obtain an improvement. Now, Theorem~\ref{1B} follows from Theorem~\ref{1A} with a proper choice of the parameter $m$. \begin{proof}[Proof of Theorem~\ref{1B}] Write \[ \Phi(u,v)= \frac{4uv+6v-4u^{2}-8u}{2u+2-v+\sqrt{v^{2}+12uv+20v-12u^{2}-24u+4}} \] so that $\Phi(m,n)$ is the bound in Theorem~\ref{1A}. If we fix $v$ and write $u=\alpha v$ for $\alpha\in(0,1/2)$, then we obtain a bound of order $\Phi(u,v)\geq F(\alpha)v+o(v)$ as $v\to\infty$, for the function \[ F(t)= \frac{4t-4t^{2}}{2t-1+\sqrt{1+12t-12t^{2}}}. \] By differentiation one can check that $F(t)$ is maximized for $\alpha=\alpha_{0}:=(3-\sqrt{3})/6=0.2113\ldots$ with maximum $F(\alpha_{0})=\beta:=1/\sqrt{3}$. For given $v=n$, if we take $m=\lfloor n\alpha_0\rfloor$ then the quotient $\Phi(m,n)/n$ will be arbitrarily close to $\Phi(\alpha_{0} n,n)/n$ for large enough $n$. By Theorem~\ref{1A} and continuity of $F$ we infer $w_{n}^{\ast}(\xi)/n\geq F(\alpha_{0})-o(1)=\beta-o(1)$ as $n\to\infty$. Next, we need to show that for all $n\ge 4$ there exists $m<(n-1)/2$ such that $\Phi(m,n)/n$ exceeds $\beta$. This is equivalent to saying that for the same values $n$ and $m$ the solution $\lambda$ of~\eqref{eq24} is less than $\sqrt{3}/n$. 
By substituting $\lambda = \sqrt{3}/n$ into the left hand side of the equation we get $$ -\left(\frac{3-\sqrt{3}}{\sqrt{6}}n - \sqrt{6} m\right)^2 + (9-2\sqrt{3})n- 12m. $$ For $m=\lfloor n\alpha_0\rfloor$, the square part of this expression is at least $-6$ while the remaining part is at least $$ -2(3-\sqrt{3})n + (9-2\sqrt{3})n = 3n \ge 12. $$ Therefore the expression is positive and therefore for $m=\lfloor n\alpha_0\rfloor$, the values $\Phi(m,n)/n$ are larger than $\beta$. We finally settle \eqref{eq:obenba}. We will show that for any $\epsilon>0$ and large $n\geq n_{0}(\epsilon)$, with $m=\lfloor\gamma n\rfloor$ for a certain $\gamma$ and $s=n-m$ we have \begin{equation} \label{eq:reicht} \max\left\{ \frac{w_{n}^{\ast}(\xi)}{n}, \frac{w_{s}^{\ast}(\xi)}{s}\right\} > \delta-\epsilon, \end{equation} with $\delta$ defined in the theorem. This clearly implies the claim. Let $\gamma\in(0,1/2)$ be a parameter. Choose $m=\lfloor\gamma n\rfloor$ and denote $c=n\widehat{\lambda}_{n}(\xi)$. Clearly $c\in[1,2]$ by Dirichlet's Theorem and~\cite{laurent}. On the one hand, \eqref{eq:davschm} implies $w_{n}^{\ast}(\xi)/n\geq c^{-1}$ , on the other hand by our choice of $\gamma$ we may apply Theorem~\ref{openup2} and again derive a similar estimate to \eqref{eq:kuerzer}. Putting negligible terms in a remainder term yields \[ \frac{w_{s}^{\ast}(\xi)}{s}=\frac{w_{n-m}^{\ast}(\xi)}{n-m}\geq \frac{3}{2}\cdot \left(\frac{1-2\gamma}{(1-\gamma)(1-c\gamma)}\right)-1-o(1), \qquad n\to\infty. \] Thus for every parameter $\gamma\in(0,1/2)$ we have \[ \max\left\{ \frac{w_{n}^{\ast}(\xi)}{n}, \frac{w_{s}^{\ast}(\xi)}{s}\right\} \geq \min_{c\in[1,2]} \max\left\{\frac{1}{c},\; \frac{3}{2}\cdot \left(\frac{1-2\gamma}{(1-\gamma)(1-c\gamma)}\right)-1 \right\} -o(1), \] as $n\to\infty$. 
For given $\gamma$ the minimum of the inner maximum is obtained when the expressions are equal, that is for \[ c=c(\gamma)=\frac{2\gamma^{2}+2\gamma-1+\sqrt{4\gamma^{4}+24\gamma^{3}-32\gamma^{2}+12\gamma+1}}{4(\gamma-\gamma^{2})} \] obtained as a solution of a quadratic equation. Observe that the right hand side is $1/G(\gamma)$ with $G$ defined in \eqref{eq:G}. Matlab calculations show that the reciprocal $1/c(\gamma)$ is maximized over $\gamma\in(0,1/2)$ for a numerical value $\gamma_{0}=0.2345\ldots$ which by differentiation can be checked to be a root of the irreducible quartic $Q(t)=4t^4 - 12t^3 + 10t^2 - 6t + 1$, yielding a bound $\delta=G(\gamma_{0})>0.6408$ thereby verifying~\eqref{eq:reicht}. \end{proof} \textbf{Acknowledgements}: Foundations of the results in this paper were established at the collaborative workshop ``Ergodic theory, Diophantine approximation and related topics'' at the MATRIX institute in Creswick, Australia, in June 2019. \noindent Dzmitry Badziahin\\ \noindent The University of Sydney\\ \noindent Camperdown 2006, NSW (Australia)\\ \noindent \texttt{[email protected]} \noindent Johannes Schleischitz\\ \noindent Middle East Technical University\\ \noindent Northern Cyprus Campus\\ \noindent Kalkanlı, Güzelyurt, KKTC\\ \noindent via Mersin 10, Turkey\\ \noindent\texttt{[email protected]} \end{document}
\begin{document} \twocolumn[ \icmltitle{Finding Short Signals in Long Irregular Time Series with Continuous-Time Attention Policy Networks} \icmlsetsymbol{equal}{*} \begin{icmlauthorlist} \icmlauthor{Thomas Hartvigsen}{mit} \icmlauthor{Jidapa Thadajarassiri}{wpi} \icmlauthor{Xiangnan Kong}{wpi} \icmlauthor{Elke Rundensteiner}{wpi} \end{icmlauthorlist} \icmlaffiliation{mit}{MIT} \icmlaffiliation{wpi}{WPI} \icmlcorrespondingauthor{Thomas Hartvigsen}{[email protected]} \icmlkeywords{} \vskip 0.3in ] \printAffiliationsAndNotice{} \begin{abstract} Irregularly-sampled time series (ITS) are native to high-impact domains like healthcare, where measurements are collected over time at uneven intervals. However, for many classification problems, only small portions of long time series are often relevant to the class label. In this case, existing ITS models often fail to classify long series since they rely on careful imputation, which easily over- or under-samples the relevant regions. Using this insight, we then propose CAT, a model that classifies multivariate ITS by explicitly seeking highly-relevant portions of an input series' timeline. CAT achieves this by integrating three components: (1) A \textit{Moment Network} learns to seek relevant moments in an ITS's continuous timeline using reinforcement learning. (2) A \textit{Receptor Network} models the temporal dynamics of both observations \textit{and} their \textit{timing} localized around predicted moments. (3) A recurrent Transition Model models the sequence of transitions between these moments, cultivating a representation with which the series is classified. Using synthetic and real data, we find that CAT outperforms ten state-of-the-art methods by finding short signals in long irregular time series. 
\end{abstract} \section{Introduction} \label{sec:intro} \textbf{Background.} Irregularly-sampled time series (ITS) have uneven spaces between their observations and are common in impactful domains like healthcare \cite{hong2020holmes}, environmental science \cite{cao2018brits}, and human activity recognition \cite{singh2019multi}. Uneven gaps can arise from many sources. For example, in physiological streams, clinicians drive the collection of medical record data by requesting different lab tests and measurements in real time as they investigate the root causes of their patient's conditions \cite{lipton2016directly}. \textit{Which} measurements are taken \textit{when} differs between patients. When classifying such time series, there are often relationships between \textit{when} observations are made and the class label for the resulting time series. For instance, sicker patients may have more measurements. ITS can also be quite long, while the regions most-relevant to the classification may be quite short, taking up only a small portion of the timeline and creating a small \textit{signal-to-noise} ratio. A successful model must find the best regions in the timeline at which to capture signals in both the values themselves and the patterns in \textit{when} observations were made, or \textit{informative irregularity}, while ignoring irrelevant regions. \input{figures/prob_def_ISTS.tex} \textbf{Motivating Example.} Consider detecting if a person \textit{Fell} using their smartphone's sensors, as illustrated in Figure~\ref{fig:prob_def}. To extend battery life, a listening probe is used to only collect data when certain conditions are met, for example when the accelerometer changes rapidly. Since the phone is not always moving, the stored time series are naturally irregularly-sampled. To detect a fall, some regions of the accelerometer's records are far more relevant than others. Leading up to a fall, for instance, a person may have stumbled earlier in the day.
However, there can also be many false positives where the phone moves quickly even though the person is not falling (setting the phone down, for instance). Additionally, \textit{when} observations are made can also be useful: if the phone moves after a long gap, the person may be getting out of bed. Since only some regions are relevant, all a classifier needs are the few most relevant moments in the timeline. Finding these moments is especially important for the long series that naturally exist in many domains. \textbf{State-of-the-art.} There have been many recent advances in classifying ITS data, though most focus on sparse series with few observations. Many works treat ITS classification as a \textit{missing value imputation} problem \cite{che2018recurrent,lipton2016directly,zheng2017resolving}, converting ITS to regular series then performing standard classification. However, to capture short signals, many values need to be imputed to avoid aggregating intricate signals. Plus, this increases the length of the series and imputes values in irrelevant regions of the timeline. As many ITS methods rely on Recurrent Neural Networks, making time series longer will likely decay performance. On the other hand, imputing too few values easily bypasses short signals, aggregating away crucial information. Some works capture informative irregularity by computing statistical features such as \textit{missingness indicators} \cite{lipton2016directly} or the time since last observations \cite{che2018recurrent} as additional input variables, inflating the feature space. Some recent works have leaned into learning continuous-time representations \textit{directly} from raw ITS data \cite{kidger2020generalised,shukla2019interpolation,li2016scalable,cheng2020learning,rubanova2019latent,de2019gru,oh2018learning,shukla2021multi}. 
However, they still rely on hand-picking new \textit{reference} timesteps at which to estimate values or compute representations, falling prey to the same challenges of imputation. To-date, these methods do not adapt their reference timesteps to the inputs. Overall, current machine learning methods for classifying ITS data are expected to underperform on long series where the relevant signals are proportionally short. \textbf{Problem Definition.} We specifically address the problem of \textit{Attention-based ITS Classification} (ABC), which is to classify long ITS by finding small discriminative signals in the continuous timeline, as illustrated in Figure \ref{fig:prob_def}. Given a set of labeled ITS, where each series consists of one sequence of (\textit{timestep}, \textit{value}) pairs per variable, our aim is to produce a classifier that can correctly assign class labels $y$ to previously-unseen instances. For long series, the relevant time window, or the proportion of the timeline needed for classification, may be very small in practice. A successful model should explicitly find these \textit{discriminative moments} with which it can make an accurate classification. \textbf{Challenges.} Solving the ABC problem is challenging for the three following reasons: \begin{itemize} \item \textit{Finding Short Signals.} Short, relevant windows of a continuous timeline can be hard to identify, akin to finding a needle in a haystack. For long series, this means that much of the timeline contains effectively irrelevant information, which a model must learn to ignore. Meanwhile, the model must also avoid learning spurious correlations found outside relevant regions. \item \textit{Unknown Signal Locations.} Relevant signals may occur anywhere in the continuous timeline. However, rarely are the \textit{true} signal locations labeled, so we assume no prior knowledge of which moments \textit{should} be used for classification.
Still, a good model must successfully find these discriminative moments, even without supervision. \item \textit{Informative Irregularity.} Discriminative information often arises in the patterns of \textit{when} observations are made \cite{rubin1976inference}. For instance, rapid measurements may indicate a sicker patient. Learning from such irregularity is often crucial to accurate classification, yet few methods exist for capturing such signals. \end{itemize} \textbf{Proposed Method.} To address these challenges, we propose the \textbf{C}ontinuous-time \textbf{At}tention policy network (CAT) as an effective approach to the ABC problem. CAT searches for relevant regions of input series via a reinforcement learning-based \textit{Moment Network}, which learns to find \textit{moments of interest} in the continuous timeline, one by one. At each predicted moment, a \textit{Receptor Network} reads and represents the local temporal dynamics in the measurements along with patterns that exist in the timing of observations through a continuous-time density function. Along the way, a recurrent \textit{Transition Model} constructs a discriminative representation of the \textit{transitions between moments of interest}, which is ultimately used to classify the series. CAT thus presents a novel paradigm for classifying ITS where intricate signals in long series are explicitly sought out and captured. Additionally, CAT generalizes recent ITS classifiers with its flexible \textit{Receptor Network}, which can easily be augmented to leverage components of other recent ITS models. \textbf{Contributions.} Our contributions are as follows: \begin{itemize} \item We identify a new, real problem setting for classifying irregularly-sampled time series on which existing state-of-the-art methods underperform. 
\item Using insights from this problem, we develop CAT, a novel framework for classifying long irregular time series by finding relevant moments in the \textit{continuous} timeline, generalizing recent work. \item We show that CAT successfully discovers intricate signals in ITS, outperforming the main competitors on both synthetic and real-world data. \end{itemize} \section{Related Work} \label{sec:related_works} The ABC problem for ITS relates to both \textit{ITS Classification} and \textit{Input Attention}. \textbf{Classifying Irregularly-Sampled Time Series.} Classifying irregularly-sampled time series has recently become a popular and impactful problem as it generalizes many prior classification settings. To-date, most approaches \cite{lipton2016directly,zheng2017resolving,che2018recurrent} treat ITS classification as a \textit{missing value imputation} problem: Create a set of evenly-spaced bins, then aggregate multiple values within each bin and estimate one value per empty bin. After imputation, regular time series classification may be performed. Some recent ITS classifiers extend beyond simple imputation (\textit{e.g.}, mean) approaches by either including auxiliary information such as a \textit{missingness-indicator} \cite{lipton2016directly} or \textit{time-since-last-observation} \cite{che2018recurrent} as extra features to preserve properties found in the irregularity. Others build more complex value estimators by either learning generative models \cite{cheng2020learning}, using differentiable gaussian kernel adapters \cite{shukla2019interpolation}, or including decay mechanisms in Recurrent Neural Networks (RNN) to encode information-loss when variables go unobserved over long periods of time \cite{mozer2017discrete,che2018recurrent}.
Many recent works have also begun parameterizing differential equations to serve as time series models \cite{kidger2020neural,lechner2020learning,rubanova2019latent,jia2019neural,hasani2021liquid,schirmer2022modeling,salvi2022neural}, though most still estimate values at hand-picked time steps, then use the estimated values for classification. Some recent models have also integrated attention mechanisms into ITS classification \cite{shukla2021multi,chen2021continuous,tan2021cooperative}. However, they still hand-pick reference timesteps for each input time series. Given long ITS with short signals, this decision is hugely impactful, as we show in our experiments. Moreover, by relying on RNNs for classification, these recent methods easily fail to capture signals when the number of estimated values gets too large. This requires the RNN to filter out many irrelevant timesteps in a long series, which is notoriously challenging due to both their slow inference and vanishing gradients \cite{hochreiter1998vanishing}. \textbf{Input Attention.} The goal of \textit{Input Attention} is to discover relevant regions in the \textit{input} space of a given instance and it has recently broken major ground in classifying images \cite{mnih2014recurrent}, graphs \cite{lee2019attention}, text \cite{sood2020improving}, and regularly-spaced time series \cite{ismail2019input}. We refer to this as \textit{input} attention, as such methods search for relevant regions in the \textit{input space} of each instance. This approach is particularly impactful when inputs are high-dimensional as it explicitly disregards irrelevant regions of the input space. These methods also aid interpretability by clearly displaying which regions of an input were used to make a classification. Input attention has yet to be considered for ITS despite strong implications of successful models. 
Input attention differs from \textit{attention mechanisms for recurrent neural networks} \cite{bahdanau2014neural}, where attention distributions are predicted over the timesteps in the \textit{latent} space of an RNN. While there is some conceptual overlap, input attention is more data-driven in that it finds regions in the \textit{input} space as opposed to the \textit{latent} space of this specific neural network architecture. \section{Methodology} \label{sec:methods} \begin{figure*} \caption{Overview of CAT. The \textit{Receptor Network}} \end{figure*} \subsection{Problem Formulation} Given a set of $N$ labeled irregularly-sampled time series $\mathcal{D} = \{(X_i, y_i)\}_{i=0}^N$, consider the $D$ variables of instance $X_i = [X_i^1, \dots, X_i^D]$. To aid readability, all descriptions are provided in terms of one instance and one variable wherever possible. For each variable $d$, $X^d = [(t^d_1, v^d_1), \dots, (t^d_{T^d}, v^d_{T^d})]$, where $t^d_i$ is the $i$-th timestamp of the $d$-th variable and $v^d_i$ is its corresponding value. Timestamps $t$ may differ between variables and the number of observations $T^d$ may be unique to variable $d$. We also assume that the inputs $X$ have short signals: Most of the relevant information comes from a small proportion of a series' timeline. There may still be multiple relevant regions, however. The goal is to learn a function $f:\mathbb{X} \to \mathcal{Y}$ that accurately maps input $X \in \mathbb{X}$ to its class $y \in \mathcal{Y}$ for previously-unseen time series, where $\mathbb{X}$ is the input space of ITS and $\mathcal{Y} =\{0, \dots, C\}$ is the set of $C$ classes.
\subsection{Proposed Method} We propose a \textbf{C}ontinuous-time \textbf{At}tention Policy Network (CAT), a novel model containing four key steps that work in concert to find short discriminative signals in long ITS: \begin{enumerate} \item A \textit{Receptor Network} learns to model ITS observations (both the raw values \textit{and} informative irregularity) local to a given \textit{moment of interest}. \item A recurrent \textit{Transition Model} represents the Receptor Network's findings across multiple moments. \item A reinforcement learning \textit{Moment Network} predicts \textit{moments of interest} based on the Transition Model. A \textit{moment of interest} is a timestamp around which relevant information may exist. \item After $K$ repetitions of Steps 1--3, a \textit{Discriminator Network} classifies $X$ using all steps. \end{enumerate} Beyond state-of-the-art performance, a clear benefit of CAT is its novel framework for classifying time series: CAT decomposes a time series into a sequence of local representations that is discriminative in (1) which subsequences are modeled, and (2) their relative order. This approach is more flexible than rigidly reading a time series from either left-to-right, right-to-left, or all-at-once; our model adapts the processing order to the input. Since ordering is discrete, Reinforcement Learning is a natural fit. That is, we let the model pick the order, then reward or penalize based on the final classification. This technical novelty helps CAT stand out from alternative ITS models. \subsubsection{Receptor Network} First, the Receptor Network $\mathcal{R}_\theta$ creates a vector representation of values and irregularity \textit{local} to a given moment of interest $m_i$.
So given $m_i \in [0, \max T]$, $\mathcal{R}_\theta$ predicts a vector $\hat{\mathbf{x}}_i$, representing the local \textit{values} and \textit{informative irregularity} within a width-$\delta$ window of $X$ centered on moment $m_i$, where $\max T$ is the largest timestamp in $X$. Thus $\mathcal{R}_\theta$ can be placed anywhere in the continuous timeline, where it will proceed to model local signals. In our experiments, the first moment, $m_0$, is sampled from a uniform distribution across the timeline. To compute local representations of both values \textit{and} irregularity, we compute two $w$-dimensional vectors per variable: $\mathbf{p}$ represents $X$'s values and $\mathbf{q}$ represents informative irregularity, which are then encoded into a shared representation $\hat{\mathbf{x}}$. For readability, we describe $\mathcal{R}$ for one variable, omitting superscripts $d$ since all variables are processed the same way and in parallel. To compute vector $\hat{\mathbf{x}}$, all timestamps and values within this window are first extracted into two vectors: $\tau$ is a sequence of timestamps in the window $[m_i-\frac{\delta}{2}, m_i+\frac{\delta}{2}]$, and $\nu$ contains their corresponding values. We use $[\ ]$ to denote a range in the timeline beginning at the real time $m_i-\frac{\delta}{2}$ and ending at time $m_i+\frac{\delta}{2}$. To compute $\mathbf{p}$, the representation of the values within the window surrounding $m_i$, we linearly interpolate values $\nu$ to estimate $w$ values at a set of new timestamps. 
The $j$-th element of $\mathbf{p}$ can be interpolated with respect to a timestamp $t^\prime = m_i-\frac{\delta}{2}+\frac{j\delta}{w}$ for $j=\{1, \dots, w\}$ as \begin{equation*} \mathbf{p}_j = \frac{\left(\text{SG}(t^\prime, \tau)-t^\prime\right)\nu_{\text{LL}(t^\prime, \tau)} + \left(t^\prime - \text{LL}(t^\prime, \tau)\right)\nu_{\text{SG}(t^\prime, \tau)} }{\text{SG}(t^\prime, \tau)-\text{LL}(t^\prime, \tau)}, \end{equation*} where $\text{LL}(t^\prime, \tau)$ is the largest timestamp less than $t^\prime$ and $\nu_{\text{LL}(t^\prime, \tau)}$ is its corresponding value. Similarly, $\text{SG}(t^\prime, \tau)$ is the smallest timestamp greater than $t^\prime$ and $\nu_{\text{SG}(t^\prime, \tau)}$ is its corresponding value. By iterating $j$ across integers from 1 to $w$, we compute $w$ evenly-spaced values representing the local observations. If a timestamp $t^\prime > \max T$ or $t^\prime < \min T$, the nearest value in the window is returned, flattening the edges of the window. If no observations occur in the window, we set $\mathbf{p} = \{0\}^w$. To compute $\mathbf{q}$, which represents \textit{informative irregularity} within the window, we learn a function to represent the \textit{timing} of observations, quantifying irregularity through the squared exponential kernel, inspired by \cite{li2016scalable}. Thus the $j$-th element of $\mathbf{q}$ as computed with respect to each $t^\prime = m_i-\frac{\delta}{2}+\frac{j\delta}{w}$ for $j=\{1, \dots, w\}$ is \begin{equation}\label{eqn:density} \mathbf{q}_j = \sum_{k=1}^{|\tau|} e^{-\alpha(t^\prime-\tau_k)^2}, \end{equation} \noindent where $\tau_k$ is the $k$-th element of sequence $\tau$. Thus the \textit{timing} of the observations is converted to a sequence of densities, which often change by class \cite{lipton2016directly}. $\alpha$ controls the kernel distance between $t^\prime$ and $\tau_k$ and can be picked or learned during training \cite{shukla2019interpolation}.
Since the output of the Receptor Network will eventually be used by the Moment Network to predict the next moment $m_{i+1}$, we also compute $\mathbf{p}$ and $\mathbf{q}$ for each variable at two granularities: One for fine-grained local information, one for coarse-grained representation of the entire series that is useful for both capturing long-term trends and for finding the next moments of interest, inspired by \cite{mnih2014recurrent}. After computing $\mathbf{p}$ and $\mathbf{q}$, a neural network predicts an $L$-dimensional representation $\mathbf{\hat{x}}_i$, creating a dense, vector representation of the width-$\delta$ window surrounding moment $m_i$: \begin{equation} \mathbf{\hat{x}}_i = \psi(\mathbf{W}[\text{F}(\{\mathbf{p}^d\}_{d=1}^D), \text{F}(\{\mathbf{q}^d\}_{d=1}^D)] + \mathbf{b}), \end{equation} where F$(\cdot)$ and $[\cdot]$ denote flattening and concatenation, respectively. $\mathbf{W}$ and $\mathbf{b}$ are a matrix and vector of learnable parameters of shape $L \times 4w$ and $L$, respectively. $\psi$ is the rectified linear unit. To also incorporate \textit{where} the collected data come from in the timeline, we concatenate $m_i$ with $\mathbf{\hat{x}}_i$ before passing it to the Transition Model. \subsubsection{Transition Model} Next, the Transition Model $\mathcal{T}_\theta$ represents the \textit{transitions} between information gathered at each moment of interest. We follow the state-of-the-art for a vast array of sequential learning tasks and implement this component as an RNN, creating one $H$-dimensional vector representation $\mathbf{h}_i$ per moment-of-interest. To avoid vanishing gradients, we use a Gated Recurrent Unit (GRU) \cite{cho2014learning} to compute the hidden state $\mathbf{h}_i$. This recurrent component takes only $K$ steps and $K$ is typically kept very low ($K=3$ in our experiments).
In contrast, most recent models instead step through a large number of imputed timestamps $T$ (typically $T \gg K$) creating slow models that are hard to optimize. \subsubsection{Moment Network} Next, the \textit{Moment Network} $\mathcal{M}_\theta$ uses the hidden state $\mathbf{h}_i$ and predicts the next moment-of-interest $m_{i+1}$. There are no ground-truth moments, so we frame this component as a Partially-Observable Markov Decision Process (POMDP), similar to \cite{mnih2014recurrent}. We follow the standard approach and solve this POMDP using on-policy reinforcement learning. In this way, the hidden state $\mathbf{h}_i$ from the \textit{Transition Model} serves as an observation from the environment (representing the data collected at all prior moments of interest). The possible actions include all real-valued timestamps between 0 and $\max T$, and we define the reward to be the final classification success. The goal is to learn a policy $\pi(\mathbf{h}_i)$ that predicts the next moment $m_{i+1}$. Since there are infinitely-many moments in the continuous timeline, we parameterize the mean $\mu_i$ of a Normal distribution with fixed variance from which we \textit{sample} real-valued $m_{i+1}$. To acquire good samples, $\mathbf{h}_i$ is first projected into a one-dimensional probabilistic space by a neural network: $\mu_i = \sigma\left(\mathbf{W}\mathbf{h}_i+\mathbf{b}\right).$ We then scale $\mu_i$ by multiplying with $\max T$ and sample moment $m_{i+1} \sim \mathcal{N}(\mu_i, \sigma)$, with tunable $\sigma$. If $m_{i+1} > \max T$, we re-assign $m_{i+1} := \max T$, and if $m_{i+1} < 0$, $m_{i+1}:=0$. To train $\mathcal{M}_\theta$, we set reward $r_i = 1$ if the final classification is accurate, otherwise $r_i = -1$. The Moment Network thus seeks \textit{discriminative} regions, which lead to the highest rewards. CAT predicts $K$ moments of interest, iteratively cycling between the Receptor Network, Moment Network, and Transition Model $K$ times.
This packs information from $K$ steps into the final hidden state $\mathbf{h}_K$. \subsubsection{Discriminator Network} The final component of CAT is a \textit{Discriminator Network} $\mathcal{D}_\theta$, which learns to project the Transition Model's final hidden state $\mathbf{h}_K$ into a $C$-dimensional probabilistic space in which it predicts $\hat{y}$ to be $X$'s class label. This final classification is made via a single linear layer: $\hat{y} = \text{softmax}(\mathbf{W}\mathbf{h}_K+\mathbf{b}).$ The discriminator is naturally connected to the transition model, so is easily expandable according to the required complexity of a task. \subsubsection{CAT Training}\label{sec:optimization} The Receptor Network, Transition Model, and Discriminator are optimized together to predict $\hat{y}$ accurately by minimizing cross entropy: \begin{align}\label{eqn:cross_entropy} \mathcal{L}_{\text{s}}(\theta_s) = -\sum_{c=0}^C y_c \log\hat{y}_c, \end{align} where $y_c$ is 1 if $X$ is in class $c$ and $\hat{y}_c$ is the corresponding prediction. $\theta_s$ denotes these networks' parameters. The Moment Network, on the other hand, samples the moments, so its learning objective is the maximization of the expected reward: $R = \sum_{i=0}^K r_i$, so $\theta_{\text{rl}}^* = \argmax_{\theta_{\text{rl}}}\mathbb{E}[R]$, where $\theta_{\text{rl}}^*$ are the optimal parameters for the \textit{Moment Network}. However, this is not differentiable. To maximize $\mathbb{E}[R]$ using backpropagation, we follow the standard protocol for on-policy reinforcement learning and optimize the Moment Network's policy using the REINFORCE algorithm \cite{williams1992simple}. Thus, we use a well-justified surrogate loss function that \textit{is} differentiable, allowing for optimization by taking steps in the direction of $\mathbb{E}[\nabla\log\pi(h_{0:k}, \mu_{0:k},r_{0:k})R]$. The gradient can thus be approximated for the predicted moments.
Thus learning progresses, but there may be high variance in the policy updates since this is not the \textit{true} gradient for maximizing $\mathbb{E}[R]$. To reduce variance, we employ the commonly-used \textit{baseline} approach to approximate the expected reward, with which we may adjust the raw reward values, as shown in Equation \ref{eqn:reinforce}. Here, $b_j$ is a baseline predicted by a two-layer neural network and its predictions approximate the mean $R$ by reducing the mean squared error between $b_j$ and the average $R$. The weights $\theta_\text{rl}$ are thus updated by how much better than average the outcomes are. \begin{align} \mathcal{L}_{\text{rl}}(\theta_\text{rl}) = -\mathbb{E}\Bigg[\sum_{i=0}^{K} \log \pi(m_i|h_i)\bigg[\sum_{j=i}^{K} \big(R - b_{j}\big)\bigg]\Bigg]\label{eqn:reinforce} \end{align} Finally, the entire network can be optimized jointly via gradient descent on the sum of Equations 3 and 4: $\mathcal{L}(\theta) = \mathcal{L}_{s}(\theta_\text{s}) + \mathcal{L}_\text{rl}(\theta_\text{rl})$, where $\theta$ denotes CAT's parameters. \section{Experiments} \label{sec:experiments} \subsection{Datasets}\label{sec:datasets} We evaluate CAT using one synthetic dataset and five real-world publicly-available datasets. \textsc{M$\Pi$}: We develop a synthetic binary classification dataset to demonstrate that CAT indeed finds short signals in long ITS data. To add signals for different classes, we center a width-$\Delta$ discriminative region around a random moment in the timeline for each time series. The values for the timestamps within the width-$\Delta$ window take one of two forms, depending on the class. One class is characterized by the values $\{1, 1, 1\}$ (``$\Pi$''-shaped), and the other by the values $\{1, 0, 1\}$ (``M''-shaped). The timestamps corresponding to these values are evenly-spaced in the width-$\Delta$ window.
All timestamps \textit{not} in the discriminative region are sampled uniformly across the timeline and values are sampled from a Normal distribution $\mathcal{N}(0, 1)$. In selecting $\Delta$, we determine the signal-to-noise ratio of the data: A small $\Delta$ means that the ``$\Pi$'' or ``M'' signals happen in a short period of time, so overlooking the signal is punished more. We generate 5000 time series, each with 500 timestamps, and have an equal number of instances for each class. \texttt{UWave} \cite{liu2009uwave}: The popular \texttt{UWave} dataset contains 4478 length-945 gesture pattern time series collected from a handheld device. Each series is a member of one of eight classes. We follow the preprocessing procedure outlined by \cite{li2016scalable}, randomly downsampling to 10\% of the original values to create irregularity. \texttt{ExtraSensory} \cite{vaizman2017recognizing}: Following \cite{hartvigsen2022stop}, we augment existing human activity data by simulating listening probes on smartphone data. Listening probes collect data from devices only when certain conditions are met, creating realistic ITS. For example, consider detecting hand tremors for digital health \cite{garcia2016smartphone}. A listening probe on a smartphone's accelerometer will collect data only when the phone moves rapidly, capturing hand tremors while the phone is carried. However, false positives are common: when the phone is set down or dropped, data are \textit{also} collected, resulting in irrelevant regions. Our sampling is more realistic than prior works, which randomly downsample without encoding meaning into the irregularity of samples. We extract four disjoint, non-overlapping datasets from the challenging \texttt{ExtraSensory} human activity recognition database \cite{vaizman2017recognizing} via a simulated listening probe on the 3-dimensional (x, y, and z axes) accelerometer records. 
When the norm of the difference between consecutive records surpasses a threshold $\gamma=0.001$, the corresponding accelerometer data are collected. We collect four datasets, one for each of four human activities: \textsc{Walking} (2636 time series), \textsc{Running} (1066 time series), \textsc{LyingDown} (7426 time series), and \textsc{Sleeping} (9276 time series). For each class, we extract data for the person who performed the activity the most since people's activity patterns are often incomparable. We then break each series into windows of 200 timestamps, then apply the listening probe. The task is to detect whether the person performed the activity within this window. We finally balance each dataset to have an equal number of positive and negative series and ensure no extracted segments overlap. \subsection{Compared Methods} We compare CAT to ten recent ITS classifiers. The first four methods use imputation and feature expansions: linear interpolation (GRU-interp), mean imputation (GRU-mean), mean imputation with extra time-since-last-observation features (GRU-$\Delta t$), and mean imputation with a missingness indicator (GRU-S) \cite{lipton2016directly}. The second group contains state-of-the-art ITS classifiers: GRU-Decay \cite{mozer2017discrete}, GRU-D \cite{che2018recurrent}, IPN \cite{shukla2019interpolation}, mTAN \cite{shukla2021multi}, and NCDE \cite{kidger2020neural}. We also ablate CAT by replacing the Moment Network with randomly-selected moments of interest, which we refer to as CAT w/o Moment. \begin{figure} \caption{Multi-class classification with long \texttt{UWave}} \end{figure} \subsection{Implementation Details} For the \texttt{UWave} dataset, we use a standard 80\% training, 10\% validation, and 10\% testing split. The \texttt{ExtraSensory} datasets contain instances taken from different windows along a single timeline. To avoid cross-contamination, we split instances \textit{in time}, aiming for 80\% training and 20\% testing splits.
The training/testing process is repeated five times and we report the average and standard deviation for all experiments. All methods use 64-dimensional hidden states for their respective RNNs. For CAT, we set $K=3$, use a 50-dimensional representation for the Receptor Network, and set $\alpha = 100$ in Equation \ref{eqn:density}. All models are optimized using Adam with a learning rate of $10^{-3}$ and weight decay of $10^{-5}$ and all methods are run until their losses converge, taking around 200 epochs. Each model is implemented in PyTorch in our public code. \subsection{Experimental Results} \subsubsection{Experiments on Real-World Data.} First, we demonstrate that CAT indeed handles long series better than the state-of-the-art methods. To achieve this, we impute the \texttt{UWave} data with 200 timestamps, which is much higher than prior experiments \cite{shukla2019interpolation}. For ease of comparison, we also have CAT observe the data at the same ``resolution'' by setting $w = \delta*200$ where $\delta$ is the receptor-width hyperparameter. This resolution can be tuned within CAT. Our results are reported in Figure \ref{fig:uwave}. As expected, CAT achieves state-of-the-art accuracy on these data while the compared methods underperform their accuracy with roughly 100 imputed values. This indicates that CAT is far more robust to longer series than the state-of-the-art ITS classifiers. Second, we show that CAT successfully captures \textit{informative irregularity} in long series, as indicated by our results on the human activity recognition datasets (\textsc{Walking}, \textsc{Running}, \textsc{LyingDown}, and \textsc{Sleeping}). We compare all models using two settings: infrequent imputation (200 values) and frequent imputation (500 values). Intuitively, \textit{frequent} imputation leads to clearer signals, as there are more values imputed on the signal, while \textit{infrequent} imputation leads to unclear signals.
To successfully classify these data given infrequent imputation, finding the relevant regions of the data is more important. On the other hand, frequent imputations provide clear signals but come with the added noise, requiring explicit discovery of the relevant regions. Again, to compare with other methods, we set $w=\delta*200$ and $w=\delta*500$ for each respective frequency. \input{tables/low_res_T} \input{tables/high_res_T} Our results for this experiment, shown in Tables \ref{tab:low_res} and \ref{tab:high_res}, show that, as expected, CAT outperforms all compared methods in both the \textit{infrequent} and the \textit{frequent} settings for all datasets by an average of over 8\%. The baselines also mainly perform their best with \textit{infrequent} imputation, while CAT performs its best at \textit{frequent} imputation as it adapts to different resolutions. Also as expected, the recent \textit{GRU-D}, \textit{IPN}, and \textit{mTAN} models are generally CAT's strongest competitors. As expected, methods that model irregularity (\textit{GRU-D}, \textit{IPN}, \textit{GRU-S}, and CAT) largely beat the methods that disregard irregularity. \textit{GRU-interp}'s poor performance indicates that the benefits of CAT do not come from the linear interpolation used by the Receptor Network. For all datasets, CAT outperforms \textit{CAT w/o Moment}, the \textit{policy-free} version of CAT that places the \textit{Receptor Network} at random moments in the timeline. In fact, \textit{CAT w/o Moment} is overall the \textit{worst}-performing method, indicating that CAT's strong performance comes from a successfully-trained \textit{Moment Network}. Therefore CAT indeed succeeds to \textit{learn} the discriminative regions of the given time series. However, it is possible that \textit{CAT w/o Moment} could still perform well with enough moments. To determine if this is the case, we vary the number of moments for both CAT and \textit{CAT w/o Moment}. 
As our results in Figure 5 in the Appendix show, the moment network is effective. \subsubsection{Experiments on Long Synthetic Data.}\label{sec:synth_exp} We finally evaluate CAT's robustness to signal length using the synthetic \textsc{M}$\Pi$ dataset. We use long, 500 timestep time series for all experiments. Therefore, for short signals, there is a huge amount of noise with very tiny relevant regions. Our results are shown in Figure \ref{fig:mpi}. First, as shown in Figure \ref{fig:signal_to_noise}, we vary the signal-to-noise ratio in $\text{M}\Pi$, as defined by the length of the relevant signal for each class. Intuitively, as this ratio increases, the signal becomes easier to identify. By updating the \textit{receptor width} $\delta$ to match the signal-to-noise ratio as it increases, we find that the \textit{Moment Network} indeed succeeds in finding the discriminative moments in the timeline, achieving nearly-perfect accuracy even when the signal only takes up 6\% of the timeline. Once the signal takes up 10\% of the timeline, CAT consistently achieves 100\% testing accuracy. We also find that the compared methods fail when the signal-to-noise ratio is lower than 0.1, achieving roughly 50\% testing accuracy. This is expected as RNNs are classically hard to train on long series, especially with such noisy inputs. \begin{figure} \caption{Effect of signal width $\Delta$.} \caption{Effect of $\delta$ with $\Delta=0.04$.} \caption{CAT's performance on Synthetic $\text{M}\Pi$.} \end{figure} Second, as shown in Figure \ref{fig:hyperparam}, we vary the \textit{receptor width} parameter $\delta$ for a signal-to-noise ratio $\Delta$ of 0.04 to understand CAT's sensitivity to the proper selection of $\delta$. We investigate the signal-to-noise ratio of 0.04 where CAT achieves only 75\% accuracy, indicating potential sensitivity to hyperparameters (see Figure \ref{fig:signal_to_noise}).
As expected, accuracy suffers when $\delta$ is either too small (0.02) or too large (0.5). The optimal $\delta$ lies somewhere between 0.2 and 0.32 for this experiment. Quite interestingly, this is much larger than the data's signal-to-noise (0.04). While a larger receptor width $\delta$ should capture signals more easily, it also admits more noise, suggesting that the receptor still filters out the noisy regions when they overlap with the receptor's window. These results also indicate that CAT can be robust to overestimating $\delta$. \section{Conclusions} \label{sec:conclusions} In this work, we identify the open Attention-Based Classification problem for long and irregularly-sampled time series, which is a challenging and impactful setting common to many important domains. The Attention-Based Classification problem is to classify long irregularly-sampled time series based on small discriminative signals in the continuous timeline while learning to ignore irrelevant regions. Since prior methods rely on good selection of a set of timesteps at which to impute values, they struggle to classify time series in this setting, which we demonstrate experimentally. Using insights from prior methods, we then propose the Continuous-time Attention Policy Network (CAT), which generalizes previous works by learning to search for short signals in a time series' potentially-long timeline. CAT includes a reinforcement learning-based Moment Network that seeks discriminative moments in the timeline, positioning a novel Receptor Network that represents signals from \textit{both} the values themselves and the patterns existing in the timing of the observations. Using a core Transition Model that learns to model the transition between moments, a Discriminator Network finally classifies the entire series. This approach can intuitively be extended to match the modeling paradigms proposed by other recent methods, like differential equation models and time-representational encodings.
We validate our method on a wide range of experiments featuring four real datasets, ablation studies, impacts of hyperparameter selection, a synthetic dataset highlighting CAT's strengths, and timing experiments. Across the board, CAT consistently outperforms recent alternatives by successfully finding short signals in long time series. \balance \appendix \section*{Appendix} \section{Dataset Descriptions}\label{sec:har} All dataset statistics are shown in Table \ref{tab:datastats}. For each of the four Human Activity Recognition datasets from the \textsc{ExtraSensory} dataset (\texttt{http://extrasensory.ucsd.edu/}) \cite{vaizman2017recognizing} (\textsc{Walking}, \textsc{Running}, \textsc{LyingDown}, and \textsc{Sleeping}), we aim for an 80\% training and 20\% testing split \textit{in time}, though this is challenging to control in practice. Thus, the exact ratio differs between the series. For each dataset, we further split off 10\% of the training set for validation. \begin{table}[h] \caption{Dataset Statistics. $\text{N}_\text{train}$ denotes the number of training instances, $\text{N}_\text{test}$ is the number of testing instances, Avg. $T$ is the average number of observations per series, and $C$ is the number of classes.} \centering \label{tab:datastats} \begin{tabular}{lcccc} \toprule Dataset & $\text{N}_\text{train}$ & $\text{N}_\text{test}$ & Avg. 
$T$ & $C$\\ \midrule \textsc{$\text{M}\Pi$} & 4000 & 1000 & 500 & 2\\ \textsc{UWave} & 4030 & 448 & 94 & 8\\ \textsc{Walking} & 1616 & 1020 & 99 & 2\\ \textsc{Running} & 666 & 400 & 85 & 2\\ \textsc{LyingDown} & 6186 & 1240 & 80 & 2\\ \textsc{Sleeping} & 6462 & 2814 & 80 & 2\\ \bottomrule \end{tabular} \end{table} \section{Further M$\Pi$ Experiments}\label{sec:app_synth} Expanding on the synthetic experiment discussed in the Experiments section of our main paper, we also run all compared methods for each of the signal-to-noise ratios, the results of which are shown in Figure \ref{fig:synth_results_all}. Again, each series has 500 timesteps, only 3 of which are relevant to the classification task. The 3 relevant timesteps are evenly-spaced in a randomly-placed width-$\Delta$ window in the continuous timeline. In this experiment, all compared methods fail to classify these series, even when performing imputation with 500 timesteps, which does not delete the signal. Instead, they fail to \textit{focus} on the discriminative region and so cannot perform classification. On the contrary, CAT achieves nearly-perfect accuracy with a signal-to-noise ratio as low as .06, indicating that it indeed does find the relevant regions. \section{Timing Experiments}\label{sec:timing} CAT's \textit{Transition Model} uses an RNN to model the transitions between \textit{moments}, as opposed to the timestamps themselves. This hints that CAT should naturally be much faster than the compared methods. We confirm this by timing the training of all methods on the \textsc{Walking} dataset with frequent imputation---see Figure \ref{fig:timing}. As expected, CAT runs over seven times faster than the next slowest method while achieving much higher testing accuracy. This is particularly meaningful for long series in time-sensitive domains such as healthcare where a model's inference time is hugely important \cite{hong2020holmes}. 
Our reported timing comparisons between compared methods are also largely consistent with prior works' timing experiments \cite{shukla2019interpolation}. Also as expected, the GRU-D \cite{che2018recurrent}, mTAN \cite{shukla2021multi}, and NCDE \cite{kidger2020neural} run significantly slower than the other compared methods, and so we omit their results from this figure. Their accuracies are much lower than CAT's---see Table 2 in the main paper. All models were trained and evaluated on Intel Xeon Gold 6148 CPUs. \begin{figure} \caption{Ablating the impact of the Moment Network using the \textsc{Walking} dataset.} \end{figure} \begin{figure} \caption{Timing performance for the high-resolution \textsc{Walking} dataset.} \end{figure} \section{CAT Hyperparameters}\label{sec:hyperparam} We experiment with three key hyperparameters of CAT for each dataset: The receptor-width $\delta$, the hidden dimension of the Receptor Network $\mathcal{R}$, and whether or not to use the informative irregularity feature of CAT in the Receptor Network. Interestingly, we found that for $\mathcal{R}$, a hidden dimension of 50 seemed to consistently produce the best results. This hidden dimension largely controls the number of parameters in CAT and influences the timing experiments for which we also use a 50-dimensional representation. Our selections for $\delta$ values for different datasets are shown in Table \ref{tab:hyperparam}. We tune $\delta$ between three values: $0.05$, $0.1$, and $0.2$. For \textsc{UWave}, $\delta=0.05$ was best. $\delta=0.05$ was also best for all infrequent \textsc{ExtraSensory} datasets except for \textsc{WALKING}, which used $0.2$. $\delta=0.2$ was chosen for all frequent \textsc{ExtraSensory} datasets except for \textsc{SLEEPING}, for which $\delta=0.1$. For $\delta$, we observe that for the \textit{infrequent} experiments, a smaller receptor width is largely the best option while a larger width is beneficial for the \textit{frequent} experiments. 
This may be due to the fact that with the infrequent representation of the input series, closer focus on the comparatively-fuzzier signals is required. We also found that setting the number of steps $k=3$ consistently outperformed larger and smaller values. While large values of $k$ conceptually should still learn to classify effectively, in practice, the more steps a reinforcement learning agent takes per episode, the more challenging it becomes to optimize effectively due to the credit assignment problem. We also find that there are cases where it is not essential to use both channels---Values and Irregularity---in the receptor network. While using the irregularity channel (computed via the squared exponential kernel in the main paper) always leads to state-of-the-art performance by CAT, its omission can sometimes improve CAT's performance slightly. When irregularity is an essential feature, however, this information cannot be removed. We show for which datasets this is true in Table \ref{tab:hyperparam}. This may be a feature of (1) how the irregularity is represented---there are other approaches---and (2) how essential it is to the task. We recommend always using the irregularity channel as the potential downside of ignoring irregularity outweighs the minor benefits of omission in some cases. \begin{table}[t] \caption{Best hyperparameter settings for CAT.} \resizebox{\linewidth}{!}{ \centering \label{tab:hyperparam} \begin{tabular}{lcccc} \toprule Dataset & $\delta$ & Hidden Dim. 
of $\mathcal{R}$ & Density\\ \midrule \textsc{UWave} & 0.05 & 50 & Off \\ Infrequent \textsc{Walking} & 0.2 & 50 & Off \\ Infrequent \textsc{Running} & 0.05 & 50 & On \\ Infrequent \textsc{LyingDown} & 0.05 & 50 & Off \\ Infrequent \textsc{Sleeping} & 0.05 & 50 & On \\ Frequent \textsc{Walking} & 0.2 & 50 & Off \\ Frequent \textsc{Running} & 0.2 & 50 & On \\ Frequent \textsc{LyingDown} & 0.2 & 50 & On \\ Frequent \textsc{Sleeping} & 0.1 & 50 & Off \\ \bottomrule \end{tabular} } \end{table} \begin{figure} \caption{Effect of changing the signal width on accuracy.} \end{figure} \end{document}
\begin{document} \begin{frontmatter} \title{On a new kind of Ansatz Spaces for Matrix Polynomials} \author[label1]{Heike Fa\ss bender} \address[label1]{Institut \emph{Computational Mathematics}/ AG Numerik, TU Braunschweig, Pockelsstr. 14, 38106 Braunschweig, Germany} \cortext[cor1]{Corresponding author, Email [email protected]} \author[label1]{Philip Saltenberger\corref{cor1}} \begin{abstract} In this paper, we introduce a new family of equations for matrix pencils that may be utilized for the construction of strong linearizations for any square or rectangular matrix polynomial. We provide a comprehensive characterization of the resulting vector spaces and show that almost every matrix pencil therein is a strong linearization regardless whether the matrix polynomial under consideration is regular or singular. These novel ``ansatz spaces'' cover all block Kronecker pencils as introduced in \cite{DopLPVD16} as a subset and therefore contain all Fiedler pencils modulo permutations. The important case of square matrix polynomials is examined in greater depth. We prove that the intersection of any number of block Kronecker ansatz spaces is never empty and construct large subspaces of block-symmetric matrix pencils among which still almost every pencil is a strong linearization. Moreover, we show that the original ansatz spaces $\mathbb{L}_1$ and $\mathbb{L}_2$ may essentially be recovered from block Kronecker ansatz spaces via pre- and postmultiplication, respectively, of certain constant matrices. 
\end{abstract} \begin{keyword} matrix polynomials \sep linearization \sep strong linearization \sep Fiedler pencils \sep block Kronecker pencils \sep rectangular matrix polynomial \sep structure-preserving linearization \sep eigenvector recovery \sep ansatz space \MSC[2010] 65F15 \sep 15A03 \sep 15A18 \sep 15A22 \sep 15A23 \sep 47J10 \end{keyword} \end{frontmatter} \section{Introduction} The linearization of (non) square matrix polynomials $$P(\lambda) = \sum_{i=0}^d P_i\lambda^i, P_i \in \mathbb{R}^{m \times n}$$ has received much attention in the last ten years, motivated at least in part by the ground-breaking paper \cite{MacMMM06}. In that paper, three vector spaces $\mathbb{L}_1$, $\mathbb{L}_2$ and $\mathbb{DL}$ of potential linearizations (called \enquote{ansatz spaces}) for square matrix polynomials $P(\lambda) (m = n)$ have been introduced. The spaces $\mathbb{L}_1$, $\mathbb{L}_2$ generalize the companion form of the first and second kind, resp., $$ \mathbb{L}_1(P) = \{\mathcal{L}(\lambda) = \lambda X + Y \in \mathbb{R}[\lambda]^{nd \times nd} \mid \mathcal{L}(\lambda) \big( \Lambda_{d-1} \otimes I_n \big) = v \otimes P(\lambda) , v \in \mathbb{R}^d\}, $$ $\mathbb{L}_2(P) = \lbrace \mathcal{L}(\lambda)^T \; | \; \mathcal{L}(\lambda) \in \mathbb{L}_1(P^T) \rbrace$ while the double ansatz space \begin{equation}\label{DLP} \mathbb{DL}(P) = \mathbb{L}_1(P) \cap \mathbb{L}_2(P) \end{equation} is their intersection. Here $\Lambda_j$ is the vector of the elements of the standard basis; $\Lambda_j := \Lambda_j(\lambda) = [ \; \lambda^{j} \; \lambda^{j - 1} \; \cdots \; \lambda \; 1 \; ]^T \in \mathbb{R}[\lambda]^{j + 1}$ for any integer $j \geqslant 0.$ A thorough discussion of these spaces can be found in \cite{MacMMM06} and \cite{HigMMT06}, see \cite{DopLPVD16} for more references. 
In particular, it is discussed in \cite{MacMMM06} that almost all pencils in these spaces are linearizations of $P(\lambda)$ and in \cite{HigMMT06} that any matrix pencil in $\mathbb{DL}(P)$ is block-symmetric. The second main source of linearizations are Fiedler pencils $F_{\sigma}(\lambda)$. Unlike the linearizations from the vector spaces discussed above, these can be defined not only for square, but also for rectangular matrices \cite{DeTDM12}. These pencils are defined in an implicit way, either in terms of products of matrices for square polynomials or as the output of a symbolic algorithm for rectangular matrices, see \cite[Section 4]{DopLPVD16} for a definition, a summary of their properties and references to further work. In \cite[Section 5]{DopLPVD16} the family of block Kronecker pencils is introduced, which include all of the Fiedler pencils (modulo permutations). For an arbitrary matrix pencil $M_0+\lambda M_1 \in \mathbb{R}^{(\eta+1)m \times (\epsilon+1)n}$ any matrix pencil of the form \begin{equation}\label{blockKronpencil} {\mathcal N}(\lambda) = \left[\begin{array}{c|c} M_0+\lambda M_1 & L_\eta^T \otimes I_m\\ \hline L_\epsilon \otimes I_n & 0_{\epsilon n \times\eta m} \end{array}\right] \in \mathbb{R}^{((\eta + 1)m + \epsilon n) \times ((\epsilon + 1)n + \eta m)} \end{equation} is called an $(\epsilon,n,\eta,m)$-block Kronecker pencil, or simply, a block Kronecker pencil. Here, \begin{equation} L_{\kappa} = L_{\kappa}(\lambda) := \begin{bmatrix} -1 & \lambda & & & \\ & -1 & \lambda & & \\ & & \ddots & \ddots & \\ & & & -1 & \lambda \end{bmatrix} \in \mathbb{R}[\lambda]^{\kappa \times (\kappa + 1)}. 
\label{Lkappa} \end{equation} It is proven that ${\mathcal N}(\lambda)$ is a (strong) linearization of the matrix polynomial $Q(\lambda) = (\Lambda_\eta(\lambda)^T\otimes I_m)(M_0+\lambda M_1)(\Lambda_\epsilon(\lambda)\otimes I_n)\in \mathbb{R}[\lambda]^{m\times n}$ of degree $d \leq \epsilon+\eta+1.$ Inspired by the work in \cite{DopLPVD16}, we introduce a new family of equations for matrix pencils that may be applied to square and rectangular matrix polynomials. Matrix pencils that satisfy one or more particular equations form real vector spaces that are shown to serve as an abundant source of strong linearization. Since these spaces share important properties with $\mathbb{L}_1$ and $\mathbb{L}_2$ and entirely contain all block Kronecker pencils as introduced in \cite{DopLPVD16}, we named them ``block Kronecker ansatz spaces''. Our derivations based on these ansatz spaces are basically theoretically oriented. The purpose of this paper is twofold: it builds a bridge between the two main linearization techniques - the ansatz space framework initiated in \cite{MacMMM06} and the approach via Fiedler pencils starting with \cite{AntV04} - along with the development of ansatz spaces in the style of \cite{MacMMM06} for rectangular matrix polynomials. Although we define and introduce the block Kronecker ansatz spaces for rectangular matrix polynomials, we devote special attention to the investigation of the square case. In this context we are able to show that the intersection of any number of block Kronecker ansatz spaces is never empty. As a main difference to $\mathbb{DL}$, pencils in two or more block Kronecker ansatz spaces are not block-symmetric in general but block-symmetric pencils form proper and large-dimensional subspaces therein. Still almost every matrix pencil, block-symmetric or not, is a strong linearization as long as the matrix polynomial under consideration is regular. 
The main contribution of this paper is to provide a comprehensive introduction of block Kronecker ansatz spaces, to prove their basic properties and to motivate these features by appropriately selected examples. To this end, in order to focus on the essential ideas and concepts, we present our results just for the real numbers $\mathbb{R}$. This enables us to concentrate on the precise introduction of the block Kronecker spaces (over $\mathbb{R}$) avoiding technicalities that might occur considering other fields. After submission of the first version of this paper, the manuscript \cite{DoBPSZ16} was released. In \cite{DoBPSZ16} the block Kronecker ansatz spaces have been introduced independently as the family of extended block Kronecker pencils motivated, as in our case, by the results in \cite{DopLPVD16}. However, the goal of \cite{DoBPSZ16} is different from ours. While our goal is to establish a new ansatz space framework for the explicit construction of strong linearizations for matrix polynomials and to show the connections between those ansatz spaces, Fiedler pencils and block Kronecker pencils, the goal in \cite{DoBPSZ16} is to provide a unified approach to all the families of Fiedler-like pencils in any field via the more general concept of strong block minimal bases pencils. Being now aware of \cite{DoBPSZ16} we will refer to similar results throughout the paper and, moreover, point out some new insights taking the results from \cite{DoBPSZ16} into account. The paper is organized as follows: in Section \ref{sec2} some basic notation and well-known results are reviewed. Section \ref{sec3} introduces the block Kronecker ansatz space and its most important properties. Double ansatz spaces and their subspaces of block-symmetric pencils are considered in Section \ref{sec4}, while Section \ref{sec:L1L2} presents some further understanding of $\mathbb{L}_1$ and $\mathbb{L}_2$ based on our results. 
Some concluding remarks are given in Section \ref{sec:conclusions}. \section{Basic Notation}\label{sec2} The following notation will be used throughout the paper: $I_n$ is the $n \times n$ identity matrix, $e_i$ its $i$-th column and $0_{m \times n}$ denotes the $m \times n$ zero matrix. The Kronecker product of two matrices $A$ and $B$ is denoted $ A \otimes B$ whereas the direct sum of $A$ and $B$ is $A \oplus B$, i.e. $A \oplus B = \textnormal{diag}(A,B)$. Whenever a $km \times kn$ matrix $A$ may be expressed as $A= \sum_{i,j=1}^k e_ie_j^T \otimes B_{ij}$ for certain $m \times n$ matrices $B_{ij}$, we call $A^{\mathcal{B}} = \sum_{i,j=1}^k e_je_i^T \otimes B_{ij}$ the block-transpose of $A$ (see \cite[Def. 2.1]{HigMMT06}). For $\mathbb{R}[\lambda]$, the ring of real polynomials in the variable $\lambda$, the $m \times n$ matrix ring over $\mathbb{R}[\lambda]$ is denoted by $\mathbb{R}[\lambda]^{m \times n}$. Its elements are referred to as matrix polynomials. Notice that $\mathbb{R}[\lambda]^{m \times n}$ is a vector space over $\mathbb{R}$. Certainly, a matrix polynomial $P(\lambda) \in \mathbb{R}[\lambda]^{m \times n}$ may always be expressed as \begin{align} P(\lambda) &= P_d \lambda^d + P_{d-1} \lambda^{d-1} + \cdots + P_1 \lambda + P_0 \notag \\ &= [ \, P_d \; \,P_{d-1} \; \, \cdots \; \, P_0 \,](\Lambda_d(\lambda) \otimes I_n) \label{def_matrixpol} \end{align} for appropriate matrices $P_0, \ldots , P_d \in \mathbb{R}^{m \times n}$ and some $d \in \mathbb{N}$. A matrix polynomial $P(\lambda) \in \mathbb{R}[\lambda]^{m \times n}$ is called regular if $m = n$ and $\textnormal{det}(P(\lambda))$ is not identically zero. Otherwise, $P(\lambda)$ is called singular. A regular matrix polynomial $P(\lambda)$ is said to be unimodular if $\textnormal{det}(P(\lambda)) \in \mathbb{R}$. 
A scalar $z \in \mathbb{C}$ is referred to as a (finite) eigenvalue of $P(\lambda) \in \mathbb{R}[\lambda]^{m \times n}$, if $P(z) \in \mathbb{C}^{m \times n}$ is singular. Its corresponding eigenspace is defined to be $\textnormal{null}(P(z))$, the nullspace of $P(z)$. \\ Two matrix polynomials $P(\lambda)$ and $Q(\lambda)$ are said to be unimodularly equivalent if there exist unimodular matrices $U(\lambda)$ and $V(\lambda)$ such that $P(\lambda) = U(\lambda) Q(\lambda)V(\lambda)$ holds. The equivalence is called strict whenever $U(\lambda)$ and $V(\lambda)$ may be chosen independent of $\lambda$. Given in the form (\ref{def_matrixpol}), the matrix polynomial $P(\lambda)$ has degree $k$, i.e. $\textnormal{deg}(P) = k$, whenever $P_k \neq 0$ and $P_i = 0$ for all $i > k$. If $\textnormal{deg}(P)=1$ we refer to $P(\lambda)$ as a (matrix) pencil. The subspace of all $m \times n$ matrix polynomials having at most degree $d \in \mathbb{N}$ is denoted $\mathbb{R}_d[\lambda]^{m \times n}$. For any $P(\lambda) \in \mathbb{R}[\lambda]^{m \times n}$ and any $t \geq \textnormal{deg}(P)$, $t \in \mathbb{N}$, the $t$-reversal of $P(\lambda)$ is defined as the matrix polynomial \begin{equation} \textnormal{rev}_t(P(\lambda)) = \lambda^t P \left( \frac{1}{\lambda} \right) \in \mathbb{R}[\lambda]^{m \times n}. \end{equation} The matrix polynomial $P(\lambda)$ with $\textnormal{deg}(P) = k$ is said to have an infinite eigenvalue, if zero is an eigenvalue of $\textnormal{rev}_k(P(\lambda))$. The corresponding eigenspace is $\textnormal{null}(\textnormal{rev}_k(P(0)))$. 
\subsection{Linearizations of Matrix Polynomials} A matrix pencil $\mathcal{L}(\lambda)$ is said to be a linearization of $P(\lambda) \in \mathbb{R}[\lambda]^{m \times n}$ if there exist two unimodular matrix polynomials $U(\lambda)$ and $V(\lambda)$ such that \begin{equation} U(\lambda) \mathcal{L}(\lambda) V(\lambda) = \left[ \begin{array}{c|c} I_s & \\ \hline & P(\lambda) \end{array} \right] \label{def_linearization} \end{equation} holds for some $s \in \mathbb{N}_0$. Moreover, assuming $\textnormal{deg}(P)=k$, the linearization $\mathcal{L}(\lambda)$ is called strong whenever $\textnormal{rev}_1( \mathcal{L}(\lambda))$ is a linearization for $\textnormal{rev}_k(P(\lambda))$ as well. It is a basic fact on strong linearizations that they preserve the finite and infinite elementary divisors of $P(\lambda)$ (see the information and the references given in \cite[Section 2]{DopLPVD16} for more details). In particular, any strong linearization $\mathcal{L}(\lambda)$ of $P(\lambda)$ has the same (finite and infinite) eigenvalues as $P(\lambda)$ and preserves their algebraic and geometric multiplicities. Given an $n \times n$ matrix polynomial $P(\lambda) = \sum_{i=0}^k P_i \lambda^i$ of degree $\textnormal{deg}(P)=k$, it is well known that the Frobenius companion form $$ \textnormal{Frob}_P(\lambda) = \begin{bmatrix} P_k & & & \\ & I_n & & \\ & & \ddots & \\ & & & I_n \end{bmatrix} \lambda + \begin{bmatrix} P_{k-1} & \cdots & P_1 & P_0 \\ -I_n & & & \\ & \ddots & & \\ & & -I_n & \end{bmatrix} \in \mathbb{R}_1[\lambda]^{kn \times kn}$$ is a strong linearization for $P(\lambda)$ no matter whether $P(\lambda)$ is regular or singular. Moreover, (strict) equivalence preserves (strong) linearizations. According to (\ref{def_linearization}) any matrix pencil is its own linearization. Thus, the notion of linearization hardly makes sense for matrix pencils. 
Since the construction of linearizations is our main concern throughout the paper, we will henceforth assume arbitrary matrix polynomials $P(\lambda)$ having degree $\textnormal{deg}(P) \geq 2$ to avoid the potential occurrence of pathological cases. \section{Block Kronecker Ansatz Spaces}\label{sec3} The following definition introduces the main object of interest throughout the remaining paper. We will consistently assume $\epsilon$ and $\eta$ to be nonnegative integers. \begin{definition}[Block Kronecker Ansatz Equation] \label{def_GAS} \ \\ Let $P(\lambda)$ be an $m \times n$ matrix polynomial of degree $k= \epsilon + \eta + 1$. We define $\mathbb{G}_{\eta + 1}(P)$ to be the set of all $((\eta + \hspace{0.01cm} 1)m + \epsilon n) \times ((\epsilon + 1)n + \eta m)$ matrix polynomials $\mathcal{L}(\lambda) = X \lambda + Y$ satisfying \begin{equation} \big( (\Lambda_{\eta}(\lambda)^T \otimes I_m) \oplus I_{\epsilon n} \big) \mathcal{L}(\lambda) \big ((\Lambda_{\epsilon}(\lambda) \otimes I_n) \oplus I_{\eta m} \big) = \alpha P(\lambda) \oplus 0_{\epsilon n \times \eta m} \label{ansatzequation1} \end{equation} for some $\alpha \in \mathbb{R}$. Equation (\ref{ansatzequation1}) is called block Kronecker ansatz equation for the matrix polynomial $P(\lambda)$. \end{definition} We will refer to $\mathbb{G}_{\eta + 1}(P)$ as a \enquote{block Kronecker ansatz space} for $P(\lambda)$. This name was chosen in compliment of the \enquote{ansatz spaces} established in \cite{MacMMM06} and the \enquote{block Kronecker pencils} introduced in \cite{DopLPVD16}. How the main ideas of both papers may be unified via the concept of block Kronecker ansatz spaces is one primary concern of this paper. \begin{remark}\label{rem1} According to \cite[Def. 3.1, Thm. 3.3]{DopLPVD16} it is immediate that (\ref{ansatzequation1}) may be formulated in the framework of dual minimal bases as well. Therefore, for any other pair of dual minimal bases \cite[Def. 
2.5]{DopLPVD16} a corresponding ansatz equation may be formulated and analyzed similar to our discussion in the subsequent sections. However, most of the following results require that we know exactly how the dual minimal bases look like. To this end, we confine ourselves to (\ref{ansatzequation1}). \end{remark} Notice that, since $\eta$ may take any integer value between $0$ and $k-1$, there always exist exactly $k$ block Kronecker ansatz spaces for $P(\lambda)$. \begin{lemma}[$\mathbb{G}_{\eta + 1}(P)$ is a $\mathbb{R}$-vector space] \label{lem_vectorspace} For any $m \times n$ matrix polynomial $P(\lambda)$ of degree $k=\epsilon + \eta + 1$, $\mathbb{G}_{\eta + 1}(P)$ is a vector space over $\mathbb{R}$. \end{lemma} Since the statement of Lemma \ref{lem_vectorspace} is quite obvious, we omit the proof. Rather notice that equation (\ref{ansatzequation1}) may be reformulated as\footnote{In order to save space here and in subsequent formulas the dependence of $L_{\kappa}(\lambda)$ and $\Lambda_{\kappa}(\lambda)$ on $\lambda$ is sometimes omitted. Since there is no risk of confusion, $L_{\kappa}$ and $\Lambda_{\kappa}$ will always be understood as $L_{\kappa}(\lambda)$ and $\Lambda_{\kappa}(\lambda)$.} \begin{equation} {\small \left[ \begin{array}{c|c} \Lambda_{\eta}^T \otimes I_m & 0 \\ \hline 0 & I_{\epsilon n} \end{array} \right] \left[ \begin{array}{c|c} \mathcal{L}_{11}(\lambda) & \mathcal{L}_{12}(\lambda) \\ \hline \mathcal{L}_{21}(\lambda) & \mathcal{L}_{22}(\lambda) \end{array} \right] \left[ \begin{array}{c|c} \Lambda_{\epsilon} \otimes I_n & 0 \\ \hline 0 & I_{\eta m} \end{array} \right] = \left[ \begin{array}{c|c} \alpha P(\lambda) & 0 \\ \hline 0 & 0_{\epsilon n \times \eta m} \end{array} \right] } \label{ansatzequation2} \end{equation} where we have expressed $\mathcal{L}(\lambda)$ as a $2 \times 2$ block matrix with the leading $(\eta + 1)m \times (\epsilon + 1)n$ block $\mathcal{L}_{11}(\lambda)$. Following \cite[Def. 
5.1]{DopLPVD16}, this structured $2 \times 2$ block-notation of $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ is called its natural partition. In terms of this expression, (\ref{ansatzequation2}) explicitly reads \begin{equation} \left[ \begin{array}{c|c} (\Lambda_{\eta}^T \otimes I_m) \mathcal{L}_{11}(\lambda) (\Lambda_{\epsilon} \otimes I_n) & ( \Lambda_{\eta}^T \otimes I_m) \mathcal{L}_{12}(\lambda) \\ \hline \mathcal{L}_{21}(\lambda) ( \Lambda_{\epsilon} \otimes I_n) & \mathcal{L}_{22}(\lambda) \end{array} \right] = \left[ \begin{array}{c|c} \alpha P(\lambda) & 0 \\ \hline 0 & 0_{\epsilon n \times \eta m} \end{array} \right]. \label{ansatzequation3} \end{equation} For $(\Lambda_{\eta}^T \otimes I_m) \mathcal{L}_{11}(\lambda) (\Lambda_{\epsilon} \otimes I_n)$ we will steadily be using the short hand notation $\Phi( \mathcal{L}_{11}(\lambda))$ assuming the parameters involved in this expression are clear from the context. For instance, (\ref{ansatzequation3}) implies $\Phi(\mathcal{L}_{11}(\lambda)) = \alpha P(\lambda)$. Next we will consider the off-diagonal blocks of (\ref{ansatzequation3}). Recall the definition of $L_{\kappa}(\lambda)$ (see (\ref{Lkappa})) and notice that $L_{\kappa}(\lambda) \Lambda_{\kappa}(\lambda) = 0$ (in fact $L_{\kappa}(\lambda)$ and $\Lambda_{\kappa}(\lambda)^T$ are dual minimal bases, see \cite[Sec. 2]{DopLPVD16} for more information). Consequently $(L_{\kappa}(\lambda) \otimes I_n)(\Lambda_{\kappa}(\lambda) \otimes I_n) = 0$ (see also \cite[Ex. 2.6]{DopLPVD16}). \begin{lemma} \label{lem_nullspace} Let $\mathcal{K}(\lambda)$ be an $\kappa_1 m \times (\kappa_2 + 1) n$ matrix pencil and assume \begin{equation} \mathcal{K}(\lambda) \big( \Lambda_{\kappa_2}(\lambda) \otimes I_n \big) = 0_{\kappa_1 m \times n}. \label{nullspace_equation} \end{equation} Then $\mathcal{K}(\lambda) = C(L_{\kappa_2}(\lambda) \otimes I_n)$ for some matrix $C \in \mathbb{R}^{\kappa_1 m \times \kappa_2 n}$. 
\end{lemma} \begin{proof} Assume ${\mathcal{K}(\lambda)} = [\, k_1 \; | \; K_1 \, ]\lambda + K_0$ with $k_1 \in \mathbb{R}^{\kappa_1 m \times n}$ satisfies (\ref{nullspace_equation}). Then $$ \Delta \mathcal{K}(\lambda) = \mathcal{K}(\lambda) - K_1(L_{\kappa_2}(\lambda) \otimes I_n) =: [ \, d_1(\lambda) \; | \; D_1 \, ] $$ is independent of $\lambda$ in all but its first block-column $d_1(\lambda) \in \mathbb{R}_1[\lambda]^{\kappa_1m \times n}$. However, from $\Delta \mathcal{K}(\lambda) ( \Lambda_{\kappa_2} (\lambda) \otimes I_n)$ we obtain \begin{align*} \mathcal{K}(\lambda) \big( \Lambda_{\kappa_2}(\lambda) \otimes I_n \big) - K_1 \big( L_{\kappa_2}(\lambda) \otimes I_n \big) \big( \Lambda_{\kappa_2}(\lambda) \otimes I_n \big) = 0_{\kappa_1 m \times n}, \end{align*} so $\Delta \mathcal{K}(\lambda)$ still satisfies (\ref{nullspace_equation}). Notice that $\Delta \mathcal{K}(\lambda) ( \Lambda_{\kappa_2} (\lambda) \otimes I_n)$ has dimension $\kappa_1 m \times n$ and that every $m \times n$ block is a matrix polynomial in the variables $1, \lambda , \lambda^2, \ldots , \lambda^{\kappa_2 +1}$. Due to the basis property of the monomials this implies $\Delta \mathcal{K}(\lambda) \equiv 0$ and proves the statement. \end{proof} Further on, via block-transposition it can be seen that any $(\kappa_1 + 1)m \times \kappa_2 n$ matrix pencil $\mathcal{K}(\lambda)$ satisfying $(\Lambda_{\kappa_1}(\lambda)^T \otimes I_m)\mathcal{K}(\lambda) = 0$ has an expression $\mathcal{K}(\lambda) = (L_{\kappa_1}(\lambda)^T \otimes I_m)C$ for some matrix $C \in \mathbb{R}^{\kappa_1 m \times \kappa_2 n}$. Hence, regarding (\ref{ansatzequation3}) once more, we obtain $$ \mathcal{L}_{21}(\lambda) = C_1 \big( L_{\epsilon}(\lambda) \otimes I_n \big) \qquad \mathcal{L}_{12}(\lambda) = \big( L_{\eta}(\lambda)^T \otimes I_m \big)C_2 $$ for matrices $C_1 \in \mathbb{R}^{\epsilon n \times \epsilon n}$ and $ C_2 \in \mathbb{R}^{\eta m \times \eta m}$. 
Now, considering again the $(1,1)$-block in (\ref{ansatzequation3}) and an $m \times n$ matrix polynomial $P(\lambda) = \sum_{i=0}^k P_i \lambda^i$ of degree $k = \epsilon + \eta + 1$, observe that the $(\eta + 1)m \times (\epsilon + 1)n$ matrix pencil $$ \Sigma_{\eta,P} (\lambda) = \begin{bmatrix} \lambda P_k + P_{k-1} & P_{k-2} & \cdots & P_{\eta } \\ & & & P_{\eta - 1} \\ & 0_{\eta m \times \epsilon n} & & \vdots \\ & & & P_0 \end{bmatrix} $$ satisfies $\Phi(\Sigma_{\eta , P}(\lambda)) = P(\lambda)$. Therefore, for any other $(\eta + 1)m \times (\epsilon + 1)n$ pencil $Q(\lambda)$ satisfying $\Phi( Q(\lambda)) = \alpha P(\lambda)$ for some $\alpha \in \mathbb{R}$ we obtain $$ \Phi \big( \alpha \Sigma_{\eta,P}(\lambda) - Q(\lambda) \big) = \alpha \Phi \big( \Sigma_{\eta , P}(\lambda) \big) - \Phi \big( Q(\lambda) \big) = \alpha P(\lambda) - \alpha P(\lambda) = {0.}$$ Thus, interpreting $\Phi$ as a function mapping $(\eta + 1)m \times (\epsilon + 1)n$ matrix pencils to $m \times n$ matrix polynomials $P(\lambda)$ of degree $\text{deg}(P) \leq \epsilon + \eta + 1$, $\Phi$ is linear. Moreover, $\Phi$ is easily seen to be surjective. The homomorphism theorem gives $$ \mathbb{R}_1[\lambda]^{(\eta +1)m \times (\epsilon +1)n} / \textnormal{null}(\Phi) \, \cong \, \mathbb{R}_{\epsilon + \eta + 1}[\lambda]^{m \times n} $$ and thus $\text{dim}(\text{null}(\Phi)) =\big( \eta( \epsilon + 1) + (\eta + 1) \epsilon \big)mn.$ Now note that the set $\mathcal{N}_{\epsilon , \eta}$ of all $(\eta + 1)m \times (\epsilon + 1)n$ matrix pencils $\mathcal{M}(\lambda)$ of the form \begin{equation} \mathcal{M}(\lambda) = B_1 \big( L_{\epsilon}(\lambda) \otimes I_n \big) + \big( L_{\eta}(\lambda)^T \otimes I_m \big) B_2 \label{pencilMB1B2} \end{equation} with arbitrary matrices $B_1 \in \mathbb{R}^{(\eta + 1)m \times \epsilon n}$ and $B_2 \in \mathbb{R}^{\eta m \times (\epsilon + 1)n}$ form a real vector space that is completely contained in $\text{null}(\Phi)$. 
Following (\ref{pencilMB1B2}), the mapping $(B_1,B_2) \mapsto \mathcal{M}(\lambda)$ is injective since $\mathcal{M}(\lambda) = 0$ can only hold for $B_1=B_2=0$ (consider once more the form of $L_{\epsilon}(\lambda)$ and $L_{\eta}(\lambda)^T$, see (\ref{Lkappa})). Therefore, we conclude that $\mathcal{N}_{\epsilon, \eta} = \textnormal{null}( \Phi)$ and obtain the following characterization of $\mathbb{G}_{\eta + 1}(P)$. \begin{theorem}[Characterization of $\mathbb{G}_{\eta + 1}(P)$] \label{thm_generalspace} \ \\ Let $P(\lambda)$ be an $m \times n$ matrix polynomial of degree $k= \eta + \epsilon + 1$. Then $\mathbb{G}_{\eta + 1}(P)$ is a vector space over $\mathbb{R}$ having dimension $$ \textnormal{dim}( \mathbb{G}_{\eta + 1}(P)) = ( \epsilon n + \eta m)^2 + (\epsilon + \eta)mn + 1. $$ Any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ may be characterized as \begin{equation} \mathcal{L}(\lambda) = {\small \left[ \begin{array}{c|c} \alpha \Sigma_{\eta ,P}(\lambda) + B_1(L_{\epsilon}(\lambda) \otimes I_n) + (L_{\eta}(\lambda)^T \otimes I_m)B_2 & (L_{\eta}(\lambda)^T \otimes I_m)C_2 \\ \hline C_1 (L_{\epsilon}(\lambda) \otimes I_n) & 0 \end{array} \right] }\label{def_blockspace_explicit} \end{equation} with some $\alpha \in \mathbb{R}$ and some matrices $B_1 \in \mathbb{R}^{( \eta + 1)m \times \epsilon n}, B_2 \in \mathbb{R}^{ \eta m \times ( \epsilon + 1)n}$, $C_1 \in \mathbb{R}^{ \epsilon n \times \epsilon n}$ and $ C_2 \in \mathbb{R}^{\eta m \times \eta m}$. \end{theorem} The dimension of $\mathbb{G}_{\eta + 1}(P)$ is just the sum of the dimensions of the constant matrices in expression (\ref{def_blockspace_explicit}) plus one for the scalar $\alpha$. 
Moreover, note that any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ of the form (\ref{def_blockspace_explicit}) can be factorized uniquely as \begin{equation} \mathcal{L}(\lambda) = \left[ \begin{array}{c|c} I_{(\eta + 1)m} & B_1 \\ \hline 0 & C_1 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Sigma_{\eta ,P}(\lambda) & L_{\eta}(\lambda)^T \otimes I_m \\ \hline L_{\epsilon}(\lambda) \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right]. \label{def_blockspace} \end{equation} Notice that this factorization is equivalent to (3.5) in \cite{DoBPSZ16}. \begin{example} \label{ex_Gspace} Let $P(\lambda) = \sum_{i=0}^6 P_i \lambda^i$ be an $m \times n$ matrix polynomial of degree $\textnormal{deg}(P)=6$ and consider the case $\eta = 3, \epsilon = 2$. According to (\ref{def_blockspace_explicit}) we may construct the following matrix pencil $$ \small{ \mathcal{L}(\lambda) = \left[ \begin{array}{ccc|ccc} \lambda P_6 + P_5 & P_4 & P_3 & 0 & -F & H \\ A & -(B+ \lambda A) & P_2 & 0 & E + \lambda F & - \lambda H \\ -P_3 & \lambda B & P_1 & D & - \lambda E & 0 \\ \lambda P_3 & 0 & P_0 & - \lambda D & 0 & 0\\ \hline C & -(G + \lambda C) & \lambda G & 0 & 0 & 0 \\ 0 & C & - \lambda C & 0 & 0 & 0 \end{array} \right] } $$ with arbitrary matrices $A, B \in \mathbb{R}^{m \times n}, C, G \in \mathbb{R}^{n \times n}$ and $D,E,F,H \in \mathbb{R}^{m \times m}$. It is not hard to see that $\mathcal{L}(\lambda) \in \mathbb{G}_{4}(P)$ since $\mathcal{L}(\lambda)$ may be expressed in the form (\ref{def_blockspace}) with $$\begin{bmatrix} B_1 \\ C_1 \end{bmatrix} = \left[ \begin{array}{cc} 0 & 0 \\ - A & 0 \\ 0 & 0 \\ 0 & 0 \\ \hline -C & G \\ 0 & -C \end{array} \right], \; \text{and} \; \begin{bmatrix} B_2 & C_2 \end{bmatrix} = \left[ \begin{array}{ccc|ccc} 0 & 0 & 0 & 0 & F & -H \\ 0 & B & 0 & 0 & E & 0 \\ P_3 & 0 & 0 & -D & 0 & 0 \end{array} \right], $$ and $\alpha=1$. 
As the next theorem will reveal, $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$ if $C$, $D, E$ and $H$ are all nonsingular. In the case of $P(\lambda)$ being square and regular, these nonsingularity conditions turn out to be necessary and sufficient for $\mathcal{L}(\lambda)$ being a strong linearization for $P(\lambda)$. Surprisingly, the choice of $A$ and $B$ does not have any effect in that regard. \end{example} The next theorem presents a quite natural linearization condition for matrix pencils in block Kronecker ansatz spaces (see also \cite[Thm. 3.8]{DoBPSZ16}). Notice that we a priori do not require $P(\lambda)$ to be regular or even square. \begin{theorem}[Linearization Condition for $\mathbb{G}_{\eta + 1}(P)$] \label{thm_lincondition} \ \\ Let $P(\lambda)$ be an $m \times n$ matrix polynomial and $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ as in (\ref{def_blockspace}). Then $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$ if $\alpha \neq 0,$ and \begin{equation}{\small \left[ \begin{array}{c|c} I_{(\eta + 1)m} & B_1 \\ \hline 0 & C_1 \end{array} \right] \in \textnormal{GL}_{(\eta + 1)m + \epsilon n}(\mathbb{R}), \; \, \text{and} \; \, \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right] \in \textnormal{GL}_{(\epsilon + 1)n + \eta m}( \mathbb{R}).} \label{equ_lincondition} \end{equation} Certainly (\ref{equ_lincondition}) is equivalent to $\textnormal{det}(C_1), \textnormal{det}(C_2) \neq 0$. 
\end{theorem} \begin{proof} Assuming the matrices \begin{equation} U = \left[ \begin{array}{c|c} I_{(\eta + 1)m} & B_1 \\ \hline 0 & C_1 \end{array} \right] \; \, \text{and} \; \, V = \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right] \label{matrices_UV} \end{equation} are nonsingular, $\mathcal{L}(\lambda)$ in (\ref{def_blockspace}) is strictly equivalent to \begin{equation} \mathcal{F}_{\alpha , \eta , P}(\lambda) := \left[ \begin{array}{c|c} \alpha \Sigma_{\eta ,P}(\lambda) & L_{\eta}(\lambda)^T \otimes I_m \\ \hline L_{\epsilon}(\lambda) \otimes I_n & 0 \end{array} \right]. \label{m_alpha} \end{equation} According to \cite[Thm. 5.2]{DopLPVD16} the matrix pencil $\mathcal{F}_{\alpha , \eta , P}(\lambda)$ is a strong linearization for $\alpha P(\lambda)$. Thus $\alpha \neq 0$ implies $\mathcal{F}_{\alpha , \eta , P}(\lambda)$ to be a strong linearization for $P(\lambda)$, so $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$ as well. \end{proof} \begin{remark} \label{rem_linequiv} Given the case of a regular $n \times n$ matrix polynomial $P(\lambda)$, the statement in Theorem \ref{thm_lincondition} becomes an equivalence. In fact, if $\mathcal{L}(\lambda)$ as in (\ref{def_blockspace}) is a strong linearization for some regular $P(\lambda)$, $\mathcal{L}(\lambda)$ is necessarily regular. This implies the matrices $U$ and $V$ to be nonsingular and the scalar $\alpha$ to be nonzero. However, for singular matrix polynomials $P(\lambda),$ (\ref{equ_lincondition}) is not necessary for $\mathcal{L}(\lambda)$ to be a strong linearization. For instance, consult \cite[Ex. 2]{DeTDM09} for an example of a strong linearization $\mathcal{L}(\lambda) \in \mathbb{G}_1(P)$ that does not satisfy (\ref{equ_lincondition}). A sufficient condition for strong linearizations in $\mathbb{G}_1(P)$ and $\mathbb{G}_k(P)$ of singular matrix polynomials $P(\lambda)$ is given in \cite[Sec. 5]{FassS16}. \end{remark} In \cite[Thm. 
4.7]{MacMMM06} and \cite[Thm. 4.4]{DeTDM09} it was shown that almost every pencil in $\mathbb{L}_1(P)$ (and $\mathbb{L}_2(P)$) is a strong linearization for the (regular or singular) square matrix polynomial $P(\lambda)$. Here, a similar statement holds for $\mathbb{G}_{\eta + 1}(P)$ and rectangular, i.e. not necessarily square matrix polynomials $P(\lambda)$. \begin{theorem}[Linearizations are Generic in $\mathbb{G}_{\eta + 1}(P)$] \label{thm_generic} \ \\ Let $P(\lambda)$ be an $m \times n$ matrix polynomial of degree $k = \epsilon + \eta + 1$. Then almost every matrix pencil in $\mathbb{G}_{\eta + 1}(P)$ is a strong linearization for $P(\lambda)$. \end{theorem} Theorem \ref{thm_generic} follows directly from Theorem \ref{thm_lincondition} since $\mathbb{R} \setminus \lbrace 0 \rbrace$, $ \textnormal{GL}_{\epsilon n}(\mathbb{R})$ and $\textnormal{GL}_{\eta m}(\mathbb{R})$ are dense subsets of $\mathbb{R}$, $\mathbb{R}^{\epsilon n \times \epsilon n}$ and $\mathbb{R}^{\eta m \times \eta m}$ respectively. Furthermore, notice that all the strong linearizations in $\mathbb{G}_{\eta + 1}(P)$ are strong block minimal bases pencils, which have also been introduced in \cite{DopLPVD16}. Using \cite[Thm. 5.2]{DopLPVD16}, we now prove the \textit{Strong Linearization Theorem} for block Kronecker ansatz spaces in the style of \cite[Thm. 4.3]{MacMMM06}. Showing the connection between the linearization property and the regularity of matrix pencils, we necessarily focus on regular (i.e. square) matrix polynomials. \begin{theorem}[Strong Linearization Theorem for $\mathbb{G}_{\eta + 1}(P)$] \label{thm_master1} \ \\ Let $P(\lambda)$ be an $n \times n$ regular matrix polynomial and $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$. Then the following statements are equivalent \begin{enumerate} \item $\mathcal{L}(\lambda)$ is a linearization for $P(\lambda)$. \item $\mathcal{L}(\lambda)$ is a regular matrix pencil. 
\item $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$. \end{enumerate} \end{theorem} \begin{proof} Since $3. \Rightarrow 1. \Rightarrow 2.$ is obvious, we only need to show $2. \Rightarrow 3.$ \\ Assume $\mathcal{L}(\lambda)$ in (\ref{def_blockspace}) to be regular. This certainly requires the nonsingularity of $U$ and $V$ as in (\ref{matrices_UV}) and consequently implies the regularity of $\mathcal{F}_{\alpha , \eta , P}(\lambda)$. Now suppose $\alpha = 0$. Then the ansatz equation (\ref{ansatzequation1}) gives $$ \mathcal{F}_{0, \eta , P}(\lambda) \big( (\Lambda_{\epsilon} \otimes I_n) \oplus I_{\eta n} \big)e_i = 0 \quad \text{and} \quad e_j^T \big( (\Lambda_{\eta}^T \otimes I_n) \oplus I_{\epsilon n} \big) \mathcal{F}_{0, \eta , P}(\lambda) = 0 $$ for any $1 \leqslant i,j \leqslant n$. This shows that $\mathcal{F}_{0, \eta ,P}(\lambda)$ cannot be regular, a contradiction. Therefore, the assumption of $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ being regular implies $\alpha \neq 0$ and thus the validity of all three conditions in Theorem \ref{thm_lincondition}. \end{proof} The next theorem shows that the eigenvector recovery for pencils in $\mathbb{G}_{\eta + 1}(P)$ is as easy as for block Kronecker pencils \cite[Section 7]{DopLPVD16}. \begin{theorem} \label{thm_eigenvectors} Let $P(\lambda)$ be an $n \times n$ regular matrix polynomial of degree $k=\epsilon + \eta + 1$ and $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ be a strong linearization for $P(\lambda)$. Then the following statements hold (with $e_i \in \mathbb{R}^k$) \begin{enumerate} \item If $u \in \mathbb{C}^{kn}$ is a right eigenvector of $\mathcal{L}(\lambda)$ with finite eigenvalue $\beta \in \mathbb{C}$, then $u^\star = (e_{\epsilon + 1}^T \otimes I_n)u$ is a right eigenvector of $P(\lambda)$ corresponding to the finite eigenvalue $\beta$. 
\item If $u \in \mathbb{C}^{kn}$ is a right eigenvector of $\mathcal{L}(\lambda)$ with eigenvalue $\infty$, then $(e_1^T \otimes I_n)u$ is a right eigenvector of $P(\lambda)$ with eigenvalue $\infty$. \item If $y \in \mathbb{C}^{kn}$ is a left eigenvector of $\mathcal{L}(\lambda)$ with finite eigenvalue $\beta \in \mathbb{C}$, then $y^\star = (e_{\eta + 1}^T \otimes I_n)y$ is a left eigenvector of $P(\lambda)$ corresponding to the finite eigenvalue $\beta$. \item If $y \in \mathbb{C}^{kn}$ is a left eigenvector of $\mathcal{L}(\lambda)$ with eigenvalue $\infty$, then $(e_1^T \otimes I_n)y$ is a left eigenvector of $P(\lambda)$ with eigenvalue $\infty$. \end{enumerate} \end{theorem} \begin{proof} Suppose $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + 1}(P)$ is given as in (\ref{def_blockspace}), i.e. $\mathcal{L}(\lambda) = U \mathcal{F}_{\alpha, \eta , P}(\lambda) V$ using the notation of (\ref{matrices_UV}) and (\ref{m_alpha}). Now assume $u \in \mathbb{C}^{kn}\backslash\{0\}$ satisfies $\mathcal{L}(\beta)u=0$ for some $\beta \in \mathbb{C}$. Then $u^\star = \tfrac{1}{\alpha} Vu$ is a right eigenvector of $\mathcal{F}_{1, \eta , P}(\lambda)$ (recall that $U$ is nonsingular, i.e. $\textnormal{null}(U) = \lbrace 0 \rbrace$). Applying \cite[Thm. 7.6]{DopLPVD16} yields that $(e_{\epsilon + 1}^T \otimes I_n)u^\star$ is a right eigenvector of $P(\lambda)$ with eigenvalue $\beta$. Now a closer look reveals $(e_{\epsilon + 1}^T \otimes I_n)u^\star = \tfrac{1}{\alpha}(e_{\epsilon + 1}^T \otimes I_n)u$ due to the form of $V$, and eigenvectors are determined only up to nonzero scalar multiples. Thus $P(\beta)(e_{\epsilon + 1}^T \otimes I_n)u = 0$. The remaining statements follow by exactly the same reasoning. \end{proof} Next, we provide a comprehensive example on block Kronecker pencils and their connection to block Kronecker ansatz spaces. 
\begin{example}[Block Kronecker Pencils] \ \\ Consider the set of matrix pencils $\mathcal{L}(\lambda)$ having the form (\ref{def_blockspace}) with $\alpha = 1$, $C_1 = I_{\epsilon n},$ and $C_2 = I_{\eta m}$, i.e., \begin{align*} \mathcal{L}(\lambda) &= \left[ \begin{array}{c|c} I_{(\eta + 1)m} & B_1 \\ \hline 0 & I_{\epsilon n} \end{array} \right] \left[ \begin{array}{c|c} \Sigma_{\eta ,P}(\lambda) & L_{\eta}(\lambda)^T \otimes I_m \\ \hline L_{\epsilon}(\lambda) \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & I_{\eta m} \end{array} \right] \\ &= \left[ \begin{array}{c|c} \Sigma_{\eta ,P}(\lambda) + B_1(L_{\epsilon}(\lambda) \otimes I_n) + (L_{\eta}(\lambda)^T \otimes I_m)B_2 & (L_{\eta}(\lambda)^T \otimes I_m) \\ \hline (L_{\epsilon}(\lambda) \otimes I_n) & 0 \end{array} \right]. \end{align*} These matrix pencils coincide with the family of $(\epsilon , n, \eta , m)$-block Kro\-necker pencils (\ref{blockKronpencil}) that are strong linearizations for $P(\lambda)$. The strong linearization property was proven in \cite[Thm. 5.2]{DopLPVD16}, which complies with Theorem \ref{thm_lincondition} since in this case $\alpha \neq 0$ and $C_1$ and $C_2$ are nonsingular. \end{example} \begin{remark} For any arbitrary $m \times n$ matrix polynomial $P(\lambda)$, all $(\epsilon , n, \eta , m)$-block Kronecker pencils are elements of $\mathbb{G}_{\eta+1}(P)$. They do not form a vector subspace, but an affine subspace of $\mathbb{G}_{\eta + 1}(P)$. \end{remark} It is stated in \cite[Sec. 4.2]{DopLPVD16} that for any Fiedler pencil $F_{\sigma}(\lambda)$ there exist two permutation matrices $\Pi_1$ and $\Pi_2$ such that $\Pi_1 F_{\sigma}(\lambda) \Pi_2$ is a block Kronecker pencil. Hence we may argue that block Kronecker ansatz spaces contain all block Kronecker pencils and - modulo permutations - all Fiedler pencils. 
Therefore, based on \cite{DopLPVD16}, we succeeded in bringing together Fiedler companion linearizations and ansatz spaces for the first time. In addition to that, it is shown in \cite{DoBPSZ16} that also the families of generalized Fiedler pencils, Fiedler pencils with repetition and generalized Fiedler pencils with repetition are - modulo permutations - elements of the block Kronecker ansatz spaces (introduced in \cite{DoBPSZ16} as the family of extended block Kronecker pencils). So, with rare exceptions, the block Kronecker ansatz spaces provide an extensive concept for the study of families of Fiedler-like pencils in combination with the ansatz space framework for the construction of linearizations known from \cite{MacMMM06}. Moreover, we were able to make the idea of ansatz spaces - which is, according to \cite{MacMMM06}, a concept valid for square matrix polynomials only - available for rectangular matrix polynomials as well. However, notice that block Kronecker ansatz spaces contain infinitely many more matrix pencils than just permuted Fiedler or block Kronecker pencils. In this regard, it is a basic fact that every finite dimensional vector space such as $\mathbb{G}_{\eta + 1}(P)$ is isomorphic to $\mathbb{R}^N$ for some $N \in \mathbb{N}_0$. Inasmuch as $\mathbb{R}^N$ features a great many analytical and topological properties, (\ref{def_blockspace}) strongly suggests defining these concepts for $\mathbb{G}_{\eta + 1}(P)$ in terms of the pre- and postmultiplied matrices and the scalar $\alpha$. Taking this point of view, we may argue that the set of $(\epsilon , n, \eta , m)$-block Kronecker pencils constitutes a connected and nowhere dense subset in $\mathbb{G}_{\eta + 1}(P)$. \section{Double Block Kronecker Ansatz Spaces $\mathbb{DG}_{\eta + 1}(P)$}\label{sec4} In this section we characterize matrix pencils that belong to two or more block Kronecker ansatz spaces simultaneously. 
Since this scheme does hardly seem promising in the case $m \neq n$, we confine ourselves to square matrix polynomials. This study is motivated by the double ansatz space $\mathbb{DL}(P)$ (\ref{DLP}). For any regular matrix polynomial $P(\lambda)$ almost all pencils in $\mathbb{DL}(P)$ are linearizations of $P(\lambda)$ \cite[Theorem 6.8]{MacMMM06}, while for singular $P(\lambda)$ none is a linearization \cite{DeTDM09}. Moreover, any matrix pencil in $\mathbb{DL}(P)$ is block-symmetric which is in general not true for pencils in double block Kronecker ansatz spaces. \begin{definition}[Double Block Kronecker Ansatz Space] \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k=\epsilon + \eta + 1$ and assume $\eta \leqslant \epsilon$. Then we define $$ \mathbb{DG}_{\eta + 1}(P) := \mathbb{G}_{\eta + 1}(P) \cap \mathbb{G}_{k - \eta}(P). $$ \end{definition} Given an $n \times n$ matrix polynomial $P(\lambda)$ of degree $k=\epsilon + \eta + 1$, w.l.o.g. we will always assume $\eta \leqslant \epsilon = k - \eta - 1$ from now. This is reasonable since $$ \begin{aligned} \mathbb{DG}_{\eta + 1}(P) &= \mathbb{G}_{\eta + 1}(P) \cap \mathbb{G}_{k - \eta}(P) = \mathbb{G}_{k - \epsilon}(P) \cap \mathbb{G}_{\epsilon + 1}(P) = \mathbb{DG}_{\epsilon + 1}(P). \end{aligned} $$ Notice further that $\eta + 1 = k - \eta$ implies $k= 2 \eta + 1$. Therefore, the special case $\mathbb{DG}_{\eta + 1}(P) = \mathbb{G}_{\eta + 1} (P) \cap \mathbb{G}_{\eta + 1}(P)$ can only occur for $P(\lambda)$ having odd degree. Consider the following motivating example. \begin{example} \label{ex_blockLspace1} Let $P(\lambda) = \sum_{i=0}^6 P_i \lambda^i$ be an $n \times n$ matrix polynomial of degree $\textnormal{deg}(P)=6$ and consider the case $\eta = 0$. 
Then \begin{equation} \mathcal{L}(\lambda) = \left[ \begin{array}{c|ccccc} \lambda P_6 + P_5 & P_4 & P_3 & P_2 & P_1 & P_0 \\ \hdashline P_4 & P_3 - \lambda P_4 & P_2 - \lambda P_3 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 \\ P_3 & P_2 - \lambda P_3 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ P_2 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ P_1 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 & 0 \\ P_0 & - \lambda P_0 & 0 & 0 & 0 & 0 \end{array} \right] \label{ex_DG} \end{equation} is an element of $\mathbb{DG}_1(P) = \mathbb{G}_1(P) \cap \mathbb{G}_6(P).$\footnote{A closer look at the block Kronecker ansatz equation reveals, that $\mathbb{DG}_{1}(P)$ coincides with the subspace of all matrix pencils having a multiple of $e_1$ as ansatz vector in $\mathbb{DL}(P).$ We restrain the study of the connection between the classical ansatz spaces $\mathbb{L}_1, \mathbb{L}_2$ and $\mathbb{DL}$ and our approach to Section \ref{sec:L1L2}.} Further, $\mathcal{L}(\lambda)$ is a block-symmetric pencil. Now consider the case $\eta = 1$ and the matrix pencil \begin{equation} \mathcal{K}(\lambda) = \left[ \begin{array}{cc|ccc:c} \lambda P_6 + P_5 & P_4 & A & 0 & -B & -I_n \\ 0 & P_3 & P_2 - \lambda A & P_1 & P_0 + \lambda B & \lambda I_n \\ \hdashline 0 & P_2 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ C & P_1 - \lambda C & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ 0 & P_0 & - \lambda P_0 & 0 & 0 & 0 \\ \hline -I_n & \lambda I_n & 0 & 0 & 0 & 0 \end{array} \right] \label{ex_DG2} \end{equation} with arbitrary $n \times n$ matrices $A,B,C$. It is readily checked that $\mathcal{K}(\lambda) \in \mathbb{DG}_{2}(P)$, i.e. $\mathcal{K}(\lambda)$ is an element of $\mathbb{G}_2(P)$ and $\mathbb{G}_5(P)$ simultaneously. Anyhow, it is obvious that $\mathcal{K}(\lambda)$ is not block-symmetric. 
\end{example} Example \ref{ex_blockLspace1} shows that double block Kronecker ansatz spaces $\mathbb{DG}_{\eta + 1}(P)$ need not contain exclusively block-symmetric pencils. Albeit, they are never empty and the following theorem gives a comprehensive characterization of these spaces. To this end, we introduce a truncated square version of $\Sigma_{\eta,P}(\lambda)$, namely $$ \Sigma_{\eta,P}^{\mathbb{DG}}(\lambda) = \begin{bmatrix} \lambda P_k + P_{k-1} & P_{k-2} &\cdots & P_{\epsilon} \\ & & & \vdots \\ & & & P_{\epsilon - \eta} \end{bmatrix} \in \mathbb{R}[\lambda]^{(\eta + 1)n \times (\eta + 1)n} $$ and set $ \Pi_{\eta , P}^{\mathbb{DG}}(\lambda) := \big[ \, \Sigma_{\eta , P}^{\mathbb{DG}}(\lambda) \; \mathcal{R}_{\eta,P} \, \big] \in \mathbb{R}[\lambda]^{(\eta + 1)n \times (\epsilon + 1)n}$ with \begin{equation} \mathcal{R}_{\eta,P} = \left[\begin{array}{c} 0_{\eta n \times (\epsilon - \eta)n} \\ \hline \begin{array}{ccc} P_{\epsilon - \eta - 1} & \cdots & P_0 \end{array} \end{array} \right] \in \mathbb{R}^{(\eta + 1)n \times (\epsilon - \eta)n}. \label{Rmatrix} \end{equation} Moreover, for $\epsilon \geqslant \eta$ we define the block Hankel matrix $$ \mathcal{H}_{\epsilon - \eta}(P) = \begin{bmatrix} -P_{\epsilon - \eta -1} & \cdots & -P_1 & -P_0 \\ \vdots & \iddots & \iddots & \\[0.15cm] -P_1 & -P_0 & & \\[0.21cm] -P_0 & & & \end{bmatrix} \in \mathbb{R}^{(\epsilon - \eta)n \times (\epsilon - \eta)n}. $$ Notice that this block Hankel structure already showed up in the construction of block-symmetric linearizations in \cite{HigMMT06}. We obtain the following theorem. \begin{theorem}[Characterization of $\mathbb{DG}_{\eta + 1}(P)$] \label{thm_char_DG} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k = \epsilon + \eta + 1$ and assume $\eta \leqslant \epsilon$. Then $\mathbb{DG}_{\eta + 1}(P)$ is a vector space over $\mathbb{R}$ having dimension \begin{equation} \textnormal{dim} \big( \mathbb{DG}_{\eta + 1}(P) \big) = 2k \eta n^2 + 1. 
\label{dimformula_DG} \end{equation} Any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$ may be characterized as \begin{equation} \mathcal{L}(\lambda)= \left[ \begin{array}{c|c|c} I_{(\eta + 1)n} & B_{11} & 0_{(\eta + 1)n \times (\epsilon - \eta)n} \\[0.1cm] \hline 0 & C_{11} & \alpha \mathcal{H}_{\epsilon - \eta}(P) \\ \hline 0 & C_{21} & 0_{\eta n \times (\epsilon - \eta )n} \end{array} \right] \left[ \begin{array}{c|c} \alpha \Pi_{\eta , P}^{\mathbb{DG}}(\lambda) & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\[0.1cm] \hline B_{2} & C_{2} \end{array} \right] \label{doubleansatz_expr1} \end{equation} with some $\alpha \in \mathbb{R}$ and some matrices $B_{11} \in \mathbb{R}^{(\eta + 1)n \times \eta n}$, $C_{11} \in \mathbb{R}^{(\epsilon - \eta)n \times \eta n}$, $C_{21} \in \mathbb{R}^{\eta n \times \eta n}$, $B_2 \in \mathbb{R}^{\eta n \times (\epsilon + 1)n}$ and $C_2 \in \mathbb{R}^{\eta n \times \eta n}$. Moreover, $\mathbb{DG}_{\eta + 1}(P)$ is a proper subspace of both $\mathbb{G}_{\eta + 1}(P)$ and $\mathbb{G}_{k - \eta}(P)$. \end{theorem} \begin{proof} Assume $P(\lambda)$ to be an $n \times n$ matrix polynomial of degree $k=\epsilon + \eta + 1$ with $\eta \leqslant \epsilon$ and $\mathcal{L}^\star(\lambda)$ to be a $kn \times kn$ matrix pencil in $\mathbb{DG}_{\eta + 1}(P)$. Now consider $\mathcal{L}^\star(\lambda)$ partitioned as a $3\times 3 $ block matrix as indicated in Figure \ref{fig1} \begin{figure} \centering \caption{$\mathcal{L}^\star(\lambda)$ partitioned as a $3 \times 3$ block matrix.} \label{fig1} \end{figure} as well as in its natural partitioning as a matrix pencil in $\mathbb{G}_{\eta+1}(P)$ in (\ref{ansatzequation2}). The upper-left block $\mathcal{L}^\star_{11}(\lambda)$ is rectangular of size $(\eta+1)n\times (\epsilon+1)n,$ this corresponds to the $(1,1)$ and the $(1,2)$ blocks in the $3\times 3$ partitioning in Figure \ref{fig1}. 
Clearly, the $(1,3)$ block corresponds to $\mathcal{L}^\star_{12}(\lambda),$ that is equal to $(L_\eta(\lambda)^T\otimes I_n)C_2$ for a matrix $C_2 \in \mathbb{R}^{\eta n \times \eta n}$. Moreover, from (\ref{def_blockspace_explicit}) it is obvious that $\mathcal{L}^\star_{22}(\lambda) \in \mathbb{R}^{\epsilon n \times \eta n}$ is zero, thus the blocks $(2,3)$ and $(3,3)$ in Figure \ref{fig1} are zero. Now consider $\mathcal{L}^\star(\lambda)$ in its natural partitioning as a matrix pencil in $\mathbb{G}_{k-\eta}(P).$ Then the block $\mathcal{L}^\star_{11}(\lambda)$ is rectangular of size $(\epsilon+1)n\times (\eta+1)n,$ this corresponds to the $(1,1)$ and the $(2,1)$ blocks in the $3\times 3$ partitioning in Figure \ref{fig1}. Obviously, the $(3,1)$ block corresponds here to $\mathcal{L}^\star_{21}(\lambda),$ which is given as $C_{21}(L_\eta(\lambda)\otimes I_n)$ for a matrix $C_{21} \in \mathbb{R}^{\eta n \times \eta n}.$ As before, the block $\mathcal{L}^\star_{22}(\lambda) \in \mathbb{R}^{\eta n \times \epsilon n}$ is zero, hence the blocks $(3,2)$ and $(3,3)$ in Figure \ref{fig1} are zero. Thus, the fact of $\mathcal{L}^\star(\lambda)$ being an element of $\mathbb{G}_{\eta + 1}(P)$ and of $\mathbb{G}_{k - \eta}(P)$ a priori implies the unalterable zero structure of $\mathcal{L}^\star(\lambda)$ in the blocks $(2,3), (3,2)$ and $(3,3)$ of the $3\times 3$ partitioning as indicated in grey in Figure \ref{fig1}. In summary, we have identified all of the eight bordering blocks in Figure \ref{fig1}. 
The remaining $(2,2)$-block in the $3\times 3$ partitioning, termed \enquote{core part} $C( \mathcal{L}^\star)$ of $\mathcal{L}^\star(\lambda)$ in the following, is square of size $(\epsilon-\eta)n\times (\epsilon-\eta)n.$ Our next step is to construct a pencil $\mathcal{L}(\lambda)$ of the form \[ \mathcal{L}(\lambda) = \left[ \begin{array}{c|c|c} I_{(\eta + 1)n} & B_{11} & 0 \\[0.1cm] \hline 0 & C_{11} & C_{12} \\ \hline 0 & C_{21} & 0 \end{array} \right] \left[ \begin{array}{c|c} \alpha [\, \Sigma_{\eta, P}^{\mathbb{DG}}(\lambda) \; \, 0 \, ] & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right], \] such that the bordering blocks in $\Delta \mathcal{L}^\star(\lambda) := \mathcal{L}^\star(\lambda) - \mathcal{L}(\lambda)$ get almost entirely eliminated. In fact, we may achieve that $\Delta \mathcal{L}^\star(\lambda)$ has the form indicated in Figure \ref{fig2} by making the appropriate choices of $C_{21}, C_2 \in \mathbb{R}^{\eta n \times \eta n}$ as described above and finding suitable matrices $B_{11} \in \mathbb{R}^{(\eta + 1)n \times \eta n}, C_{11} \in \mathbb{R}^{(\epsilon - \eta)n \times \eta n},$ $C_{12} \in \mathbb{R}^{(\epsilon-\eta)n \times (\epsilon-\eta)n}$ and $B_2 \in \mathbb{R}^{\eta n \times (\epsilon + 1)n}.$ That the core part of $\Delta \mathcal{L}^\star(\lambda)$ is equal to the core part $C(\mathcal{L}^\star)$ of $\mathcal{L}^\star(\lambda)$ is achieved by setting the $C_{12}$-block of $\mathcal{L}(\lambda)$ as $0_{(\epsilon-\eta)n}.$ Furthermore, there is a leftover matrix $h^\star \in \mathbb{R}^{n \times (\epsilon - \eta)n}$ in the block (1,2) that can not be eliminated by $B_2$. 
\begin{figure} \centering \caption{$\Delta \mathcal{L}^\star(\lambda)$.} \label{fig2} \end{figure} Now consider the natural $2 \times 2$ block partition of $\Delta \mathcal{L}^\star(\lambda)$ as an element of $\mathbb{G}_{\eta+1}(P)$ and in particular $\Delta \mathcal{L}^\star_{11}(\lambda)$ (which corresponds to the $(1,1)$ and $(1,2)$ block in Figure \ref{fig2}). Due to the linearity of $\Phi$ we have $$ \begin{aligned} \Phi\big( \Delta \mathcal{L}^\star_{11}(\lambda) \big) &= \Phi( \mathcal{L}^\star_{11}(\lambda)) - \alpha \Phi([\, \Sigma_{\eta, P}^{\mathbb{DG}}(\lambda) \; 0_{(\eta + 1)n \times (\epsilon -\eta)n} \, ]) \\ &= \alpha P(\lambda) - \alpha \bigg( \sum_{i= \epsilon - \eta}^k P_i \lambda^i \bigg) = \alpha \sum_{i=0}^{\epsilon - \eta - 1} P_{i} \lambda^{i}. \end{aligned} $$ Considering once again Figure \ref{fig2}, this immediately implies $$h^\star = [ \, \alpha P_{\epsilon - \eta - 1} \; \cdots \; \alpha P_0 \,].$$ Therefore, if we had chosen $\alpha \Pi_{\eta, P}^{\mathbb{DG}}(\lambda)$ instead of $\alpha [\, \Sigma_{\eta, P}^{\mathbb{DG}}(\lambda) \; 0_{(\eta + 1)n \times (\epsilon - \eta)n} \, ]$, $h^\star$ would have also been deleted in $\Delta \mathcal{L}^\star(\lambda)$ as desired. Now, since the $(\epsilon - \eta)n \times (\epsilon - \eta)n$ core part $C(\mathcal{L}^\star)$ of $\mathcal{L}^\star(\lambda)$ has to be reproducible in both block Kronecker ansatz spaces, the choice $h^\star = [ \, \alpha P_{\epsilon - \eta - 1} \; \cdots \; \alpha P_0 \,]$ unexpectedly determines $C(\mathcal{L}^\star)$ completely. 
The unique possible form for $C( \mathcal{L}^\star)$ is $$ C( \mathcal{L}^\star) = \alpha \begin{bmatrix} P_{\mu - 1} -\lambda P_{\mu} & P_{\mu - 2} - \lambda P_{\mu - 1} & \cdots & P_0 - \lambda P_1 & - \lambda P_0 \\ P_{\mu - 2} - \lambda P_{\mu - 1} & P_{\mu - 3} - \lambda P_{\mu - 2} & \iddots & \iddots & \\ \vdots & \iddots & \iddots & & \\ P_0 - \lambda P_1 & - \lambda P_0 & & & \\ - \lambda P_0 & & & & \end{bmatrix} $$ where we have set $\mu := \epsilon - \eta - 1$ for abbreviation. Exactly this matrix pencil is obtained by setting $C_{12} = \alpha \mathcal{H}_{\epsilon - \eta}(P)$. Therefore, we have shown that $\mathcal{L}^\star(\lambda)$ may be expressed as $$ \mathcal{L}^\star(\lambda)=\left[ \begin{array}{c|c|c} I_{(\eta + 1)n} & B_{11} & 0 \\ \hline 0 & C_{11} & \alpha \mathcal{H}_{\epsilon - \eta}(P) \\ \hline 0 & C_{21} & 0 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Pi_{\eta , P}^{\mathbb{DG}}(\lambda) & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right] $$ which proves the statement. \end{proof} \begin{corollary}[Non-Emptiness of $\mathbb{DG}_{\eta + 1}(P)$] \label{master2} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k= \eta + \epsilon + 1$ and assume $\eta \leqslant \epsilon$. Then $$ \mathbb{DG}_{\eta + 1}(P) \neq \emptyset. $$ \end{corollary} Recall the first case considered in Example \ref{ex_blockLspace1}. Note that Theorem \ref{thm_char_DG} shows that $\mathcal{L}(\lambda)$ as in (\ref{ex_DG}) is - modulo scalar multiplication - the only matrix pencil in $\mathbb{DG}_1(P)$ since we have $\textnormal{dim}( \mathbb{DG}_1(P)) = 1$ according to (\ref{dimformula_DG}). 
Thus $\mathbb{DG}_{1}(P)$ consists entirely of block-symmetric pencils.\footnote{This is not surprising since $\mathbb{DG}_1(P)$ coincides with the subset of matrix pencils having a multiple of $e_1$ as ansatz vector in $\mathbb{DL}(P)$. The vector space $\mathbb{DL}(P)$ contains entirely block-symmetric pencils. This was shown in \cite{HigMMT06}.} Regarding linearizations, the following fact can immediately be derived from Theorem \ref{thm_lincondition} and Theorem \ref{thm_master1} (see also Remark \ref{rem_linequiv}). \begin{theorem}[Linearization Condition for $\mathbb{DG}_{\eta + 1}(P)$] \label{master4} \ \\ Let $P(\lambda)$ be a square and regular matrix polynomial of degree $k= \eta + \epsilon + 1$. Let $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$ be given in the form (\ref{doubleansatz_expr1}). Assume $\epsilon \neq \eta$. Then the following statements are equivalent: \begin{enumerate} \item $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$. \item $P_0 \in \textnormal{GL}_n( \mathbb{R}), C_{21} \in \textnormal{GL}_{\eta n}(\mathbb{R}), C_2 \in \textnormal{GL}_{\eta n}(\mathbb{R})$ and $\alpha \in \mathbb{R} \setminus \lbrace 0 \rbrace$. \end{enumerate} \end{theorem} In the case $\epsilon = \eta$ the equivalence in Theorem \ref{master4} holds without the condition $P_0 \in \textnormal{GL}_n( \mathbb{R})$ in the second statement (due to the disappearance of the $\mathcal{H}$-block). In this case the implication $2. \Rightarrow 1.$ holds also for singular matrix polynomials. Moreover, note that the second equivalence in Theorem \ref{master4} is actually just a correspondingly adjusted version of Theorem \ref{thm_lincondition} that takes into account the special structure of pencils in $\mathbb{DG}_{\eta + 1}(P)$ (see (\ref{doubleansatz_expr1})). In particular, the condition $P_0 \in \textnormal{GL}_n( \mathbb{R})$ reflects the nonsingularity of $\mathcal{H}_{\epsilon - \eta}(P)$. 
\begin{remark} Theorem \ref{master4}, in the form given above, can not be stated for singular matrix polynomials $P(\lambda).$ The second statement will never hold for singular $P(\lambda)$ since these always have a singular trailing coefficient $P_0$. This does a priori not mean that there can not be any linearizations for $P(\lambda)$ in this case, i.e. $1. \Rightarrow 2.$ certainly does not hold for singular matrix polynomials (see Remark \ref{rem_linequiv} and the reference therein). \end{remark} \begin{example}[Block Kronecker Pencils] Notice that a pure block Kronecker pencil (\ref{blockKronpencil}) can never be an element of a double block Kronecker ansatz space $$\mathbb{DG}_{\eta + 1}(P) = \mathbb{G}_{\eta + 1}(P) \cap \mathbb{G}_{k - \eta}(P)$$ for any matrix polynomial $P(\lambda)$ unless $\eta + 1 = k - \eta$. Figuratively speaking, we need some connection between $\mathbb{G}_{\eta + 1}(P)$ and $\mathbb{G}_{k - \eta}(P)$ to make a pencil $\mathcal{L}(\lambda)$ an element of both spaces. The core part $$ C( \mathcal{L}) = \begin{bmatrix} P_{\mu - 1} - \lambda P_{\mu} & P_{\mu - 2} - \lambda P_{\mu - 1} & \cdots & P_0 - \lambda P_1 & - \lambda P_0 \\ P_{\mu - 2} - \lambda P_{\mu - 1} & P_{\mu - 3} - \lambda P_{\mu - 2} & \iddots & \iddots & \\ \vdots & \iddots & \iddots & & \\ P_0 - \lambda P_1 & - \lambda P_0 & & & \\ - \lambda P_0 & & & & \end{bmatrix} $$ with $\mu := \epsilon - \eta - 1$ takes on this task. Modulo a scalar multiplication, every pencil in $\mathbb{DG}_{\eta + 1}(P)$ has the same core part, so it does essentially not depend on the specific pencil but on the matrix polynomial $P(\lambda)$. Moreover, $C( \mathcal{L})$ is block-symmetric. This block-symmetry turns out to be an important property of pencils in double block Kronecker ansatz spaces and is further studied in the next section. Notice that, given the case $\eta + 1 = k - \eta$, the core part vanishes entirely and no further restrictions remain for $\mathbb{DG}_{\eta + 1}(P)$. 
Only in this situation do we obtain pure block Kronecker pencils. \end{example} Consider once again Theorem \ref{master4}. Compliance with the unavoidable condition $P_0 \in \textnormal{GL}_n( \mathbb{R})$ depends exclusively on the matrix polynomial $P(\lambda)$ and holds if and only if zero is not an eigenvalue of $P(\lambda)$. On the other hand, the conditions $C_{21}, C_2 \in \textnormal{GL}_{\eta n}(\mathbb{R})$ are satisfied for almost every matrix in $\mathbb{R}^{\eta n \times \eta n}$. Since the implication $2. \Rightarrow 1.$ in Theorem \ref{master4} holds without the assumption of regularity (according to Theorem \ref{thm_lincondition}), we obtain the following general density property. \begin{corollary}[Linearizations are Generic in $\mathbb{DG}_{\eta + 1}(P)$] \label{generic1} \ \\ Let $P(\lambda)$ be a square matrix polynomial and assume zero is not an eigenvalue of $P(\lambda)$. Then almost every matrix pencil in $\mathbb{DG}_{\eta + 1}(P)$ is a strong linearization for $P(\lambda)$. \end{corollary} \begin{remark} Assume $\eta = 0$ and consider $\mathbb{DG}_1(P)$. Then Theorem \ref{master4} reduces to the Eigenvalue Exclusion Theorem (see \cite[Thm. 6.7]{MacMMM06}) which is a powerful tool in the study of the space $\mathbb{DL}(P)$. It states, in this particular case, that $\mathcal{L}(\lambda) \in \mathbb{DG}_1(P)$ is a strong linearization for $P(\lambda)$ if and only if no root of the $v$-polynomial $$p( \lambda ; \alpha e_1) = \alpha \lambda^{k-1}$$ (see \cite[Def. 6.1]{MacMMM06}) is an eigenvalue of $P(\lambda)$. Since $0$ is the only root of $p( \lambda ; \alpha e_1)$ this means that $P(0) = P_0$ has to be nonsingular, i.e. $P_0 \in \textnormal{GL}_n( \mathbb{R})$. Moreover, because the matrices $C_{21}$ and $C_2$ vanish completely (see (\ref{ex_DG}) in Example \ref{ex_blockLspace1}) this is the only condition required for $\mathcal{L}(\lambda) \in \mathbb{DG}_1(P)$ to be a strong linearization for $P(\lambda)$. 
\end{remark} \subsection{The Superpartition Principle} Although double block Kronecker ansatz spaces usually do not contain solely block-symmetric pencils, they possess a remarkable feature that we call \enquote{superpartition property}. This property was also recognized by the authors of \cite{DoBPSZ16} and mentioned in their Remark 3.3. To its motivation, consider the following example. \begin{example} \label{ex_blockspace} Let $P(\lambda) = \sum_{i=0}^6 P_i \lambda^i$ be an $n \times n$ matrix polynomial of degree $\textnormal{deg}(P)=6$. Consider as in Example \ref{ex_blockLspace1} the case $\eta = 1$ ($\epsilon = 4$) and the corresponding matrix pencil $\mathcal{K}(\lambda)$ $$ \left[ \begin{array}{cc|ccc:c} \lambda P_6 + P_5 & P_4 & A & 0 & -B & -I_n \\ 0 & P_3 & P_2 - \lambda A & P_1 & P_0 + \lambda B & \lambda I_n \\ \hdashline 0 & P_2 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ C & P_1 - \lambda C & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ 0 & P_0 & - \lambda P_0 & 0 & 0 & 0 \\ \hline -I_n & \lambda I_n & 0 & 0 & 0 & 0 \end{array} \right]. $$ As already discussed, $\mathcal{K}(\lambda) \in \mathbb{DG}_{2}(P) = \mathbb{G}_2(P) \cap \mathbb{G}_5(P).$ Now consider $\mathcal{K}(\lambda)$ in the slightly modified partitioned form \begin{equation} \left[ \begin{array}{ccc|c:cc} \lambda P_6 + P_5 & P_4 & A & 0 & -B & -I_n \\ 0 & P_3 & P_2 - \lambda A & P_1 & P_0 + \lambda B & \lambda I_n \\ 0 & P_2 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ \hdashline C & P_1 - \lambda C & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ \hline 0 & P_0 & - \lambda P_0 & 0 & 0 & 0 \\ -I_n & \lambda I_n & 0 & 0 & 0 & 0 \end{array} \right]. \label{example_DG2} \end{equation} It is readily checked, that $\mathcal{K}(\lambda)$ partitioned as in (\ref{example_DG2}) may alternatively be taken as an element of $\mathbb{G}_3(P)$ and of $\mathbb{G}_4(P)$ (e.g. $\eta = 2$, $\epsilon =3$). In other words, $\mathcal{K}(\lambda) \in \mathbb{DG}_3(P)$. 
\end{example} The next theorem states that the phenomenon highlighted in Example \ref{ex_blockspace} always holds (see also \cite[Thm. 3.10]{DoBPSZ16}). The main reason behind this fact is easily seen to be the block-symmetric core part of pencils in double block Kronecker ansatz spaces. \begin{theorem}[Superpartition Property of $\mathbb{DG}_{\eta + 1}(P)$] \label{thm_interpolation} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k=\epsilon + \eta + 1$ and assume \\$\eta \leqslant \epsilon$. Then $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$ implies that $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + i}(P)$ for all $i = 1, 2, \ldots , k - 2 \eta.$ \end{theorem} For ease of notation in the proof of this theorem we introduce the following partitioning of $\mathcal{L}(\lambda)$ \[\mathcal{L}(\lambda) = \left[ \begin{array}{c|c} \mathcal{L}_{11}^{(i)} (\lambda) & \mathcal{L}_{12}^{(i)}(\lambda) \\ \hline \mathcal{L}_{21}^{(i)}(\lambda) & \mathcal{L}_{22}^{(i)}(\lambda) \end{array} \right], \quad \mathcal{L}_{11}^{(i)} (\lambda) \in \mathbb{R}^{(\widetilde{\eta}+1)n \times (\widetilde{\epsilon}+1)n}, ~~\widetilde{\eta} = \eta +i, \widetilde{\epsilon} = \epsilon -i. \] The condition $k = \eta + \epsilon +1 = \widetilde{\eta} + \widetilde{\epsilon}+1$ has to hold. For $i=0$ this is the natural partition (\ref{ansatzequation2}) considered so far; in particular, $\mathcal{L}_{11}^{(0)} (\lambda) = \mathcal{L}_{11} (\lambda).$ Increasing $i$ by one, the upper-left $(1,1)$-block of $\mathcal{L}(\lambda)$ is increased by one block row and decreased by one block column. \begin{remark} Due to the assumption $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$, it suffices to show that $\Phi(\mathcal{L}_{11}^{(i)}(\lambda)) = \alpha P(\lambda)$ for all $i=1, \ldots , k - 2 \eta$ holds in order to prove Theorem \ref{thm_interpolation}. 
To see this, consider exemplarily a matrix polynomial $P(\lambda) = \sum_{i=0}^7 P_i \lambda^i$ of degree $\textnormal{deg}(P)=7$ with $\eta = 1$. According to Theorem \ref{thm_char_DG} any pencil $\mathcal{L}(\lambda) \in \mathbb{DG}_{2}(P)$ schematically has the form \begin{center} \begin{tikzpicture}[scale=0.6] \draw[line width=0.05mm] (0,0) grid (7,7); \node at (6.5,0.5) {0}; \node at (5.5,0.5) {0}; \node at (4.5,0.5) {0}; \node at (3.5,0.5) {0}; \node at (2.5,0.5) {0}; \node at (6.5,1.5) {0}; \node at (5.5,1.5) {0}; \node at (4.5,1.5) {0}; \node at (3.5,1.5) {0}; \node at (6.5,2.5) {0}; \node at (5.5,2.5) {0}; \node at (4.5,2.5) {0}; \node at (6.5,3.5) {0}; \node at (5.5,3.5) {0}; \node at (6.5,4.5) {0}; \node at (3.5,3.5) {core part}; \draw[thick] (0,5) -- (7,5); \draw[thick] (6,0) -- (6,7); \draw[thick] (2,0) -- (2,7); \draw[thick] (0,1) -- (7,1); \fill[black!30, opacity=0.3] (0,7) rectangle (6,5); \fill[black!30, opacity=0.3] (6,0) rectangle (7,5); \fill[black!30, opacity=0.3] (0,5) rectangle (2,1); \fill[black!30, opacity=0.3] (2,1) rectangle (6,0); \end{tikzpicture} \end{center} with the indicated unalterable zero-structure and the $3 \times 3$ partitioning as in Figure 1 and 2. 
The following sketches indicate the natural partitioning (\ref{ansatzequation2}) of pencils in the block Kronecker ansatz spaces $\mathbb{DG}_{\kappa}(P)$, $\kappa = 3,4,5,6$ applied to the pencil $\mathcal{L}(\lambda)$: \begin{center} \begin{tikzpicture}[scale=0.35] \draw[line width=0.05mm] (0,0) grid (7,7); \node at (6.5,0.5) {0}; \node at (5.5,0.5) {0}; \node at (4.5,0.5) {0}; \node at (3.5,0.5) {0}; \node at (2.5,0.5) {0}; \node at (6.5,1.5) {0}; \node at (5.5,1.5) {0}; \node at (4.5,1.5) {0}; \node at (3.5,1.5) {0}; \node at (6.5,2.5) {0}; \node at (5.5,2.5) {0}; \node at (4.5,2.5) {0}; \node at (6.5,3.5) {0}; \node at (5.5,3.5) {0}; \node at (6.5,4.5) {0}; \draw[thick] (0,4) -- (7,4); \draw[thick] (5,0) -- (5,7); \fill[black!30, opacity=0.3] (0,7) rectangle (5,4); \fill[black!30, opacity=0.3] (5,4) rectangle (7,0); \node at (8.5,3.5) {$\rightsquigarrow$}; \end{tikzpicture} \begin{tikzpicture}[scale=0.35] \draw[line width=0.05mm] (0,0) grid (7,7); \node at (6.5,0.5) {0}; \node at (5.5,0.5) {0}; \node at (4.5,0.5) {0}; \node at (3.5,0.5) {0}; \node at (2.5,0.5) {0}; \node at (6.5,1.5) {0}; \node at (5.5,1.5) {0}; \node at (4.5,1.5) {0}; \node at (3.5,1.5) {0}; \node at (6.5,2.5) {0}; \node at (5.5,2.5) {0}; \node at (4.5,2.5) {0}; \node at (6.5,3.5) {0}; \node at (5.5,3.5) {0}; \node at (6.5,4.5) {0}; \draw[thick] (0,3) -- (7,3); \draw[thick] (4,0) -- (4,7); \fill[black!30, opacity=0.3] (0,7) rectangle (4,3); \fill[black!30, opacity=0.3] (4,3) rectangle (7,0); \node at (8.5,3.5) {$\rightsquigarrow$}; \end{tikzpicture} \begin{tikzpicture}[scale=0.35] \draw[line width=0.05mm] (0,0) grid (7,7); \node at (6.5,0.5) {0}; \node at (5.5,0.5) {0}; \node at (4.5,0.5) {0}; \node at (3.5,0.5) {0}; \node at (2.5,0.5) {0}; \node at (6.5,1.5) {0}; \node at (5.5,1.5) {0}; \node at (4.5,1.5) {0}; \node at (3.5,1.5) {0}; \node at (6.5,2.5) {0}; \node at (5.5,2.5) {0}; \node at (4.5,2.5) {0}; \node at (6.5,3.5) {0}; \node at (5.5,3.5) {0}; \node at (6.5,4.5) {0}; \draw[thick] 
(0,2) -- (7,2); \draw[thick] (3,0) -- (3,7); \fill[black!30, opacity=0.3] (0,7) rectangle (3,2); \fill[black!30, opacity=0.3] (3,2) rectangle (7,0); \node at (8.5,3.5) {$\rightsquigarrow$}; \end{tikzpicture} \begin{tikzpicture}[scale=0.35] \draw[line width=0.05mm] (0,0) grid (7,7); \node at (6.5,0.5) {0}; \node at (5.5,0.5) {0}; \node at (4.5,0.5) {0}; \node at (3.5,0.5) {0}; \node at (2.5,0.5) {0}; \node at (6.5,1.5) {0}; \node at (5.5,1.5) {0}; \node at (4.5,1.5) {0}; \node at (3.5,1.5) {0}; \node at (6.5,2.5) {0}; \node at (5.5,2.5) {0}; \node at (4.5,2.5) {0}; \node at (6.5,3.5) {0}; \node at (5.5,3.5) {0}; \node at (6.5,4.5) {0}; \draw[thick] (0,1) -- (7,1); \draw[thick] (2,0) -- (2,7); \fill[black!30, opacity=0.3] (0,7) rectangle (2,1); \fill[black!30, opacity=0.3] (2,1) rectangle (7,0); \end{tikzpicture} \end{center} This example shows, that the zero-structure of a pencil in $\mathbb{DG}_2(P)$ is exactly of the form that it covers all the $(2,2)$-zero blocks of pencils in $\mathbb{DG}_{\kappa}(P)$ with $2 \leq \kappa \leq 6$. Moreover, due to the special construction of the core part $C( \mathcal{L})$, the (1,2) and (2,1)-corner blocks as well as the upper-left (1,1)-block in the sketches above are always reproducible in every ansatz space $\mathbb{DG}_{\kappa}(P)$ for $2 \leq \kappa \leq 6$. Since the situation is exactly the same for other degrees of $P(\lambda)$ we only need to show that $\Phi( \mathcal{L}_{11}^{(i)}(\lambda)) = \alpha P(\lambda)$ holds for all $i=1, \ldots , k - 2 \eta$ to prove Theorem \ref{thm_interpolation}. That the latter holds is once more a consequence of the form of $C( \mathcal{L})$. 
\end{remark} \begin{proof}[Proof (Theorem \ref{thm_interpolation})] First of all, according to (\ref{doubleansatz_expr1}), $\mathcal{L}_{11}(\lambda)$ may be expressed as $$ \mathcal{L}_{11}(\lambda) = \big[ \, B_{11} \; 0 \; \big]( L_{\epsilon} \otimes I_n) + (L_{\eta}^T \otimes I_n) \big[ \, B_{21} \; B_{22} \, \big] + \big[ \; \Sigma_{\eta, P}^{\mathbb{DG}} \; \mathcal{R}_{\eta,P} \; \big]$$ with $B_2 = [ \, B_{21} \; B_{22} \, ], B_{21} \in \mathbb{R}^{\eta n \times (\eta + 1)n}$ and $\mathcal{R}_{\eta , P}$ as in (\ref{Rmatrix}). Then, we obtain that $\mathcal{L}_{11}^{(i)}(\lambda) $ may be expressed as \footnotesize{ $$ {\mathcal{L}_{11}^{(i)}(\lambda) = \left[ \begin{array}{c|c} \begin{array}{c} B_{11} \\ \hline C_{1,i} \end{array} & 0 \end{array} \right] (L_{\widetilde{\epsilon}} \otimes I_n) + (L_{\widetilde{\eta}}^T \otimes I_n) \left[ \begin{array}{c|c} B_{21} & B_{22,i} \\ \hline 0 & \alpha \mathcal{H}_{\epsilon - \eta}^{(i)}(P) \end{array} \right] + \alpha \Omega_{\eta + i,P}(\lambda)} $$ } \normalsize with the $(\widetilde{\eta}+1)n \times (\widetilde{\epsilon}+1)n$ matrix pencil $\Omega_{\eta + i,P}(\lambda)$ \begin{equation} \Omega_{\eta + i,P}(\lambda) = \left[ \begin{array}{ccc|ccc} \lambda P_k + P_{k-1} & \cdots & P_{\epsilon} & & & \\ & & \vdots & & & \\ & & P_{\epsilon - \eta} & & & \\ \hline & & P_{\epsilon - \eta - 1} & & & \\ & & \vdots & & & \\ & & P_{\epsilon - \eta - i} & P_{\epsilon - \eta - i - 1} & \cdots & P_0 \end{array} \right]. \label{matrix_Omega} \end{equation} Here, $\mathcal{H}_{\epsilon - \eta}^{(i)}(P)$ denotes the upper left $in \times (\epsilon - \eta - i)n$ submatrix of $\mathcal{H}_{\epsilon - \eta}(P)$, $C_{1,i}$ the first $in$ rows of $C_{1}$, i.e. $C_{1,i} \in \mathbb{R}^{in \times (\epsilon - \eta)n}$, and $B_{22,i}$ the matrix $B_{22}$ missing the last $in$ columns, i.e. $B_{22,i} \in \mathbb{R}^{\eta n \times (\epsilon - \eta - i)n}$. 
Now, since $\Phi(\Omega_{\eta + i,P}(\lambda)) = P(\lambda)$ holds we obtain $\Phi( \mathcal{L}_{11}^{(i)}(\lambda)) = \alpha P(\lambda)$. \end{proof} \begin{remark} According to Example \ref{ex_blockspace} it is not surprising that Theorem \ref{thm_interpolation} holds. The property of a matrix pencil $\mathcal{L}(\lambda)$ being an element of $\mathbb{DG}_{\eta + 1}(P)$ imposes several restrictions on the form of $\mathcal{L}(\lambda)$. In particular, whereas the bordering blocks in the $3\times 3$ partitioning as in Figure \ref{fig1} underlie the condition of having no contribution in one space and being completely reproducible in the other (see Theorem \ref{thm_char_DG}), the core part of the pencil has to be adequate for both spaces, $\mathbb{G}_{\eta + 1}(P)$ and $\mathbb{G}_{k - \eta}(P)$. This fortunate circumstance determines the (block-symmetric) form of $C(\mathcal{L})$ completely as depicted in the picture below and, no matter how $\eta$ and $\epsilon$ are chosen, guarantees that $\Phi( \mathcal{L}_{11}^{(i)}(\lambda)) = \alpha P(\lambda)$ always holds. 
\vspace*{0.1cm} \begin{center} \footnotesize{ \begin{tikzpicture}[scale=0.5] \matrix[matrix of math nodes, left delimiter={[}, right delimiter={]}]{ \node (P11) {\textcolor{black}{P_{\mu + 1}}}; & P_{\mu} & P_{\mu - 1} & P_{\mu - 2} & \cdots & P_1 & \node (P1k) {P_0}; \\ \node (P21) {\textcolor{black}{P_{\mu}}}; & \textcolor{black}{P_{\mu - 1}} - \lambda P_{\mu} & \textcolor{black}{P_{\mu - 2}} - \lambda P_{\mu - 1} & \textcolor{black}{P_{\mu - 3}} - \lambda P_{\mu - 2} & \cdots & \node (P2k-1) {\textcolor{black}{P_0} - \lambda P_1}; & - \lambda P_0 \\ \textcolor{black}{P_{\mu - 1}} & P_{\mu - 2} - \lambda P_{\mu - 1} & P_{\mu - 3} - \lambda P_{\mu - 2} & P_{\mu - 4} - \lambda P_{\mu - 3} & & \iddots & \\ \textcolor{black}{P_{\mu -2 }} & P_{\mu - 3} - \lambda P_{\mu - 2} & P_{\mu - 4} - \lambda P_{\mu - 3} & \cdots & \iddots & & \\ \vdots & \vdots & \vdots & \iddots & & & \\ \node (P1) {\textcolor{black}{P_1}}; & \textcolor{black}{P_0} - \lambda P_1 & - \lambda P_0 & & & & \\ \node (Pk1) {P_0}; & - \lambda P_0 & & & & & \\ }; \draw[dashed] (P11.west |- P1k.south) -- (P1k.south -| P1k.east); \draw[dashed] (P11.east |- P1k.north) -- (P11.east |- Pk1.south); \draw[color=black] (P21.south -| P21.west) -- (P21.south -| P2k-1.east); \draw[color=black] (P21.south -| P2k-1.east) -- ++(0,2.1); \draw[color=black] (P1.south -| P1.west) -- ++(6.5,0) -- ++ (0,8.4); \fill[color=black!20!white, opacity=0.2] (P11.south -| P11.east) rectangle ++(22,-8.5); \path (P11.south -| P11.east) -- ++(17,-7.8) node {core part $C(\mathcal{L})$}; \end{tikzpicture} } \end{center} \end{remark} The next algorithm presents a procedure to reformulate a pencil from $\mathbb{DG}_{\eta + 1}(P)$ as an element of $\mathbb{DG}_{\eta + i + 1}$ for all $i= 1, \ldots , \lfloor \tfrac{\epsilon - \eta}{2} \rfloor$. 
This implies $\mathcal{L}(\lambda) \in \mathbb{G}_{\eta + i}(P)$ for all $i=2, \ldots , k-2 \eta - 1$.\\ \noindent \textbf{Algorithm 1: Shift-Procedure for Pencil Expressions} \\ Let $P(\lambda) = \sum_{i=0}^k P_i \lambda^i$ be an $n \times n$ matrix polynomial of degree $k= \epsilon + \eta + 1$ and assume $\eta \leqslant \epsilon$. In addition, let a matrix pencil $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$ be given as in (\ref{doubleansatz_expr1}). \\ \begin{itemize} \item[1.]Choose any $i=1, \ldots , \lfloor \tfrac{\epsilon - \eta}{2} \rfloor$ and partition $\mathcal{H}_{\epsilon - \eta}(P)$ as follows: $$ \mathcal{H}_{\epsilon - \eta}(P) = \left[ \begin{array}{c} J_i(P) \\ \hline \begin{array}{c|c|c} H_i(P) & \mathcal{H}_{\epsilon - \eta - 2i}(P) & 0 \\ \hline \mathcal{H}_i(P) & 0 & 0 \end{array} \end{array} \right]. $$ \item[2.] Partition $C_{11}$ as $C_{11}^{\mathcal{B}} = \big[ \, c_1 \; c_2 \; \ldots \; c_{\epsilon - \eta} \, \big]$ with $c_i \in \mathbb{R}^{\eta n \times n}$ and compute the matrices $$ \widetilde{B}_{11}^{(i)} = \left[ \begin{array}{c|c} \begin{array}{c} B_{11} \\[0.1cm] \hline c_1^{\mathcal{B}} \\ \vdots \\ c_i^{\mathcal{B}} \end{array} & 0_{(\eta + i + 1)n \times i n} \end{array} \right] , \qquad \widetilde{C}_{21}^{(i)} = \left[ \begin{array}{c|c} \begin{array}{c} c_{(\epsilon - \eta)-i+1}^{\mathcal{B}} \\ \vdots \\ c_{\epsilon - \eta}^{\mathcal{B}} \end{array} & \mathcal{H}_i(P) \\[0.1cm] \hline C_{21} & 0_{\eta n \times i n} \end{array} \right] , $$ and $$ \widetilde{C}_{11}^{(i)} = \left[ \begin{array}{c|c} \begin{array}{c} c_{i+1}^{\mathcal{B}} \\ \vdots \\ c_{(\epsilon - \eta)-i}^{\mathcal{B}} \end{array} & H_i(P) \end{array} \right]. $$ Note $\widetilde{B}_{11}^{(i)} \in \mathbb{R}^{(\eta + 1 + i)n \times (\eta + i)n}$, $\widetilde{C}_{11}^{(i)} \in \mathbb{R}^{(\epsilon - \eta - 2i)n \times (\eta + i)n}$ and $\widetilde{C}_{21}^{(i)} \in \mathbb{R}^{(\eta + i)n \times (\eta + i)n}$. \item[3.] 
Compute the matrix $ \Omega_{\eta + i,P}(\lambda)$ from (\ref{matrix_Omega}) and express $\mathcal{L}(\lambda)$ as \begin{equation} {\small \left[ \begin{array}{c|c|c} I_{(\widetilde{\eta}+1)n} & \widetilde{B}_{11}^{(i)} & 0 \\ \hline 0 & \widetilde{C}_{11}^{(i)} & \alpha \mathcal{H}_{\widetilde{\epsilon} - \widetilde{\eta}}(P) \\ \hline 0 & \widetilde{C}_{21}^{(i)} & 0 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Omega_{\widetilde{\eta} , P}(\lambda) & L_{\widetilde{\eta}}^T \otimes I_n \\ \hline L_{\widetilde{\epsilon}} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c} \begin{array}{c|c} I_{(\widetilde{\epsilon}+1)n} & 0 \\ \hline \widetilde{B}_2^{(i)} & \widetilde{C}_{2}^{(i)} \end{array} \end{array} \right]} \label{shiftform} \end{equation} with $\widetilde{\eta} = \eta + i$, $\widetilde{\epsilon} = \epsilon - i$ and $ \begin{bmatrix} \widetilde{B}_2^{(i)} & \widetilde{C}_2^{(i)} \end{bmatrix} = \left[ \begin{array}{c} B_2 \hspace{2cm} C_2 \\ \hline 0_{in \times (\eta + 1)n} \; \, J_i(P) \; \, 0_{in \times \eta n} \end{array} \right]. $ \end{itemize} Now the pencil $\mathcal{L}(\lambda)$ is an element of $\mathbb{DG}_{\widetilde{\eta} + 1}(P)$. Notice that we did not formulate $\mathcal{L}(\lambda)$ in terms of $\Pi_{\eta + i,P}^{\mathbb{DG}}(\lambda)$ as in (\ref{doubleansatz_expr1}). Although this is possible, it is easier (and seems more natural) to just use $\Omega_{\eta + i,P}(\lambda)$ which is directly available.\footnote{However, having (\ref{shiftform}) we are certainly able to modify $\widetilde{B}_{11}^{(i)}$ and $\widetilde{B}_2^{(i)}$ appropriately to express $\mathcal{L}(\lambda)$ in the form (\ref{doubleansatz_expr1}).} We illustrate this procedure in the following example. 
\begin{example} Let $P(\lambda) = \sum_{i=0}^7 P_i \lambda^i$ be an $n \times n$ matrix polynomial of degree $\textnormal{deg}(P) = 7.$ Consider the matrix pencil $\mathcal{L}(\lambda)$ \footnotesize{ \[ \mathcal{L}(\lambda) = {\small \left[ \begin{array}{cccc:c:c:c} \lambda P_7 + P_6 & P_5 & -A & -B & -C & -D & -E \\ 0 & P_4 & \lambda A + P_3 & \lambda B + P_2 & \lambda C + P_1 & \lambda D + P_0 & \lambda E \\ \hdashline -F & P_3 + \lambda F & P_2 - \lambda P_3 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ \hdashline - G & P_2 + \lambda G & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & & \\ \hdashline - H & \lambda H + P_1 & P_0 - \lambda P_1 & - \lambda P_0 & & & \\ -J & P_0 + \lambda J & - \lambda P_0 & & & & \\ - K & \lambda K & & & & & \end{array} \right]} \]} \normalsize{with arbitrary $n \times n$ matrices $A,B, \ldots , K.$} \normalsize{This matrix pencil $\mathcal{L}(\lambda)$ is an element of $\mathbb{DG}_{2}(P)$ since it can be expressed as} \footnotesize{ \[ { \left[ \begin{array}{cc|c|cccc} I_n & & 0 & & & & \\ & I_n & 0 & & & & \\ \hline & & F & -P_3 & -P_2 & -P_1 & -P_0 \\ & & G & -P_2 & -P_1 & -P_0 & \\ & & H & -P_1 & -P_0 & & \\ & & J & -P_0 & & & \\ \hline & & K & & & \end{array} \right] \Psi_1 \left[ \begin{array}{cccccc|c} I_n & & & & & & \\ & I_n & & & & & \\ & & I_n & & & & \\ & & & I_n & & & \\ & & & & I_n & & \\ & & & & & I_n & \\ \hline 0 & 0 & A & B & C & D & E \end{array} \right] }, \] } \normalsize{with} \[\Psi_j = \left[ \begin{array}{c|c} \Omega_{j,P}(\lambda) & L_j(\lambda)^T \otimes I_n \\ \hline L_{6-j}(\lambda) \otimes I_n & 0 \end{array} \right]. 
\] \normalsize{For $i=1$ we obtain according to Algorithm 1} \footnotesize{ \[ \small { \left[ \begin{array}{ccc|cc|cc} I_n & & & 0 & 0 & & \\ & I_n & & 0 & 0 & & \\ & & I_n & F & 0 & & \\ \hline & & & G & -P_2 & -P_1 & -P_0 \\ & & & H & -P_1 & -P_0 & \\ \hline & & & J & -P_0 & & \\ & & & K & 0 & & \end{array} \right] \Psi_2 \left[ \begin{array}{ccccc|cc} I_n & & & & & & \\ & I_n & & & & & \\ & & I_n & & & & \\ & & & I_n & & & \\ & & & & I_n & & \\ \hline 0 & 0 & A & B & C & D & E \\ 0 & 0 & -P_3 & -P_2 & -P_1 & -P_0 & \end{array} \right] .} \]} \normalsize{According to (\ref{shiftform}) this is the expression of $\mathcal{L}(\lambda)$ in the space $\mathbb{DG}_{3}(P)$. Now, since $\lfloor \tfrac{\epsilon - \eta}{2} \rfloor = 2$ we may also consider the case $i=2$. Algorithm 1 gives in this case} \footnotesize{ \[ \footnotesize{ \left[ \begin{array}{cccc|ccc} I_n & & & & 0 & 0 & 0 \\ & I_n & & & 0 & 0 & 0 \\ & & I_n & & F & 0 & 0 \\ & & & I_n & G & 0 & 0 \\ \hline & & & & H & -P_1 & -P_0 \\ & & & & J & -P_0 & 0 \\ & & & & K & 0 & 0 \end{array} \right] \Psi_3 \left[ \begin{array}{cccc|ccc} I_n & & & & & & \\ & I_n & & & & & \\ & & I_n & & & & \\ & & & I_n & & & \\ \hline 0 & 0 & A & B & C & D & E \\ 0 & 0 & -P_3 & -P_2 & -P_1 & -P_0 & \\ 0 & 0 & -P_2 & -P_1 & -P_0 & & \end{array} \right] .} \] } \normalsize{This is the expression of $\mathcal{L}(\lambda)$ as an element of $\mathbb{DG}_4(P)$. In this case, $\mathbb{DG}_4(P) = \mathbb{G}_4(P) \cap \mathbb{G}_4(P)$, so there are no additional restrictions for a pencil in $\mathbb{G}_4(P)$ to be an element of $\mathbb{DG}_4(P)$. This complies with the disappearance of the $\mathcal{H}$-block and the zero-blocks in (\ref{doubleansatz_expr1}).} \end{example} The following observation is immediate. \begin{corollary}[Inclusion Property for $\mathbb{DG}_{\eta + 1}(P)$ Spaces] \label{cor_inclusionDG} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k= \epsilon + \eta + 1$. 
Then we have \begin{equation} \mathbb{DG}_1(P) \, \subsetneqq \, \mathbb{DG}_2(P) \, \subsetneqq \, \cdots \, \subsetneqq \, \mathbb{DG}_{\lceil \tfrac{k}{2} \rceil}(P). \label{sequenceDG} \end{equation} \end{corollary} \subsection{Block-symmetric Pencils and the Spaces $\mathbb{BG}_{\eta + 1}(P)$} This section is dedicated to the basic study of block-symmetric pencils in double block Kronecker ansatz spaces. Block-symmetric block Kronecker pencils have already been considered in \cite{FassPS16}, whereas vector spaces of block-symmetric pencils are investigated in \cite{BueDFR15} and \cite{MacMMM06}. For motivation, consider once more the matrix pencil $\mathcal{K}(\lambda)$ in (\ref{ex_DG2}). \begin{remark} Example \ref{ex_blockLspace1} showed, in contrast to our experience with the classical double ansatz space $\mathbb{DL}(P)$, that not all matrix pencils in $\mathbb{DG}_{\eta + 1}(P)$ are block-symmetric. Nevertheless, considering $\mathcal{K}(\lambda)$ from Example \ref{ex_blockLspace1} it is not hard to see how a block-symmetric matrix pencil $\widetilde{\mathcal{K}}(\lambda)$ in $\mathbb{DG}_{2}(P)$ can be built. For $\widetilde{\mathcal{K}}(\lambda)$ we chose the $(1,1)$ block to be block-symmetric and adjust the bordering blocks to obtain a block-symmetric pencil: \begin{equation} \widetilde{\mathcal{K}}(\lambda) = \left[ \begin{array}{cc|ccc:c} \lambda P_6 + P_5 & \tfrac{1}{2} P_4 & A & C & -B & -I_n \\ \tfrac{1}{2} P_4 & P_3 & P_2 - \lambda A & P_1 - \lambda C & P_0 + \lambda B & \lambda I_n \\ \hdashline A & P_2 - \lambda A & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ C & P_1 - \lambda C & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ -B & P_0 + \lambda B & - \lambda P_0 & 0 & 0 & 0 \\ \hline -I_n & \lambda I_n & 0 & 0 & 0 & 0 \end{array} \right]. 
\label{symmetrized_pencil} \end{equation} \end{remark} \begin{definition}[Block-symmetric Block Kronecker Ansatz Space] \label{def_spaceBG} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k=\epsilon + \eta + 1$ and assume $\eta \leqslant \epsilon$. Then we define $$ \mathbb{BG}_{\eta + 1}(P) = \big\lbrace \mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P) \; \big| \; \mathcal{L}(\lambda) = \mathcal{L}(\lambda)^{\mathcal{B}} \big\rbrace . $$ \end{definition} As Example \ref{ex_blockspace} immediately suggests, in general $\mathbb{DG}_{\eta + 1}(P) \neq \mathbb{BG}_{\eta + 1}(P)$ holds. In fact, $\mathbb{BG}_{\eta + 1}(P)$ is a proper subspace of $\mathbb{DG}_{\eta + 1}(P)$ for $\eta > 0$ (see Theorem \ref{thm_char_BG} below) and therefore a nowhere dense subset in $\mathbb{DG}_{\eta + 1}(P)$. \begin{remark} To find or construct block-symmetric pencils in $\mathbb{DG}_{\eta + 1}(P)$ several aspects have to be considered. As in the previous discussion, the matrix pencils will be partitioned into a $3\times 3$ block matrix as in Figure \ref{fig1}. First and foremost (\ref{symmetrized_pencil}) reveals, that we have to take care of the bordering blocks in order to enforce pencils $\mathcal{L}(\lambda)$ in $\mathbb{DG}_{\eta + 1}(P)$ on being block-symmetric. Secondly, the upper left square diagonal block certainly has to be block-symmetric as well. Thirdly, we do not have to take care of the core part of the pencil which is, for pencils in $\mathbb{DG}_{\eta + 1}(P)$, block-symmetric anyway. These conditions were taken into account in the following algorithm. \end{remark} \noindent \textbf{Algorithm 2: Construction Procedure for Block-symmetric Pencils} \\ \noindent Let $P(\lambda) = \sum_{i=0}^k P_i \lambda^i$ be an $n \times n$ matrix polynomial of degree $k= \epsilon + \eta + 1$. \\ \begin{itemize} \item[1.] 
Compute the matrix $$ \Sigma^{\mathbb{BG}}_{\eta,P}(\lambda) = \begin{bmatrix} \lambda P_k + P_{k-1} & & & \\ & \lambda P_{k-2} + P_{k-3} & & \\ & & \ddots & \\ & & & \lambda P_{\epsilon - \eta +1} + P_{\epsilon - \eta} \end{bmatrix} $$ and set $ \Pi_{\eta , P}^{\mathbb{BG}}(\lambda) = \begin{bmatrix} \Sigma_{\eta , P}^{\mathbb{BG}}(\lambda) & \mathcal{R}_{\eta,P} \end{bmatrix}.$ Note that $\Sigma_{\eta, P}^{\mathbb{BG}}(\lambda) \in \mathbb{R}[\lambda]^{(\eta + 1)n \times (\eta + 1)n}.$ (For the definition of $\mathcal{R}_{\eta , P}$ see (\ref{Rmatrix})). \item[2.] Compute the matrix $$ C_1 = \left[ \begin{array}{c|c} C_{11} & \alpha \mathcal{H}_{\epsilon - \eta}(P) \\[0.1cm] \hline C_{21} & 0_{\eta n \times (\epsilon - \eta)n} \end{array} \right] \in \mathbb{R}^{ \epsilon n \times \epsilon n} $$ with arbitrary matrices $C_{11} \in \mathbb{R}^{(\epsilon - \eta)n \times \eta n}$ and $C_{21} \in \mathbb{R}^{\eta n \times \eta n}$. \item[3.] Choose an arbitrary matrix $B_{11} \in \mathbb{R}^{(\eta + 1)n \times \eta n}$ and set \begin{equation} B_1= \begin{bmatrix} B_{11} & 0_{(\eta +1)n \times (\epsilon - \eta)n} \end{bmatrix} \qquad C_2 = C_{21}^{\mathcal{B}} \qquad B_2 = \begin{bmatrix} B_{11}^{\mathcal{B}} & C_{11}^{\mathcal{B}} \end{bmatrix}. \label{help1} \end{equation} \item[4.] Construct the $kn \times kn$ matrix pencil $\mathcal{L}(\lambda) \in \mathbb{DG}_{\eta + 1}(P)$: \begin{align} \mathcal{L}(\lambda) &= \left[ \begin{array}{c|c} I_{(\eta + 1)n} & B_1 \\ \hline 0 & C_1 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Pi_{\eta, P}^{\mathbb{BG}}(\lambda) & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon +1)n} & 0 \\ \hline B_2 & C_2 \end{array} \right]. 
\label{block_constr} \end{align} \end{itemize} The matrix pencil $\mathcal{L}(\lambda)$ is explicitly given as \footnotesize{ $$ \mathcal{L}(\lambda) = \left[ \begin{array}{c|c|c} I_{(\eta + 1)n} & B_{11} & 0 \\[0.1cm] \hline 0 & C_{11} & \alpha \mathcal{H}_{\epsilon - \eta} \\ \hline 0 & C_{21} & 0 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Pi_{\eta, P}^{\mathbb{BG}}(\lambda) & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\[0.1cm] \hline \begin{array}{c|c} B_{11}^{\mathcal{B}} & C_{11}^{\mathcal{B}} \end{array} & C_{21}^{\mathcal{B}} \end{array} \right]. $$ } \normalsize{Since $\Sigma_{\eta,P}^{\mathbb{BG}}(\lambda)$ is block-symmetric by construction, (\ref{help1}) ensures the block-symmetry of $\mathcal{L}(\lambda)$ in total. To see this, recall that the core part of a pencil in $\mathbb{DG}_{\eta + 1}(P)$ is always block-symmetric. It is easily seen that the conditions (\ref{help1}) are not only sufficient, but also necessary for $\mathcal{L}(\lambda)$ in (\ref{block_constr}) to be block-symmetric (recall (\ref{symmetrized_pencil}) and (\ref{ex_DG2})).} \begin{theorem}[Characterization of $\mathbb{BG}_{\eta + 1}(P)$] \label{thm_char_BG} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k=\epsilon + \eta + 1$ and assume $\eta \leqslant \epsilon$. Then $\mathbb{BG}_{\eta + 1}(P)$ is a vector space over $\mathbb{R}$ having dimension $$ \textnormal{dim} \big( \mathbb{BG}_{\eta + 1}(P) \big) = k \eta n^2 + 1. 
$$ Any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{BG}_{\eta + 1}(P)$ may be characterized as \begin{equation} \footnotesize{ \mathcal{L}(\lambda)= \left[ \begin{array}{c|c|c} I_{(\eta + 1)n} & B_{11} & 0 \\ \hline 0 & C_{11} & \alpha \mathcal{H}_{\epsilon - \eta} \\ \hline 0 & C_{21} & 0 \end{array} \right] \left[ \begin{array}{c|c} \alpha \Pi_{\eta , P}^{\mathbb{BG}}(\lambda) & L_{\eta}^T \otimes I_n \\ \hline L_{\epsilon} \otimes I_n & 0 \end{array} \right] \left[ \begin{array}{c|c} I_{(\epsilon + 1)n} & 0 \\[0.1cm] \hline \begin{array}{c|c} B_{11}^{\mathcal{B}} & C_{11}^{\mathcal{B}} \end{array} & C_{21}^{\mathcal{B}} \end{array} \right] } \label{blockansatz_expr1} \end{equation} \normalsize{with arbitrary matrices $B_{11} \in \mathbb{R}^{(\eta + 1)n \times \eta n}$, $C_{11} \in \mathbb{R}^{(\epsilon - \eta)n \times \eta n}, C_{21} \in \mathbb{R}^{\eta n \times \eta n}$ and $\alpha \in \mathbb{R}$. Moreover, unless $\eta =0$, $\mathbb{BG}_{\eta + 1}(P)$ is a proper subspace of both $\mathbb{DG}_{\eta + 1}(P)$ and $\mathbb{DG}_{k - \eta}(P)$. } \end{theorem} The next results about $\mathbb{BG}_{\eta + 1}(P)$ are immediate consequences of Theorem \ref{master4} and Corollary \ref{cor_inclusionDG}. \begin{corollary}[Linearization Condition for $\mathbb{BG}_{\eta + 1}(P)$] \label{master5} \ \\ Let $P(\lambda)$ be a square and regular matrix polynomial of degree $k= \eta + \epsilon + 1$. Let $\mathcal{L}(\lambda) \in \mathbb{BG}_{\eta + 1}(P)$ be given in the form (\ref{blockansatz_expr1}). Assume $\epsilon \neq \eta$. Then the following statements are equivalent: \begin{enumerate} \item $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$. \item $P_0 \in \textnormal{GL}_n( \mathbb{R}), C_{21} \in \textnormal{GL}_{\eta n}(\mathbb{R})$ and $\alpha \in \mathbb{R} \setminus \lbrace 0 \rbrace$. 
\end{enumerate} \end{corollary} For $\epsilon = \eta$ the equivalence in Corollary \ref{master5} holds without the condition $P_0 \in \textnormal{GL}_n( \mathbb{R})$ in the second statement (due to the disappearance of the $\mathcal{H}$-block). In this case, the implication $2. \Rightarrow 1.$ holds also for singular matrix polynomials according to Theorem \ref{thm_lincondition}. Moreover, Corollary \ref{generic1} certainly still holds. That is, whenever zero is not an eigenvalue of $P(\lambda)$, i.e., $P_0 \in \textnormal{GL}_n( \mathbb{R})$, almost every matrix pencil in $\mathbb{BG}_{\eta + 1}(P)$ is a strong linearization for $P(\lambda)$ regardless of whether $P(\lambda)$ is regular or singular. Moreover, the inclusion property from the previous section is still valid for block-symmetric pencils. \begin{lemma}[Inclusion Property for $\mathbb{BG}_{\eta + 1}(P)$ Spaces] \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k= \epsilon + \eta + 1$. Then we have \begin{equation} \mathbb{BG}_1(P) \, \subsetneqq \, \mathbb{BG}_2(P) \, \subsetneqq \, \cdots \, \subsetneqq \, \mathbb{BG}_{\lceil \tfrac{k}{2} \rceil}(P). \label{sequenceBG} \end{equation} \end{lemma} To illustrate the construction procedure from Algorithm 2 consider the following simple example. \begin{example} \label{ex_blocksymmpencils} Let $P(\lambda) = \sum_{i=0}^7 P_i \lambda ^i$ be an $n \times n$ matrix polynomial of degree $\textnormal{deg}(P)=7$. First consider the case $\eta = 1$ and $\epsilon = k - \eta - 1 = 5$. The construction procedure easily gives $$ \mathcal{H}_4 = \begin{bmatrix}-P_3 & -P_2 & - P_1 & -P_0 \\ -P_2 & -P_1 & -P_0 & \\ -P_1 & -P_0 & & \\ -P_0 & & & \end{bmatrix} \in \mathbb{R}^{4 n \times 4n}$$ and $\Pi^{\mathbb{BG}}_{1, P}(\lambda) = \begin{bmatrix} \lambda P_7 + P_6 & 0_n & 0_n & 0_n & 0_n & 0_n \\ 0_n & \lambda P_5 + P_4 & P_3 & P_2 & P_1 & P_0 \end{bmatrix} $. Choose $B_{11} = 0$, $C_{11} = 0$ and $C_{21} = I_n$. 
Then computing $\mathcal{L}(\lambda)$ from (\ref{block_constr}) with $\alpha = 1$ yields $$ \small{ \mathcal{L}(\lambda) = \left[ \begin{array}{cc|cccc:c} \lambda P_7 + P_6 & 0 & 0 & 0 & 0 & 0 & -I_n \\ 0 & \lambda P_5 + P_4 & P_3 & P_2 & P_1 & P_0 & \lambda I_n \\ \hdashline 0 & P_3 & P_2 - \lambda P_3 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 \\ 0 & P_2 & P_1 - \lambda P_2 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0\\ 0 & P_1 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 & 0 \\ 0 & P_0 & - \lambda P_0 & 0 & 0 & 0 & 0 \\ \hline -I_n & \lambda I_n & 0& 0 & 0 & 0 & 0 \end{array} \right]} $$ which is indeed a block-symmetric $7n \times 7n$ matrix pencil. Thus $\mathcal{L}(\lambda) \in \mathbb{BG}_2(P)$. Note that the choice of $B_{11}$ and $C_{11}$ has no influence on $\mathcal{L}(\lambda)$ for being a linearization. In fact, the nonsingularity of $P_0$ and $C_{21}$ is the decisive factor, while choosing $B_{11}$ and $C_{11}$ to be singular matrices does not affect the linearization property of $\mathcal{L}(\lambda)$ at all. Now consider $\eta = 2$ and $\epsilon = k - \eta - 1=4$. Then $$ \mathcal{H}_2 = \begin{bmatrix} - P_1 & -P_0 \\ -P_0 & 0 \end{bmatrix} \in \mathbb{R}^{2n \times 2n}.$$ Now choose $C_{11}= \begin{bmatrix} -P_7 & -P_6 \\ -P_5 & -P_4 \end{bmatrix}$ and $C_{21} = \begin{bmatrix} -P_3 & -P_2 \\ -P_1 & -P_0 \end{bmatrix}$. 
The computation in (\ref{block_constr}) gives \footnotesize{ $$ \mathcal{K}(\lambda) = \left[ \begin{array}{ccc|cc:cc} \lambda P_7 + P_6 & 0 & 0 & P_7 & P_5 & P_3 & P_1 \\ 0 & \lambda P_5 + P_4 & 0 & P_6 - \lambda P_7 & P_4 - \lambda P_5 & P_2 - \lambda P_3 & P_0 - \lambda P_1 \\ 0 & 0 & \lambda P_3 + P_2 & P_1 - \lambda P_6 & P_0 - \lambda P_4 & - \lambda P_2 & - \lambda P_0 \\ \hdashline P_7 & P_6 - \lambda P_7 & P_1 - \lambda P_6 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 \\ P_5 & P_4 - \lambda P_5 & P_0 - \lambda P_4 & - \lambda P_0 & 0 & 0 & 0 \\ \hline P_3 & P_2 - \lambda P_3 & - \lambda P_2 & 0 & 0 & 0 & 0 \\ P_1 & P_0 - \lambda P_1 & - \lambda P_0 & 0 & 0 & 0 & 0 \end{array} \right] $$ } \normalsize{which is block-symmetric. Therefore we have $\mathcal{K}(\lambda) \in \mathbb{BG}_3(P)$.} \end{example} \begin{remark} Consider $\mathcal{L}(\lambda)$ and $\mathcal{K}(\lambda)$ from the last example. $\mathcal{L}(\lambda)$ is a strong linearization for $P(\lambda)$ if and only if $\textnormal{det}(P_0) \neq 0,$ whereas $\mathcal{K}(\lambda)$ is a strong linearization for $P(\lambda)$ if and only if $\textnormal{det}(P_0), \textnormal{det}(P_1), \textnormal{det}(P_2) \neq 0$ (see Theorem \ref{thm_lincondition}). Neither the classical ansatz space approach (see \cite{MacMMM06}) nor the pure block Kronecker pencils from \cite{DopLPVD16} cover block-symmetric pencils like these. \end{remark} \section{Block Kronecker Ansatz Spaces and the Classical Ansatz Spaces} \label{sec:L1L2} As this was pointed out before, there is a strong connection between the classical ansatz spaces $\mathbb{L}_1(P), \mathbb{L}_2(P)$ and $\mathbb{DL}(P)$ and the block Kronecker ansatz spaces introduced in this paper. This section is devoted to the establishment of this connection. Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k$. 
For $\eta = 0$ the ansatz equation (\ref{ansatzequation1}) has the form $$ \mathcal{L}(\lambda) \big( \Lambda_{k-1}(\lambda) \otimes I_n) = \alpha e_1 \otimes P(\lambda)$$ which coincides with the ansatz equation for $\mathbb{L}_1(P)$ (see (3.4) in \cite{MacMMM06}) for the choice $v = \alpha e_1$. According to Theorem \ref{thm_generalspace} every matrix pencil $\mathcal{L}(\lambda)$ in $\mathbb{G}_1(P)$ may be expressed as $$ \mathcal{L}(\lambda) = \left[ \begin{array}{c|c} I_n & B_1 \\ \hline 0 & C_1 \end{array} \right] \left[ \begin{array}{c} \alpha \Sigma_{0,P}(\lambda) \\ \hline L_{k-1}(\lambda) \otimes I_n \end{array} \right] = \left[ \begin{array}{c|c} I_n & B_1 \\ \hline 0 & C_1 \end{array} \right] \mathcal{F}_{\alpha, 0, P}(\lambda). $$ Multiplying $\mathcal{L}(\lambda)$ from the left with $$ \mathcal{V}_{\textnormal{left}} = \left[ \begin{array}{c|c} v \otimes I_n & \begin{array}{c} 0_{n \times \epsilon n} \\ \hline I_{\epsilon n} \end{array} \end{array} \right] \in \mathbb{R}^{(\epsilon + 1)n \times (\epsilon + 1)n}$$ gives a pencil that satisfies $\mathcal{L}(\lambda) ( \Lambda_{k-1}(\lambda) \otimes I_n) = v \otimes P(\lambda)$ (due to the multiplication with $v \in \mathbb{R}^k$, the scalar $\alpha \in \mathbb{R}$ is ignored until further notice, i.e. we set $\alpha = 1$). On the other hand it is easily seen, that any matrix pencil of the form \begin{equation} \mathcal{L}(\lambda) = \left[ \begin{array}{c|c} v \otimes I_n & \begin{array}{c} B_1 \\ \hline C_1 \end{array} \end{array} \right] \mathcal{F}_{1,0,P}(\lambda) = \left[ \begin{array}{c|c} v \otimes I_n & \begin{array}{c} B_1 \\ \hline C_1 \end{array} \end{array} \right] \textnormal{Frob}_P(\lambda) \label{L1_compactform} \end{equation} satisfies $\mathcal{L}(\lambda)( \Lambda_{k-1}(\lambda) \otimes I_n) = v \otimes P(\lambda)$. Now, verifying that (\ref{L1_compactform}) is essentially just a reformulation of \cite[Thm. 
3.5]{MacMMM06}, we have derived an equivalent, but alternative description of $\mathbb{L}_1(P)$. In the context of orthogonal bases, this result was already obtained in \cite{FassS16}. \begin{corollary}[Characterization of $\mathbb{L}_1(P)$] \label{cor_spaceL1} \ \\ Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k$. Then $\mathcal{L}(\lambda)$ satisfies the classical ansatz equation $\mathcal{L}(\lambda)( \Lambda_{k-1} \otimes I_n) = v \otimes P(\lambda)$ if and only if \begin{equation} \mathcal{L}(\lambda) = \big[ \, v \otimes I_n \; \mathcal{Z} \, \big] \textnormal{Frob}_P(\lambda) \qquad \mathcal{Z}= \left[ \begin{array}{c} B_1 \\ \hline C_1 \end{array} \right] \label{L1} \end{equation} for some arbitrary matrix $\mathcal{Z} \in \mathbb{R}^{kn \times (k-1)n}$. \end{corollary} The characterization in (\ref{L1}) together with Theorem \ref{thm_lincondition} yields a very simple linearization condition for pencils in $\mathbb{L}_1(P)$ for regular matrix polynomials $P(\lambda)$ that is equivalent to but different from the well known $Z$-rank condition (see \cite[Cor. 2]{FassS16}). \begin{corollary}\label{lemma_neu} A matrix pencil $\mathcal{L}(\lambda) \in \mathbb{L}_1(P)$ as in (\ref{L1}) is a strong linearization for a regular $P(\lambda) = \sum_{i=0}^k P_i\lambda^i \in \mathbb{R}[\lambda]^{n \times n}$ with $P_k \neq 0$ if and only if $ [ \, v \otimes I_n \; \mathcal{Z} \, ]$ is a nonsingular matrix, i.e. $\textnormal{rank}([ \, v \otimes I_n \; \mathcal{Z} \, ])=kn.$ \end{corollary} In this case, the eigenvectors of $\mathcal{L}(\lambda)$ are exactly the eigenvectors of $\textnormal{Frob}_P(\lambda)$ (see \cite[Thm. 3.8]{MacMMM06}). A similar characterization of $\mathbb{L}_2(P)$ can be derived in an analogous way \cite[Thm. 2]{FassS16}. 
Therefore, we obtain that $\mathbb{L}_2(P)$ consists of all matrix pencils $\mathcal{L}(\lambda)$ having the form \begin{equation} \mathcal{L}(\lambda) = \text{Frob}^{\mathcal{B}}_P(\lambda) \left[ \begin{array}{c} v^T \otimes I_n \\ \hline \mathcal{Z} \end{array} \right] \label{L2} \end{equation} for some arbitrary matrix $\mathcal{Z} = [ \, B_1 \; | \; C_1 \, ] \in \mathbb{R}^{(k-1)n \times kn}$. These matrix pencils satisfy the (second) classical ansatz equation $(\Lambda_{k-1}^T \otimes I_n) \mathcal{L}(\lambda) = v^T \otimes P(\lambda)$. Similar as before, (\ref{L2}) can be seen as a reformulation of \cite[Lemma 3.11]{MacMMM06} and we obtain statements analogous to Corollaries \ref{cor_spaceL1} and \ref{lemma_neu}. The ansatz space $\mathbb{DL}(P)$ was introduced in \cite{MacMMM06} as the intersection of $\mathbb{L}_1(P)$ and $\mathbb{L}_2(P)$. As the final result of this section we state the following lemma that connects the three kinds of ansatz spaces introduced in this paper and the $\mathbb{DL}(P)$ space. \begin{lemma} Let $P(\lambda)$ be an $n \times n$ matrix polynomial of degree $k \geq 2$. Then $$ \bigcap_{\eta = 0}^{k-1} \mathbb{G}_{\eta + 1}(P) = \bigcap_{\eta = 0}^{k-1} \mathbb{DG}_{\eta + 1}(P) = \bigcap_{\eta = 0}^{k-1} \mathbb{BG}_{\eta + 1}(P) = \mathbb{DL}(P)|_{\langle e_1 \rangle}.$$ Here $\langle e_1 \rangle$ denotes the one-dimensional subspace of $\mathbb{R}^k$ spanned by $e_1$. \end{lemma} \begin{proof} Since $\mathbb{G}_1(P) \cap \mathbb{G}_k(P) = \mathbb{DL}(P)|_{\langle e_1 \rangle}$ the lemma follows from the observations in (\ref{sequenceDG}) and (\ref{sequenceBG}). \end{proof} Corollary \ref{cor_spaceL1} has particularly nice consequences for the ansatz spaces $\mathbb{L}_1(P),$ $\mathbb{L}_2(P)$ and $\mathbb{DL}(P)$. In fact, many well-known results on $\mathbb{L}_1(P)$ admit easily accessible proofs considering the form (\ref{L1}) instead of \cite[Thm. 3.5]{MacMMM06} (see \cite{FassS16}). 
In the next section we show that the standard basis of $\mathbb{DL}(P)$, i.e. the rectangular matrices $\mathcal{Z}_i$ corresponding to the basis pencils $$\mathcal{B}_i(\lambda) = \big[ \, e_i \otimes I_n \; \mathcal{Z}_i \, \big] \textnormal{Frob}_P(\lambda) \in \mathbb{DL}(P) \quad i=1, \ldots , k$$ can in fact be immediately determined from a tableau containing the matrix coefficients of $P(\lambda)$ without any computation at all. \subsection{Application: Computing the Standard Basis of $\mathbb{DL}(P)$} Consider the double ansatz space $\mathbb{DL}(P) = \mathbb{L}_1(P) \cap \mathbb{L}_2(P)$ (\ref{DLP}). Any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{DL}(P)$ is blocksymmetric \cite[Theorem 3.4]{HigMMT06}. In \cite[Section 3.3]{HigMMT06} it is discussed how to compute the \enquote{standard basis pencils} in $\mathbb{DL}(P)$ corresponding to the standard basis $\{e_1, \ldots, e_k\} \subset \mathbb{R}^k.$ Certainly, computing the standard basis of $\mathbb{DL}(P)$ from \cite[Theorem 3.5]{HigMMT06} (see also \cite[Sec. 3.3]{HigMMT06}) seems not to be a complicated task. However, regarding the expression (\ref{L1}) for matrix pencils in $\mathbb{L}_1(P)$, computing a particular blocksymmetric pencil $\mathcal{L}(\lambda) \in \mathbb{L}_1(P)$ for some given ansatz vector $v \in \mathbb{R}^k$ reduces to the computation of the corresponding matrix $\mathcal{Z} \in \mathbb{R}^{kn \times (k-1)n}$. Thus, computing $\mathcal{Z}_j$ for $\mathcal{B}_j := [ \, (e_j \otimes I_n) \; \mathcal{Z}_j \, ] \textnormal{Frob}_P(\lambda) \in \mathbb{DL}(P)$ seems even simpler and only requires the computation of one $kn \times (k-1)n$ matrix instead of the set-up of two $kn \times kn$ matrices. In fact it was shown in \cite{FassS16} that $\mathcal{Z}$ has some blocksymmetric structure, too. To this end, let $P(\lambda)=\sum_{i=0}^k P_i \lambda^i$ be a square matrix polynomial of degree $k$. 
Using the matrix coefficients of $P(\lambda)$ we define the $\mathcal{P}$-tableau corresponding to $P(\lambda)$ as in Figure \ref{P-tableau}. \begin{figure} \caption{$\mathcal{P}$-tableau corresponding to $P(\lambda)$.} \label{P-tableau} \end{figure} Now the matrices $\mathcal{Z}_i$ that correspond to a blocksymmetric matrix pencil $\mathcal{L}(\lambda) = \mathcal{B}_j(\lambda) \in \mathbb{L}_1(P)$ having the form (\ref{L1}) with ansatz vector $e_j$ can easily be determined by the tableau. Therefore, regard the tableau as a $k \times 2(k-1)$ matrix and denote the left half by $\mathcal{J}_P$ and the right half by $\mathcal{H}_P$. \begin{lemma} \label{lem_standardbasis} Let $P(\lambda) = \sum_{i=0}^k P_i \lambda^i$ be a square matrix polynomial of degree $k$ and $\mathcal{L}(\lambda) \in \mathbb{L}_1(P)$ with ansatz vector $v=e_i$. Then $\mathcal{L}(\lambda) = [\, (e_i \otimes I_n) \; \mathcal{Z}_i \, ] \textnormal{Frob}_P(\lambda) \in \mathbb{L}_2(P)$ if and only if \begin{equation} \mathcal{Z}_i = \left\{ \begin{array}{ll} \mathcal{H}_P & i = 1\\ \mathcal{J}_P(1:i,k-i:k-1) \oplus \mathcal{H}_P(i+1:k,1:i+1) & 1 < i < k\\ \mathcal{J}_P & i = k \end{array} \right. \label{formula_blocksymm} \end{equation} \end{lemma} \begin{proof} First observe that any matrix pencil $\mathcal{L}(\lambda) \in \mathbb{L}_1(P)$ may be expressed as \begin{align} \mathcal{L}(\lambda) &= \big[ \, (v \otimes I_n) \; \mathcal{Z} \, \big] \textnormal{Frob}_P(\lambda) \notag \\ &= v \otimes \Sigma_{0,P}(\lambda) + \mathcal{Z}L_{k-1}(\lambda) \notag \\ &= \big[ \, (v \otimes P_k) \; \mathcal{Z} \, \big] \lambda + \big( v \otimes \Sigma_{0,P}(0) + \mathcal{Z} L_{k-1}(0) \big) \label{pencil_L1L0} \end{align} Now notice that (\ref{pencil_L1L0}) expresses $\mathcal{L}(\lambda)$ in the form $\mathcal{L}(\lambda) = \mathcal{L}_1 \lambda + \mathcal{L}_0$ with two $kn \times kn$ matrices $\mathcal{L}_1$ and $\mathcal{L}_0$. This form was mainly considered in \cite{HigMMT06}. 
Comparing $X_m$ from \cite[(3.8a)]{HigMMT06} with $\mathcal{Z}_m$ as defined in Lemma \ref{lem_standardbasis} and considering \cite[Thm. 3.5]{HigMMT06} shows the statement. \end{proof} To illustrate Lemma \ref{lem_standardbasis} consider the following examples. Deviating from our notation, the polynomial coefficients in the example below are denoted $A,B, C, \ldots$ to highlight the similarity to \cite[Table 1/2]{MacMMM06} and \cite[Table 3.1/3.2]{HigMMT06}. \begin{example} Let $P(\lambda)= A \lambda^2 + B \lambda + C$ be an $n \times n$ matrix polynomial of degree $\textnormal{deg}(P(\lambda))=2$. Then the matrix $\mathcal{Z}$ in (\ref{L1}) has dimension $2n \times n$. Therefore, the $\mathcal{P}$-tableau has dimension $2n \times 2n$ and is easily computed as \begin{center} \begin{tikzpicture}[scale=0.8] \draw[help lines] (0,0) grid (2,2); \node at (0.5,0.5) {\footnotesize{$B$}}; \node at (1.5,0.5) {\footnotesize{$-C$}}; \node at (0.5,1.5) {\footnotesize{$A$}}; \node at (1.5,1.5) {\footnotesize{$0$}}; \draw[line width=0.4mm] (1,0) -- (1,2); \end{tikzpicture} \end{center} and we have $\mathcal{Z}_1 = \mathcal{H}_P$ and $\mathcal{Z}_2 = \mathcal{J}_P$. Now let $P(\lambda) = A \lambda^3 + B \lambda ^2 + C \lambda + D$ be of degree $\textnormal{deg}(P(\lambda))=3$. Then the matrix $\mathcal{Z}$ in (\ref{L1}) has dimension $3n \times 2n$ and the $\mathcal{P}$-tableau dimension $3n \times 4n$. 
It is given by \begin{center} \begin{tikzpicture} \draw[help lines] (0,0) grid (4,3); \node at (0.5, 0.5) {\footnotesize{$B$}}; \node at (1.5, 0.5) {\footnotesize{$C$}}; \node at (2.5, 0.5) {\footnotesize{$-D$}}; \node at (3.5, 0.5) {\footnotesize{$0$}}; \node at (0.5, 1.5) {\footnotesize{$A$}}; \node at (1.5, 1.5) {\footnotesize{$B$}}; \node at (2.5, 1.5) {\footnotesize{$-C$}}; \node at (3.5, 1.5) {\footnotesize{$-D$}}; \node at (0.5, 2.5) {\footnotesize{$0$}}; \node at (1.5, 2.5) {\footnotesize{$A$}}; \node at (2.5, 2.5) {\footnotesize{$0$}}; \node at (3.5, 2.5) {\footnotesize{$0$}}; \draw[line width=0.4mm] (2,0) -- (2,3); \end{tikzpicture} \end{center} The three structures according to formula (\ref{formula_blocksymm}) are \begin{center} \begin{tikzpicture}[scale=0.8] \draw[help lines] (0,0) grid (4,3); \node at (0.5, 0.5) {\footnotesize{$B$}}; \node at (1.5, 0.5) {\footnotesize{$C$}}; \node at (2.5, 0.5) {\footnotesize{$-D$}}; \node at (3.5, 0.5) {\footnotesize{$0$}}; \node at (0.5, 1.5) {\footnotesize{$A$}}; \node at (1.5, 1.5) {\footnotesize{$B$}}; \node at (2.5, 1.5) {\footnotesize{$-C$}}; \node at (3.5, 1.5) {\footnotesize{$-D$}}; \node at (0.5, 2.5) {\footnotesize{$0$}}; \node at (1.5, 2.5) {\footnotesize{$A$}}; \node at (2.5, 2.5) {\footnotesize{$0$}}; \node at (3.5, 2.5) {\footnotesize{$0$}}; \draw[color=black, line width=0.4mm] (2,0) rectangle (4,3); \node[fill=white,scale=0.5] at (2,3) {\Huge{$\oplus$}}; \end{tikzpicture} \quad \begin{tikzpicture}[scale=0.8] \draw[help lines] (0,0) grid (4,3); \node at (0.5, 0.5) {\footnotesize{$B$}}; \node at (1.5, 0.5) {\footnotesize{$C$}}; \node at (2.5, 0.5) {\footnotesize{$-D$}}; \node at (3.5, 0.5) {\footnotesize{$0$}}; \node at (0.5, 1.5) {\footnotesize{$A$}}; \node at (1.5, 1.5) {\footnotesize{$B$}}; \node at (2.5, 1.5) {\footnotesize{$-C$}}; \node at (3.5, 1.5) {\footnotesize{$-D$}}; \node at (0.5, 2.5) {\footnotesize{$0$}}; \node at (1.5, 2.5) {\footnotesize{$A$}}; \node at (2.5, 2.5) {\footnotesize{$0$}}; 
\node at (3.5, 2.5) {\footnotesize{$0$}}; \draw[color=black, line width=0.4mm] (2,0) -- ++(1,0) -- ++(0,1) -- ++(-1,0) -- ++(0,2) -- ++(-1,0) -- ++(0,-2) -- ++(1,0) -- ++(0,-1); \node[fill=white,scale=0.5] at (2,1) {\Huge{$\oplus$}}; \end{tikzpicture} \quad \begin{tikzpicture}[scale=0.8] \draw[help lines] (0,0) grid (4,3); \node at (0.5, 0.5) {\footnotesize{$B$}}; \node at (1.5, 0.5) {\footnotesize{$C$}}; \node at (2.5, 0.5) {\footnotesize{$-D$}}; \node at (3.5, 0.5) {\footnotesize{$0$}}; \node at (0.5, 1.5) {\footnotesize{$A$}}; \node at (1.5, 1.5) {\footnotesize{$B$}}; \node at (2.5, 1.5) {\footnotesize{$-C$}}; \node at (3.5, 1.5) {\footnotesize{$-D$}}; \node at (0.5, 2.5) {\footnotesize{$0$}}; \node at (1.5, 2.5) {\footnotesize{$A$}}; \node at (2.5, 2.5) {\footnotesize{$0$}}; \node at (3.5, 2.5) {\footnotesize{$0$}}; \draw[color=black, line width=0.4mm] (0,0) rectangle (2,3); \node[fill=white,scale=0.5] at (2,0) {\Huge{$\oplus$}}; \end{tikzpicture} \end{center} for $e_1$, $e_2$ and $e_3 \in \mathbb{R}^{3}$ respectively. Therefore, any matrix pencil $\mathcal{L}(\lambda)$ in $\mathbb{DL}(P)$ with ansatz vector $v \in \mathbb{R}^3$ can be expressed as $ \mathcal{L}(\lambda) = [ \, (v \otimes I_n) \; \mathcal{Z} \, ] \textnormal{Frob}_P(\lambda)$ with $$ \mathcal{Z} = v_1 \begin{bmatrix} 0 & 0 \\ -C & -D \\ -D & 0 \end{bmatrix} + v_2 \begin{bmatrix} A & 0 \\ B & 0 \\ 0 & -D \end{bmatrix} + v_3 \begin{bmatrix} 0 & A \\ A & B \\ B & C \end{bmatrix}. $$ \end{example} \section{Conclusions} \label{sec:conclusions} In this paper, we introduced a family of equations for matrix pencils that turn out to be a new source of linearizations for square and rectangular matrix polynomials $P(\lambda)$. We showed that these equations define vector spaces $\mathbb{G}_{\eta + 1}(P)$ of matrix pencils in which almost every pencil is a strong linearization regardless whether $P(\lambda)$ is regular or singular. 
These spaces were named \enquote{block Kronecker ansatz spaces} since they contain the entire family of block Kronecker pencils as introduced in \cite{DopLPVD16} and share important properties with the \enquote{ansatz spaces} from \cite{MacMMM06}. We showed that the intersection of two block Kronecker ansatz spaces $\mathbb{DG}_{\eta + 1}(P) = \mathbb{G}_{\eta + 1}(P) \cap \mathbb{G}_{k - \eta}(P)$ is never empty and contains a proper subspace $\mathbb{BG}_{\eta + 1}(P)$ of block-symmetric matrix pencils. Still almost every pencil is a strong linearization in both $\mathbb{DG}_{\eta + 1}(P)$ and $\mathbb{BG}_{\eta + 1}(P)$ provided that zero is not an eigenvalue of $P(\lambda)$. Moreover, we presented a simple approach to the construction of matrix pencils in $\mathbb{DG}_{\eta + 1}(P)$ and $\mathbb{BG}_{\eta + 1}(P)$ and showed that these spaces form nested sequences of vector spaces for increasing choices of $\eta$. Block Kronecker ansatz equations may be defined for other polynomial bases as well (see, e.g., \cite{LawP16} for a clever generalization of block Kronecker pencils for the Chebyshev basis). Moreover, as we pointed out in Remark \ref{rem1}, the conceptual ideas presented here may even be formulated in the abstract framework of dual bases (i.e. \enquote{strong block minimal bases pencils}, see \cite{DopLPVD16} for more information). A deeper study in this direction is, at least in the authors' opinion, likely to give attractive novel results on how Fiedler pencils, block Kronecker pencils and ansatz spaces interact. \end{document}
\begin{document} \title{Finite groups determined\\ by an inequality of the orders\\ of their elements} \begin{abstract} In this note we introduce and characterize a class of finite groups for which the element orders satisfy a certain inequality. This is contained in some well-known classes of finite groups. \end{abstract} \noindent{\bf MSC (2010):} Primary 20D10, 20D20; Secondary 20D15, 20D25, 20E34. \noindent{\bf Key words:} finite groups, element orders, CP-groups, Frobenius groups. \section{Introduction} Let ${\rm CP}_1$, CP and CN be the classes of finite groups in which the centralizers of all nontrivial elements contain only elements of prime order, of prime power order and are nilpotent, respectively. Clearly, we have ${\rm CP}_1 \subset {\rm CP} \subset {\rm CN}$. Moreover, the classes ${\rm CP}_1$ and CP consist of exactly those finite groups all of whose elements have prime order and prime power order, respectively. They have been studied in many papers, as \cite{1}-\cite{3}, \cite{5}-\cite{7} and \cite{13}. In the following we consider the finite groups $G$ such that $$o(xy) \leq {\rm max}\{o(x), o(y)\}, \mbox{ for all } x, y \in G. \leqno(*)$$These form another interesting subclass of CP, that will be denoted by ${\rm CP}_2$. Its exhaustive description is the main goal of this note. Most of our notation is standard and will not be repeated here. Basic notions and results on group theory can be found in \cite{4,8,9,11}. First of all, we observe that if a finite group $G$ belongs to ${\rm CP}_2$, then for every $x, y \in G$ satisfying $o(x) \neq o(y)$ we have $$o(xy)={\rm max}\{o(x), o(y)\},$$that is the order map is very close to a monoid homomorphism from $(G, \cdot)$ to $(\mathbb{N}^*, {\rm max})$. \bigskip An immediate characterization of finite groups contained in ${\rm CP}_2$ is indicated in the following theorem. 
\bigskip\noindent{\bf Theorem A.} {\it Let $G$ be a finite group and set $\pi_e(G)=\{o(x) \mid x \in G \}$. Then the following conditions are equivalent: \begin{itemize} \item[\rm a)] $G$ belongs to ${\rm CP}_2$. \item[\rm b)] For every $\alpha \in \pi_e(G)$, the set $G_{\alpha}=\{x \in G \mid o(x) \leq \alpha\}$ is a normal subgroup of $G$. \end{itemize}} Next, we will focus on establishing some connections between ${\rm CP}_2$ and the previous classes CP and ${\rm CP}_1$. \bigskip\noindent{\bf Proposition B.} {\it The class ${\rm CP}_2$ is properly contained in the class ${\rm CP}$.} \bigskip On the other hand, by taking $\sigma = (12)(34), \tau = (235) \in A_5$, one obtains $$5=o(\sigma\tau) > 3 = {\rm max}\{o(\sigma), o(\tau)\},$$and therefore ${\rm CP}_2$ does not contain the alternating group $A_5$. Since $A_5$ belongs to ${\rm CP}_1$, we conclude that ${\rm CP}_1$ is not contained in ${\rm CP}_2$. It is obvious that the converse inclusion also fails (for example, any abelian $p$-group belongs to ${\rm CP}_2$, but not to ${\rm CP}_1$). \bigskip\noindent{\bf Remarks.} \begin{itemize} \item[\rm 1.] Two other remarkable classes of finite $p$-groups, larger than the class of abelian $p$-groups, are contained in ${\rm CP}_2$: regular $p$-groups (see Theorem 3.14 of \cite{11}, II, page 47) and $p$-groups whose subgroup lattices are modular (see Lemma 2.3.5 of \cite{10}). Moreover, by the main theorem of \cite{12}, we infer that the powerful $p$-groups for $p$ odd also belong to ${\rm CP}_2$. \item[\rm 2.] The smallest nonabelian $p$-group contained in ${\rm CP}_2$ is the quaternion group $Q_8$, while the smallest $p$-group not contained in ${\rm CP}_2$ is the dihedral group $D_8$. Notice that all quaternion groups $Q_{2^n}$, for $n \geq 4$, as well as all dihedral groups $D_n$, for $n \neq 1, 2, 4$, are not contained in ${\rm CP}_2$. \item[\rm 3.] 
The class ${\rm CP}_2$ contains finite groups which are not $p$-groups, too. The smallest example of such a group is $A_4$. Remark that the groups $A_n$, $n \geq 5$, do not belong to ${\rm CP}_2$, and this is also valid for the symmetric groups $S_n$, $n \geq 3$. \end{itemize} Clearly, ${\rm CP}_2$ is closed under subgroups. On the other hand, the above results imply that ${\rm CP}_2$ is not closed under direct products or extensions. The same thing can be said with respect to homomorphic images, as the following example shows. \bigskip\noindent{\bf Example.} Let $p$ be a prime and $G$ be the semidirect product of an elementary abelian $p$-group $A$ of order $p^p$ by a cyclic group of order $p^2$, generated by an element $x$ which permutes the elements of a basis of $A$ cyclically. Then it is easy to see that $G$ belongs to ${\rm CP}_2$, $x^p\in Z(G)$ and the quotient $Q=\frac{G}{\langle x^p \rangle}$ is isomorphic to a Sylow $p$-subgroup of $S_{p^2}$. Obviously, in $Q$ a product of two elements of order $p$ can have order $p^2$, and hence it does not belong to ${\rm CP}_2$. \bigskip The next result collects other basic properties of the finite groups contained in ${\rm CP}_2$. \bigskip\noindent{\bf Proposition C.} {\it Let $G$ be a finite group contained in ${\rm CP}_2$. Then: \begin{itemize} \item[\rm a)] There is a prime $p$ dividing the order of $G$ such that $F(G)=O_p(G)$. \item[\rm b)] Both $Z(G)$ and $\Phi(G)$ are $p$-groups. \item[\rm c)] $Z(G)$ is trivial if $G$ is not a $p$-group. \end{itemize}} We are now able to present our main result, which gives a complete description of the class ${\rm CP}_2$. \bigskip\noindent{\bf Theorem D.} {\it A finite group $G$ is contained in ${\rm CP}_2$ if and only if one of the following statements holds: \begin{itemize} \item[\rm a)] $G$ is a $p$-group and $\Omega_n(G) = \{x \in G \mid x^{p^{n}}=1\}$, for all $n \in \mathbb{N}$. 
\item[\rm b)] $G$ is a Frobenius group of order $p^{\alpha}q^{\beta}$, $p < q$, with kernel $F(G)$ of order $p^{\alpha}$ and cyclic complement. \end{itemize}} Since all $p$-groups and all groups of order $p^{\alpha}q^{\beta}$ are solvable, Theorem D leads to the following corollary. \bigskip\noindent{\bf Corollary E.} {\it The class ${\rm CP}_2$ is properly contained in the class of finite solvable groups.} \bigskip\noindent{\bf Remark.} The finite supersolvable groups and the CLT-groups constitute two important subclasses of the finite solvable groups. Since $A_4$ belongs to ${\rm CP}_2$, we infer that ${\rm CP}_2$ is not included in these classes. Conversely, a finite supersolvable group or a CLT-group does not necessarily possess the structure described above, and thus they are not necessarily contained in the class ${\rm CP}_2$. \bigskip As we have already seen, both ${\rm CP}_1$ and ${\rm CP}_2$ are subclasses of CP, and each of them is not contained in the other. Consequently, an interesting problem is to find the intersection of these subclasses. This can be done by using again Theorem D. \bigskip\noindent{\bf Corollary F.} {\it A finite group $G$ is contained in the intersection of \,${\rm CP}_1$ and ${\rm CP}_2$ if and only if one of the following statements holds: \begin{itemize} \item[\rm a)] $G$ is a $p$-group of exponent $p$. \item[\rm b)] $G$ is a Frobenius group of order $p^{\alpha}q$, $p < q$, with kernel $F(G)$ of order $p^{\alpha}$ and exponent $p$, and cyclic complement. Moreover, in this case we have $G'=F(G)$. \end{itemize}} \noindent{\bf Remark.} $A_4$ is an example of a group of type b) in the above corollary. Mention that for such a group $G$ the number of Sylow $q$-subgroups is $p^{\alpha}$. 
It is also clear that $G$ possesses a nontrivial partition consisting of Sylow subgroups: $F(G)$ and all conjugates of a Frobenius complement. \bigskip Finally, we indicate a natural problem concerning the class of finite groups introduced in our paper. \bigskip\noindent{\bf Open problem.} Give a precise description of the structure of finite $p$-groups contained in ${\rm CP}_2$. \section{Proofs of the main results} \bigskip\noindent{\bf Proof of Theorem A.} Assume first that $G$ belongs to ${\rm CP}_2$. Let $\alpha \in \pi_e(G)$ and $x, y \in G_{\alpha}$. Then, by $(*)$, we have $$o(xy) \leq {\rm max}\{o(x), o(y)\} \leq \alpha,$$which shows that $xy \in G_{\alpha}$. This proves that $G_{\alpha}$ is a subgroup of $G$. Moreover, $G_{\alpha}$ is normal in $G$ because the order map is constant on each conjugacy class. Conversely, let $x, y \in G$ and put $\alpha = o(x), \beta = o(y)$. By supposing that $\alpha \leq \beta$, one obtains $x, y \in G_{\beta}$. Since $G_{\beta}$ is a subgroup of $G$, it follows that $xy \in G_{\beta}$. Therefore $$o(xy) \leq \beta = {\rm max}\{o(x), o(y)\},$$completing the proof. \rule{1.5mm}{1.5mm} \bigskip\noindent{\bf Proof of Proposition B.} Let $G$ be a finite group in ${\rm CP}_2$ and take $x\in G$. It is well-known that $x$ can be written as a product of (commuting) elements of prime power orders, say $x=x_1x_2\cdots x_k$. Then the condition $(*)$ implies that $$\prod_{i=1}^k o(x_i)=o(x)\leq {\rm max}\{o(x_i)\mid \,i=\overline{1,k}\,\},$$and so $k=1$. Hence $x$ is of prime power order, i.e. $G$ is contained in CP. Obviously, the inclusion of ${\rm CP}_2$ in CP is strict (we have already seen that $A_5$ belongs to CP, but not to ${\rm CP}_2$). 
\rule{1.5mm}{1.5mm} \bigskip\noindent{\bf Proof of Proposition C.} \begin{itemize} \item[\rm a)] We know that $F(G)$ is the product of the subgroups $O_p(G)$, where $p$ runs over the prime divisors of $|G|$. Suppose that there are two distinct primes $p$ and $q$ dividing the order of $F(G)$. This leads to the existence of two elements $x$ and $y$ of $F(G)$ such that $o(x) = p$ and $o(y) = q$. Since $F(G)$ is nilpotent, we obtain $xy = yx$ and so $o(xy) = pq$, a contradiction. Thus $F(G) = O_p(G)$, for a prime divisor $p$ of $|G|$. \item[\rm b)] It is well-known that both $Z(G)$ and $\Phi(G)$ are normal nilpotent subgroups of $G$. By the maximality of $F(G)$, it follows that $Z(G)$ and $\Phi(G)$ are contained in $F(G)$, and therefore they are also $p$-groups. \item[\rm c)] Assume that $Z(G)$ is not trivial and take $x\in Z(G)$ with $o(x)=p$. If $G$ is not a $p$-group, it contains an element $y$ of prime order $q\neq p$. Then $o(xy)=pq$, contradicting Proposition B. \rule{1.5mm}{1.5mm} \end{itemize} \bigskip\noindent{\bf Proof of Theorem D.} If $G$ is a $p$-group, then the conclusion is obvious. Assume now that $G$ is not a $p$-group. We will proceed by induction on $|G|$. Since $G$ belongs to ${\rm CP}_2$, all the numbers in $\pi_e(G)$ are prime powers. Let $q^n$ be the largest number of $\pi_e(G)$, where $q$ is a prime, and let $N=\{g\in G\mid o(g)<q^n\}$. Then $N\unlhd G$ and ${\rm exp}(G/N)=q$. Since $|N|<|G|$, by the inductive hypothesis it follows that either $N$ is a $p$-group or $N$ is a Frobenius group with kernel $K$ of order $p^{\alpha}$ and cyclic complement $H$ of order $r^{\beta}$, where $p, r$ are distinct primes. We will prove that in both cases $G$ is a Frobenius group whose kernel and complement are $p$-groups. \bigskip\hspace{10mm}{\bf Case 1.} $N$ is a $p$-group. \noindent Since $G$ is not a $p$-group, we can take $Q\in Syl_q(G)$, where $p\neq q$. 
So $G=N\rtimes Q$. Since every element of $N$ is of prime power order, we have $C_N(h)=1$ for all $1\neq h\in Q$. Thus, $G$ is a Frobenius group with kernel $N$ and complement $Q$. \bigskip\hspace{10mm}{\bf Case 2.} $N$ is a Frobenius group. \bigskip\hspace{15mm}{\bf Subcase 2.1.} $q\neq p$ and $q\neq r$. \noindent By a similar argument as that of Case 1, we know that $G$ is a Frobenius group with kernel $N$. But $N$ is not nilpotent, a contradiction. \bigskip\hspace{15mm}{\bf Subcase 2.2.} $q=r$. \noindent Let $Q\in Syl_q(G)$. Then $G=K\rtimes Q$. By a similar argument as that of Case 1, we know that $G$ is a Frobenius group with kernel $K$ and complement $Q$. \bigskip\hspace{15mm}{\bf Subcase 2.3.} $q=p$. \noindent We observe that all elements of $G\setminus N$ are of order $q^n$, and $g^q\in K$, where $g\in G\setminus N$ and $K\in Syl_q(N)$. So if $N_G(H)\cap(G\setminus N)\neq 1$, then $N_G(H)\cap K\neq 1$. But $N$ is a Frobenius group and $N_N(H)=H$. It follows that $N_G(H)=H$. Since $H$ is cyclic, $N_G(H)=C_G(H)$. One obtains that $G$ is $r$-nilpotent and thus $G=P\rtimes H$, where $P\in Syl_p(G)$. A similar argument as that of Case 1 shows that $G$ is again a Frobenius group with kernel $P$ and complement $H$. Finally, we prove that $H$ is cyclic. By Burnside's Theorem we only need to prove that $H$ is not a 2-group. If not, let $L=\{g\in G\mid o(g)=2\}$. Then $L\unlhd G$. It follows that $K\times L\leq G$, where $K$ is the Frobenius kernel. This contradicts the fact that all elements of $G$ are of prime power order. \rule{1.5mm}{1.5mm} \bigskip\noindent{\bf Proof of Corollary F.} The equivalence follows directly by Theorem D. In this way, we have to prove only that $G'=F(G)$ in the case b). Obviously, $G' \subseteq F(G)$. For the converse inclusion, let $x \in F(G)$ be a nontrivial element. Then $o(x) = p$. 
If $y$ is an arbitrary element of order $q$ in $G$, then we have $$o(xy) \leq {\rm max}\{o(x), o(y)\} = q,$$and therefore $o(xy) \in \{p, q\}$. If we assume that $o(xy) = p$, it results that $$q = o(y) = o(x^{-1}xy) \leq {\rm max} \{o(x^{-1}), o(xy)\} = p,$$a contradiction. This shows that $o(xy) = q$. Then there is $z \in G$ such that $xy \in \langle y\rangle^z$, say $xy = z^{-1}y^kz$ with $k \in \mathbb{Z}$. Since the element $$xy^{1-k}=z^{-1}y^kzy^{-k}=[z,y^k]$$has order $p$, we infer that $k$ must be equal to 1. Hence $$x = [z,y] \in G',$$which completes the proof. \rule{1.5mm}{1.5mm} \bigskip \bigskip {\bf Acknowledgements.} The author is grateful to the reviewer for the remarks which improved the previous version of the paper. \begin{thebibliography}{10} \bibitem{1} Bannuscher, W., Tiedt, G., {\it On a theorem of Deaconescu}, Rostock. Math. Kolloq. {\bf 47} (1994), 23-27. \bibitem{2} Brandl, R., {\it Finite groups all of whose elements are of prime power order}, Boll. Un. Mat. Ital. A {\bf 18} (1981), 491-493. \bibitem{3} Deaconescu, M., {\it Classification of finite groups with all elements of prime order}, Proc. Amer. Math. Soc. {\bf 106} (1989), 625-629. \bibitem{4} Gorenstein, D., {\it Finite simple groups}, Plenum Press, New York-London, 1982. \bibitem{5} Heineken, H., {\it On groups all of whose elements have prime power order}, Math. Proc. Royal Irish Acad. {\bf 106} (2006), 191-198. \bibitem{6} Higman, G., {\it Groups and rings having automorphisms without nontrivial fixed elements}, J. London Math. Soc. {\bf 32} (1957), 321-334. \bibitem{7} Higman, G., {\it Finite groups in which every element has prime power order}, J. London Math. Soc. {\bf 32} (1957), 335-342. \bibitem{8} Huppert, B., {\it Endliche Gruppen}, I, Springer Verlag, Berlin-Heidelberg-New York, 1967. \bibitem{9} Isaacs, I.M., {\it Finite group theory}, Amer. Math. Soc., Providence, R.I., 2008.
\bibitem{10} Schmidt, R., {\it Subgroup lattices of groups}, de Gruyter Expositions in Ma\-the\-ma\-tics {\bf 14}, de Gruyter, Berlin, 1994. \bibitem{11} Suzuki, M., {\it Group theory}, I, II, Springer Verlag, Berlin, 1982, 1986. \bibitem{12} Wilson, L., {\it On the power structure of powerful p-groups}, J. Group Theory {\bf 5} (2002), 129-144. \bibitem{13} Yang, W., Zhang, Z., {\it Locally soluble infinite groups in which every e\-le\-ment has prime power order}, Southeast Asian Bull. Math. {\bf 26} (2003), 857-864. \end{thebibliography} \vspace*{1.5mm}\\ \hspace*{4ex}\small \begin{minipage}[t]{5cm} Marius T\u arn\u auceanu \\ Faculty of Mathematics \\ ``Al.I. Cuza'' University \\ Ia\c si, Romania \\ e-mail: {\tt [email protected]} \end{minipage} \end{document}
\begin{document} \begin{abstract} We consider weighted $L^p$-Hardy inequalities involving the distance to the boundary of a domain in the $n$-dimensional Euclidean space with nonempty boundary. Using criticality theory, we give an alternative proof of the following result of F.~G.~Avkhadiev (2006). \noindent{\bf Theorem.} {\em Let $\Omega \subsetneqq \mathbb{R}^n$, $n\geq 2$, be an arbitrary domain, $1<p<\infty$ and $\alpha + p>n$. Let $\mathrm{d}_\Omega(x) =\mathrm{dist}(x,\partial \Omega )$ denote the distance of a point $x\in \Omega$ to $\partial \Omega$. Then the following Hardy-type inequality holds $$ \int_{\Omega }\frac{|\nabla \varphi |^p}{\mathrm{d}_\Omega^{\alpha}}\,\mathrm{d}x \geq \left( \frac{\alpha +p-n}{p}\right)^p \int_{\Omega }\frac{|\varphi|^p}{\mathrm{d}_\Omega^{p+\alpha}}\,\mathrm{d}x \qquad \forall \varphi\in C^{\infty }_c(\Omega),$$ and the lower bound constant $\left( \frac{\alpha +p-n}{p}\right)^p$ is sharp. } \end{abstract} \maketitle \section{Introduction}\label{sec0} Let $\Omega$ be a domain in ${\mathbb{R}}^n$, $n\geq 2$, with nonempty boundary, and let $\mathrm{d}_\Omega (x)=\mathrm{dist}(x,\partial \Omega )$ denote the distance of a point $x\in \Omega$ to the boundary of $\Omega$. Fix $p\in (1,\infty )$. We say that the {\em $L^p$-Hardy inequality} is satisfied in $\Omega $ if there exists $c>0$ such that \begin{equation} \label{mainhardy} \int_{\Omega }|\nabla \varphi|^p\,\mathrm{d}x \geq c \int_{\Omega }\frac{|\varphi|^p}{\mathrm{d}_\Omega^p}\,\mathrm{d}x \qquad \mbox{ for all $\varphi\in C^{\infty }_c(\Omega)$}. \end{equation} The {\em $L^p$-Hardy constant} of $\Omega$ is the best constant $c$ for inequality (\ref{mainhardy}) which is denoted here by $H_p(\Omega )$.
It is a classical result that goes back to Hardy himself (see for example \cite{BEL, permalkuf}) that if $n=1$ and $\Omega \subsetneqq\mathbb{R}$ is a bounded or unbounded interval, then the $L^p$-Hardy inequality holds and $H_{p}(\Omega)$ coincides with the widely known constant $$ c_p=\biggl(\frac{p-1}{p}\biggr)^p. $$ Recall that if $\Omega $ is bounded and has a sufficiently regular boundary in ${\mathbb{R}}^n$, then the $L^p$-Hardy inequality holds and $0< H_p(\Omega )\le c_p$ (for instance, see \cite{anc,mamipi}). Moreover, if $\Omega$ is convex, or more generally, if it is weakly mean convex, i.e., if $\Delta \mathrm{d}_\Omega\leq 0$ in the distributional sense in $\Omega$ (see \cite{gromov,giga and giovanni, LLL, psaradakis}), then $H_p(\Omega )=c_p $ \cite{barfilter, dam, mamipi}. On the other hand, it is also well-known (see for example \cite{BEL,permalkuf}) that if $\Omega ={\mathbb{R}}^n\setminus\{0\}$ and $p\ne n$, then the $L^p$-Hardy inequality holds and $H_{p}(\Omega)$ coincides with the other widely known constant $$ c_{p,n}=\biggl|\frac{p-n}{p}\biggr|^p, $$ which indicates that the $L^p$-Hardy inequality does not hold for ${\mathbb{R}}^n\setminus\{0\}$ if $p=n$. In the present paper we study a {\em weighted} $L^p$-Hardy inequality involving the distance function to the boundary. We give a new proof for the following result. \begin{theorem}\label{main_thm} Let $\Omega \subsetneqq \mathbb{R}^n$ be an arbitrary domain, where $n\geq 2$. Fix $1<p<\infty$ and $\alpha + p>n$.
Then \begin{equation}\label{dp15} \int_{\Omega }\frac{|\nabla \varphi|^p}{\mathrm{d}_\Omega^{\alpha}}\,\mathrm{d}x \geq \left( \frac{\alpha +p-n}{p}\right)^p \int_{\Omega }\frac{|\varphi|^p}{\mathrm{d}_\Omega^{p+\alpha}}\,\mathrm{d}x \qquad \forall \varphi\in C^{\infty }_c(\Omega), \end{equation} and the lower bound constant $$c_{\alpha,p,n}:= \left( \frac{\alpha +p-n}{p}\right)^p$$ is sharp. In particular, for $p>n$ we have $$H_p(\Omega )\geq c_{p,n}=\left( \frac{p-n}{p}\right)^p \quad \mbox{for any domain } \Omega \subsetneqq \mathbb{R}^n.$$ \end{theorem} \begin{remark} Theorem~\ref{main_thm} was proved by F.~G.~Avkhadiev in \cite{avk} using a cubic approximation of $\Omega$. One should note that J.~L.~Lewis \cite{Lewis} proved that \eqref{mainhardy} holds true (for $\alpha=0$) with {\em a fixed positive constant independent of} $\Omega$, and in \cite{Wannebo}, A.~Wannebo generalized Lewis' result to the case $\alpha + p>n$. \end{remark} We need the following version of the Harnack convergence principle which will be used several times throughout the paper.
\begin{proposition}[Harnack convergence principle]\label{propdp1} Consider an exhaustion $\{\Omega_i\}_{i=1}^\infty$ of smooth bounded domains such that $$ \left\{ x \in \overline{\Omega}~:~ \mathrm{d}_\Omega(x)>\frac{1}{i}\right\} \subseteq \Omega_i \Subset \Omega_{i+1}, \mbox{ and } \cup_{i \in \mathbb{N}}\Omega_i = \Omega.$$ For each $i\in \mathbb{N}$, let $u_i$ be a positive (weak) solution of the equation \begin{equation*}\label{dp2} -\mathrm{div}\,(\mathrm{d}_{\Omega_i}^{-\alpha} |\nabla u_i|^{p-2}\nabla u_i ) -\mu_i\frac{|u_i|^{p-2}u_i}{\mathrm{d}_{\Omega_i}^{\alpha+p}}=0 \qquad \text{ in } \Omega_i \end{equation*} such that $u_i(x_0)=1$, where $x_0 \in \Omega_1$, and $\mu_i \in \mathbb{R}$. If $\mu_i\to\mu$, then there exists $0<\beta<1$ such that, up to a subsequence, $\{u_i\}$ converges in $C^{0,\beta}_{\mathrm{loc}}(\Omega)$ to a positive (weak) solution $u\in W^{1,p}_{\mathrm{loc}}(\Omega)$ of the equation \begin{equation*} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) -\mu\frac{|u|^{p-2}u}{\mathrm{d}_\Omega^{\alpha+p}}=0 \qquad \text{ in } \Omega . \end{equation*} \end{proposition} \begin{proof} Since $\mathrm{d}_{\Omega_i}\to \mathrm{d}_{\Omega}$, the theorem follows directly from \cite[Proposition 2.7]{GP}.
\end{proof} The paper is organized as follows: In Section~\ref{sec2} we give our proof of Theorem~\ref{main_thm}, while in the Appendix we outline two alternative proofs. \section{Proof of Theorem \ref{main_thm}}\label{sec2} Our proof of Theorem~\ref{main_thm} is based on a simple construction of (weak) positive supersolutions to the associated Euler--Lagrange equation \begin{equation}\label{EL_eq} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha}|\nabla u|^{p-2}\nabla u) - \mu \frac{|u|^{p-2}u}{\mathrm{d}_{\Omega}^{\alpha+p}} =0 \qquad \mbox{in } \Omega, \end{equation} for any $\mu<c_{\alpha,p,n}$. Theorem~\ref{main_thm} then follows from the Harnack convergence principle (Proposition~\ref{propdp1}) together with the Agmon-Allegretto-Piepenbrink-type (AAP) theorem~\cite[Theorem~4.3]{pinpsa} which asserts that the Hardy inequality \eqref{dp15} holds true if and only if \eqref{EL_eq} admits a positive (super)solution for $\mu=c_{\alpha,p,n}$. It seems that the method of the proof can be used to prove lower bounds for the best Hardy constant in different situations. \begin{proof}[Proof of Theorem \ref{main_thm}] A direct computation shows that for any $y \in \mathbb{R}^n$, the function $$u_{y}(x):=|x-y|^{(\alpha+p-n)/(p-1)}$$ is a positive solution of the equation $$-\mathrm{div}\,(\mathrm{d}_{\Omega_y}^{-\alpha}(x)|\nabla u|^{p-2}\nabla u)=0 \qquad \mbox{in } \Omega_y := \mathbb{R}^n\setminus \{ y \},$$ where $\mathrm{d}_{\Omega_y}(x)=|x-y|$.
Hence, using the supersolution construction \cite{depi}, it follows that $$v_y(x):= u_{y}^{(p-1)/p}(x) =|x-y|^{(\alpha+p-n)/p}$$ is a positive solution of the equation $$-\mathrm{div}\,(\mathrm{d}_{\Omega_y}^{-\alpha}|\nabla u|^{p-2}\nabla u) - c_{\alpha,p,n}\frac{|u|^{p-2}u}{\mathrm{d}_{\Omega_y}^{\alpha+p}} =0 \qquad \mbox{in } \Omega_y.$$ Moreover, it is known \cite{BEL} (see also \cite{avk06}) that $c_{\alpha,p,n}$ is the best constant for the inequality \begin{equation*} \int_{\Omega_y} \frac{|\nabla \varphi|^p}{|x-y|^{\alpha}} \,\mathrm{d}x \geq \mu \int_{\Omega_y}\frac{|\varphi|^p}{|x-y|^{p+\alpha}}\,\mathrm{d}x \qquad \forall \varphi\in C^{\infty}_c(\Omega_y). \end{equation*} Hence, the lower bound for the Hardy constant for the functional inequality \begin{equation*} \int_{\Omega }\frac{|\nabla \varphi|^p}{\mathrm{d}_\Omega^{\alpha}}\,\mathrm{d}x \geq \mu \int_{\Omega }\frac{|\varphi|^p}{\mathrm{d}_\Omega^{p+\alpha}}\,\mathrm{d}x \qquad \forall \varphi\in C^{\infty }_c(\Omega), \end{equation*} in a domain $\Omega\subsetneqq \mathbb{R}^n$ is less than or equal to $c_{\alpha,p,n}$.
It remains to prove that \eqref{EL_eq} admits positive supersolutions in $\Omega$ for $$\mu= \mu_\delta:= c_{\alpha,p,n}-\delta>0, \qquad \forall\, 0<\delta<c_{\alpha,p,n},$$ where $\Omega \subsetneqq \mathbb{R}^n$ is an arbitrary domain. We divide the proof into two steps. \textbf{Step 1:} Assume first that $\Omega$ is a smooth bounded domain. Fix $\delta$ as above, and choose $\varepsilon>0$ small enough such that \begin{align}\label{dp12} \varepsilon< \min \left\{ \left(\frac{ c_{\alpha,p,n}}{\mu_\delta}\right)^{1/p} \!-1,\; \frac{\mu_\delta(\alpha+p-n)}{p|\alpha| c_{\alpha,p,n} }\right\}. \end{align} For $x \in \Omega$, let $ P(x)\in \partial \Omega$ be the projection of $x$ onto $\partial \Omega$, which is well defined for a.e. $x\in \Omega$, that is, $|x-P(x)|=\mathrm{d}_\Omega(x)$. For any $y\in \partial \Omega$, consider the set \begin{align*} D_{y,\varepsilon}\!: =\!\Big\{ x\in \Omega \mid & ~ |x-y|\!< \!(1+ \varepsilon) \mathrm{d}_\Omega(x),~ \cos(x-y,x-P(x))\!>\!1-\varepsilon, \\ & \quad \text{ and }\mathrm{d}_\Omega(x)\!>\!\varepsilon/2 \Big\}. \end{align*} If \begin{align}\label{dp14} \Omega_\varepsilon= \{ x \in \Omega \mid \mathrm{d}_\Omega(x)>\varepsilon\}, \end{align} then $\displaystyle\cup_{y \in \partial \Omega} D_{y,\varepsilon}$ is an open covering of the compact set $\overline{\Omega}_\varepsilon$.
Therefore, there exist $y_i,~i=1,2,\cdots,m $ such that $\Omega_\varepsilon \subseteq \displaystyle\cup_{i=1}^{m} D_{y_i,\varepsilon}$. We note that $u_y$ is a positive supersolution to the equation \begin{equation*} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) + \varepsilon |\alpha| k_{\alpha,p,n}\frac{|u|^{p-2}u }{\mathrm{d}_{\Omega}^{\alpha+p}} =0 \qquad \text{ in } D_{y,\varepsilon}, \end{equation*} where $k_{\alpha,p,n}:= \left( \frac{\alpha+p-n}{p-1}\right)^{p-1}$. Indeed, for $\alpha\geq0$, \begin{align*} -\mathrm{div}\,(&\mathrm{d}_{\Omega}^{-\alpha} |\nabla u_y|^{p-2}\nabla u_y ) \\& = \alpha \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}\left(\frac{ \nabla \mathrm{d}_\Omega\cdot (x-y)|x-y|^{\alpha-n}}{\mathrm{d}_\Omega} -|x-y|^{\alpha-n} \right)\\ & \geq \alpha \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left(\frac{ |\nabla \mathrm{d}_\Omega||x-y|(1-\varepsilon)}{\mathrm{d}_\Omega} -1\right)\\ &\geq \alpha \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left( 1-\varepsilon-1 \right)\\ & = -\varepsilon \alpha \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \qquad \mbox{in } D_{y,\varepsilon}. \end{align*} Hence, \begin{align*} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u_y|^{p-2}\nabla u_y )& + \varepsilon |\alpha| k_{\alpha,p,n}\frac{|u_y|^{p-2}u_y }{\mathrm{d}_{\Omega}^{\alpha+p}}\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left(-\varepsilon\alpha + \varepsilon |\alpha| \frac{|x-y|^p}{\mathrm{d}_{\Omega}^{p}} \right)\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left(-\varepsilon\alpha + \varepsilon |\alpha| \right)=0 \quad \mbox{in } D_{y,\varepsilon}.
\end{align*} Similarly, for $\alpha<0$ \begin{align*} -\mathrm{div}\,(&\mathrm{d}_{\Omega}^{-\alpha} |\nabla u_y|^{p-2}\nabla u_y ) + \varepsilon |\alpha| k_{\alpha,p,n}\frac{|u_y|^{p-2}u_y }{\mathrm{d}_{\Omega}^{\alpha+p}}\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left( \frac{ \alpha \nabla \mathrm{d}_\Omega\cdot (x-y)}{\mathrm{d}_\Omega} -\alpha + \varepsilon |\alpha| \frac{|x-y|^p}{\mathrm{d}_{\Omega}^{p}} \right)\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left( \frac{\alpha |\nabla \mathrm{d}_\Omega||x-y|}{\mathrm{d}_\Omega} -\alpha + \varepsilon |\alpha| \right)\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left( \alpha(1+\varepsilon) -\alpha + \varepsilon |\alpha| \right)\\ & \geq \mathrm{d}_\Omega^{-\alpha} k_{\alpha,p,n}|x-y|^{\alpha-n} \left( \varepsilon\alpha + \varepsilon |\alpha|
\right) =0 \qquad \mbox{in } D_{y,\varepsilon}. \end{align*} Now, the weak comparison principle \cite[Lemma~5.1]{pinpsa} implies that $$u_\delta:= \min \{u_{y_i}\mid 1\leq i\leq m\}$$ is a supersolution to the equation \begin{equation}\label{dp1} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) + \varepsilon |\alpha| k_{\alpha,p,n}\frac{|u|^{p-2}u }{\mathrm{d}_{\Omega}^{\alpha+p}} =0\quad \text{ in } \Omega_\varepsilon. \end{equation} \textbf{Claim 1:} There exists a positive solution to the following equation \begin{equation}\label{dp11} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) - \left( \mu_\delta- \frac{\varepsilon p|\alpha| c_{\alpha,p,n}}{\alpha+p-n}\right) \frac{|u|^{p-2}u }{\mathrm{d}_{\Omega}^{\alpha+p}} =0 \text{ in } \Omega_\varepsilon. \end{equation} Employing the AAP-type theorem \cite[Theorem~4.3]{pinpsa}, it is enough to prove that there exists a positive supersolution to \eqref{dp11} in $\Omega_\varepsilon$. We use the supersolution construction \cite{depi} and prove that $v_\delta:= u_\delta^{(p-1)/p}$ is a supersolution to \eqref{dp11}.
Using the fact that $u_\delta$ is a supersolution to \eqref{dp1}, we deduce that \begin{align*} & -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla v_\delta|^{p-2}\nabla v_\delta ) - \left( \mu_\delta- \frac{\varepsilon p|\alpha| c_{\alpha,p,n}}{\alpha+p-n}\right)\frac{|v_\delta|^{p-2}v_\delta}{\mathrm{d}_\Omega^{\alpha+p}}\\ & =\! - \!\left(\!\frac{p-1}{p}\!\right)^{p-1}\!\!\!\!\!\mathrm{div}\,\!(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u_\delta|^{p-2}\nabla u_\delta u_\delta^{-(p-1)/p}) \!-\! \! \left(\! \mu_\delta \!-\! \frac{\varepsilon p|\alpha| c_{\alpha,p,n}}{\alpha+p-n}\!\right)\!\! \frac{|u_\delta|^{\! (p-1)^2/p}}{\mathrm{d}_\Omega^{\alpha+p}}\\ & \geq \left(\frac{p-1}{p}\right)^{p}\mathrm{d}_{\Omega}^{-\alpha} |\nabla u_\delta|^{p} u_\delta^{-(2p-1)/p} -\mu_\delta\frac{|u_\delta|^{(p-1)^2/p}}{\mathrm{d}_\Omega^{\alpha+p}}\\ & = \frac{|u_\delta|^{(p-1)^2/p}}{\mathrm{d}_\Omega^{\alpha+p}}\left[\left(\frac{p-1}{p}\right)^p\frac{|\nabla u_\delta|^p\,\mathrm{d}_\Omega^p}{u_\delta^p}- \mu_\delta \right] \qquad \mbox{in } \Omega_{\varepsilon}. \end{align*} Therefore, we need to prove that $\left[\left(\frac{p-1}{p}\right)^p\frac{|\nabla u_\delta|^p\,\mathrm{d}_\Omega^p}{u_\delta^p}- \mu_\delta \right]\geq 0$. Indeed, for a.e. $x \in \Omega_\varepsilon$, $u_\delta= u_{y_{i_0}}$ for some $i_0$ in a neighborhood of $x$.
Using the definition of $\varepsilon$ and $D_{y,\varepsilon}$, we get \begin{align*} & \left(\frac{p-1}{p}\right)^p\frac{|\nabla u_\delta|^p\,\mathrm{d}_\Omega^p}{u_\delta^p}- \mu_\delta\\ &= \left(\frac{p-1}{p}\right)^p\left(\frac{\alpha+p-n}{p-1}\right)^p \frac{\mathrm{d}_\Omega^p}{|x-y_{i_0}|^p}- \mu_\delta\geq \frac{c_{\alpha,p,n}}{(1+\varepsilon)^p} - \mu_\delta >0. \end{align*} Hence, Claim 1 is proved. \textbf{Claim 2:} There exists a positive solution to the following equation \begin{equation}\label{dp13} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) -\mu_\delta\frac{|u|^{p-2}u}{\mathrm{d}_\Omega^{\alpha+p}}=0 \quad \text{ in } \Omega. \end{equation} Let $\varepsilon_0>0$ be small enough such that \eqref{dp12} holds, and set $\varepsilon_i := \min\{ \varepsilon_0,\frac{1}{i}\}$. Clearly, $ \Omega_{\varepsilon_i} \Subset \Omega_{\varepsilon_{i+1}} $ for $i$ large enough, and $\Omega= \cup_{i=1}^{\infty} \Omega_{\varepsilon_i}$, where $\Omega_{\varepsilon_i}$ is defined in \eqref{dp14}. Employing Claim~1, it follows that for $i\geq 1$ there exists a positive solution $u_i$ to \eqref{dp11} in $\Omega_{\varepsilon_i}$ satisfying $u_i(x_0)=1$. In light of the Harnack convergence principle (Proposition \ref{propdp1}), it follows that Claim~2 holds. \textbf{Step 2: } Assume now that $\Omega$ is an arbitrary domain. Choose a smooth compact exhaustion $\{ \Omega_i\}$ of $\Omega$.
That is, $\{ \Omega_i\}$ is a sequence of smooth bounded domains such that $ \Omega_i \Subset \Omega_{i+1}\Subset \Omega$, $\Omega= \cup_{i=1}^{\infty} \Omega_i$, and $$\max_{\substack{x\in \partial\Omega_i\cap B_i\\ y\in \partial\Omega\cap B_i}}\{\text{dist}(x,\partial \Omega) , \text{dist}(y,\partial \Omega_i) \} < \frac{1}{i},$$ where $B_i=\{|x|<i\}$. Observe that $\mathrm{d}_{\Omega_i} \to \mathrm{d}_{\Omega}$ a.e. in $\Omega$. Indeed, for $x\in \overline{\Omega_i\cap B_i}$ one has $$|\mathrm{d}_{\Omega}(x) - \mathrm{d}_{\Omega_i}\!(x)|=|\text{dist}(x,\partial \Omega)-\text{dist}(x,\partial \Omega_i)|<\frac1i \,.$$ Invoking Claim 2, it follows that for each $i\geq 1$, there exists $u_i>0$ satisfying $u_i(x_0)=1$ and the equation \begin{equation*} -\mathrm{div}\,(\mathrm{d}_{\Omega_i}^{-\alpha} |\nabla u_i|^{p-2}\nabla u_i ) - \mu_\delta \frac{|u_i|^{p-2}u_i }{\mathrm{d}_{\Omega_i}^{\alpha+p}} =0 \quad \text{ in } \Omega_i. \end{equation*} Using again the Harnack convergence principle (Proposition \ref{propdp1}), we obtain a positive solution $u_\delta$ to \eqref{dp13} satisfying $u_\delta(x_0)=1$.
Letting $\delta\to 0$, we get by the Harnack convergence principle a positive solution $u_0$ to the equation \begin{equation}\label{dp21} -\mathrm{div}\,(\mathrm{d}_{\Omega}^{-\alpha} |\nabla u|^{p-2}\nabla u ) - c_{\alpha,p,n} \frac{|u|^{p-2}u }{\mathrm{d}_{\Omega}^{\alpha+p}} =0 \quad \text{ in } \Omega \end{equation} that satisfies $u_0(x_0)=1$. In light of the AAP-type theorem we obtain the Hardy inequality \begin{align*} \int_{\Omega }\frac{|\nabla \varphi|^p}{\mathrm{d}_\Omega^{\alpha}}\,\mathrm{d}x \geq \left( \frac{\alpha +p-n}{p}\right)^p \int_{\Omega }\frac{|\varphi|^p}{\mathrm{d}_\Omega^{p+\alpha}}\,\mathrm{d}x \qquad \forall \varphi\in C^{\infty }_c(\Omega). \qquad \qedhere \end{align*} \end{proof} \appendix \section{Different Proofs} Here we give two alternative proofs of Theorem~\ref{main_thm}; both of them do not use an exhaustion argument. On the other hand, both rely on the following folklore lemma which is of independent interest, see for example, Propositions 1.1.3 and 2.2.2 in \cite{CS} (cf. \cite[Theorem~1.6]{LLL}, where the case of $C^2$-domains is discussed). \begin{lemma}\label{semiconcave} Let $\Omega\! \subsetneqq \!\mathbb{R}^n$ be a domain. (i) The inequality \[-\Delta \mathrm{d}_{\Omega} \geq -\frac{n-1}{\mathrm{d}_{\Omega}} \, ,\] holds true in the sense of distributions in $\Omega$. (ii) Moreover, \begin{equation}\label{lapdq} \int_\Omega \nabla\psi\cdot\nabla{\rm d}_\Omega \,{\rm d}x \geq -(n-1)\int_\Omega\frac{\psi}{{\rm d}_\Omega}{\rm d}x\quad\forall~\psi\in C^\infty_c(\Omega), \psi\geq0.
\end{equation} \end{lemma} \begin{proof}\label{semiconcave remark} (i) Since the function $|x|^2-\mathrm{d}_{\Omega}^2(x)$ is convex, it follows that its distributional Laplacian is a nonnegative Radon measure (see \cite[Theorem 2-\S6.3]{evans and gariepy} and \cite[Lemma 2.1]{psaradakis} for the details). Hence, $$ \langle(n-1)-\mathrm{d}_{\Omega}\Delta \mathrm{d}_{\Omega}, \varphi \rangle = \int_\Omega \varphi \,\mathrm{d}\nu \qquad \forall~\varphi \in C^\infty_c(\Omega),$$ where $\nu$ is a nonnegative Radon measure, and $\langle \cdot, \cdot\rangle : \mathcal{D'}(\Omega)\times \mathcal{D}(\Omega)$ is the canonical duality pairing between distributions and test functions. Consequently, the distributional Laplacian of $- \mathrm{d}_{\Omega}$ is itself a signed Radon measure $\mu$. Thus, \begin{equation*}\label{lapd} - \langle\Delta \mathrm{d}_{\Omega}, \psi \rangle\!=\!-\!\int_\Omega \!\!\Delta \psi \,{\rm d}_\Omega{\rm d}x \!=\! \int_\Omega\!\! \psi\,{\rm d}\mu \! \geq \! -(n-1)\!\!\int_\Omega \frac{\psi}{{\rm d}_\Omega}{\rm d}x \;\; \forall~\psi\in C^\infty_c(\Omega), \psi\geq0 .
\end{equation*} (ii) Since $\nabla {\rm d}_\Omega \in L^\infty(\Omega,\mathbb{R}^n)$, it follows that $$-\langle (\mathrm{d}_{\Omega})_{x_i, x_i}, \psi \rangle = \int_\Omega (\mathrm{d}_{\Omega})_{x_i} \psi_{x_i}\,\mathrm{d}x.$$ Therefore, $\Delta \mathrm{d}_{\Omega}$, the distributional divergence of $\nabla \mathrm{d}_{\Omega}$, satisfies $$ - \langle\Delta \mathrm{d}_{\Omega}, \psi \rangle= \int_\Omega \nabla {\rm d}_\Omega\cdot \nabla \psi \,\mathrm{d}x \qquad\forall~\psi\in C^\infty_c(\Omega).$$ Hence, $$ \int_\Omega \nabla {\rm d}_\Omega\cdot \nabla \psi \,\mathrm{d}x = - \langle\Delta \mathrm{d}_{\Omega}, \psi \rangle \geq -(n-1)\int_\Omega\frac{\psi}{{\rm d}_\Omega}{\rm d}x \;\; \forall~\psi\in C^\infty_c(\Omega), \psi\geq0 . \qedhere$$ \end{proof} \begin{lemma}\label{a3} Let $\Omega\! \subsetneqq \!\mathbb{R}^n$ be a domain.
Let $$1<p<\infty, \quad \alpha} \def\gb{\beta} \def\gg{\gamma\in \mathbb{R}^n, \quad \mbox{and }\;\; 0<\gg < \frac{\alpha} \def\gb{\beta} \def\gg{\gamma+p-n}{p-1}\,.$$ Then $\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^\gg$ is a (weak) positive supersolution of the equation \begin{equation*} -\mathrm{div}\,(\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{-\alpha} \def\gb{\beta} \def\gg{\gamma} |\nabla u|^{p-2}\nabla u)- C_{\alpha} \def\gb{\beta} \def\gg{\gamma,p,n,\gg} \frac{|u|^{p-2}u}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{p+\alpha} \def\gb{\beta} \def\gg{\gamma}}=0 \quad \text{ in } \Omega} \def\Gx{\Xi} \def\Gy{\Psi, \end{equation*} where $C_{\alpha} \def\gb{\beta} \def\gg{\gamma,p,n,\gg} := |\gg|^{p-1}(\alpha} \def\gb{\beta} \def\gg{\gamma-n+1-(\gg-1)(p-1))>0$. \end{lemma} \begin{proof} Using \eqref{lapdq} we obtain \begin{multline*} \int_{\Omega }\!\!\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{-\alpha} \def\gb{\beta} \def\gg{\gamma}|\nablabla( \mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^\gg)|^{p-2}\nablabla( \mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^\gg) \!\cdot \!\nablabla \vgf \mathrm{d}x \! =\! |\gg|^{p-2}\gg \!\! \int_{\Omega } \!\!\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{(\gg-1)(p-1)-\alpha} \def\gb{\beta} \def\gg{\gamma} \nablabla \mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi} \! \cdot \! \nablabla \vgf \mathrm{d}x\\ =\!|\gg|^{p-1}\!\!\! \int_{\Omega }\!\!\! \left(\! \nablabla( \mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}) \! \cdot \! \nablabla (\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{\!(\gg-1)(p-1)-\alpha} \def\gb{\beta} \def\gg{\gamma} \!\! \vgf) \!-\! ((\gg \! - \!1)(p \! - \! 1)-\alpha} \def\gb{\beta} \def\gg{\gamma)\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{(\!\gg-1)(p-1)-\alpha} \def\gb{\beta} \def\gg{\gamma-1}\! 
\vgf \!\right) \!\mathrm{d}x\\ \geq C_{\alpha,p,n,\gamma} \int_{\Omega } \mathrm{d}_{\Omega}^{{(\gamma-1)(p-1)-\alpha-1}}\vgf \,\mathrm{d}x . \qedhere \end{multline*} \end{proof} \begin{remark} Observe that $$ C_{\alpha,p,n}= \max\left\{ C_{\alpha,p,n,\gamma} \mid {\gamma \in \left(0,\frac{\alpha+p-n}{p-1}\right)}\right\} ,$$ and the maximum is attained at $\gamma=(\alpha+p-n)/p$. \end{remark} \begin{proof}[Alternative proof of Theorem~\ref{main_thm} I] Using Lemma \ref{a3} with $\gamma=(\alpha+p-n)/p$, we deduce that $\mathrm{d}_{\Omega}^{(\alpha+p-n)/p}$ is a positive (weak) supersolution of \eqref{dp21}. Consequently, the AAP-type theorem \cite[Theorem~4.3]{pinpsa} implies the Hardy-type inequality \eqref{dp15}. \end{proof} \begin{proof}[Alternative proof of Theorem~\ref{main_thm} II] Let $\Omega \subsetneqq \mathbb{R}^n$ be a domain, and fix $s>n$. Using Lemma~\ref{semiconcave}, the following $L^1$-Hardy inequality is proved in \cite[Theorem 2.3]{psaradakis}: \begin{align}\label{dp17} \int_\Omega \frac{|\nabla \vgf|}{\mathrm{d}_{\Omega}^{s-1}}\,\mathrm{d}x\geq (s-n)\int_\Omega \frac{| \vgf |}{\mathrm{d}_{\Omega}^{s}}\,\mathrm{d}x \qquad \forall~ \vgf \in C_c^\infty(\Omega). 
\end{align} Substituting $\vgf=|\psi|^p$ in \eqref{dp17} and using H\"older inequality, we obtain \begin{align*} \frac{s-n}{p}\int_\Omega} \def\Gx{\Xi} \def\Gy{\Psi \frac{| \psi|^p}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s}} \,\mathrm{d}x &\leq \int_\Omega} \def\Gx{\Xi} \def\Gy{\Psi \frac{|\psi|^{p-1}|\nabla \psi|}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s-1}} \,\mathrm{d}x = \int_\Omega} \def\Gx{\Xi} \def\Gy{\Psi \frac{| \psi|^{p-1}}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s-s/p}}\frac{| \nabla \psi|}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s/p-1}} \,\mathrm{d}x\\[2mm] &\leq \left(\int_\Omega} \def\Gx{\Xi} \def\Gy{\Psi \frac{| \psi|^p}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s}} \right)^{1-1/p}\left(\int_\Omega} \def\Gx{\Xi} \def\Gy{\Psi \frac{| \nabla \psi|^p}{\mathrm{d}_{\Omega} \def\Gx{\Xi} \def\Gy{\Psi}^{s-p}} \right)^{1/p} \qquad \forall~ \psi \in C_c^\infty(\Omega} \def\Gx{\Xi} \def\Gy{\Psi). \end{align*} Hence, for $s= \alpha} \def\gb{\beta} \def\gg{\gamma+p$, we get \eqref{dp15}. \end{proof} \begin{center} {\bf Acknowledgements} \end{center} D.~G. and Y.~P. acknowledge the support of the Israel Science Foundation (grant 637/19) founded by the Israel Academy of Sciences and Humanities. \end{document} \end{document}
\begin{document} \conferenceinfo{}{} \title{New Results for Adaptive and Approximate Counting of Inversions} \numberofauthors{1} \author { \alignauthor Saladi Rahul \\[2mm] \affaddr{Dept. of Computer Science and Engg., University of Minnesota Twin-Cities,\\ 4-192 Keller Hall, 200 Union St. S.E., Minneapolis, MN 55455, USA} \email{[email protected]} } \maketitle \begin{abstract} Counting inversions is a classic and important problem in databases. The number of inversions, $K^*$, in a list $L=(L(1),L(2),\ldots,L(n))$ is defined as the number of pairs $i < j$ with $L(i) > L(j)$. In this paper, new results for this problem are presented: \begin{enumerate} \item In the I/O-model, an {\it adaptive} algorithm is presented for calculating $K^{*}$. The algorithm performs $O(\fracac{N}{B}+ \fracac{N}{B}\log_{M/B}(\fracac{K^*}{NB}))$ I/Os. When $K^{*}=O(NM)$, then the algorithm takes only $O(\fracac{N}{B})$ I/Os. This algorithm can be modified to match the state of the art for the comparison based model and the RAM model. \item In the RAM model, a linear-time algorithm is presented to obtain a tight estimate of $K^*$; specifically a value which lies with high probability in the range \\ $[(1-\fracac{\log N}{N^{1/4}})K^*,(1+\fracac{\log N}{N^{1/4}})K^*]$. The state of the art linear-time algorithm works for the {\em special} case where $L$ is a permutation, i.e., each $L(i)$ is a distinct integer in the range $[1,N]$. In this paper, we handle a {\em general} case where each $L(i)$ is a real number. \end{enumerate} \end{abstract} \section{Introduction} In this paper we revisit the classic database problem of counting inversions. The number of inversions, $K^*$, in a list $L=(L(1), L(2),\ldots, L(N))$ is defined as the number of pairs $i < j$ with $L(i) > L(j)$. Each value $L(i)$ is a real number. \subsection{Motivation} \noindent {\em Classical motivation.} Interest in studying the counting inversions problem has been shown by various communities in computer science. 
It is considered an important measure to test the ``sortedness'' of the data. For example, sorting data is a critical operation in large-scale applications. Typically, such applications have multiple sorting algorithms and they perform some ``tests'' on the data to decide the most suitable sorting algorithm (an insertion-sort type algorithm is fast if the data is almost sorted). One of the important tests happens to be counting inversions. We refer the reader to the book of Knuth \cite{knuth3} and the survey report of Estivill-Castro and Wood \cite{ew92} for a detailed discussion on how counting inversions is crucial to the engineering of a fast sorting algorithm. \noindent {\em Modern motivation.} Modern applications have revived the interest in the problem of counting inversions. We briefly mention the applications here: (a) The number of inversions between two permutations is important for {\em rank aggregation} in Internet-based applications \cite{dkns01}, and (b) The robustness of a {\em ranking function} (of database entries) can be tested via counting inversions. We strongly refer the reader to Ajtai {\em et al.} \cite{ajks02} for a nice detailed description of how modern applications benefit from counting inversions. \subsection{Previous work on counting inversions} {\em Non-adaptive algorithms.} The standard textbook solution for the counting inversions problem takes $O(n\log n)$ time by mergesort. There have been improvements over the $O(n\log n)$ time algorithm in the RAM model. Using Dietz's dynamic ranking structure \cite{d89}, counting inversions can be done in $O(n\log n/\log \log n)$ time. A few years ago, Chan and Patrascu \cite{cp10} significantly improved the running time to $O(n\sqrt{\log n})$. 
Interestingly, in the RAM model counting inversions seems to be harder than sorting: the best known deterministic sorting algorithm takes $O(N\log\log N)$ time~\cite{h04} and the best known randomized sorting algorithm takes $O(N\sqrt{\log\log N})$ expected time~\cite{ht02}. {\em Adaptive algorithms.} One approach to develop faster algorithms is to build solutions which adapt based on the number of inversions. Mehlhorn~\cite{m79} presented an $O\left(N + N\log\left(\frac{K^{*}}{N}\right)\right)$ time algorithm to count inversions in the comparison based model. Adapting the approach of Pagh, Pagh, and Thorup~\cite{ppt04}, Elmasry~\cite{e15} presented an $O\left(N + N\sqrt{\log\left(\frac{K^{*}}{N}\right)}\right)$ time algorithm in the RAM model. {\em Approximate algorithms.} The other approach to develop faster algorithms to count inversions is to approximate the value of $K^{*}$. To obtain faster algorithms, Andersson and Petersson \cite{ap98}, and Chan and Patrascu \cite{cp10} studied the approximate version of the counting inversions problem. If the number of inversions in the list is $K^{*}$, then their algorithm will report a value within an additive error of $\varepsilon K^{*}$. {\em Streaming setting.} The focus of this paper is the RAM model and the I/O-model. However, there are other interesting models in which this problem has been studied. For example, the last decade saw the streaming community getting interested \cite{ajks02,gz03}. \subsection{Our Results} In this paper, we present two new results on the problem of counting inversions. \noindent {\em Adaptive algorithm.} In the I/O-model we present an adaptive algorithm which counts the number of inversions using $O(\frac{N}{B} + \frac{N}{B}\log_{M/B}(\frac{K^{*}}{NB}))$ I/Os. Previously, such adaptive algorithms were known only in the comparison model~\cite{m79} and the RAM model~\cite{e15}. Neither of these solutions can be trivially modified to work efficiently in the I/O-model. 
For example, adapting the algorithm of \cite{e15} to the I/O-model requires $\Omega(N)$ I/Os, since it inserts one point at a time. Interestingly, our algorithm can be modified to match the state of the art for the comparison based model and the RAM model. In that sense, our algorithm subsumes the results of \cite{e15,m79}. Please see the appendix for a brief description of the I/O-model. \noindent {\em Approximate algorithm.} This problem is studied in the RAM model. We present an $O(N)$ time algorithm which reports a value in the range \\ $\left[ \left(1-\frac{\log N}{N^{1/4}}\right)K^{*},\left(1+ \frac{\log N}{N^{1/4}}\right)K^{*}\right]$. The estimate is correct with probability $1-1/N^c$, where $c$ is a constant independent of $N$. Chan and Patrascu \cite{cp10} also presented an $O(N)$ time algorithm for this problem. However, their solution works only for the special case where $L$ is a permutation, i.e., each $L(i)$ is a distinct integer in the range $[1,N]$. Because they consider a permutation, they make use of the Spearman's Footrule~\cite{dg77} which already gives a $2$-factor approximation of $K^{*}$. In this paper, we study the more challenging setting where each element $L(i)$ is a real number. A new approach is needed to handle this setting. \section{Red-blue dominance counting} We start by defining the {\em red/blue dominance counting problem}. We are given a red list $R=(R(1), R(2),\ldots, R(n))$ and a blue list $B=(B(1), B(2),\ldots, B(n))$. Each element in $R$ is mapped to a two-dimensional point: $R(i)$ is mapped to a point $(i,R(i))$. Similarly, each element, say $B(i)$, in $B$ is mapped to a point $(i, B(i))$. A pair $(r,b)$ is called a {\em domination pair} if $r$ is a red point dominated by a blue point $b$. A blue point $b$ dominates a red point $r$ if $b$ has a larger $x$-coordinate than $r$ and $b$ has a smaller $y$-coordinate than $r$ (see Figure~\ref{fig:main-figure}(a)). 
Throughout the paper, we will interpret $R$ and $B$ as one of the following: (1) a list of $N$ elements storing real-values, or (2) a pointset in two-dimensional plane. It will be clear from the context which interpretation is being taken. Let $K^{*}$ be the number of domination pairs in $R$ and $B$. Counting inversions is a special case of this problem by letting the red point set be equal to the blue point set. In this paper, we present two results for the red-blue dominance counting problem. \begin{theorem}\label{thm:main-1} {\em (Adaptive algorithm)} Red-blue dominance counting problem can be solved using \\ $O(\fracac{N}{B} + \fracac{N}{B}\log_{M/B}(\fracac{K^{*}}{NB}))$ I/Os, where $K^{*}$ is the number of domination pairs. When $K^{*}=O(NM)$, then the algorithm uses only $O(\fracac{N}{B})$ I/Os. This problem is studied in the $I/O$-model. \end{theorem} \begin{theorem}\label{thm:approx} {\em (Approximate algorithm)} Red-blue approximate dominance counting problem can be solved in $O(N)$ time. For a fixed constant $c$, with probability $1-1/N^c$ the algorithm will report a value in the range \\ $\left[ \left(1-\fracac{\log N}{N^{1/4}}\right)K^{*},\left(1+ \fracac{\log N}{N^{1/4}}\right)K^{*}\right]$. This problem is studied in the RAM-model. \end{theorem} \begin{figure*}\label{fig:main-figure} \end{figure*} In Section~\ref{sec:red-blue} we will define the concept of red-blue cells along with their properties. At first look, it might not be clear to the reader as to why we need red-blue cells. Then in Section~\ref{sec:adaptive} we will make use of them to obtain the adaptive algorithm and then in Section~\ref{sec:approx} we will use them along with random sampling techniques to obtain the approximate algorithm. \section{Construction of Red-Blue Cells}\label{sec:red-blue} Given the lists $R$ and $B$, and a parameter $K$, we want to construct a set of {\em red-blue cells} $C_1, C_2,\ldots,C_\ell$. 
A red-cell is a rectangle of the form $(-\infty,x) \times (y,\infty)$, and a blue-cell of the form $(x,\infty) \times (-\infty,y)$. With each cell $C_i$ we associate a set of red points $R_i \subseteq R$ and a set of blue points $B_i \subseteq B$. Consider the following two cases: \noindent (1) If $K^{*} \leq K$, then we want to construct red-blue cells which satisfy the following {\em three} properties: \begin{enumerate}[label=\Alph*)] \item $\forall i \in [1,\ell], \min\{|R_i|,|B_i|\}=O(K/N)$. \item For every domination pair $(r,b)$ there will exist exactly a single integer $i$ such that $b\in B_i$ and $r \in R_i$. \item $\sum_{i=1}^{\ell} |R_i|=O(N)$, and $\sum_{i=1}^{\ell} |B_i|=O(N)$. \end{enumerate} \noindent (2) If $K^{*} >K$, then we either construct the cells with the properties described above, or we are allowed to report a {\em failure}. \begin{lemma}\label{lem:cells} The red-blue cells can be constructed in $O(N/B)$ I/Os. \end{lemma} The rest of this section is dedicated to proving Lemma~\ref{lem:cells}. \subsection{First step: Red cells} {\em Shallow cuttings} for various geometric objects are widely used in computational geometry to answer range searching and related problems (for example, \cite{ac09,m92}). Shallow cuttings as described in this section have been used before by Vengroff and Vitter \cite{vv96}. On the technical side, our key contribution is a novel and a non-trivial application of shallow cuttings. Consider a red pointset $R$. Informally, a $k$-shallow cutting on the pointset $R$ has the form of a ``staircase'' which is a one-dimensional, monotone sequence of orthogonal line-segments. Formally, a $k$-shallow cutting is a curve ${\cal C}=c_1d_1c_2d_2\ldots d_{t-1}c_t$ of alternating horizontal line segments $c_id_i=[x(c_i),x(d_i)] \times [y(c_i)]$ and vertical line segments $d_ic_{i+1}=[x(d_i)] \times [y(c_{i+1}), y(d_i)]$. See Figure~\ref{fig:main-figure}(b). 
The points $c_1,c_2,\ldots,c_t$ are called {\it outward corners} and the points $d_1,d_2,\ldots,d_{t-1}$ are called {\it inward corners}. With each outward corner $c_i=(x,y)$, we associate a {\em cell} $C_i =(-\infty,x) \times (y,+\infty)$. If a point $q$ is dominated by at least one outward corner, then $q$ is said to lie {\it above} the curve ${\cal C}$. On the other hand, if a point $q$ dominates at least one inward corner, then $q$ is said to lie {\it below} the curve ${\cal C}$. The curve ${\cal C}$ has the following properties: \begin{enumerate} \item Every point on curve ${\cal C}$ dominates at least $k$ points in $R$, but it dominates no more than $2k$ points in $R$. \item If a point $q$ dominates less than $k$ points of $R$, then $q$ lies above the curve ${\cal C}$. \item $t=O(n/k)$, i.e., the number of cells are no more than $O(n/k)$. \end{enumerate} \begin{lemma}\label{lem:sc} The $k$-shallow cutting on $R$ can be constructed using $O(N/B)$ I/Os. The inward and the outward corners are reported in increasing order of their $x$-coordinate value. \end{lemma} \begin{proof} There exists a simple algorithm to construct the $k$-shallow cutting on $R$. The details of this construction can be found in \cite{vv96}. \end{proof} \noindent {\em Algorithm.} Given $R$ and $B$, we construct the first set of cells, which we call {\em red cells}. \begin{enumerate} \item Construct a $\left\lceileil\fracac{2K}{N}\right\rceileil$-shallow cutting ${\cal C}_r$ on $R$. \item For each blue point $b\in B$ check if it lies on/below or above the curve ${\cal C}_r$. If $b$ lies on/below ${\cal C}_r$, then it is classified as {\em deep}. Otherwise, it is classified as {\em shallow} and $b$ is {\em assigned} to any arbitrary cell in the cutting containing it. \item Delete all the shallow blue points from the dataset. If the number of deep blue points are greater than $N/2$, then we report a {\em failure} and stop the algorithm concluding that $K^{*} > K$. 
Otherwise, we go to the next step. \item For each cell $C_i$ we define $R_i$ to be the set of red points which lie in that cell, and $B_i$ to be the set of blue points assigned to that cell. \end{enumerate} \noindent {\em Analysis.} Now we analyze the running time of the above algorithm. Using Lemma~\ref{lem:sc}, step~$1$ can be performed in $O(N/B)$ I/Os. Step~$2$ is performed as follows: for each blue point (say $b$) find the outward corner (say $c_i$) immediately to its right. $b$ is assigned to cell $C_i$ if it lies in the cell of $C_i$; otherwise $b$ lies on/below ${\cal C}_r$ and is classified as deep. The blue points can be assigned using $O(N/B)$ I/Os since the blue points and the outward corners are already sorted along $x$-axis. Next we show that when $K^{*} \leq K$ then the algorithm does not report a failure. Each deep blue point dominates $\geq \left\lceileil\fracac{2K}{N}\right\rceileil$ red points (by Property~$2$ of shallow cuttings). Since there are at most $K^{*}$ domination pairs, the number of deep blue points is $\leq \fracac{K^{*}}{\lceileil\fracac{2K}{N}\rceileil} \leq \fracac{K}{\lceileil\fracac{2K}{N}\rceileil} \leq N/2$. Hence, the algorithm will not report failure when $K^{*} \leq K$. Now we prove that none of the three properties of the red-blue cells have been violated. For each cell $C_i$, the outward corner $c_i$ dominates $O(K/N)$ red points. Therefore, $|R_i|=O(K/N)$ and hence, property (A) is not violated. By step~$4$ of our algorithm, we ensure property (B) for every domination pair $(r,b)$ where $b$ is a shallow point. The other domination pairs will be taken care of in the next steps. Since each point in $B$ is assigned to exactly one cell, $\sum_{i=1}^{\ell} |B_i|\leq N$. By Property~$3$ of shallow cuttings, the number of cells constructed is $O(N^2/K)$, and by Property~$1$ of shallow cuttings, each cell contains $O(K/N)$ red points. Therefore, $\sum_{i=1}^{\ell} |R_i|=O(N^2/K) \times O(K/N)=O(N)$. 
Therefore, property (C) has not yet been violated. \noindent {\em Remark.} Note that a red point in $R$ can belong to many $R_i$'s, whereas a shallow blue point in $B$ will belong to exactly one $B_i$. \subsection{Second step: Blue cells} After the first step, all the domination pairs $(r,b)$ involving the shallow blue points have been taken care of. In the next two steps, we discuss how to build additional cells which will capture domination pairs involving the deep blue points. We will use shallow cuttings again, but this time we will change the orientation of our cells. A $k$-shallow cutting on the deep blue points is a curve ${\cal C}=c_1d_1c_2d_2\ldots d_{t-1}c_t$ of alternating vertical line segments $c_id_i=[x(c_i)] \times [y(d_i),y(c_i)]$ and horizontal line segments \\ $d_ic_{i+1}=[x(c_{i+1}),x(d_i)] \times [y(d_i)]$. See Figure~\ref{fig:main-figure}(c). The points $c_1,c_2,\ldots,c_t$ are called {\it outward corners} and the points $d_1,d_2,\ldots,d_{t-1}$ are called {\it inward corners}. With each outward corner $c_i=(x,y)$, we associate a {\em blue cell} $C_i =(x,\infty) \times (-\infty,y)$. If a point $q$ is dominated by at least one inward corner, then $q$ is said to lie {\it above} the curve ${\cal C}$. On the other hand, if a point $q$ dominates at least one outward corner, then $q$ is said to lie {\it below} the curve ${\cal C}$. The curve ${\cal C}$ should have the following properties: \begin{enumerate} \item Every point on curve ${\cal C}$ is dominated by at least $k$ deep blue points, but it is dominated by no more than $2k$ deep blue points. \item If a point $q$ is dominated by less than $k$ deep blue points, then $q$ lies below the curve ${\cal C}$. \item $t=O(n/k)$. \end{enumerate} \noindent {\em Algorithm.} Let $B_D$ be the set of deep blue points. Given $R$ and $B_D$, the following steps are performed: \begin{enumerate} \item Construct $\lceileil\fracac{2K}{N}\rceileil$-shallow cutting ${\cal C}_b$ on all the deep blue points. 
\item For each red point $r$ check if it lies on/above or below the curve ${\cal C}_b$. If $r$ lies on/above ${\cal C}_b$, then it is classified as {\em deep}. Otherwise, it is classified as {\em shallow} and $r$ is assigned to any arbitrary cell in the cutting containing it. \item If the number of deep red points are greater than $N/2$, then we report a {\em failure} and stop the algorithm concluding that $K^{*} > K$. Otherwise, we go to the next step. \item For each cell $C_i$ we define $B_i$ to be the set of deep blue points which lie in that cell, and $R_i$ to be the set of red points assigned to that cell. \end{enumerate} Following the analysis from the previous step, the number of I/Os performed in this step is also bounded by $O(N/B)$, and it can be shown that none of the properties of the red-blue cells are violated yet. \subsection{Third step: Recursion} In the second step, all the domination pairs $(r,b)$ such that $r$ is a shallow red point and $b$ is a deep blue point will be taken care of. After the first two steps, now we are left with deep red points $R_D$ and deep blue points $B_D$. We know that $|R_D| < N/2$ and $|B_D| < N/2$; else a failure would have been reported. \noindent {\em Algorithm.} Recurse on $R_D$ and $B_D$, and all occurrences of $N$ in the algorithm are replaced with $N/2$. The algorithm stops when the red and the blue set is smaller than a suitable constant $C$. Let $T(N)$ denote the total number of I/Os performed by this algorithm. Then, \[ T(N) \leq \left\{\begin{array}{ll} O(1) & \mbox{if $n \leq C$};\\ O\left(\fracac{N}{B}\right) + T(N/2) & \mbox{otherwise}. \end{array}\right.\] Solving this recurrence we get $T(N) = O\left(\fracac{N}{B}\right)$. By a similar recurrence, Property~(C) of red-blue cells is satisfied. It is easy to verify that Property~(A) and (B) are also satisfied. This finishes the proof of Lemma~\ref{lem:cells}. 
\section{The Adaptive Algorithm} \label{sec:adaptive} Now we are ready to prove Theorem~\ref{thm:main-1}. \subsection{First step: A non-adaptive algorithm} The first step in building our adaptive solution is the construction of a {\em non-adaptive} algorithm. \begin{theorem}\label{thm:non-adaptive} Consider a list $R$ of $N_r$ elements and a list $B$ of $N_b$ blue elements. Then there exists a non-adaptive algorithm for the red-blue dominance counting problem which requires $O\left(\frac{N}{B}\log_{M/B}\left(\frac{\min\{N_r,N_b\}}{B}\right)\right)$ I/Os, where $N=N_r + N_b$. \end{theorem} \begin{proof} We will only give a high-level description of this algorithm. Most of the details are fairly standard. Without loss of generality, assume that $N_r=\min\{N_r,N_b\}$. As in distribution sort, in $O(N_r/B)$ I/Os the list $R$ is split into $\Theta\left(\sqrt{\frac{M}{B}}\right)$ lists $R_1, R_2,\ldots,R_f$ of roughly equal size, such that for any $i< j$, any element in $R_i$ is smaller than any element in $R_j$. The order of the elements in any $R_i$ is consistent with their order in $R$. An element in $B$ is defined to {\em belong} to a set $R_i$ if the value of the blue element lies between the value of the smallest and the largest element in $R_i$. By performing a synchronized scan of all the $R_i$'s, in $O(N_b/B)$ I/Os, for each element in $B$ (say it belongs to $R_i$) we can compute the number of red points in $\bigcup_{j=i+1}^{f} R_j$ it dominates. Finally, $\forall i\in [1,f]$, we recurse on $R_i$ and the set of blue points which belong to $R_i$. The number of levels of recursion will be $O(\log_{M/B}\frac{N_r}{B})$. \end{proof} \subsection{Second step: $K$-capped structure} Now we will solve the {\em $K$-capped red-blue dominance counting} problem: Given a set $R$ of $N$ red points, a set $B$ of $N$ blue points, and a value $K$, we need to compute $K^{*}$, but if $K^{*} > K$, then we are allowed to report {\em failure}. We will prove the following result. 
\begin{theorem}\label{thm:k-capped} $K$-capped red-blue dominance counting problem can be solved using $O\left(\fracac{N}{B} + \fracac{N}{B}\log_{M/B}(\fracac{K}{NB})\right)$ I/Os. \end{theorem} Now we prove Theorem~\ref{thm:k-capped}. \noindent {\em Algorithm.} Using Lemma~\ref{lem:cells}, construct red-blue cells on $R$ and $B$ with parameter $K$. If Lemma~\ref{lem:cells} reports a failure, then we stop the algorithm. Otherwise, we obtain a set of cells $C_1,\ldots, C_{\ell}$. For each $i\in [1,\ell]$, based on $R_i$ and $B_i$ associated with $C_i$, we run the non-adaptive algorithm of Theorem~\ref{thm:non-adaptive}. Finally, add up the count obtained from all the cells. \noindent {\em Analysis.} The number of $I/Os$ performed will be bounded by \begin{align*} &\sum_{i=1}^{\ell} O\left( \left(\fracac{|R_i|+ |B_i|}{B} \right)\log_{M/B}\left( \fracac{\min\{N_r,N_b\}}{B}\right) \right) \\ &\leq \left(\log_{M/B}\fracac{K}{NB}\right) \sum_{i=1}^{\ell} O\left( \fracac{|R_i|+ |B_i|}{B} \right) \quad \text{by property (A)}\\ &\leq O\left(\fracac{N}{B}\log_{M/B}\left(\fracac{K}{NB}\right)\right) \quad \text{by property (C)} \end{align*} \subsection{Third step} Using a trick from the computational geometry literature, the solution to the $K$-capped red-blue dominance counting problem (Theorem~\ref{thm:k-capped}) can be used to efficiently solve the red-blue dominance counting problem (Theorem~\ref{thm:main-1}). We use Chan's guessing trick from \cite{c96b}. The algorithm is executed as a series of rounds. In round~$i$ (starting from $i=1$), we construct the $K_i$-capped structure of Theorem~\ref{thm:k-capped} for \[K_i=(NB)\cdot \left(\fracac{M}{B}\right)^{\cdot2^i}\] If the algorithm returns the value of $K^{*}$, then we are done and the algorithm terminates. Otherwise, we proceed to round~$i+1$. Let $j$ be the number of rounds performed before termination. If $j=1$ then the number of I/Os performed is $O(N/B)$. 
Otherwise, if $j >1$ then in round $j-1$ since we reported failure, $K^{*} > (NB)\cdot \left(\fracac{M}{B}\right)^{\cdot2^{j-1}} \implies 2^j < 2\log_{M/B}\fracac{K^*}{NB}$. The total number of I/Os performed in all the $j$ rounds is bounded by $\sum_{i=1}^{j}O\left(\fracac{N}{B}\log_{M/B}(\fracac{K_i}{NB})\right) =\sum_{i=1}^{j}O\left(\fracac{N}{B}\cdot 2^{i}\right) =O\left(\fracac{N}{B}\cdot 2^{j}\right) \\ = O\left(\fracac{N}{B}\log_{M/B}(\fracac{K^{*}}{NB})\right)$. \noindent {\em Remark.} This algorithm can be modified to match the state of the art adaptive algorithms for the comparison based model~\cite{m79} and the RAM model~\cite{e15}. This involves replacing the non-adaptive I/O-model algorithm of Theorem~\ref{thm:non-adaptive} with the non-adaptive algorithm in the comparison based model which takes $O(N\log N)$ time and the non-adaptive algorithm in the RAM model~\cite{cp10}. \section{The Approximation Algorithm}\label{sec:approx} In this section we will prove Theorem~\ref{thm:approx}. Our solution is based on an interesting combination of random sampling and red-blue cells. The number of domination pairs, $K^{*}$, can lie in the range $[0,N^2)$. We will split the solution into three different cases and handle each of them separately. \subsection{When $K^{*}\in [0,N]$} By setting $M$ and $B$ to be appropriate constants, the I/O-model solution of Theorem~\ref{thm:k-capped} maps to the RAM model. We obtain the following result. \begin{lemma}\label{lem:ram} $K$-capped red-blue dominance counting problem can be solved in $O\left(N+N\log_2(\fracac{K}{N})\right)$ time in the RAM model. \end{lemma} Using Lemma~\ref{lem:ram} with $K=N$, we can either obtain the exact number of inversions in $O(N)$ time, or it will report a failure which implies that $K^* > N$. 
\subsection{When $K^* \in [N, N\sqrt{N}\log N]$} \noindent {\em Algorithm.} The following steps are performed: \noindent (1) Construct the red-blue cells for parameter $K = N\sqrt{N}\log N$ using Lemma~\ref{lem:cells}. If a failure is reported, then we conclude that $K^{*} > N\sqrt{N}\log N$ and stop the algorithm. Otherwise, go to the next step. \noindent (2) Pick $N$ samples. Each sample is a pair $(r,b)$ such that if $b\in B_i$ then $r\in R_i$. Each sample is picked by the following three stage process: \begin{enumerate} \item Pick a set $B_i$. A set $B_i$ is sampled with probability $\fracac{|R_i||B_i|}{\sum_{i=1}^{\ell} |R_i||B_i|}$. \item Sample a point in $B_i$. Each point in $B_i$ is sampled with probability $\fracac{1}{|B_i|}$. \item Sample a point in $R_i$. Each point in $R_i$ is sampled with probability $\fracac{1}{|R_i|}$. \end{enumerate} \noindent (3) Let $X$ be the number of samples which are domination pairs. Then we report $X\cdot C\sqrt{N}\log N$ as the answer, where the constant $C$ is defined later. \begin{lemma} Consider a pair $(r,b)$ such that $r\in R_i$ and $b\in B_i$. The probability of the pair $(r,b)$ being picked is $\fracac{1}{\sum_{i=1}^{\ell} |R_i||B_i|}$, i.e., each pair is picked with equal probability. \end{lemma} \begin{proof} The probability of the pair $(r,b)$ being picked is $\fracac{|R_i||B_i|}{\sum_{i=1}^{\ell} |R_i||B_i|} \times \fracac{1}{|B_i|} \times \fracac{1}{|R_i|}=\fracac{1}{\sum_{i=1}^{\ell} |R_i||B_i|}$ \end{proof} \begin{lemma} The sample space is $O(N\sqrt{N}\log N)$. In other words, $\sum_{i=1}^{\ell} |R_i||B_i| =O(N\sqrt{N}\log N)$. \end{lemma} \begin{proof} We split the summation $\sum_{i=1}^{\ell} |R_i||B_i|$ into two disjoint summations: one in which $|R_i|=\min\{ |R_i|,|B_i|\}$, and other one in which $|B_i|=\min\{ |R_i|,|B_i|\}$. 
Consider the first summation: \begin{align*} \sum_{i=1}^{\ell} |R_i||B_i| &\leq O(K/N)\sum_{i=1}^{\ell} |B_i|\quad \text{by property (A)} \\ &\leq O(K/N) \cdot O(N) \quad \text{by property (C)}\\ &= O(N\sqrt{N}\log N) \end{align*} The same bound can be shown for the other summation as well. \end{proof} \begin{lemma} For a fixed constant $c$, with high probability $1-1/N^c$, the estimate will lie in the range \\ $\left[ \left(1-\fracac{\log N}{N^{1/4}}\right)K^{*},\left(1+ \fracac{\log N}{N^{1/4}}\right)K^{*}\right]$. \end{lemma} \begin{proof} Recall that $X$ is the number of domination pairs picked in the $N$ samples. For $i \in [1,N]$, define $X_i=1$ if the $i$-th sample picked is a domination pair; otherwise $X_i=0$. Therefore, $X=\sum_{i=1}^N X_i$. The expected value of $X$, i.e., $E[X]$ will be equal to \\ $N\cdot\fracac{K^{*}}{\sum_{i=1}^{\ell} |R_i||B_i|}=N\cdot\fracac{K^{*}}{CN\sqrt{N}\log N}=\fracac{K^{*}}{C\sqrt{N}\log N}$, where $C$ is the constant inside $O(N\sqrt{N}\log N)$. To apply Chernoff bounds, we need to perform the following set of calculations. Set a parameter $\mathbf{var}epsilon = \fracac{\log N}{N^{1/4}}$ and use the fact that $K^{*} \geq N$, to observe that \begin{align*} \mathbf{var}epsilon^2 E[X] = \mathbf{var}epsilon^2 \fracac{K^{*}}{C\sqrt{N}\log N} > \mathbf{var}epsilon^2 \fracac{\sqrt{N}}{C\log N} = \fracac{\log N}{C} \\ \end{align*} By applying Chernoff bounds, we get \begin{align*} \textbf{Pr}\bigg[ \bigg|X-E[X]\bigg|>\mathbf{var}epsilon E[X]\bigg] &< e^{-\Omega(\mathbf{var}epsilon^2 E[X])} < e^{-\Omega(\log N)} < N^{-c} \end{align*} \end{proof} \subsection{When $K^{*} \in [N\sqrt{N}\log N, N^2]$} \noindent {\em Algorithm.} The following steps are performed: \noindent (1) Pick $N$ random samples. Each sample is of the form $(r,b)$ where $r\in R$ and $b\in B$. Each red point in $R$ is picked with probability $\fracac{1}{N}$ and each blue point in $B$ is picked with probability $\fracac{1}{N}$. 
\noindent (2) Let $X$ be the number of samples which are domination pairs. Then we report $X\cdot N$ as the answer. \begin{lemma} Let $c$ be a sufficiently large constant. Then with high probability $1-1/N^c$, the estimate will lie in the range $\left[ \left(1-\frac{1}{N^{1/4}}\right)K^{*},\left(1+ \frac{1}{N^{1/4}}\right)K^{*}\right]$. \end{lemma} \begin{proof} Let $X$ be the number of domination pairs picked in the $N$ samples. For $i \in [1,N]$, define $X_i=1$ if the $i$-th sample picked is a domination pair; otherwise $X_i=0$. Therefore, $X=\sum_{i=1}^N X_i$. Now, $E[X]=N\cdot\frac{K^{*}}{N^2}=\frac{K^{*}}{N}$. To apply Chernoff bounds, we need to perform the following set of calculations. Set a parameter $\varepsilon = 1/N^{1/4}$ and use the fact that $K^{*} \geq N\sqrt{N}\log N$, to observe that \begin{align*} \varepsilon^2 E[X] = \varepsilon^2 \frac{K^{*}}{N} > \varepsilon^2 \frac{N\sqrt{N}\log N}{N} =\log N \end{align*} By applying Chernoff bounds, we get \begin{align*} \Pr\bigg[ \bigg|X-E[X]\bigg|>\varepsilon E[X]\bigg] &< e^{-\Omega(\varepsilon^2 E[X])} < e^{-\Omega(\log N)} < N^{-c} \end{align*} \end{proof} \section*{Appendix: I/O-model} In this model~\cite{av88}, a machine is equipped with $M$ words of main memory, and a disk that has been formatted into {\em blocks} of $B$ words each. The values of $M$ and $B$ satisfy $M \ge 2B$. An I/O either reads a disk block into memory, or writes $B$ words of memory into a disk block. The {\em time} of an algorithm is measured in the number of I/Os performed, while the {\em space} is measured in the number of disk blocks occupied. \begin{comment} \section*{Appendix: Construction of a $k$-shallow cutting} A $k$-shallow cutting on a set $P$ of $n$ points in the plane can be constructed in $O(n)$ time as shown in the algorithm below: \noindent (1) Set $c_1\longleftarrow (2k,\infty)$.\\ (2) Set $CL(c_1)$ $\longleftarrow$ conflict list of cell of $c_1$. 
Also, set $i \longleftarrow 0$.\\ (3) {\bf do} \\ (4) $i \longleftarrow i+1$\\ (5) Start a vertical segment from $c_i$ till some point $d_{i}$ is found which dominates $k$ points of $P$.\\ (6) Set $CL(d_i)$ $\longleftarrow$ conflict list of cell of $d_i$.\\ (7) Start a horizontal segment from $d_i$ till some point $c_{i+1}$ is found which dominates $2k$ points of $P$.\\ \hspace*{0.35 in} (7a) If such a point $c_{i+1}$ is found, then define $CL(c_{i+1})$ $\longleftarrow$ conflict list of cell of $c_{i+1}$.\\ \hspace*{0.35 in} (7b) Otherwise, set $c_{i+1} \longleftarrow (\infty,y(d_i))$ and stop the algorithm. \\ (8) {\bf od}\\ (9) Report the $k$-shallow cutting ${\cal C} \longleftarrow c_1d_1c_2d_2\ldots c_t$.\\ Now we analyse the running time of the above algorithm. First, let us compute the construction time of a horizontal segment $d_ic_{i+1}$ at step~$7$. Starting from the smallest $x$-coordinate point in $P$ to the right of $d_i$, we ``visit'' points of $P$ in increasing order of their $x$-coordinate value till some point $c_{i+1}$ is found which dominates $2k$ points of $P$. The number of points visited is $n_i + k$, where $n_i$ is the number of points lying on/above the segment $d_ic_{i+1}$ and $k$ is the number of points lying below the segment $d_ic_{i+1}$ (except possibly the last segment $d_{t-1}c_t$ where $<k$ points of $P$ can lie below). Since the curve ${\cal C}$ is monotone, the points of $P$ visited during the construction of any two horizontal segments are disjoint. Therefore, the number of horizontal segments in ${\cal C}$ is bounded by $O(n/k)$. The time spent in the construction of all the horizontal segments is bounded by $O(\sum_{i=1}^{t-1}(n_i +k))=O((\sum_{i=1}^{t-1} n_i) + tk)=O(n)$. Now let us compute the construction time of a vertical segment $c_id_i$ at step~$5$ and a conflict list $CL(d_i)$ at step~$6$. The $y$-coordinate of $d_i$ is set to any value between the $k$th-largest and the $(k+1)$th-largest $y$-coordinate in $CL(c_i)$. 
We run the standard linear-time selection algorithm on $CL(c_i)$ to find the $k$th- and $(k+1)$th-largest $y$-coordinate. Then $CL(d_i)$ is constructed by pruning those points in $CL(c_i)$ whose $y$-coordinate is greater than or equal to $y(d_i)$. All these steps can be performed in $O(k)$ time. Since the number of vertical segments in ${\cal C}$ is also bounded by $O(n/k)$, the time spent in the construction of all the vertical segments is bounded by $O(n)$. \end{comment} \end{document}
\begin{document} \title{Wave propagation in one-dimensional quasiperiodic media} \begin{abstract} This work is devoted to the resolution of the Helmholtz equation $-(\mu\, u')' - \rho\, \omega^2 u = f$ in a one-dimensional unbounded medium. We assume the coefficients of this equation to be local perturbations of \textit{quasiperiodic} functions, namely the traces along a particular line of higher-dimensional periodic functions. Using the definition of quasiperiodicity, the problem is lifted onto a higher-dimensional problem with periodic coefficients. The periodicity of the augmented problem allows us to extend the ideas of the DtN-based method developed in \cite{flissthese, jolyLiFliss} for the elliptic case. However, the associated mathematical and numerical analysis of the method are more delicate because the augmented PDE is degenerate, in the sense that the principal part of its operator is no longer elliptic. We also study the numerical resolution of this PDE, which relies on the resolution of Dirichlet cell problems as well as a constrained Riccati equation. \end{abstract} \section{Introduction and motivation}\label{sec:introduction_motivation} We consider the Helmholtz equation \begin{equation} \label{eq:whole_line_problem} \displaystyle - \frac{d}{d x} \Big( \mu \; \frac{d u}{d x} \Big) - \rho \; \omega^2 \; u = f \quad \textnormal{in} \quad \R, \end{equation} where the coefficients $\mu$ and $\rho$ have positive upper and lower bounds: \begin{equation}\label{eq:coef_ellipt} \displaystyle \exists\; \mu_\pm, \rho_\pm, \quad \forall\; x \in \R, \qquad 0 < \mu_- \leq \mu(x) \leq \mu_+ \quad \textnormal{and} \quad 0 < \rho_- \leq \rho(x) \leq \rho_+. \end{equation} The source term $f$ belongs to $L^2(\R)$ and is assumed to have a compact support: \begin{equation} \label{eq:source_terme} \exists\; a>0,\quad \supp f \subset (-a, a). 
\end{equation} \noindent Equation \eqref{eq:whole_line_problem} is encountered when one is looking for time-harmonic solutions $u(x)\, e^{\ensuremath{\mathrm{i}} \omega t}$ of the linear wave equation in heterogeneous media. For real frequencies $\omega$, the well-posedness of this problem is unclear. In fact, on one hand, one expects that the physical solution $u$, if it exists, may not belong to $H^1(\R)$ due to possible wave propagation phenomena and a lack of decay at infinity. On the other hand, uniqueness of a solution in $H^1_{\textit{loc}}(\R)$ does not hold in general. In this case, one needs a so-called \emph{radiation condition} that imposes the behaviour of the solution at infinity. Such a condition can be obtained in practice using the \emph{limiting absorption principle}, which consists in (\emph{i}) adding some absorption -- that is some imaginary part to $\omega$: $\mathop{\mathfrak{Im}}\nolimits \omega$, and (\emph{ii}) studying the limit of the solution $u \equiv u(\omega)$ as the absorption tends to $0$. The limiting absorption principle is a classical approach to study time-harmonic wave propagation problems in unbounded domains; see for instance \cite{agmon1975spectral, eidus1986limiting, wilcox1966wave}. More recently, it has been successfully applied for locally perturbed periodic media \cite{flissthese, hoang2011limiting, kirsch2018radiation, radosz2015new}. \noindent In this paper, we will only address the case with absorption, that is \begin{equation}\label{eq:dissipation} \text{the frequency }\omega \text{ satisfies }\mathop{\mathfrak{Im}}\nolimits \omega > 0. \end{equation} Under these assumptions, \eqref{eq:whole_line_problem} admits a unique solution in $H^1(\R)$ by Lax--Milgram's theorem. 
Moreover, it can be shown (using for instance an argument similar to the one in \cite{combes1973asymptotic}) that this solution satisfies a sharp exponential decay property \begin{equation}\label{eq:exp_decay} \exists\; c,\,\alpha > 0, \quad \forall\; x \in \R, \quad |u(x)|\leq c \,\euler^{-\alpha \mathop{\mathfrak{Im}}\nolimits \omega|x|}. \end{equation} Exploiting \eqref{eq:exp_decay}, a naive numerical method for treating the unboundedness would consist in truncating the computational domain (with homogeneous Dirichlet boundary conditions for instance) at a certain distance related to $\mathop{\mathfrak{Im}}\nolimits \omega$. However the cost and the accuracy of the method would deteriorate when $\mathop{\mathfrak{Im}}\nolimits \omega$ tends to $0$. Our objective in this paper is to develop a numerical method which is robust when $\mathop{\mathfrak{Im}}\nolimits \omega$ tends to $0$, in the particular case of locally perturbed quasiperiodic media. More precisely, we solve the problem in the bounded domain $(-a,a)$ (which is independent of $\mathop{\mathfrak{Im}}\nolimits \omega$) by constructing transparent boundary conditions of Dirichlet-to-Neumann type: \begin{equation}\label{eq:DtN_coef} \displaystyle \pm\; \mu\; \frac{d u}{d x} + \lambda^\pm \; u = 0 \quad \textnormal{on} \quad x = \pm a, \end{equation} where $\lambda^\pm$ are called \emph{Dirichlet-to-Neumann} (DtN) coefficients. These coefficients are defined by \begin{equation} \displaystyle \lambda^\pm = \mp\; \Big[\mu\; \frac{d u^\pm}{d x}\Big](\pm a), \label{eq:DtN_coefficients} \end{equation} where $u^\pm$ is the unique solution in $H^1(\pm a, \pm \infty)$ of \begin{equation} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \frac{d}{d x} \Big( \mu \; \frac{d u^\pm}{d x} \Big) - \rho \; \omega^2 \; u^\pm &=& 0, \quad \textnormal{for}& \pm x > a, \\[8pt] \displaystyle u^\pm (\pm a) &=& 1. \end{array} \right. 
\label{eq:half_line_problems_0} \end{equation} Knowing $\lambda^\pm$, one is then reduced to compute $u|_{(-a,a)}$ by solving the problem \begin{equation} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \frac{d}{d x} \Big( \mu \; \frac{d u^i}{d x} \Big) - \rho \; \omega^2 \; u^i &=& f, \quad \textnormal{for} & x \in (-a, a), \\[10pt] \displaystyle \Big[\pm \mu\; \frac{d u^i}{d x} + \lambda^\pm \; u^i\Big](\pm a) &=& 0. \end{array} \right. \label{eq:interior_problem} \end{equation} The well-posedness of this problem is a direct consequence of the sign property \[ \mathop{\mathfrak{Im}}\nolimits \lambda^\pm < 0, \] which, through a Green's formula, results itself from the presence of dissipation \eqref{eq:dissipation} in \eqref{eq:half_line_problems_0}. Then the solution $u$ of \eqref{eq:whole_line_problem} is given by \begin{equation}\label{eq:solution_of_whole_line_problem} \forall\; x \in \R, \quad u(x) = \left\{ \begin{array}{c@{\quad}l} \displaystyle u^i(-a)\; u^-(x), & x < -a,\\[6pt] \displaystyle u^i(x), & x \in (-a, a),\\[6pt] \displaystyle u^i(a)\; u^+(x), & x > a. \end{array} \right. \end{equation} In general, the problem is that computing $\lambda^\pm$, that is to say solving \eqref{eq:half_line_problems_0}, is as difficult as the original problem. However, this is no longer true when the exterior medium (\emph{i.e.} outside $(-a,a)$) has a certain structure: \begin{itemize} \item if the exterior medium is homogeneous ($\rho$ and $\mu$ are constant), these coefficients can be computed explicitly; \item if the exterior medium is periodic ($\rho$ and $\mu$ are periodic), several methods for the computation of these DtN coefficients are developed in \cite{flissthese, jolyLiFliss, kirsch2018radiation}; \item if the exterior medium is a weakly random perturbation of a periodic medium, the coefficients can be approximated via an asymptotic analysis; see \cite{fliss_giovangigli}. 
\end{itemize} Our main objective in this paper is to compute the DtN coefficients for a quasiperiodic exterior medium, in order to develop a numerical method according to \eqref{eq:half_line_problems_0}, \eqref{eq:interior_problem}, \eqref{eq:solution_of_whole_line_problem}. \noindent The outline of the rest of the paper is as follows. In Section \ref{sec:quasiperiodicity}, we introduce the fundamental notion of quasiperiodic functions (in $1$D) and define what is a locally perturbed quasiperiodic medium in the context of the problem \eqref{eq:whole_line_problem}. Sections \ref{sec:the_half_line_quasiperiodic_problem} and \ref{sec:resolution_half_guide_problem} are the most important sections of the paper. In Section \ref{sec:the_half_line_quasiperiodic_problem}, we link the solution of the 1D half-line problem with quasiperiodic coefficients to the solution of a degenerate directional Helmholtz equation posed in dimension $n$, with $n >1$ defined as in Section \ref{sec:quasiperiodicity}. This is the so-called lifting approach whose principle is presented in Section \ref{sec:lifting_in_a_higher_dimensional_periodic_problem}. More precisely, in Section \ref{sec:link_with_a_periodic_half_guide_problem}, we characterize the solution of the 1D quasiperiodic problem as the trace along a (broken) line of a $n$D problem posed in a domain with the geometry of a half-waveguide: $(0,1)^{n-1} \times \R_+$. In between, we need to dedicate the (rather long) Section \ref{sec:preliminary_material} to fix the notations used in the rest of the paper and present some useful preliminary material about an adapted functional framework for the rigorous setting of our method. This concerns anisotropic Sobolev spaces with an emphasis on trace theorems and related Green's formula. In Section \ref{sec:resolution_half_guide_problem}, we provide a method for solving the half-waveguide problem of Section \ref{sec:link_with_a_periodic_half_guide_problem}. 
In Section \ref{sec:structure_of_the_solution}, we describe the structure of the solution with the help of a propagation operator ${\cal P}$ and local cell problems. In Section \ref{sec:Riccati}, we show that the operator ${\cal P}$ is characterized as a particular solution of a Riccati equation. In Section \ref{sec:the_DtN_operator_and_the_DtN_coefficient}, we first build a directional DtN operator $\Lambda$ for the half-waveguide problem, from which we deduce the DtN coefficients $\lambda^\pm$ we are looking for (\emph{cf.} \eqref{eq:DtN_coefficients}). Finally, in Section \ref{sec:about_Riccati_equation}, we analyze the Riccati equation from a spectral point of view and in Section \ref{sec:propagation_operator} we describe the spectrum of ${\cal P}$. In Section \ref{sec:resolution_algorithm} devoted to numerical results, we restrict ourselves to $n=2$ for the sake of simplicity. The first two subsections are devoted to the discretization of the cell problems evoked above. We have considered two approaches: one, natural but naive, consists \textcolor{surligneur}{in using 2D} Lagrange finite elements (Section \ref{sec:methode_2D}) while the other, called the quasi-1D method, is better fitted to the anisotropy of the problem (Section \ref{sec:methode_quasi1D}). In Section \ref{sec:discrete_Riccati_equation}, we explain how we can construct a discrete propagation operator from a discrete Riccati equation that we choose to solve via a spectral approach, while Section \ref{sec:discrete_DtN_coefficient} simply mimics Section \ref{sec:the_DtN_operator_and_the_DtN_coefficient} at the discrete level. Section \ref{sec:numerical_results} is devoted to numerical results. In the first three subsections, we provide various validations of our method for the half-line problem (Sections \ref{sec:results:half_line_guide} and \ref{sec:results:absorption}) and the whole line problem (Section \ref{sec:results:whole_line_problem}). 
At last, in Section \ref{sec:results:spectral_approximation_P}, we address the question of the approximation of the spectrum of the propagation operator ${\cal P}$ by the one of its discrete approximation. \paragraph{Particular notation used throughout the paper.} In what follows, \begin{enumerate} \item the equality modulo $1$ is denoted by \[ \forall\; y\in\R, \quad \ z = y\,[1] \quad \Longleftrightarrow \quad z\in[0,1) \ \ \text{and} \ \ y-z \in \Z, \] and for all $p, q \in \N,\ p < q$, we set $\llbracket p, q\rrbracket:=\{j\in\N,\ p\leq j\leq q\}$. \item We introduce $\mathscr{C}_{\textit{per}}(\R^n)$ as the space of continuous functions $F : \R^n \to \R$ that are $1$--periodic with respect to each variable, and $\mathscr{C}^\infty_0(\mathcal{O})$ as the space of smooth functions that are compactly supported in $\mathcal{O} \subset \R^n$. \item For $i \in\llbracket 1,n\rrbracket$, we denote by $\vec{{\textit{\textbf{e}}}}_i$ the $i$-th unit vector from the canonical basis of $\R^n$. For any element ${\textit{\textbf{y}}} = (y_1, \dots, y_n)$ in $\R^n$, we define $\hat{{\textit{\textbf{y}}}}$ as the vector $(y_1, \dots, y_{n-1}) \in \R^{n-1}$, so that ${\textit{\textbf{y}}} = (\hat{{\textit{\textbf{y}}}}, y_n)$. For ${\textit{\textbf{y}}} = (y_1, \dots, y_n)$ and $\itbf{z} = (z_1, \dots, z_n)$, the Euclidean inner product of ${\textit{\textbf{y}}}$ and $\itbf{z}$ is denoted ${\textit{\textbf{y}}} \cdot \itbf{z} := y_1\,z_1 + \cdots + y_n\,z_n$, and the associated norm is $|{\textit{\textbf{y}}}| := \sqrt{{\textit{\textbf{y}}} \cdot {\textit{\textbf{y}}}}$. \end{enumerate} \section{Quasiperiodicity} \label{sec:quasiperiodicity} \subsection{Quasiperiodic functions of one real variable} In this section, we present a brief overview of the main properties of quasiperiodic functions. We refer to \cite{besicovitch, bohr, levitan} for more complete presentations. Quasiperiodicity is defined as follows. 
\begin{defi} \label{def:quasiperiodic_function} A continuous function $f : \R \to \R$ is said to be \emph{quasiperiodic of order $n > 1$} if there exist a constant real vector ${\boldsymbol{\theta}} = (\theta_1, \dots, \theta_n)$, with $\theta_i > 0$ for all $i \in\llbracket 1,n\rrbracket$, and a continuous function $F : \R^n \to \R$, $1$--periodic with respect to each variable, such that \begin{equation} \label{eq:def_quasiperiodic_function} \displaystyle \forall\; x \in \R, \quad f(x) = F(x\,{\boldsymbol{\theta}}). \end{equation} The vector ${\boldsymbol{\theta}}$ is called a \emph{cut direction}, and $F$ is a periodic \emph{extension} of $f$. \noindent A geometrical interpretation of this definition is to see the one-dimensional function $f$ as the trace of an $n$-dimensional function $F$ along the line passing through $(0, 0)$ and parallel to the vector ${\boldsymbol{\theta}}$. This is illustrated in Figure \ref{fig:example_quasiperiodic_function} for $n = 2$ and ${\boldsymbol{\theta}} = (1, \sqrt{2})$. \end{defi} \begin{figure} \caption{Function $F: (y_1, y_2) \mapsto \cos 2\pi y_1 + \cos 2\pi y_2$ in its periodicity cell (left), and its trace along the line ${\boldsymbol{\theta}}\,\R$ with ${\boldsymbol{\theta}} = (1, \sqrt{2})$ (right)} \label{fig:example_quasiperiodic_function} \end{figure} \noindent Periodic functions are obviously quasiperiodic. Other examples of quasiperiodic functions are finite sums or products of periodic functions: if $f_1$ and $f_2$ are periodic, then $f_1 + f_2$ and $f_1 f_2$ can be expressed under the form \eqref{eq:def_quasiperiodic_function}. Note that $f_1 + f_2$ and $f_1 f_2$ are \textit{not} periodic if $f_1$ and $f_2$ are \textit{continuous} functions with non-commensurable least periods. For instance, with $f_1(x) = \cos 2\pi x$ and $f_2(x) = \cos 2\pi\sqrt{2} x$, one easily checks that the sum $f_1 + f_2$, represented in Figure \ref{fig:example_quasiperiodic_function}, is not periodic since it equals $2$ only when $x = 0$. 
\noindent In Definition \ref{def:quasiperiodic_function}, it is easy to see that neither the periodic extension nor the cut direction are uniquely defined. Given $(F, {\boldsymbol{\theta}})$, it is always possible to lower the value of $n$, and change the function $F$ accordingly, so that the coefficients $\theta_1,\dots,\theta_n$ are \emph{linearly independent} \emph{over the integers} (see \cite[Chapter 2]{levitan}), that is \begin{equation} \label{eq:linear_independance} \displaystyle \forall\; \textit{\textbf{k}} \in \Z^n, \qquad \textit{\textbf{k}}\cdot{\boldsymbol{\theta}} = 0 \quad \Longleftrightarrow \quad \textit{\textbf{k}} = 0. \end{equation} For $n = 2$ and ${\boldsymbol{\theta}} = (\theta_1, \theta_2)$, the above condition amounts to saying that the ratio $\theta_1 / \theta_2$ is irrational. Due to this observation, vectors that satisfy \eqref{eq:linear_independance} will be abusively referred to as \emph{irrational vectors}. A consequence of \eqref{eq:linear_independance} is given by Kronecker's approximation theorem. \begin{thm}[\textnormal{\cite[Theorem 444]{hardy}}] \label{thm:kronecker} If ${\boldsymbol{\theta}}$ is an irrational vector, then the set ${\boldsymbol{\theta}}\, \R + \N^n$ is dense in $\R^n$. \end{thm} \noindent If ${\boldsymbol{\theta}}$ is an irrational vector, and if $F \in \mathscr{C}_{\textit{per}}(\R^n)$ satisfies $F({\boldsymbol{\theta}}\, \R) = 0$, then Theorem \ref{thm:kronecker} ensures that $F = 0$. In other words, under the linear independence assumption, $F$ is uniquely determined by its restriction on the line ${\boldsymbol{\theta}}\, \R$. \noindent For $n = 2$, Theorem \ref{thm:kronecker} implies that the broken line $\big\{(x\,\theta_1\,[1],\,x\,\theta_2\,[1]),\;x\in\R\big\}$ is dense in the unit cell $(0,1)^2$. 
To illustrate this, Figure \ref{fig:fibrage} represents the set $\big\{(x\,\theta_1\,[1],\,x\,\theta_2\,[1]),\;x\in(0,M)\big\}$ in the unit cell for different values of $M$, when (\emph{1}) $\theta_1/\theta_2 \in \Q$ (see the first row), and when (\emph{2}) $\theta_1/\theta_2 \in \R \setminus \Q$ (see the second row for ${\boldsymbol{\theta}} = (\sqrt{2}, 1)$ and the third one for ${\boldsymbol{\theta}} = (\pi, 1)$). For $M$ large enough, in the first case, this set is reduced to a \textit{finite} union of segments, whereas in the second case, it seems to fill the unit cell without ever passing through the same positions. It is also interesting to see that for ${\boldsymbol{\theta}} = (\sqrt{2}, 1)$, the unit cell is somehow filled uniformly, contrary to the case where ${\boldsymbol{\theta}} = (\pi, 1)$. \begin{figure} \caption{Representation of the set $\big\{(x\,\theta_1\,[1],\,x\,\theta_2\,[1]),\;x\in(0,M)\big\}$ for different values of $M$} \label{fig:fibrage} \end{figure} \noindent Finally, it is worth mentioning that Definition \ref{def:quasiperiodic_function} extends to higher-dimensional continuous functions as well. Moreover, the notion of quasiperiodicity can be defined at a discrete level, to describe the properties of tilings that are cuts and projections of higher-dimensional periodic tilings. These quasiperiodic tilings have been extensively studied \cite{gardner_1977_aperiodic, meyer1995quasicrystals, penrose_pentaplexity_1979, senechal1996quasicrystals}, and are used for modelling quasicrystals \cite{shechtmanAl}. \subsection{Locally perturbed quasiperiodic media} A locally perturbed quasiperiodic medium is a medium corresponding to functions $\mu$ and $\rho$ that satisfy \eqref{eq:coef_ellipt} and that are quasiperiodic outside a bounded interval, which can be supposed to be $(-a,a)$ (see \eqref{eq:source_terme}) without any loss of generality. 
More precisely, \[ \mu(x) = \left| \begin{array}{c l} \mu_i(x) & x \in (-a,a)\\[3pt] \mu_p(x\,{\boldsymbol{\theta}} ) & x \in \R \setminus (-a,a) \end{array} \right. \quad \textnormal{and} \quad \rho(x) = \left| \begin{array}{c l} \rho_i(x) & x \in (-a,a)\\[3pt] \rho_p( x\,{\boldsymbol{\theta}}) & x \in \R \setminus (-a,a), \end{array} \right. \] where the functions $\mu_p,\, \rho_p$ belong to $\mathscr{C}_{\textit{per}}(\R^n)$ with $n > 1$, and ${\boldsymbol{\theta}} \in \R^n$ is an irrational vector (see Condition \eqref{eq:linear_independance}). \begin{rmk}\label{rem} (a). Since ${\boldsymbol{\theta}}$ is an irrational vector, Kronecker's approximation theorem \ref{thm:kronecker} ensures that the functions $\mu_p$ and $\rho_p$ are entirely determined by their restrictions on the line $\R\, {\boldsymbol{\theta}}$. Therefore, $\mu_p$ and $\rho_p$ satisfy \eqref{eq:coef_ellipt} with respectively the same bounds as $\mu$ and $\rho$. (b). The present study \textcolor{surligneur}{can be extended} without difficulty to the case where $\mu$ (resp. $\rho$) coincides with two different quasiperiodic functions in $(-\infty, -a)$ and in $(a, +\infty)$: \[ \text{for}\; \pm x>\pm a, \quad\ \mu(x) = \mu_p^\pm(x\,{\boldsymbol{\theta}}^\pm\, ) \quad \text{and} \quad \rho(x) = \rho_p^\pm(x\,{\boldsymbol{\theta}}^\pm\, ), \] where $\mu_p^\pm,\, \rho_p^\pm$ belong to $\mathscr{C}_{\textit{per}}(\R^{n^\pm})$ with $n^\pm > 1$, and where ${\boldsymbol{\theta}}^\pm \in \R^{n^\pm}$ are irrational vectors. \end{rmk} \section{The half-line quasiperiodic problem} \label{sec:the_half_line_quasiperiodic_problem} We now focus on the half-line quasiperiodic problems \eqref{eq:half_line_problems_0}. As these problems are very similar to each other, it is sufficient to study the half-line problem set on $(a,+\infty)$ and suppose without loss of generality that $a = 0$. 
Let $\mu_{\boldsymbol{\theta}} := \mu_p({\boldsymbol{\theta}}\,\cdot)$ and $\rho_{\boldsymbol{\theta}} := \rho_p({\boldsymbol{\theta}}\,\cdot)$. Therefore, the problem we consider in this section is the following: \begin{equation} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \frac{d}{d x} \Big( \mu_{\boldsymbol{\theta}} \; \frac{d u^+_{\boldsymbol{\theta}}}{d x} \Big) - \rho_{\boldsymbol{\theta}} \; \omega^2 \; u^+_{\boldsymbol{\theta}} &=& 0, \quad \textnormal{in} & \R_+, \\[8pt] \displaystyle u^+_{\boldsymbol{\theta}}(0) &=& 1. \end{array} \right. \label{eq:half_line_problem} \end{equation} \begin{rmk} \color{surligneur} The function $u^+_{\boldsymbol{\theta}}$ corresponds exactly to the solution $u^+$ of \eqref{eq:half_line_problems_0} that was introduced in Section \ref{sec:introduction_motivation} for very general media. The reason why this solution is relabeled $u^+_{\boldsymbol{\theta}}$ is due to the fact that, because we consider here quasiperiodic media, the coefficients $\mu$ and $\rho$ that appear in \eqref{eq:half_line_problems_0} have been replaced by $\mu_{\boldsymbol{\theta}}$ and $\rho_{\boldsymbol{\theta}}$. \end{rmk} \subsection{Lifting in a higher-dimensional periodic problem} \label{sec:lifting_in_a_higher_dimensional_periodic_problem} We wish to exhibit some structure of the solution $u^+_{\boldsymbol{\theta}}$. 
As the coefficients $\mu_{\boldsymbol{\theta}}$ and $\rho_{\boldsymbol{\theta}}$ in \eqref{eq:half_line_problem} are by definition traces of $n$--dimensional functions along the half-line ${\boldsymbol{\theta}}\, \R_+$, it is natural to seek $u^+_{\boldsymbol{\theta}}$ as the trace along the same line of a {\color{surligneur}function ${\textit{\textbf{y}}} \in \R^n \mapsto \widetilde{U}^+_{\boldsymbol{\theta}}({\textit{\textbf{y}}})$, that is to say: \begin{equation} \operatorname{a.e.}\; x \in \R, \quad u^+_{\boldsymbol{\theta}}(x) = \widetilde{U}^+_{\boldsymbol{\theta}} (x\, {\boldsymbol{\theta}}), \end{equation} where $\widetilde{U}^+_{\boldsymbol{\theta}}$ shall be characterized as the solution of a $n$--dimensional PDE (in some sense, an “augmented” problem in which ${\textit{\textbf{y}}}$ is the augmented space variable) with periodic coefficients, as illustrated in Figure \ref{fig:illustration_lifting_approach}}. This so-called \textit{lifting approach} has been used in the homogenization setting for the analysis of some correctors in presence of periodic halfspaces \cite{gerard2012, gerard2011} or periodic structures separated by an interface \cite{blancLeBrisLions}, as well as for the homogenization of quasicrystals and Penrose tilings \cite{bouchitte, wellanderAl}. However, to our knowledge, very little seems to have been done in other contexts (such as wave propagation), and in particular for numerical analysis and simulation purposes. 
\noindent To build a higher-dimensional PDE, one has to exploit the correspondence between the derivative of $u^+_{\boldsymbol{\theta}}$ and the partial derivatives of $\widetilde{U}^+_{\boldsymbol{\theta}}$: according to the chain rule, for any smooth enough function $F: \R^n \to \C$, one has \begin{equation} \label{eq:chain_rule} \displaystyle \forall\; x \in \R, \quad \frac{d}{d x}[ F({\boldsymbol{\theta}}\, x) ] = ( \Dt{} F ) ({\boldsymbol{\theta}}\, x), \quad \text{with} \quad \Dt{} = {\boldsymbol{\theta}} \cdot \nabla = \sum_{i = 1}^n \theta_i\, \frac{\partial}{\partial y_i}. \end{equation} This leads us to introduce the $n$--dimensional PDE set on a half-space (see Remark \ref{rmk:lifting_approach}) \begin{subequations} \label{eq:half-space_problem} \begin{align} \displaystyle - \Dt{} \big( \mu_p \; \Dt{} \widetilde{U}^+_{\boldsymbol{\theta}} \big) - \rho_p \; \omega^2 \; \widetilde{U}^+_{\boldsymbol{\theta}} &= 0, \quad \textnormal{for} \quad y_n > 0, \intertext{where we recall that the coefficients $\mu_p,\, \rho_p : \R^n \to \R$ are continuous and $1$--periodic with respect to each variable. In addition, the boundary condition in \eqref{eq:half_line_problem} can be lifted onto the inhomogeneous Dirichlet boundary condition} \displaystyle \widetilde{U}^+_{\boldsymbol{\theta}} &= \widetilde{\varphi}, \quad \textnormal{on} \quad y_n = 0, \end{align} \end{subequations} \noindent where the data $\widetilde{\varphi} : \R^{n-1} \to \C$ could be chosen continuous and must satisfy $\widetilde{\varphi}(0) = 1$, for the sake of consistency with the fact that $u^+_{\boldsymbol{\theta}}(0) = 1$. 
Furthermore, to exploit the periodicity of the coefficients $\mu_p$ and $\rho_p$ with respect to the transverse variables $y_j,\,j<n$, we \textcolor{surligneur}{can} impose the following: \begin{equation} \label{eq:varphi_per} \text{$\widetilde{\varphi}$ is $1$--periodic,} \end{equation} so that it is natural to impose that \begin{equation} \label{eq:U_theta_periodic} \text{$\widetilde{U}^+_{\boldsymbol{\theta}}(\varphi)$ is $1$--periodic with respect to the transverse variables $y_j,\ j<n$.} \end{equation} In Section \ref{sec:link_with_a_periodic_half_guide_problem}, we show how to reduce the above to a half-guide problem with periodic coefficients. In order to do so, we shall need some preliminary materials, which is the object of the next section. \begin{rmk}\label{rmk:lifting_approach} (a). One could have defined the augmented problem \eqref{eq:half-space_problem} on other half-spaces $\{{\textit{\textbf{y}}} \in \R^n,\ y_i > 0\}$. The choice of the half-space is purely arbitrary. (b). At first glance, one could imagine restricting the whole study to a constant boundary data $\widetilde{\varphi} = 1$. Though, in practice, this can be the case, the method used to solve the half-guide problem requires to investigate the structure of $\widetilde{U}^+_{\boldsymbol{\theta}}(\widetilde{\varphi})$ for any $\widetilde{\varphi}$ in an appropriate function space (see Section \ref{sec:resolution_half_guide_problem} for more details). \end{rmk} \begin{figure} \caption{Illustration of the lifting approach for $n = 2$\label{fig:illustration_lifting_approach} \label{fig:illustration_lifting_approach} \end{figure} \subsection{Preliminary material} \label{sec:preliminary_material} The main objective of this section is to establish rigorously some Green's formulas that are formally obvious, such as the one of Proposition \ref{prop:Green_formula_H1per}. 
This requires first to introduce the adapted functional framework and, since Green's formulas involve boundary integrals, to establish relevant trace theorems. Section \ref{sub:trace} is devoted to these trace theorems, while we present the corresponding Green's formulas in Section \ref{sub:Green}. Finally, Section \ref{sub:oblique_cov} highlights a simple but useful link between the derivative $\Dt{}$ and a single partial derivative with respect to one real variable, through a so-called oblique change of variables. \subsubsection{Anisotropic Sobolev spaces and trace theorems}\label{sub:trace} For any open set $\mathcal{O} \subset \R^n$, let us first define the directional Sobolev space \begin{equation} H^1_{\boldsymbol{\theta}}(\mathcal{O}) := \big\{U \in L^2(\mathcal{O})\ /\ \Dt{} U \in L^2(\mathcal{O}) \big\}, \end{equation} which is a Hilbert space, provided with the scalar product \[ (U, V)_{H^1_{\boldsymbol{\theta}}(\mathcal{O})} := \int_{\mathcal{O}} \Big(\Dt{} U\, \Dt{} \overline{V} + U\, \overline{V}\Big). \] Let us denote $\|\cdot\|_{H^1_{\boldsymbol{\theta}}(\mathcal{O})}$ the induced norm. We begin with the following density property, whose proof can be found in \cite[Appendix 1]{temam1968stabilite}. \begin{lem} \label{prop:density_Cinfy_H1cut} The space $\mathscr{C}^\infty_0(\overline{\mathcal{O}})$ is dense in $H^1_{\boldsymbol{\theta}}(\mathcal{O})$. \end{lem} \noindent We denote the half-space $\R^n_+:=\{{\textit{\textbf{y}}}\in\R^n,\;y_n>0 \}$ and the half-cylinder $\Omega^\diese:=(0,1)^{n-1}\times\R^+$ in the following. Let us introduce also the sets, for $a \in \{0, 1\}$ and for any integer $i\in\llbracket 1, n\rrbracket $, \[ \Sigma_{i,a} = \{{\textit{\textbf{y}}} \in \R^n_+,\ y_i = a\}\quad \text{and}\quad \Sigma_{i,a}^\diese = \{{\textit{\textbf{y}}} \in \Sigma_{i,a},\ y_j \in(0,1),\; j\in \llbracket 1, n-1\rrbracket,\; j\neq i\}. \] This definition is illustrated in Figure \ref{fig:domains_3D}. 
Note that $\Sigma_{n,a}^\diese$ is bounded whereas $\Sigma_{i,a}^\diese$ for $i\neq n$ is unbounded in the direction $y_n$. Moreover, \[ \partial\Omega^\diese = \Sigma_{n,0}^\diese \cup \bigg[ \bigcup_{i=1}^{n-1} \big(\overline{\Sigma}_{i,0}^\diese\cup\overline{\Sigma}_{i,1}^\diese\big) \bigg]. \] \noindent A trace operator can be defined from $H^1_{{\boldsymbol{\theta}}}(\R^n_+)$ on $\Sigma_{i,a}$. The main idea for doing so consists in using a one-dimensional trace theorem on the ${\boldsymbol{\theta}}$--oriented line that starts from a point $(z_1,\dots,z_{i-1}, a, z_{i+1},\dots,z_n) \in \Sigma_{i, a}$, to obtain an inequality which will be integrated with respect to $z_j$, $j \neq i$. The 1D trace theorem which will be used is the following. \begin{figure} \caption{$n = 2$} \caption{$n = 3$} \caption{Domains $\Omega^\diese$, $\Sigma_{i, a}$ and $\Sigma_{i, a}^\diese$.} \label{fig:domains_3D} \end{figure} \begin{prop} \label{lem:1Dtrace_theorem} Let \textcolor{surligneur}{$L \in \R^*_+ \cup \{+\infty\}$}. Then the mapping $\gamma_L : u \mapsto u(0)$ is continuous from $H^1(0, L)$ to $\C$. Moreover, the operator norm of $\gamma_L$ is given by {\color{surligneur} \begin{equation} \displaystyle \|\gamma_L\|^2 = \frac{\euler^{L} + \euler^{-L}}{\euler^{L} - \euler^{-L}} =: [\tanh L]^{-1}\ \ \textnormal{for}\ L > 0, \quad \textnormal{and} \quad \|\gamma_\infty\|^2 = 1. \end{equation} } \end{prop} \begin{dem} The continuity property is a classical result which can be proved by density. By definition, $\|\gamma_L\| := \sup\{|u(0)|,\ \|u\|_{H^1(0, L)} = 1\}$. This corresponds to a constrained optimization problem. 
Using the standard theory, this leads us to introduce a Lagrange multiplier $\lambda$ and to find a pair $(\lambda, u_L) \in \C\setminus\{0\}\times H^1(0, L) $ such that $\|u_L\|_{H^1(0, L)} = 1$ and \begin{equation} \forall\; v \in H^1(0, L) \quad \lambda\, u_L(0)\, \overline{v(0)} = \int_0^L \Big( \frac{d u_L}{d x}\,\frac{d \overline{v}}{d x} + u_L\, \overline{v} \Big) \; dx, \end{equation} in which case, we have $\|\gamma_L\|^2 = \lambda$. The explicit solution of this problem leads to the result. \end{dem} \noindent Note that, in particular, $\smash{\displaystyle \|\gamma_L\|^2 \underset{L \to 0}{\sim} L^{-1}}$. \noindent We are now able to define traces on $\Sigma_{i, a}$ in the following sense. \begin{prop} \label{prop:trace_H1cut_demi_espace} Fix $a \in \{0, 1\}$ and $i\in \llbracket 1, n\rrbracket $. The mapping $\gamma_{i,a} : \mathscr{C}^\infty_0(\overline{\R^n_+}) \to \mathscr{C}^\infty_0(\Sigma_{i,a})$ defined by $\gamma_{i,a} U = \restr{U}{\Sigma_{i,a}}$ extends by continuity to a linear mapping still denoted $\gamma_{i,a}$, from $H^1_{{\boldsymbol{\theta}}}(\R^{n}_+)$ to $L^2(\Sigma_{i,a})$, and which satisfies the estimate \begin{equation} \displaystyle \forall\; U \in H^1_{{\boldsymbol{\theta}}}(\R^{n}_+),\quad \|\gamma_{i,a} U\|^2_{L^2(\Sigma_{i,a})} \leq \frac{1}{\theta_i}\; \|U\|^2_{H^1_{{\boldsymbol{\theta}}}(\R^{n}_+)}.\label{eq:trace_H1cut_demi_espace} \end{equation} \end{prop} \begin{dem} One can simply prove the continuity estimate \eqref{eq:trace_H1cut_demi_espace} for any function $U \in \mathscr{C}^\infty_0(\overline{\R^n_+})$ and conclude using the density result of Proposition \ref{prop:density_Cinfy_H1cut}. \noindent ($i$) \smash{\underline{\textit{Case} $i \in \llbracket 1, n-1 \rrbracket$}}: Without loss of generality, we set $i = 1$. 
Define \begin{equation}\displaystyle \Gamma_{1, a} := \{{\textit{\textbf{z}}} = (z_2, \dots, z_n),\quad (a, {\textit{\textbf{z}}}) \in \Sigma_{1, a}\} \equiv \R^{n-1}_+, \quad \textnormal{where} \quad (a, {\textit{\textbf{z}}}) = (a, z_2, \dots, z_n). \label{eq:Gamma_1a} \end{equation} For $U \in \mathscr{C}^\infty_0(\overline{\R^n_+})$ and given any ${\textit{\textbf{z}}} = (z_2, \dots, z_n) \in \Gamma_{1, a}$, consider the function \begin{equation} \displaystyle \forall\; x > 0, \quad u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(x) = U(x\,{\boldsymbol{\theta}} + (a, \itbf{z})). \end{equation} As $u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}$ belongs to $H^1(\R^*_+)$, Lemma \ref{lem:1Dtrace_theorem} for $L = +\infty$ combined with an integration with respect to ${\textit{\textbf{z}}} \in \Gamma_{1, a}$ leads to \begin{equation} \displaystyle \int_{\Gamma_{1, a}} |u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(0)|^2\; d \itbf{z} \leq \int_{\Gamma_{1, a}} \|u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}\|^2_{H^1(\R^*_+)} d \itbf{z}. \label{eq:preuve_trace_1} \end{equation} On the other hand, let us introduce the transformation \begin{equation} \label{eq:preuve_trace_3} \displaystyle \operatorname{T} : {\textit{\textbf{y}}} \mapsto \big((y_1-a)/{\boldsymbol{\theta}}i_1, y_2 - (y_1-a)\, {\boldsymbol{\theta}}i_2/{\boldsymbol{\theta}}i_1, \cdots, y_n - (y_1 - a)\, {\boldsymbol{\theta}}i_n/{\boldsymbol{\theta}}i_1\big), \end{equation} which defines a $\mathscr{C}^1$--diffeomorphism with a Jacobian determinant $\det \mathbf{J}_{\operatorname{T}} = 1/\theta_1 \neq 0$. 
Since the inverse image $\{\operatorname{T}^{-1}(x, {\textit{\textbf{z}}}),\ {\textit{\textbf{z}}} \in \Gamma_{1, a},\; x > 0\}$ is nothing but the polyhedron \[ \mathcal{Q}_{1, a} := \{{\textit{\textbf{y}}} \in \R^n_+,\ y_1 > a,\ y_n > (y_1 - a)\, {\boldsymbol{\theta}}i_n/{\boldsymbol{\theta}}i_1\} \subset \R^n_+, \] it follows from the chain rule and from the change of variables ${\textit{\textbf{y}}} \mapsto \operatorname{T}{\textit{\textbf{y}}}$ that \begin{equation} \displaystyle \frac{d u_{\itbf{z}, {\boldsymbol{\theta}}}}{dx}(x) = \Dt{} U(x\,{\boldsymbol{\theta}} + (a, \itbf{z})) \quad \textnormal{and} \quad \int_{\Gamma_{1, a}} \|u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}\|^2_{H^1(\R^*_+)}\; d \itbf{z} = \frac{1}{{\boldsymbol{\theta}}i_1} \, \|U\|^2_{H^1_{\boldsymbol{\theta}}(\mathcal{Q}_{1, a})}. \label{eq:preuve_trace_2} \end{equation} Finally, since $u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(0) = U(a, z_2, \cdots, z_n)$, Equations \eqref{eq:preuve_trace_1} and \eqref{eq:preuve_trace_2} imply \begin{equation}\label{eq:preuve_trace_4} \|U\|^2_{L^2(\Sigma_{1, a})} \leq \frac{1}{{\boldsymbol{\theta}}i_1} \, \|U\|^2_{H^1_{\boldsymbol{\theta}}(\mathcal{Q}_{1, a})} \leq \frac{1}{{\boldsymbol{\theta}}i_1} \, \|U\|^2_{H^1_{\boldsymbol{\theta}}(\R^n_+)}, \end{equation} which is exactly the desired estimate. \noindent ($ii$) \smash{\underline{\textit{Case} $i = n$}}: starting from the function $u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(x) := U(x\,{\boldsymbol{\theta}} + ({\textit{\textbf{z}}}, a))$ defined for $x > 0$ and for any ${\textit{\textbf{z}}} = (z_1, \dots, z_{n-1})$ with $({\textit{\textbf{z}}}, a) \in \Sigma_{n, a}$, the proof uses the exact same arguments as above, except the inverse image under $\operatorname{T}$ becomes the whole half-space $\mathcal{Q}_{n, a} := \{{\textit{\textbf{y}}} \in \R^n_+,\ y_n > a\}$. 
\end{dem} \noindent {The previous result does not hold in general for functions which are only $H^1_{\boldsymbol{\theta}}$ in sub-domains of the half-space $\R^n_+$}. In particular when it comes to the half-cylinder $\Omega^\diese$, one is led to apply the one-dimensional trace theorem on segments that become smaller in the neighbourhood of the “corners”, \emph{i.e.} the intersections of two faces. To overcome this difficulty, let us consider the sets (see Figure \ref{fig:domains_3D_bis}) \begin{equation} \label{eq:Sigma_ia_securite} \forall\; 0 < b < 1/2, \quad \Sigma_{i, a}^{\diese, b} = \{{\textit{\textbf{y}}} \in \Sigma^\diese_{i,a},\quad \operatorname{dist}({\textit{\textbf{y}}},\, \partial \Sigma^\diese_{i, a}) := \inf_{{\textit{\textbf{z}}} \, \in\, \partial \Sigma^\diese_{i, a}} |{\textit{\textbf{y}}} - {\textit{\textbf{z}}}| > b\}. \end{equation} Using these domains, the traces on $\Sigma_{i,a}^\diese$ can be defined as locally integrable functions in the sense of the following proposition. \begin{figure} \caption{From left to right: the sets $\Sigma^{\diese, b}_{i, a}$, $\Omega^\diese_{a, \pm}$, $T_n$ and $\Omega^\diese_{\boldsymbol{\theta}}$.} \label{fig:domains_3D_bis} \end{figure} \begin{prop} \label{prop:trace_L2loc_demi_cylindre} Let $a \in \{0, 1\}$ and $i\in \llbracket 1, n\rrbracket $. The mapping $\gamma_{i,a}^\diese : \mathscr{C}^\infty_0(\overline{\Omega}^\diese) \to \mathscr{C}^\infty_0(\Sigma^\diese_{i,a})$ defined by $\gamma_{i, a}^\diese U = \restr{U}{\Sigma_{i, a}^\diese}$ extends by continuity to a linear mapping still denoted $\gamma_{i, a}^\diese $, from $H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$ to $L^2_{\textit{loc}}(\Sigma_{i, a}^\diese)$, and which satisfies the estimate \begin{equation} \displaystyle \label{eq:trace_L2loc_demi_cylindre} \forall\; 0 < b < 1/2,\quad \exists\; C_b > 0, \quad \forall\; U \in H^1_{{\boldsymbol{\theta}}}(\Omega^\diese), \quad \|\gamma_{i, a}^\diese U\|^2_{L^2(\Sigma_{i, a}^{\diese, b})} \leq \frac{C_b}{\theta_i}\, \|U\|^2_{H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)}. 
\end{equation} \end{prop} \begin{dem} Using the density result stated in Proposition \ref{prop:density_Cinfy_H1cut}, one only has to show \eqref{eq:trace_L2loc_demi_cylindre} for $U \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese)$. Let us assume that $i = 1$ and $a = 0$, the arguments in the following extending without any difficulty to $i\in \llbracket 1, n \rrbracket$ and $a \in \{0, 1\}$. Define \begin{equation}\displaystyle \Gamma^\diese_{1, 0} := \{{\textit{\textbf{z}}} = (z_2, \dots, z_n),\quad (0, {\textit{\textbf{z}}}) \in \Sigma^\diese_{1, 0}\} \equiv (0, 1)^{n-1} \times \R_+. \label{eq:GammaDiese_1a} \end{equation} We introduce the length function defined by \[\displaystyle \forall\; {\textit{\textbf{z}}} \in \Gamma^\diese_{1, 0}, \quad \lambda_{1, 0}({\textit{\textbf{z}}}) := \big| \{{\boldsymbol{\theta}}\,\R + (0, {\textit{\textbf{z}}})\} \cap \Omega^\diese \big| = \sup\{x > 0, \; x\,{\boldsymbol{\theta}}i_1\leq 1,\ x\,{\boldsymbol{\theta}}i_i + z_i \leq 1\ \ \forall\; i\in\llbracket2, n-1\rrbracket \}. \] We deduce easily that \begin{equation} \label{eq:expression_longueur} \lambda_{1, 0}({\textit{\textbf{z}}}) = \min\bigg\{ \frac{1}{{\boldsymbol{\theta}}i_1};\; \min_{2 \leq j \leq n-1} \Big( \frac{1 - z_j}{{\boldsymbol{\theta}}i_j} \Big) \bigg\}. \end{equation} For $U \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese)$ and ${\textit{\textbf{z}}} \in \Gamma^\diese_{1, 0}$, we define \begin{equation} \label{eq:preuve_trace_demi_cylindre_-1} \displaystyle \forall\; 0 < x < \lambda_{1, 0}({\textit{\textbf{z}}}), \quad u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(x) = U(x\,{\boldsymbol{\theta}} + (0, \itbf{z})). 
\end{equation} Since $u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}} \in \smash{H^1\big(0, \lambda_{1, 0}({\textit{\textbf{z}}})\big)}$, {Lemma \ref{lem:1Dtrace_theorem}} and an integration with respect to ${\textit{\textbf{z}}}$ give \begin{equation} \displaystyle \int_{\Gamma^\diese_{1, 0}} w_{1, 0}({\textit{\textbf{z}}})\; |u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}(0)|^2\; d{\textit{\textbf{z}}} \leq \int_{\Gamma^\diese_{1, 0}} \|u_{{\textit{\textbf{z}}}, {\boldsymbol{\theta}}}\|^2_{H^1(0, \lambda_{1, 0}({\textit{\textbf{z}}}))} \; d{\textit{\textbf{z}}}, \quad \textnormal{with} \ w_{1, 0}({\textit{\textbf{z}}}) = \tanh[ \lambda_{1, 0}({\textit{\textbf{z}}})]. \label{eq:preuve_trace_demi_cylindre_0} \end{equation} On the other hand, consider the $\mathscr{C}^1$--diffeomorphism $\operatorname{T}$ given by \eqref{eq:preuve_trace_3}. The set $\mathcal{Q}^{\diese}_{1, 0} := \{\operatorname{T}^{-1}(x, {\textit{\textbf{z}}}),\ \ 0 < x < \lambda_{1, 0}({\textit{\textbf{z}}}),\ {\textit{\textbf{z}}} \in \Gamma^{\diese}_{1, 0}\}$ is clearly included in $\Omega^\diese$. {Thus, by analogy with \eqref{eq:preuve_trace_4} in} the proof of Proposition \ref{prop:trace_H1cut_demi_espace}, we have from \eqref{eq:preuve_trace_demi_cylindre_-1}, the chain rule, and the change of variables ${\textit{\textbf{y}}} \mapsto \operatorname{T}{\textit{\textbf{y}}}$ that \begin{equation} \displaystyle \int_{\Gamma^\diese_{1, 0}} w_{1, 0}({\textit{\textbf{z}}})\; |U(0, {\textit{\textbf{z}}})|^2\; d{\textit{\textbf{z}}} \leq \frac{1}{\theta_1} \, \|U\|^2_{H^1_{\boldsymbol{\theta}}(\Omega^\diese)}. \label{eq:preuve_trace_demi_cylindre_1} \end{equation} More generally, we can show that $\gamma^\diese_{i, a}$ can be defined from $H^1_{\boldsymbol{\theta}}(\Omega^\diese)$ to the weighted space $L^2(\Sigma^\diese_{i, a}, w_{i, a}\, d{\textit{\textbf{z}}})$, where the weight $w_{i, a}$ is given in \eqref{eq:preuve_trace_demi_cylindre_0} for $i = 1$ and $a = 0$. 
Now, the expression \eqref{eq:expression_longueur} of $\lambda_{1, 0}$ implies that $w_{1, 0}$ degenerates at the neighbourhood of the corners $z_j = 1$. However, the weight $w_{1, 0}$ is bounded from below on $\Sigma^{\diese, b}_{1, 0}$ with \begin{equation} \label{eq:preuve_trace_demi_cylindre_2} \displaystyle \inf_{(0, {\textit{\textbf{z}}}) \in \Sigma^{\diese, b}_{1, 0}} w_{1, 0}({\textit{\textbf{z}}}) = \tanh \bigg[ \min\Big\{ \frac{1}{{\boldsymbol{\theta}}i_1};\; b\min_{2 \leq j \leq n-1} \frac{1}{{\boldsymbol{\theta}}i_j} \Big\} \bigg] > 0. \end{equation} If we set $\smash{C_b := [\inf_{(0, {\textit{\textbf{z}}}) \in \Sigma^{\diese, b}_{1, 0}} w_{1, 0}({\textit{\textbf{z}}})]^{-1} > 0}$, then \eqref{eq:trace_L2loc_demi_cylindre} follows directly from \eqref{eq:preuve_trace_demi_cylindre_1} by integrating with respect to $\{{\textit{\textbf{z}}},\ (0, {\textit{\textbf{z}}}) \in \Sigma^{\diese, b}_{1, 0}\}$, instead of $\Gamma^\diese_{1, 0}$. \end{dem} \color{black} \begin{rmk}\label{rem:traceH1theta} The best constant in the previous proposition necessarily blows up when $b$ tends to 0. The above proof shows that traces could be defined on the whole faces in appropriate weighted $L^2$-spaces. More details about traces in anisotropic spaces can be found in \cite{joly1992some}. \end{rmk} \subsubsection{Green's formulas}\label{sub:Green} {Let us now introduce} the set $H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^n_+)$ of functions which are \textcolor{surligneur}{$H^1_{\boldsymbol{\theta}}$ in any half-cylinder $S \times \R_+$ where $S$ is a bounded open set in $\R^{n-1}$}. More rigorously, we define for any $\varphi \in \mathscr{C}^\infty_0(\R^{n-1})$ the $n$--dimensional function $\check{\varphi} \in \mathscr{C}^\infty(\R^{n})$ such that \begin{equation} \label{eq:cut_off_etendu_Rn} \check{\varphi}(y_1,\ldots,y_{n-1},y_n)= \varphi(y_1,\ldots,y_{n-1}). 
\end{equation} Note that for any $U\in L^2_\textit{loc}(\R^n_+)$, the support of $\check{\varphi}\, U$ is bounded in the directions $y_j,\,j\neq n$. Starting from this remark, we define \begin{equation} \begin{array}{c} H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^n_+) := \Big\{U \in L^2_{\textit{loc}}(\R^n_+), \quad \check{\varphi}\, U\; \in H^1_{\boldsymbol{\theta}}(\R^n_+)\ \ \forall\;\!\varphi \in \mathscr{C}^\infty_0(\R^{n-1}) \Big\}. \end{array} \end{equation} \noindent Let us introduce a 1D cut-off function $\chi\in \mathscr{C}^\infty_0(\R)$ such that $\chi=1$ on $(0,1)$, from which we define $\check{\chi}_\diese\in \mathscr{C}^\infty_0(\R^n)$ as \begin{equation} \label{eq:definition_cutoff} \check{\chi}_\diese(y_1,\ldots,y_{n-1}, y_n)=\chi(y_1)\ldots\chi(y_{n-1}). \end{equation} We deduce in particular that \begin{equation}\label{eq:restr_halfspace} \forall\; U\in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^{n}_+),\quad \restr{U}{\Omega^\diese}=\restr{(\check{\chi}_\diese\, U)}{\Omega^\diese}\;\in\;H^1_{\boldsymbol{\theta}}({\Omega^\diese}). \end{equation} Moreover, by Proposition \ref{prop:trace_H1cut_demi_espace}, it is obvious that we can extend without any ambiguity the trace map $\gamma_{i,a}^\diese$ to $H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^{n}_+)$ as follows \begin{equation} \label{eq:trace_H1loc} \forall\; U\in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^{n}_+),\quad \gamma_{i,a}^\diese U:=\restr{\gamma_{i,a} (\check{\chi}_\diese U)}{\Sigma_{i,a}^\diese}\;\in L^2(\Sigma_{i,a}^\diese). \end{equation} For simplicity, when considering traces on $\Sigma_{i,a}^\diese$, we shall write $U$ instead of $\gamma_{i,a}^\diese U$. We can now state the following Green's formula. 
\begin{prop}\label{prop:Green_formula} For any $U, V \in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^{n}_+)$, we have the Green's formula \begin{equation} \int_{\Omega^\diese} \left( \Dt{} U \; \overline{V} + U\; \Dt{} \overline{V} \right)\; d {\textit{\textbf{y}}} = \frac{1}{\theta_n} \int_{\Sigma^\diese_{n,0}} U \; \overline{V} \;d s + \sum_{i = 1}^{n-1} \frac{1}{\theta_i} \Big( \int_{\Sigma^\diese_{i,1}} U \; \overline{V} \;d s - \int_{\Sigma^\diese_{i,0}} U \; \overline{V} \;d s \Big). \label{eq:Green_formula} \end{equation} \end{prop} \begin{dem} Let $U, V \in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^{n}_+)$. By definition, for any $\chi \in \mathscr{C}^\infty_0(\R)$ such that $\chi = 1$ on $(0, 1)$, the functions $\check{\chi}_\diese\, U$ and $\check{\chi}_\diese\, V$ belong to $H^1_{\boldsymbol{\theta}}(\R^n_+)$, where $\check{\chi}_\diese$ is defined in \eqref{eq:definition_cutoff}. Since Proposition \ref{prop:density_Cinfy_H1cut} ensures that $\mathscr{C}^\infty_0(\overline{\R^n_+})$ is dense in $H^1_{\boldsymbol{\theta}}(\R^n_+)$, there exist two sequences $(U_k)_{k \in \N}, (V_k)_{k \in \N}$ of functions in $\mathscr{C}^\infty_0(\overline{\R^n_+})$, such that \[\displaystyle U_k \to \check{\chi}_\diese\, U \quad \textnormal{and} \quad V_k \to \check{\chi}_\diese\, V \quad \textnormal{in} \quad H^1_{\boldsymbol{\theta}}(\R^n_+), \quad k \to +\infty. \] It follows from Green's formula for smooth functions that $U_k$ and $V_k$ satisfy \eqref{eq:Green_formula} for any $k \in \N$. Passing to the limit and using the trace continuity result stated in Proposition \ref{prop:trace_H1cut_demi_espace} imply that \eqref{eq:Green_formula} is satisfied by $\check{\chi}_\diese\, U$ and $\check{\chi}_\diese\, V$, \emph{i.e.} by $U$ and $V$, since $\check{\chi}_\diese = 1$ in $\Omega^\diese$. \end{dem} \noindent We next focus on functions which are periodic with respect to their $(n-1)$ first variables. 
More precisely, for any $U \in L^2(\Omega^\diese)$ and any $\varphi \in L^2(\Sigma^\diese_{n, 0})$, we introduce the respective periodic extensions $\widetilde{U} \in L^2_{\textit{loc}}(\R^n_+)$ and $\widetilde{\varphi} \in L^2_{\textit{loc}} (\Sigma_{n, 0})$ as defined {for any $i\in \llbracket 1, n-1\rrbracket $} by \begin{equation}\label{eq:per_extension} \left\{ \begin{array}{l@{\quad}l@{\quad \textnormal{and} \quad}l} \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, & \widetilde{U}({\textit{\textbf{y}}} + \vec{{\textit{\textbf{e}}}}_i) = \widetilde{U}({\textit{\textbf{y}}}) & \restr{\widetilde{U}}{\Omega^\diese}= U. \\[8pt] \operatorname{a.e.}\; {\textit{\textbf{s}}} \in \Sigma_{n, 0}, & \widetilde{\varphi}({\textit{\textbf{s}}} + \vec{{\textit{\textbf{e}}}}_i) = \widetilde{\varphi}({\textit{\textbf{s}}}) & \restr{\widetilde{\varphi}}{\Sigma^\diese_{n, 0}}= \varphi. \end{array} \right. \end{equation} An appropriate functional framework is provided by the space \begin{equation} \label{eq:H1thetaper} \displaystyle H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese) = \Big\{ U \in L^2(\Omega^\diese),\ \widetilde{U} \in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\R^n_+) \Big\} \ \subset H^1_{{\boldsymbol{\theta}}}(\Omega^\diese), \end{equation} where the inclusion follows from \eqref{eq:restr_halfspace} and \eqref{eq:per_extension}. If $\mathscr{C}^\infty_{\textit{per}}(\Omega^\diese)$ denotes the set of smooth functions in $\mathscr{C}^\infty(\Omega^\diese)$ which are $1$--periodic with respect to their first $n-1$ variables, that is, \begin{equation} \displaystyle \label{eq:Cinfyper0} \mathscr{C}^\infty_{\textit{per}}({\Omega}^\diese) = \Big\{V \in \mathscr{C}^\infty(\Omega^\diese),\quad \widetilde{V} \in \mathscr{C}^\infty(\R^n_+) \Big\}, \end{equation} then one can show the following result by adapting classical properties of $H^1$ functions. 
\begin{lem} \label{prop:density_Cinfyper_H1cutper} The space $\mathscr{C}^\infty_{\textit{per}}({\Omega}^\diese)$ is dense in $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$. \end{lem} \noindent Note that the traces of functions in $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ on $\Sigma_{i,a}^\diese$ are well-defined in $L^2$ by \eqref{eq:trace_H1loc}. Moreover, using the continuity estimate \eqref{eq:trace_H1cut_demi_espace} we have \begin{equation} \label{eq:trace_func_per} \gamma_{i,a}^\diese\in\mathcal{L}(H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese),L^2(\Sigma_{i,a}^\diese)). \end{equation} {One has the characterization} \begin{equation}\label{eq:charac_H1per} \displaystyle H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese) = \Big\{ U \in H^1_{\boldsymbol{\theta}}(\Omega^\diese),\quad \gamma_{i,0}^\diese U =\gamma_{i,1}^\diese U\ \ \forall\; i\in \llbracket 1, n-1\rrbracket \Big\}, \end{equation} where {the traces of functions in $ H^1_{\boldsymbol{\theta}}(\Omega^\diese)$ are defined in Proposition \ref{prop:trace_L2loc_demi_cylindre}} and the equality of traces has to be understood up to the identification of functions on $\Sigma_{i,0}^\diese$ and $\Sigma_{i,1}^\diese$. It is clear from \eqref{eq:charac_H1per} that $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ is a closed subspace of $H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$, thus it is a Hilbert space when equipped with the norm of $H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$. From Proposition \ref{prop:Green_formula} and \eqref{eq:charac_H1per}, we deduce the Green's formula on $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$. 
\begin{prop}\label{prop:Green_formula_H1per} For any $U, V \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$, we have the Green's formula \begin{equation} \int_{\Omega^\diese} \left( \Dt{} U \; \overline{V} + U\; \Dt{} \overline{V}\right) d {\textit{\textbf{y}}} = \frac{1}{{\boldsymbol{\theta}}i_n} \int_{\Sigma^\diese_{n,0}} U \; \overline{V} \;d s. \label{eq:Green_formula_H1per} \end{equation} \end{prop} \noindent From the Green's formula \eqref{eq:Green_formula_H1per}, we can easily deduce the following result. \begin{cor}\label{cor:rest_H1thetaper} Let $a>0$, and define the sets with common boundary $\Sigma^\diese_{n, a}$ (see Figure \ref{fig:domains_3D_bis}): \begin{equation}\label{eq:Omega_diese_a_plus} \Omega^\diese_{a,+} := \Omega^\diese \cap \{y_n > a\} \quad \textnormal{and} \quad \Omega^\diese_{a,-} := \Omega^\diese \cap \{y_n < a\}. \end{equation} Consider a function $U \in L^2(\Omega^\diese)$ such that $U_\pm := U|_{\Omega^\diese_{a,\pm}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese_{a, \pm})$, where $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese_{a,\pm})$ is defined as in \eqref{eq:charac_H1per}. Then \[ U\in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)\quad \Longleftrightarrow \quad \gamma_{n,a}^\diese U_+ =\gamma_{n,a}^\diese U_-. \] \end{cor} \noindent We finish this section with a more technical Green's formula, used in the proof of Proposition \ref{prop:half_guide_FV}, involving functions $U$ that only belong to $H^1_{\boldsymbol{\theta}}(\Omega^\diese)$, provided that the test function $V$ vanishes in the neighborhood of the skeleton $T_n$ defined by \begin{equation} \label{eq:definition_table} \displaystyle T_2 = \overline{\Sigma}^\diese_{2, 0} \quad \textnormal{and} \quad T_n = \overline{\Sigma}^\diese_{n, 0} \cup \bigg[ \bigcup_{j = 1}^{n-1} \big(\partial \Sigma^\diese_{j, 0} \cup \partial \Sigma^\diese_{j, 1} \big) \bigg] \quad \textnormal{for $n \geq 3$}. 
\end{equation} This domain is represented in Figure \ref{fig:domains_3D_bis} for $n = 3$. \begin{prop}\label{prop:formule_Green_demi_cylindre} For $U \in H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$ and $V \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)$, the Green's formula \eqref{eq:Green_formula} still holds. \end{prop} \begin{dem} Consider $U \in H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$ and $V\in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)$. Since by Proposition \ref{prop:density_Cinfy_H1cut}, $\mathscr{C}^\infty_0(\overline{\Omega}^\diese)$ is dense in $H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$, there exists a sequence $(U_k)_{k \in \N}$ of functions in $\mathscr{C}^\infty_0(\overline{\Omega}^\diese)$ which tends to $U$. It follows from Green's formula in $\Omega^\diese$ for smooth functions that $U_k$ and $V$ satisfy \eqref{eq:Green_formula} for any $k \in \N$. For $0 < b < 1/2$, let $\Omega^{\diese, b}$ be the domain \begin{equation} \displaystyle \Omega^{\diese, b} = \{{\textit{\textbf{y}}} \in \Omega^\diese,\quad \operatorname{dist}({\textit{\textbf{y}}},\, T_n) := \inf_{{\textit{\textbf{z}}} \, \in\, T_n} |{\textit{\textbf{y}}} - {\textit{\textbf{z}}}| > b\}. \end{equation} Since $V \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)$, there exists a real number $0 < b < 1/2$ such that $\restr{V}{\Omega^{\diese, b}} \in \mathscr{C}^\infty_0(\Omega^{\diese, b})$. Consequently, for any $i\in \llbracket 1, n-1\rrbracket $, the surface integral on $\Sigma^\diese_{i, a}$ is reduced to the set $\Sigma^{\diese, b}_{i, a}$ defined by \eqref{eq:Sigma_ia_securite}. When $k$ tends to $+\infty$, we can then use the trace continuity result stated in Proposition \ref{prop:trace_L2loc_demi_cylindre} on $\Sigma^{\diese, b}_{i, a}$, to deduce that \eqref{eq:Green_formula} is satisfied by $U$ and $V$. 
\end{dem} \color{black} \subsubsection{An oblique change of variables}\label{sub:oblique_cov} Before stating \textcolor{surligneur}{Proposition \ref{prop:trace_lifting} which is} the main result of this section, let us introduce the change of variables in $\R^n_+$: \begin{equation}\label{eq:changt_vars} ({\textit{\textbf{s}}}, x) \in \R^n_+ \mapsto {\textit{\textbf{y}}} = ({\textit{\textbf{s}}}, 0) + x\,{\boldsymbol{\theta}} \in \R^n_+, \end{equation} and denote by $\Omega^\diese_{\boldsymbol{\theta}}$ the image of $\Omega^\diese$ by the above transformation: \begin{equation}\label{eq:Omegadiesetheta} \Omega^\diese_{\boldsymbol{\theta}} := \{ ({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}, \ \ {\textit{\textbf{s}}} \in (0, 1)^{n-1},\ x > 0\}. \end{equation} This is illustrated in Figure \ref{fig:domains_3D_bis} for $n = 3$ and in Figure \ref{fig:domains} for $n = 2$ and $|{\boldsymbol{\theta}}| = 1$. The following simple lemma will be used in the sequel. \begin{lem}\label{lem:integrale} For any $V \in L^1(\Omega^\diese)$, we have \begin{equation} \displaystyle\label{eq:integrale} \int_{\Omega^\diese_{\boldsymbol{\theta}}} \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}} = \int_{\Omega^\diese} \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}}, \end{equation} where $\widetilde{V} \in L^1_{\textit{loc}}(\R^n_+)$ denotes the periodic extension of $V$, defined by \eqref{eq:per_extension}. \end{lem} \begin{dem} We will use the notation ${\textit{\textbf{k}}} = (k_1, \dots, k_d) \in \Z^d$ for a vector of integers. For any set $\mathcal{O} \subset \R^n$, let $\smash{\mathbbm{1}_{\mathcal{O}}}$ be the indicator function of $\mathcal{O}$, that is, the function which equals $1$ in $\mathcal{O}$ and $0$ elsewhere. By density, it suffices to prove \eqref{eq:integrale} for $V \in \mathscr{C}^\infty_0(\Omega^\diese)$. 
By additivity of integration, \[ \int_{\Omega^\diese_{\boldsymbol{\theta}}} \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}} = \int_{\R^n_+} \mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}}}({\textit{\textbf{y}}}) \; \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}} = \sum_{{\textit{\textbf{k}}} \in \Z^{n-1}} \int_{\Omega^\diese + ({\textit{\textbf{k}}}, 0)} \mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}}}({\textit{\textbf{y}}}) \; \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}}, \] where the sum over ${\textit{\textbf{k}}} \in \Z^{n-1}$ is finite because of $\mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}}}$ and because $V$ is compactly supported. We then use the change of variables ${\textit{\textbf{z}}} \mapsto {\textit{\textbf{z}}} + ({\textit{\textbf{k}}}, 0)$ which leads to \begin{align} \displaystyle \int_{\Omega^\diese_{\boldsymbol{\theta}}} \widetilde{V}({\textit{\textbf{y}}})\; d{\textit{\textbf{y}}} &= \sum_{{\textit{\textbf{k}}} \in \Z^{n-1}} \int_{\Omega^\diese} \mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}}}({\textit{\textbf{z}}} + ({\textit{\textbf{k}}}, 0)) \; \widetilde{V}({\textit{\textbf{z}}})\; d{\textit{\textbf{z}}} \quad \textnormal{because $\widetilde{V}$ is periodic} \nonumber \\ &= \int_{\Omega^\diese} \Big[ \sum_{{\textit{\textbf{k}}} \in \Z^{n-1}} \mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}} - ({\textit{\textbf{k}}}, 0)} ({\textit{\textbf{z}}}) \Big]\; \widetilde{V}({\textit{\textbf{z}}})\; d{\textit{\textbf{z}}} \quad \textnormal{by linearity.}\label{eq:preuve_lemme_integrales_1} \end{align} Furthermore, by noticing that the collection of sets $\{\Omega^\diese_{\boldsymbol{\theta}} - ({\textit{\textbf{k}}}, 0),\ \ {\textit{\textbf{k}}} \in \Z^{n-1}\}$ forms a partition of $\R^n_+$, it follows that \begin{equation} \displaystyle \forall\; {\textit{\textbf{z}}} \in \Omega^\diese, \quad \sum_{{\textit{\textbf{k}}} \in \Z^{n-1}} \mathbbm{1}_{\Omega^\diese_{\boldsymbol{\theta}} - 
({\textit{\textbf{k}}}, 0)} ({\textit{\textbf{z}}}) = \mathbbm{1}_{\R^n_+}({\textit{\textbf{z}}}) = 1. \label{eq:preuve_lemme_integrales_2} \end{equation} Combining \eqref{eq:preuve_lemme_integrales_1} and \eqref{eq:preuve_lemme_integrales_2} implies that \eqref{eq:integrale} is satisfied for $V \in \mathscr{C}^\infty_0(\Omega^\diese)$. \end{dem} \noindent The inversion of the change of variables \eqref{eq:changt_vars} leads us to introduce: \begin{equation} \displaystyle \label{eq:transverse_coo} \forall\; {\textit{\textbf{y}}} \in \R^n, \quad {\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}) := \hat{{\textit{\textbf{y}}}\;} - (y_n/{\boldsymbol{\theta}}i_n)\, \hat{{\boldsymbol{\theta}}\;} \in \R^{n-1}, \end{equation} so that, \begin{equation} {\textit{\textbf{y}}} = ({\textit{\textbf{s}}}, 0) + x\,{\boldsymbol{\theta}} \quad \Longleftrightarrow \quad {\textit{\textbf{s}}} = {\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}) \quad \textnormal{and}\quad x = y_n/{\boldsymbol{\theta}}i_n. \end{equation} \noindent The next proposition emphasizes the fact that through the change of variables \eqref{eq:changt_vars}, the differential operator $\Dt{}$ simply becomes the partial derivative with respect to $y_n$ (which is obvious for smooth functions). \begin{prop}\label{prop:trace_lifting} Let $\Psi \in L^2(\Omega^\diese)$. 
Then the periodic function $\Psi_{\boldsymbol{\theta}}$ defined as \begin{equation}\label{eq:fonction} \displaystyle \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad \widetilde{\Psi}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}) := \widetilde{\Psi}({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}),y_n/{\boldsymbol{\theta}}i_n), \end{equation} (where $\widetilde{\Psi}$ is the periodic extension of $\Psi$) belongs to $L^2(\Omega^\diese)$ and \begin{equation} \label{eq:norme_fonction} \begin{array}{r@{\ =\ }l} \displaystyle\|{\Psi}_{\boldsymbol{\theta}}\|_{L^2(\Omega^\diese)} &\displaystyle \sqrt{{\boldsymbol{\theta}}i_n}\; \|{\Psi}\|_{L^2(\Omega^\diese)}. \end{array} \end{equation} Moreover, if $\partial_{y_n}\Psi\in L^2(\Omega^\diese)$, then $\Psi_{\boldsymbol{\theta}}$ belongs to $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ with directional derivative \begin{equation} \label{eq:derivee_fonction} \displaystyle \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad D_{\boldsymbol{\theta}} \widetilde{\Psi}_{\boldsymbol{\theta}} ({\textit{\textbf{y}}}) = \frac{\partial \widetilde{\Psi}}{\partial y_n}({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}),y_n/{\boldsymbol{\theta}}i_n). \end{equation} \end{prop} \begin{dem} The map $({\textit{\textbf{s}}}, x) \mapsto ({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}$ from $\Sigma^\diese_{n, 0} \times \R_+$ to $\Omega^\diese_{\boldsymbol{\theta}}$ defines a $\mathscr{C}^1$--diffeomorphism with a non-vanishing Jacobian ${\boldsymbol{\theta}}i_n \neq 0$. 
Therefore, by using the definition \eqref{eq:Omegadiesetheta} of $\Omega^\diese_{\boldsymbol{\theta}}$, a change of variables as well as the property $s_{\boldsymbol{\theta}}(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}) = {\textit{\textbf{s}}}$, we obtain that \[ \setlength{\abovedisplayskip}{4pt} \setlength{\belowdisplayskip}{4pt} \displaystyle \int_{\Omega^\diese_{\boldsymbol{\theta}}} |\widetilde{\Psi}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})|^2\; d{\textit{\textbf{y}}} = {\boldsymbol{\theta}}i_n\; \int_{\Sigma^\diese_{n, 0}} \int_0^{+\infty} |\widetilde{\Psi}_{\boldsymbol{\theta}}(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}})|^2\; dx\, d{\textit{\textbf{s}}} = {\boldsymbol{\theta}}i_n\; \int_{\Sigma^\diese_{n, 0}} \int_0^{+\infty} |\widetilde{\Psi}({\textit{\textbf{s}}}, x)|^2\; dx\, d{\textit{\textbf{s}}}. \] We deduce from Lemma \ref{lem:integrale} that $\Psi_{\boldsymbol{\theta}} \in L^2(\Omega^\diese)$, and that \eqref{eq:norme_fonction} holds. \noindent Now in order to derive the expression of $D_{\boldsymbol{\theta}} \widetilde{\Psi}_{\boldsymbol{\theta}}$ in the sense of distributions, consider a test function $\Phi \in \mathscr{C}^\infty_0(\R^n_+)$. The change of variables $({\textit{\textbf{s}}}, x) \mapsto ({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}$ combined with Fubini's theorem for integrable functions leads to \begin{equation} \int_{\R^n_+} \widetilde{\Psi}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\; D_{\boldsymbol{\theta}} \Phi({\textit{\textbf{y}}}) \; d{\textit{\textbf{y}}} = {\boldsymbol{\theta}}i_n \int_{\R^{n-1}} \int_0^{+\infty} \widetilde{\Psi}({\textit{\textbf{s}}}, x)\; D_{\boldsymbol{\theta}} \Phi(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}) \; dx d{\textit{\textbf{s}}}. 
\label{eq:preuve_fonction_var_sep2} \end{equation} Furthermore the 1D function $\phi_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ defined by $\phi_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(x) := \Phi(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}})$ belongs to $\mathscr{C}^\infty_0(\R_+)$ and we have $[d\phi_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}/dx](x) = D_{\boldsymbol{\theta}} \Phi(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}})$ from the chain rule. Since $\partial_{y_n}\Psi$ is in $L^2$, we can integrate by parts the inner integral in \eqref{eq:preuve_fonction_var_sep2} to obtain \begin{align} \int_{\R^n_+} \widetilde{\Psi}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\; D_{\boldsymbol{\theta}} \Phi({\textit{\textbf{y}}}) \; d{\textit{\textbf{y}}} &= -{\boldsymbol{\theta}}i_n \int_{\R^{n-1}} \int_0^{+\infty} \frac{\partial \Psi}{\partial y_n}({\textit{\textbf{s}}},x)\; \phi_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(x) \; dx d{\textit{\textbf{s}}} \nonumber \\ &= - \int_{\R^n_+} \frac{\partial \Psi}{\partial y_n}({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}),y_n/{{\boldsymbol{\theta}}i_n})\; \Phi({\textit{\textbf{y}}}) \; d{\textit{\textbf{y}}}, \end{align} where the last equality comes from the change of variables ${\textit{\textbf{y}}} \mapsto ({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), y_n/{\boldsymbol{\theta}}i_n)$. This gives the expression of $D_{\boldsymbol{\theta}} \widetilde{\Psi}_{\boldsymbol{\theta}}$ in \eqref{eq:derivee_fonction}. 
\end{dem} \begin{rmk} \setlength{\abovedisplayskip}{1pt} \setlength{\belowdisplayskip}{1pt} It will be often useful to use \eqref{eq:derivee_fonction} in the form \begin{equation} \operatorname{a.e.}\; ({\textit{\textbf{s}}}, x) \in \R^n_+, \quad D_{\boldsymbol{\theta}} \widetilde{\Psi}_{\boldsymbol{\theta}} (({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}) = \frac{\partial \widetilde{\Psi}}{\partial y_n}({\textit{\textbf{s}}}, x).\label{eq:derivee_fonction_2} \end{equation} \end{rmk} \noindent The previous proposition allows in particular to deduce the surjectivity of the trace operator from $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ to $L^2(\Sigma^\diese_{n, 0})$. \begin{cor}\label{cor:trace_lifting} Let $\varphi \in L^2(\Sigma^\diese_{n, 0})$, and $\psi \in H^1(\R_+)$ such that $\psi(0)=1$. Then the periodic function defined by \begin{equation}\label{eq:fonction_var_sep} \displaystyle \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad \mathcal{R}\varphi \,({\textit{\textbf{y}}}) := \widetilde{\varphi}({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}))\; \psi(y_n/{\boldsymbol{\theta}}i_n) \end{equation} belongs to $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$, and its trace is $\restr{\mathcal{R}\varphi}{\Sigma^\diese_{n, 0}} = \varphi$. Moreover, $\mathcal{R}$ defines a continuous map from $L^2(\Sigma^\diese_{n, 0})$ to $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$. 
\end{cor} \subsection{Link with a periodic half-guide problem} \label{sec:link_with_a_periodic_half_guide_problem} For any boundary data $\varphi \in L^2(\Sigma^\diese_{n, 0})$, we can now introduce $U^+_{\boldsymbol{\theta}}$ as the solution in $H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$ of the half-guide problem \begin{equation} \label{eq:half_guide_problem} \left| \begin{array}{r@{\ }c@{\ }ll} \displaystyle - \Dt{} \big( \mu_p \; \Dt{} U^+_{\boldsymbol{\theta}} \big) - \rho_p \; \omega^2 \; U^+_{\boldsymbol{\theta}} &=& 0, \quad \textnormal{in}\quad \Omega^\diese, \\[8pt] \displaystyle \restr{U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{n, 0}} &=& \varphi, \\[8pt] \displaystyle \restr{U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, 0}} &=& \restr{U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, 1}} &\forall\; i \in \llbracket 1, n-1 \rrbracket, \\[8pt] \restr{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, 0}} &=& \restr{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, 1}} &\forall\; i \in \llbracket 1, n-1 \rrbracket. \end{array} \right. \end{equation} Note that the third equation above implies that $U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$, the first one implies that $\smash{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)}$, and finally the fourth one implies that $\smash{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)}$. The space of the boundary data \textcolor{surligneur}{can} seem surprising {compared to the Helmholtz equation with an elliptic principal part}, but recall from Corollary \ref{cor:trace_lifting} that the trace mapping on $\Sigma^\diese_{n, 0}$ is surjective from $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ to $L^2(\Sigma^\diese_{n, 0})$. 
\noindent With the functional framework introduced in the previous section, we can now show that Problem \eqref{eq:half_guide_problem} is well-posed. \begin{prop}\label{prop:half_guide_FV} For any $\varphi \in L^2(\Sigma^\diese_{n, 0})$, Problem \eqref{eq:half_guide_problem} is equivalent to the variational formulation \begin{equation} \label{eq:half_guide_FV} \left| \begin{array}{l} \textnormal{\textit{Find $U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ such that $\restr{U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{n, 0}} = \varphi$ and}} \\[8pt] \displaystyle \forall\; V \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese) \textnormal{ such that }\,\restr{V}{\Sigma^\diese_{n, 0}}=0, \quad \int_{\Omega^\diese} \left( \mu_p \; \Dt{} U^+_{\boldsymbol{\theta}} \; \Dt{} \overline{V} - \rho_p \; \omega^2 \; U^+_{\boldsymbol{\theta}} \; \overline{V} \right) = 0, \end{array} \right. \end{equation} for which Lax-Milgram's theorem applies. \end{prop} \begin{dem} The variational formulation is obtained {by multiplying the first equation of \eqref{eq:half_guide_problem} by $V \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$, and by using} Green's formula \eqref{eq:Green_formula_H1per}. The application of Lax-Milgram's theorem in $\{V \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese),\ \gamma_{n, 0} V = 0\}$, thanks to Corollary \ref{cor:trace_lifting}, is direct. \noindent For the equivalence, as usual, one picks test functions $V\in \mathscr{C}^\infty_0(\Omega^\diese)$ to deduce that the solution $U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ of \eqref{eq:half_guide_FV} satisfies the first equation of \eqref{eq:half_guide_problem}. This implies that $\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}}(\Omega^\diese)$.
The real difficulty is to show that $U^+_{\boldsymbol{\theta}}$ satisfies the fourth equation in \eqref{eq:half_guide_problem} or equivalently that $\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$. According to Proposition \ref{prop:trace_L2loc_demi_cylindre}, we have \[\displaystyle \forall\; 1 \leq i \leq n-1, \quad \restr{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, a}} \in L^2_\textit{loc}(\Sigma^\diese_{i, a}). \] Therefore, Proposition \ref{prop:formule_Green_demi_cylindre} allows us to use Green's formula \eqref{eq:Green_formula} for $U = \mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} $ and for $V \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n) \cap H^1_{{\boldsymbol{\theta}}, \textit{per}}({\Omega}^\diese)$, where $T_n$ is the skeleton defined in \eqref{eq:definition_table}. By combining this with the fact that $U^+_{\boldsymbol{\theta}}$ solves \eqref{eq:half_guide_FV} and the first equation of \eqref{eq:half_guide_problem}, one obtains that for any integer $i \in \llbracket 1, n-1 \rrbracket$, \[ \forall\; V\in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)\cap H^1_{{\boldsymbol{\theta}}, \textit{per}}({\Omega}^\diese),\quad \Big( \int_{\Sigma^\diese_{i,1}} \mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \; \overline{V} \;d s - \int_{\Sigma^\diese_{i,0}} \mu_p\; \Dt{} U^+_{\boldsymbol{\theta}} \; \overline{V} \;d s \Big)=0. \] Furthermore, $\mathscr{C}^\infty_0(\Sigma^\diese_{i, 0})$ is included in $\{\restr{V}{\Sigma^\diese_{i, 0}},\ V \in \mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)\cap H^1_{{\boldsymbol{\theta}}, \textit{per}}({\Omega}^\diese) \}$. 
In fact, any $\psi \in \mathscr{C}^\infty_0(\Sigma^\diese_{i, 0})$ admits the extension $\Psi: {\textit{\textbf{y}}} \in \Omega^\diese \mapsto \psi(y_1,\dots,y_{i-1},y_{i+1},\dots,y_n)$, which belongs to $\mathscr{C}^\infty_0(\overline{\Omega}^\diese \setminus T_n)\cap H^1_{{\boldsymbol{\theta}}, \textit{per}}({\Omega}^\diese)$. Finally, since $\mathscr{C}^\infty_0(\Sigma_{i,0}^\diese)$ is dense in $L^2(\Sigma^\diese_{i,0})$, it is easy to show that the fourth equation of \eqref{eq:half_guide_problem} holds and that $\restr{\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}}}{\Sigma^\diese_{i, 1}} \in L^2(\Sigma^\diese_{i, 1})$ for any $i \in \llbracket 1, n-1 \rrbracket$. \end{dem} \begin{figure} \caption{The half-cylinders $\Omega^\diese$ and $\Omega^\diese_{\boldsymbol{\theta}}$.} \label{fig:domains} \end{figure} \noindent We now make the link between $U^+_{\boldsymbol{\theta}}(\varphi)$ and the solution of the half-line problem \eqref{eq:half_line_problem} that fully justifies the introduction of the half-guide problem \eqref{eq:half_guide_problem}.
\noindent To do so, first, let us introduce the quasiperiodic coefficients defined for any ${\textit{\textbf{s}}} \in \R^{n-1}$ by \begin{equation} \displaystyle \forall\; x \in \R, \quad \mu_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(x) := \mu_p\big(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}\big) \quad \textnormal{and} \quad \rho_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(x) := \rho_p\big(({\textit{\textbf{s}}}, 0) + x\, {\boldsymbol{\theta}}\big), \end{equation} as well as the one-dimensional problems \begin{equation} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \frac{d}{d x} \Big( \mu_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \frac{d u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}}{d x} \Big) - \rho_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \omega^2 \; u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} &=& 0, \quad \textnormal{in} & \R_+, \\[8pt] \displaystyle u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(0) &=& 1. \end{array} \right. \label{eq:half_line_problems} \end{equation} Note that \eqref{eq:half_line_problem} corresponds to \eqref{eq:half_line_problems} taken with ${\textit{\textbf{s}}} = 0$. \noindent Under the assumptions \eqref{eq:coef_ellipt} and \eqref{eq:dissipation}, Problem \eqref{eq:half_line_problems} admits a unique solution $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ in $H^1(\R_+)$ for any ${\textit{\textbf{s}}} \in \R^{n-1}$. Moreover, $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ decays exponentially at infinity, uniformly with respect to ${\textit{\textbf{s}}}$, that is, there exist constants $\alpha, c > 0$ depending only on $\mu_\pm, \rho_\pm$ such that \begin{equation} \label{eq:exp_decay_halfline_s} \quad \forall\; {\textit{\textbf{s}}} \in \R^{n-1}, \quad \big\|\euler^{-\alpha \mathop{\mathfrak{Im}}\nolimits \omega\, x}\, u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}\big\|_{H^1(\R^+)} \leq c. 
\end{equation} Furthermore, thanks to the continuity of $\mu_p$ and $\rho_p$, we can show that $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ is continuous with respect to ${\textit{\textbf{s}}}$, as stated in the next proposition. \begin{prop} \label{prop:properties_us} The mapping ${\textit{\textbf{s}}} \in \R^{n-1} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$, which associates with a real vector ${\textit{\textbf{s}}}$ the solution in $H^1(\R_+)$ of the problem \eqref{eq:half_line_problems}, defines a uniformly continuous function which is periodic of period $1$ in each direction. \end{prop} \begin{dem} To show that ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ is $1$--periodic in each direction, one simply has to note that since $\mu_{{\textit{\textbf{s}}},{\boldsymbol{\theta}}}$ and $\rho_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ are $1$--periodic with respect to each $s_i$, both $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ and $u^+_{{\textit{\textbf{s}}} + \vec{{\textit{\textbf{e}}}}_i, {\boldsymbol{\theta}}}$ satisfy the same half-line problem \eqref{eq:half_line_problems}. Thus, by well-posedness of \eqref{eq:half_line_problems}, $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} = u^+_{{\textit{\textbf{s}}} + \vec{{\textit{\textbf{e}}}}_i, {\boldsymbol{\theta}}}$. \noindent Now let us prove the regularity of ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$. 
For any ${\textit{\textbf{s}}}_1, {\textit{\textbf{s}}}_2 \in \R^{n-1}$, by writing the variational formulations satisfied by $u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}}$ and $u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}}$, and by subtracting one from the other, we obtain \begin{multline*}\displaystyle \forall\; v \in H^1_0(\R_+), \quad \int_{\R_+} \Big[ \mu_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}}\; \frac{d}{d x} (u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}}) \; \overline{\frac{d v}{d x}} - \rho_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} \; \omega^2 \; (u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}}) \; \overline{v} \Big] = \\ \int_{\R_+} \Big[ (\mu_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} - \mu_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}})\; \frac{d u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}}}{d x} \; \overline{\frac{d v}{d x}} + (\rho_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - \rho_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}}) \; \omega^2 \; u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} \; \overline{v} \Big]. \end{multline*} Now choose $v = u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} \in H^1_0(\R_+)$ in the above equality.
The well-posedness of \eqref{eq:half_line_problems}, a Cauchy-Schwarz inequality applied to the right-hand side and \eqref{eq:exp_decay_halfline_s} imply that there exists a real number $c > 0$ independent of ${\textit{\textbf{s}}}$ and ${\boldsymbol{\theta}}$ such that \begin{equation}\label{eq:cont}\displaystyle \big\|u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} \big\|_{H^1(\R_+)} \leq c\; \Big( \|\mu_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} - \mu_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}}\|_\infty + \|\rho_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} - \rho_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}}\|_\infty \Big). \end{equation} The functions $ \mu_p$ and $\rho_p$ are continuous and $1$--periodic in each direction: by the Heine-Cantor theorem, they are uniformly continuous. Let us define the modulus of uniform continuity \[ \forall\; \mu \in \mathscr{C}^0(\R^n),\ \forall\; \varepsilon>0, \quad \delta(\mu,\varepsilon)=\sup_{\itbf{y},\itbf{z}}\{|\mu(\itbf{y})-\mu(\itbf{z})|,\;|\itbf{y}-\itbf{z}|<\varepsilon\}. \] A function $\mu$ is uniformly continuous if $\delta(\mu,\varepsilon)$ tends to $0$ as $\varepsilon$ tends to $0$. It follows from \eqref{eq:cont} that \[ \big\|u^+_{{\textit{\textbf{s}}}_1, {\boldsymbol{\theta}}} - u^+_{{\textit{\textbf{s}}}_2, {\boldsymbol{\theta}}} \big\|_{H^1(\R_+)} \leq c\; \Big(\delta(\mu_p,|{\textit{\textbf{s}}}_1-{\textit{\textbf{s}}}_2|) + \delta(\rho_p,|{\textit{\textbf{s}}}_1-{\textit{\textbf{s}}}_2|) \Big). \] Therefore, ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ is continuous from $\R^{n-1}$ to $H^1(\R_+)$. \end{dem} \begin{prop}\label{prop:structure_half_guide} Let ${\textit{\textbf{s}}}_{\boldsymbol{\theta}}$ be the mapping defined by \eqref{eq:transverse_coo}, and $\widetilde{U}^+_{\boldsymbol{\theta}}$ (resp. $\widetilde{\varphi}$) be the periodic extension of $U^+_{\boldsymbol{\theta}}$ (resp.
$\varphi$) the solution of \eqref{eq:half_guide_problem}. Then, we have \begin{equation} \label{eq:concatenation_half_guide} \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad \widetilde{U}^+_{\boldsymbol{\theta}}(\widetilde{\varphi})({\textit{\textbf{y}}}) = \widetilde{\varphi}\big({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\big)\; u^+_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), {\boldsymbol{\theta}}} (y_n/{\boldsymbol{\theta}}i_n), \end{equation} {\color{surligneur}or equivalently \begin{equation}\displaystyle\label{eq:preuve_lien_2D_1D_1} \operatorname{a.e.}\; ({\textit{\textbf{s}}}, x) \in \R^{n-1} \times \R_+, \quad \widetilde{U}^+_{\boldsymbol{\theta}}(\widetilde{\varphi})(({\textit{\textbf{s}}}, 0) + {\boldsymbol{\theta}}\, x) = \widetilde{\varphi}({\textit{\textbf{s}}})\; u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} (x). \end{equation}} Moreover if $\widetilde{\varphi}$ is continuous in the neighbourhood of $0$ and satisfies $\widetilde{\varphi}(0) = 1$, then \begin{equation}\label{eq:lien_2D_1D} \operatorname{a.e.}\; x \in \R, \quad u^+_{\boldsymbol{\theta}}(x) = \widetilde{U}^+_{\boldsymbol{\theta}}(\widetilde{\varphi})(x\, {\boldsymbol{\theta}}) \end{equation} \end{prop} \begin{dem} We begin by proving \eqref{eq:concatenation_half_guide}. Let us denote for $\operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+$, $U^{}_1({\textit{\textbf{y}}})$ the right-hand side of \eqref{eq:concatenation_half_guide}. 
Note that $\Psi:({\textit{\textbf{s}}},x)\mapsto \widetilde{\varphi}({\textit{\textbf{s}}})\; u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} (x)$ is $1$--periodic with respect to ${\textit{\textbf{s}}}$ (thanks to Proposition \ref{prop:properties_us}), and belongs to $L^2(\Omega^\diese)$ since \[ \|\Psi\|^2_{L^2(\Omega^\diese)} = \int_{\Sigma^\diese_{n, 0}} |\varphi({\textit{\textbf{s}}})|^2\ \|u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}\|^2_{L^2(\R_+)} \; d {\textit{\textbf{s}}} \leq {\boldsymbol{\theta}}i_n\, c^2 \; \|\varphi\|^2_{L^2(\Sigma^\diese_{n, 0})},\ \ \textnormal{with}\ \ c = \sup_{{\textit{\textbf{s}}}} \|u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}\|_{L^2(\R_+)}. \] Moreover, since for all ${\textit{\textbf{s}}}$, $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \in H^1(\R^+)$, $\partial_{y_n}\Psi$ is also in $L^2(\Omega^\diese)$ (using similar inequalities to the above). By Proposition \ref{prop:trace_lifting}, $U^{}_1$ belongs to $H^1_{{\boldsymbol{\theta}},\textit{per}}(\Omega^\diese)$ with \[ \displaystyle \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad D_{\boldsymbol{\theta}}\, \widetilde{U_1} ({\textit{\textbf{y}}}) = \widetilde{\varphi}\big({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\big)\; \frac{d u^+_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), {\boldsymbol{\theta}}}}{d x} (y_n/{\boldsymbol{\theta}}i_n). \] Finally, since $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(0) = 1$, it is clear that $\restr{U^{}_1}{\Sigma^\diese_{n, 0}} = \varphi$. 
By repeating the same argument, we can show that $\mu_p D_{\boldsymbol{\theta}}\, U^{}_1$ belongs to $H^1_{{\boldsymbol{\theta}},\textit{per}}(\Omega^\diese)$ with \[ \displaystyle \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad D_{\boldsymbol{\theta}}\, [\mu_p\,D_{\boldsymbol{\theta}}\, \widetilde{U_1}] ({\textit{\textbf{y}}}) = \widetilde{\varphi}\big({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\big)\; \frac{d}{d x}\Big(\mu_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}),{\boldsymbol{\theta}}} \frac{d u^+_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), {\boldsymbol{\theta}}}}{d x}\Big) (y_n/{\boldsymbol{\theta}}i_n). \] Since $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ satisfies \eqref{eq:half_line_problems}, it is clear that $U_1$ satisfies \eqref{eq:half_guide_problem}. By well-posedness of \eqref{eq:half_guide_problem}, we have $U^{}_1 = U^+_{\boldsymbol{\theta}}$. \noindent \textcolor{surligneur}{The equivalence between \eqref{eq:concatenation_half_guide} and \eqref{eq:preuve_lien_2D_1D_1} is directly obtained using the change of variables $({\textit{\textbf{s}}}, x) \mapsto (({\textit{\textbf{s}}}, 0) + {\boldsymbol{\theta}}\, x)$. Moreover, w}e have from Proposition \ref{prop:properties_us} that ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ is continuous. If in addition to that, $\widetilde{\varphi}$ is continuous in a neighbourhood of $0$, then \eqref{eq:preuve_lien_2D_1D_1} becomes true \emph{for any} ${\textit{\textbf{s}}}$ in that neighbourhood. In particular, \eqref{eq:preuve_lien_2D_1D_1} can be written for ${\textit{\textbf{s}}} = 0$, thus leading to \eqref{eq:lien_2D_1D}. 
\end{dem} \noindent In particular, we deduce from the above proposition that \begin{equation} \label{eq:link_DU_du} \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \R^n_+, \quad \Dt{} \widetilde{U}^+_{\boldsymbol{\theta}}(\widetilde{\varphi})({\textit{\textbf{y}}}) = \widetilde{\varphi}\big({\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}})\big)\; \frac{d u^+_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), {\boldsymbol{\theta}}}}{dx} (y_n/{\boldsymbol{\theta}}i_n). \end{equation} \begin{rmk} The half-guide solution $U^+_{\boldsymbol{\theta}}$ depends on $\varphi$ whereas $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ does not. In this sense, the relation \eqref{eq:concatenation_half_guide} \textcolor{surligneur}{can} seem surprising at first sight. Numerical results presented in Section \ref{sec:numerical_results} will illustrate this property. \end{rmk} \section{Resolution of the half-guide problem} \label{sec:resolution_half_guide_problem} The advantage of the lifting process lies in the periodic nature of \eqref{eq:half_guide_problem}, which allows us to exploit tools that are well-suited for periodic waveguides. In this paper, we use a DtN-based method \cite{flissthese, jolyLiFliss}, developed for the elliptic\footnote{By \textit{elliptic} Helmholtz equation, we refer to the Helmholtz equation with an \textit{elliptic principal part}.} Helmholtz equation $- \nabla \cdot (\mu_p\; \nabla U) - \rho_p\; \omega^2\; U = 0$ in unbounded periodic guides. This method does not rely on decay properties, and therefore remains robust when the absorption tends to $0$. As we essentially transpose this method to our directional Helmholtz equation, we will see below that the framework remains exactly the same, although the analysis has to be adapted.
Let us mention the \textit{recursive doubling method} \cite{yuan2007recursive, ehrhardt2008numerical}, suited for bounded periodic waveguides, and a method \cite{zhang2021numerical} based on the Floquet-Bloch transform, although its extension to our non-elliptic equation seems unclear. \noindent In what follows, $\mathcal{C}^\diese_\ell$ is the cell defined for every $\ell \in \N$ by \begin{equation} \mathcal{C}^\diese_0 = (0, 1)^n \quad \textnormal{and} \quad \mathcal{C}^\diese_\ell = \mathcal{C}^\diese_0 + \ell\, \vec{{\textit{\textbf{e}}}}_n, \quad \textnormal{so that} \quad \Omega^\diese = \bigcup_{\ell \in \N} \mathcal{C}^\diese_\ell. \end{equation} For $\ell > 0$, we call $\Sigma^\diese_{n, \ell}$ the interface between the cells $\mathcal{C}^\diese_\ell$ and $\mathcal{C}^\diese_{\ell+1}$, that is, $\Sigma^\diese_{n, \ell} = \Sigma^\diese_{n, 0} + \ell\, \vec{{\textit{\textbf{e}}}}_n$. By periodicity, each cell $\mathcal{C}^\diese_\ell$ can be identified to $\mathcal{C}^\diese_0$. Similarly, each interface $\Sigma^\diese_{n, \ell}$ can be identified to $\Sigma^\diese_{n, 0}$. The cells and interfaces are represented in Figure \ref{fig:domains}. \subsection{Structure of the solution} \label{sec:structure_of_the_solution} The solution $U^+_{\boldsymbol{\theta}}(\varphi)$ of \eqref{eq:half_guide_problem} has a particular structure that we explain in this section. Denote by $\mathcal{P} \in \mathcal{L}\big(L^2(\Sigma^\diese_{n, 0})\big)$ the operator \begin{equation} \label{eq:definition_propagation_operator} \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \mathcal{P}\varphi := \restr{U^+_{\boldsymbol{\theta}}(\varphi)}{\Sigma^\diese_{n, 1}}, \end{equation} where $L^2(\Sigma^\diese_{n, 1})$ and $L^2(\Sigma^\diese_{n, 0})$ have been identified to each other in an obvious manner. This identification will be used systematically in what follows, even if not mentioned. 
Note that the operator $\mathcal{P}$ is well-defined, due to the continuity of the trace operator on $\Sigma^\diese_{i, a}$ \eqref{eq:trace_func_per}. \begin{prop} \label{prop:periodic_structure_solution} For any $\varphi$ in $L^2(\Sigma^\diese_{n, 0})$, we have \begin{equation} \label{eq:periodic_structure_solution} \forall\; \ell \in \N,\;\operatorname{a.e.}\; {\textit{\textbf{y}}} \in \Omega^\diese, \quad U^+_{\boldsymbol{\theta}}(\varphi)({\textit{\textbf{y}}} + \ell\, \vec{{\textit{\textbf{e}}}}_n) = U^+_{\boldsymbol{\theta}}(\mathcal{P}^\ell \varphi)({\textit{\textbf{y}}}). \end{equation} Moreover, the spectral radius of $\mathcal{P}$ is strictly less than one. \end{prop} \begin{dem} We only present the outline of the proof, which is quite similar to the one in \cite{flissthese, jolyLiFliss}. Given $\varphi \in L^2(\Sigma^\diese_{n, 0})$, consider the function $U^{}_1$ defined in $\Omega^\diese$ by $U^{}_1({\textit{\textbf{y}}}) = U^+_{\boldsymbol{\theta}}(\varphi)({\textit{\textbf{y}}} + \vec{{\textit{\textbf{e}}}}_n)$ for almost any ${\textit{\textbf{y}}} \in \Omega^\diese$. Since the coefficients $\mu_p$ and $\rho_p$ are periodic, one deduces that $U^{}_1$ satisfies the volume equation as well as the periodicity condition in \eqref{eq:half_guide_problem}. Furthermore, \[\displaystyle \restr{U^{}_1}{\Sigma^\diese_{n, 0}} = \restr{U^+_{\boldsymbol{\theta}}(\varphi)}{\Sigma^\diese_{n, 1}} = \mathcal{P}\varphi. \] Thus, by well-posedness of \eqref{eq:half_guide_problem}, we have \eqref{eq:periodic_structure_solution} for $\ell = 1$. The result \eqref{eq:periodic_structure_solution} for $\ell \geq 2$ is proved by induction. \noindent It remains to show that the spectral radius is strictly less than $1$. 
To this end, by analogy with \eqref{eq:exp_decay_halfline_s}, one can show the existence of constants $\alpha, c > 0$ such that \begin{equation}\label{eq:exp_decay_halfguide}\displaystyle \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \big\|e^{\alpha\mathop{\mathfrak{Im}}\nolimits \omega\, y_n/{\boldsymbol{\theta}}i_n}\, U^+_{\boldsymbol{\theta}} \big\|_{H^1_{\boldsymbol{\theta}}(\Omega^\diese)} \leq c\; \|\varphi\|_{L^2(\Sigma^\diese_{n, 0})}. \end{equation} Since $\mathcal{P}^\ell \varphi = U^+_{\boldsymbol{\theta}}(\varphi)(\cdot, \ell)$, the estimate above implies that $\|\mathcal{P}^\ell\| \leq c\; e^{-\alpha\mathop{\mathfrak{Im}}\nolimits \omega\, \ell/{\boldsymbol{\theta}}i_n}$. Hence, using Gelfand's formula \cite[\S 10.3]{rudin1991functional}, the spectral radius can be estimated as follows: \[ \rho(\mathcal{P}) = \lim_{\ell \to +\infty}\|\mathcal{P}^\ell\|^{1/\ell} \leq e^{-\alpha\mathop{\mathfrak{Im}}\nolimits \omega/{\boldsymbol{\theta}}i_n} < 1. \] \end{dem} \noindent The operator $\mathcal{P}$ is called the \emph{propagation operator}, as it describes how the solution of \eqref{eq:half_guide_problem} evolves from one interface to another. Provided that $\mathcal{P}$ is known, the solution $U^+_{\boldsymbol{\theta}}(\varphi)$ may be constructed using \emph{local cell problems}. Let us first introduce the appropriate functional framework in a periodicity cell \begin{equation} \label{eq:H1thetaper_celle} \displaystyle H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}_0^\diese) := \Big\{ U \in H^1_{{\boldsymbol{\theta}}}(\mathcal{C}_0^\diese),\ \widetilde{U} \in H^1_{{\boldsymbol{\theta}}, \textit{loc}}(\mathcal{B}_0) \Big\} , \end{equation} where $\mathcal{B}_0:=\R^n_+ \cap\{ 0<y_n<1 \}$. Similarly to Section \ref{sub:trace}, one can show that any function of $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}_0^\diese)$ has an $L^2$ trace on the boundary of $\mathcal{C}_0^\diese$.
We can prove in particular that \[ H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}^\diese_0) = \Big\{U \in H^1_{\boldsymbol{\theta}}(\mathcal{C}^\diese_0)\ /\ \restr{U}{y_i = 0} = \restr{U}{y_i = 1},\ \forall\; i \in \llbracket 1, n-1 \rrbracket \Big\}. \] We can now introduce the local cell problems: for all $\varphi\in L^2(\Sigma^\diese_{n, 0})$, for $j\in\{0,1\}$, let $E^j(\varphi)\in H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}^\diese_0) $ satisfy \begin{equation} \label{eq:local_cell_problem} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \Dt{} \big( \mu_p \; \Dt{} E^j \big) - \rho_p \; \omega^2 \; E^j &=& 0, \quad \textnormal{in} & \mathcal{C}^\diese_0, \\[4pt] \multicolumn{4}{l}{ \restr{\mu_p\; \Dt{} E^j}{y_i = 0} =\restr{\mu_p\; \Dt{} E^j}{y_i = 1} \quad \forall\; i \in \llbracket 1, n-1 \rrbracket,} \end{array} \right. \end{equation} defined for $j = 0, 1$, with the boundary conditions \begin{equation} \label{eq:local_cell_BC} \left| \begin{array}{c@{\quad}c@{\quad}c} \restr{E^0}{\Sigma^\diese_{n, 0}} = \varphi &\textnormal{and}& \restr{E^0}{\Sigma^\diese_{n, 1}} = 0, \\[4pt] \restr{E^1}{\Sigma^\diese_{n, 0}} = 0 &\textnormal{and}& \restr{E^1}{\Sigma^\diese_{n, 1}} = \varphi. \end{array} \right. \end{equation} A variational formulation can be derived as in Proposition \ref{prop:half_guide_FV}, and the well-posedness follows once again from a lifting argument (see Proposition \ref{prop:trace_lifting}) combined with Lax-Milgram's theorem in $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}^\diese_0)$. \noindent Proposition \ref{prop:periodic_structure_solution} implies that $\restr{U^+_{\boldsymbol{\theta}}(\varphi)(\cdot + \ell\, \vec{{\textit{\textbf{e}}}}_n)}{\Sigma^\diese_{n, 0}} = \mathcal{P}^\ell \varphi$. 
Hence, if the propagation operator $\mathcal{P}$ is known, by linearity, the solution of the half-guide problem can be entirely constructed cell by cell as follows: \begin{equation} \displaystyle \forall\; \ell \in \N, \quad \restr{U^+_{\boldsymbol{\theta}}(\varphi)(\cdot + \ell\, \vec{{\textit{\textbf{e}}}}_n)}{\mathcal{C}^\diese_0} = E^0(\mathcal{P}^{\ell} \varphi) + E^1(\mathcal{P}^{\ell+1} \varphi). \label{eq:UfromEis} \end{equation} \subsection{Characterization of the propagation operator: the Riccati equation}\label{sec:Riccati} In the sequel, $\langle \cdot, \cdot \rangle$ denotes the canonical $L^2$ scalar product on $\Sigma^\diese_{n, 0}$ (or equivalently on $\Sigma^\diese_{n, 1}$). \noindent In order to characterize the propagation operator $\mathcal{P}$, it is useful to introduce the \emph{local DtN operators} $\mathcal{T}^{jk} \in \mathcal{L}(L^2(\Sigma^\diese_{n, 0}))$, defined for $j, k = 0, 1$ by \begin{equation} \label{eq:DtNloc} \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \mathcal{T}^{jk} \varphi = (-1)^{k+1}\; {\boldsymbol{\theta}}i_n\; \restr{\left[\mu_p\; \Dt{} E^j(\varphi)\right]}{\Sigma^\diese_{n, k}}. \end{equation} where $E^j(\varphi)$ satisfies \eqref{eq:local_cell_problem}-\eqref{eq:local_cell_BC}. By Green's formula \eqref{eq:Green_formula}, note that for all $j, k = 0, 1$ and for $(\varphi, \psi) \in L^2(\Sigma^\diese_{n, 0})^2$, these operators satisfy \begin{equation}\label{eq:DtNloc_weak_form} \displaystyle \Big\langle \mathcal{T}^{j k} \varphi,\;{\psi} \Big\rangle = \int_{\mathcal{C}^\diese_0} \left[ \mu_p\; \Dt{} E^j(\varphi) \; \Dt{} \overline{E^k(\psi)} - \rho_p\; \omega^2\; E^j(\varphi)\; \overline{E^k(\psi)}\, \right]. \end{equation} Before deriving other useful properties of the local DtN operators, we need to introduce some additional notations. 
For any closed operator $\mathcal{A} \in \mathcal{L}(L^2(\Sigma^\diese_{n, 0}))$, we denote $\mathcal{A}^*$ the adjoint of $\mathcal{A}$, and $\overline{\mathcal{A}}$ its «\,\textit{complex conjugate}\,», that is, \[ \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \overline{\mathcal{A}}\varphi = \overline{\mathcal{A} \overline{\varphi}}. \] \noindent It is not difficult to see that $\overline{\mathcal{A}^*} = \overline{\mathcal{A}}^*$, and $\overline{\overline{\mathcal{A}}} = \mathcal{A}$. \begin{prop} \label{prop:pties_local_DtN} The local DtN operators $\mathcal{T}^{jk}$ satisfy \begin{equation} \label{eq:adjoints_local_DtN} \left[\mathcal{T}^{00}\right]^* = \overline{\mathcal{T}^{00}}, \quad \left[\mathcal{T}^{11}\right]^* = \overline{\mathcal{T}^{11}}, \quad \left[\mathcal{T}^{01}\right]^* = \overline{\mathcal{T}^{10}}, \quad \left[\mathcal{T}^{10}\right]^* = \overline{\mathcal{T}^{01}}. \end{equation} Furthermore, the operators $\mathcal{T}^{00}$, $\mathcal{T}^{11}$, and $\mathcal{T}^{00} + \mathcal{T}^{11}$ are invertible. \end{prop} \begin{dem} The property \eqref{eq:adjoints_local_DtN} follows from Green's formula applied to $E^j(\varphi)$ and $\overline{E^k(\overline{\psi})}$, see for instance \cite[Proposition 2.2.4]{flissthese} in the case of the Helmholtz equation. \noindent The operators $\mathcal{T}^{00}$, $\mathcal{T}^{11}$, and $\mathcal{T}^{00} + \mathcal{T}^{11}$ are bounded. We are going to show that they are also coercive. Their invertibility will then follow from Lax-Milgram's theorem. 
From the expression \eqref{eq:DtNloc_weak_form}, one has the existence of a constant $c \equiv c(\mu_-, \rho_-, |\omega|) > 0$ such that \[\displaystyle - |\omega|\; \mathop{\mathfrak{Im}}\nolimits\left[ \frac{1}{\omega} \Big\langle\mathcal{T}^{kk} \varphi,\; {\varphi} \Big\rangle \right] \geq c\; \mathop{\mathfrak{Im}}\nolimits \omega\; \|E^k(\varphi)\|^2_{H^1_{\boldsymbol{\theta}}(\mathcal{C}^\diese_0)} \geq \tilde{c}\, \mathop{\mathfrak{Im}}\nolimits \omega\, \|\varphi\|^2_{L^2(\Sigma^\diese_{n, 0})}, \] since from \eqref{eq:trace_func_per}, the trace application from $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}^\diese_0)$ to $L^2(\Sigma^\diese_{n, 0})$ is continuous. It follows that the operators $\mathcal{T}^{00}$ and $\mathcal{T}^{11}$ are coercive, and therefore invertible. The inequalities above summed for $k = 0, 1$ imply the coercivity and hence the invertibility of $\mathcal{T}^{00} + \mathcal{T}^{11}$ as well. \end{dem} \noindent As seen earlier, the solution of the half-guide problem \eqref{eq:half_guide_problem} is given by \eqref{eq:UfromEis}. Now let us use the characterization of $H^1_{\textit{per}, {\boldsymbol{\theta}}}(\Omega^\diese)$, namely, Corollary \ref{cor:rest_H1thetaper} with $a = 1$, so that $\Omega^\diese_{a, -} = \mathcal{C}^\diese_0$ and $\Omega^\diese_{a, +} = \Omega^\diese \setminus \mathcal{C}^\diese_0$. 
Since $\mu_p\; \Dt{}U^+_{\boldsymbol{\theta}}(\varphi)$ belongs to $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$, the directional derivative of $U^+_{\boldsymbol{\theta}}(\varphi)$ is continuous across the interface $\Sigma^\diese_{n, 1}$, \emph{i.e.} \begin{equation} \restr{\left[\mu_p\; {\Dt{} U^+_{\boldsymbol{\theta}}(\varphi)}\right]}{\Sigma^\diese_{n, 1}} = \restr{\left[\mu_p\; {\Dt{} U^+_{\boldsymbol{\theta}}(\varphi)(\cdot + \vec{{\textit{\textbf{e}}}}_n)}\right]}{\Sigma^\diese_{n, 0}}, \end{equation} or equivalently, \begin{equation} \begin{array}{l} \restr{\left[\mu_p\; \Dt{} E^0(\varphi)\right]}{\Sigma^\diese_{n, 1}} + \; \restr{\left[\mu_p\; \Dt{} E^1(\mathcal{P} \varphi)\right]}{\Sigma^\diese_{n, 1}} \\[15pt] \ \;\qquad\qquad\qquad\qquad=\; \restr{\left[\mu_p\; \Dt{} E^0(\mathcal{P} \varphi)\right]}{\Sigma^\diese_{n, 0}} +\; \restr{\left[\mu_p\; \Dt{} E^1(\mathcal{P}^2 \varphi)\right]}{\Sigma^\diese_{n, 0}}. \end{array} \label{eq:cty_across_Sigma} \end{equation} By using the definition of the local DtN operators $\mathcal{T}^{jk}$, \eqref{eq:cty_across_Sigma} leads to the following characterization. \begin{prop} The propagation operator $\mathcal{P}$ \textcolor{surligneur}{defined by \eqref{eq:definition_propagation_operator}} is the unique solution of the constrained Riccati equation \begin{equation} \label{eq:Riccati} \left| \begin{array}{l} \textnormal{\textit{Find $\mathcal{P} \in \mathcal{L}(L^2(\Sigma^\diese_{n, 0}))$ such that $\rho(\mathcal{P}) < 1$ and}} \\[12pt] \multicolumn{1}{c}{\displaystyle \mathcal{T}^{10}\mathcal{P}^2 + (\mathcal{T}^{00} + \mathcal{T}^{11})\, \mathcal{P} + \mathcal{T}^{01} = 0.} \end{array} \right. \end{equation} \end{prop} \begin{dem} The proof is identical to the one for the elliptic Helmholtz equation \cite[Theorem 4.1]{jolyLiFliss}. We know from Proposition \ref{prop:periodic_structure_solution} that $\mathcal{P}$ has a spectral radius which is strictly less than $1$.
Moreover \eqref{eq:cty_across_Sigma} ensures that $\mathcal{P}$ satisfies the Riccati equation. \noindent In order to prove the uniqueness, let us consider an operator $\mathcal{P}_1$ which satisfies \eqref{eq:Riccati}. The function defined cell by cell by \[\displaystyle \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \forall\; \ell \in \N^*, \quad \restr{U_1(\varphi)(\cdot + \ell\, \vec{{\textit{\textbf{e}}}}_n)}{\mathcal{C}^\diese_0} = E^0(\mathcal{P}_1^{\ell} \varphi) + E^1(\mathcal{P}_1^{\ell+1} \varphi), \] solves \eqref{eq:half_guide_problem} in each cell $\mathcal{C}_\ell$ and is continuous across each interface $\Sigma^\diese_{n, \ell}$, by definition \eqref{eq:local_cell_problem}, \eqref{eq:local_cell_BC} of $E^0$ and $E^1$. By Corollary \ref{cor:rest_H1thetaper}, $U_1$ is locally $H^1_{\boldsymbol{\theta}}$ in $\Omega^\diese$. \noindent Moreover, since $\mathcal{P}_1$ satisfies \eqref{eq:Riccati}, the directional derivative $\mu_p \Dt{} U_1$ is continuous across each interface. Thus, using Corollary \ref{cor:rest_H1thetaper}, we deduce that $U_1$ satisfies \eqref{eq:half_guide_problem} in $\Omega^\diese$. \noindent Furthermore, given that $\rho(\mathcal{P}_1) < 1$, Gelfand's formula and the well-posedness of the cell problems ensure that there exist positive constants $c, \rho_*$, with $\rho_* < 1$ such that, for $\ell \in \N$ large enough, \[ \|U_1(\varphi)\|_{H^1_{\boldsymbol{\theta}}(\mathcal{C}^\diese_\ell)} \leq c\; \rho_*^\ell\; \|\varphi\|_{L^2(\Sigma^\diese_{n, 0})}. \] Hence $U_1(\varphi)$ belongs to $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\Omega^\diese)$ and satisfies the half-guide problem \eqref{eq:half_guide_problem}. By well-posedness of \eqref{eq:half_guide_problem}, $U_1(\varphi)$ and $U^+_{\boldsymbol{\theta}}(\varphi)$ coincide, and thus have the same trace on $\Sigma^\diese_{n, 1}$, that is $\mathcal{P}_1 \varphi = \mathcal{P} \varphi$ for any $\varphi \in L^2(\Sigma^\diese_{n, 0})$. 
\end{dem} \noindent As a consequence, the propagation operator can be obtained by solving the Riccati equation in \eqref{eq:Riccati}, and by choosing the unique solution whose spectral radius is strictly less than $1$. One important thing to retain from the above is that both the propagation operator and the solution of the half-guide problem only require the computation of $E^0$, $E^1$, and the operators $\mathcal{T}^{00}$, $\mathcal{T}^{10}$, $\mathcal{T}^{01}$, and $\mathcal{T}^{11}$, which involve problems defined on a periodicity cell. However, the resolution of the constrained Riccati equation \eqref{eq:Riccati} is not obvious at all. The properties of this equation are investigated in further details in Section \ref{sec:about_Riccati_equation}. \subsection{The DtN operator and the DtN coefficient} \label{sec:the_DtN_operator_and_the_DtN_coefficient} The goal of this part is to see how the half-guide problem and the local cell problems can be used to compute the DtN coefficient $\lambda^+$. We recall that \[\displaystyle \lambda^+ = - \mu_{\boldsymbol{\theta}}(0) \; \frac{d u^+_{\boldsymbol{\theta}}}{d x}(0). \] Therefore, it is natural to introduce the DtN operator $\Lambda \in \mathcal{L}(L^2(\Sigma^\diese_{n, 0}))$ defined by \begin{equation} \label{eq:def_DtN_operator} \displaystyle \forall\; \varphi \in L^2(\Sigma^\diese_{n, 0}), \quad \Lambda \varphi := - {\boldsymbol{\theta}}i_n\; \restr{\left[\mu_p\; \Dt{} U^+_{\boldsymbol{\theta}}(\varphi)\right]}{\Sigma^\diese_{n, 0}}. \end{equation} This operator also has the following properties, whose proof is exactly identical to the one of Proposition \ref{prop:pties_local_DtN}. \begin{prop} \label{prop:pties_DtN} One has $\Lambda^* = \overline{\Lambda}$. Moreover, $\Lambda$ and $\Lambda + \mathcal{T}^{11}$ are invertible operators. 
\end{prop} \noindent Taking the directional derivative of \eqref{eq:UfromEis} (for $\ell=0$) on $\Sigma^\diese_{n, 0}$ and using the definition \eqref{eq:DtNloc} of the local DtN operators $\mathcal{T}^{00}$ and $\mathcal{T}^{10}$ leads to \begin{equation} \label{eq:DtN_operator} \displaystyle \Lambda = \mathcal{T}^{00} + \mathcal{T}^{10} \mathcal{P}. \end{equation} Besides, by writing the formula \eqref{eq:link_DU_du} after multiplication by $\mu_p$, and by evaluating it for ${\textit{\textbf{y}}} = ({\textit{\textbf{s}}}, 0)$, so that ${\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}) ={\textit{\textbf{s}}}$, we obtain \begin{equation} \label{eq:expression_Lambda} \Lambda \varphi({\textit{\textbf{s}}}) = {\boldsymbol{\theta}}i_n\; \lambda^{}_{\boldsymbol{\theta}}({\textit{\textbf{s}}})\; \varphi({\textit{\textbf{s}}}), \quad \textnormal{with} \quad \lambda^{}_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) = - \Big[\mu_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \frac{d u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}}{d x}\Big](0), \end{equation} namely, $\Lambda$ is a multiplication operator. We deduce from \eqref{eq:expression_Lambda} the DtN coefficient $\lambda^+$.
Moreover, if $\varphi \in \mathscr{C}_{\textit{per}}(\R^{n-1})$ is a given function which satisfies $\varphi(0) = 1$, then we have \begin{equation} \lambda^+ = \lambda^{}_{\boldsymbol{\theta}}(0) = \frac{1}{{\boldsymbol{\theta}}i_n}\; (\Lambda \varphi)(0).\label{eq:DtNcoeff_from_DtNoperator} \end{equation} \end{prop} \begin{dem} Using Green's formula, we have that for all ${\textit{\textbf{s}}} \in \R^{n-1}$ \[\displaystyle \lambda^{}_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) = a_{\textit{\textbf{s}}}(u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}, u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}), \quad \textnormal{with} \quad a_{\textit{\textbf{s}}}(u, v) = \int_{\R_+} \Big( \mu_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \frac{d u}{d x} \; \overline{\frac{d v}{d x}} - \rho_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \omega^2 \; u \; \overline{v} \Big). \] The continuity of $u \mapsto a_{\textit{\textbf{s}}}(u, u)$ results directly from the properties of the coefficients $\mu_p$ and $\rho_p$. Moreover, Proposition \ref{prop:properties_us} ensures that the function ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ is continuous. Therefore, as the composition of these two functions, $\lambda^{}_{\boldsymbol{\theta}}$ is also continuous. If in addition $\varphi$ is continuous, then $\Lambda \varphi$ is also continuous. Hence, $(\Lambda \varphi) (0) = {\boldsymbol{\theta}}i_n\; \lambda^{}_{\boldsymbol{\theta}}(0) \varphi(0)$ which yields the desired result. \end{dem} \subsection{Spectral properties of the Riccati equation} \label{sec:about_Riccati_equation} We now present some properties regarding Equation \eqref{eq:Riccati}. \textcolor{surligneur}{These properties will be exploited for the numerical resolution of the Riccati equation, by constructing the operator $\mathcal{P}$ from its eigenpairs (this will be done in Section \ref{sec:discrete_Riccati_equation} after space discretization). 
For this reason, it is worthwhile to reformulate a spectral version (Proposition \ref{prop:spectrum_P_riccati}) of the Riccati equation that would characterize these eigenpairs, while taking into account the spectral radius constraint. This is precisely the purpose of this section.} \noindent Recall that $\mathcal{T}(\mathcal{P}) = 0$, where $\mathcal{T}$ is the bounded operator defined by \begin{equation} \forall\; X \in \mathcal{L}\big(L^2(\Sigma^\diese_{n, 0})\big), \quad \mathcal{T}(X) = \mathcal{T}^{10} X^2 + (\mathcal{T}^{00} + \mathcal{T}^{11}) X + \mathcal{T}^{01}. \end{equation} In the sequel, we will write $\mathcal{T}(\lambda)$ for $\mathcal{T}(\lambda I)$. We begin with the following factorization lemma. \begin{lem} \label{lme:factorization_Riccati_operator} Let $\mathcal{P}$ be the propagation operator defined by \eqref{eq:definition_propagation_operator}. For any number $\lambda \in \C$, \begin{equation} \mathcal{T}(\lambda) = (\lambda \overline{\mathcal{P}^*} - I)\; (\Lambda + \mathcal{T}^{11})\; (\mathcal{P} - \lambda), \end{equation} where $\mathcal{T}^{11}$ is defined by \eqref{eq:DtNloc} and $\Lambda$ is defined by \eqref{eq:def_DtN_operator}. \end{lem} \begin{dem} Let $\lambda \in \C$. Since the propagation operator satisfies $\mathcal{T}(\mathcal{P}) = 0$, one obtains that \begin{align} \mathcal{T}(\lambda) &= \mathcal{T}(\lambda) - \mathcal{T}(\mathcal{P}) \nonumber\\ &= \left[ \mathcal{T}^{10} (\lambda + \mathcal{P}) + \mathcal{T}^{00} + \mathcal{T}^{11} \right]\; (\lambda - \mathcal{P}) \nonumber\\ &= (\lambda \mathcal{T}^{10} + \Lambda + \mathcal{T}^{11}) \; (\lambda - \mathcal{P}), \quad \textnormal{from \eqref{eq:DtN_operator}.}\label{eq:T(lambda I)} \end{align} We use once again the fact that $\mathcal{T}(\mathcal{P}) = 0$ which, by the expression \eqref{eq:DtN_operator}, is equivalent to $\mathcal{T}^{01} = -(\Lambda + \mathcal{T}^{11})\; \mathcal{P}$.
By transposing this equation, and by taking the complex conjugate, one obtains that $\overline{\left[\mathcal{T}^{01}\right]^*} = -\overline{\mathcal{P}^* \vphantom{\cramped{\mathcal{T}^{11}}}}\; \overline{(\Lambda + \mathcal{T}^{11})^*}$. Since $\left[\mathcal{T}^{11}\right]^* = \overline{\mathcal{T}^{11}}$ and $\left[\mathcal{T}^{01}\right]^* = \overline{\mathcal{T}^{10}}$ as ensured by Proposition \ref{prop:pties_local_DtN}, and since $\Lambda^* = \overline{\Lambda}$ from Proposition \ref{prop:pties_DtN}, it follows that \[ \mathcal{T}^{10} = -\overline{\mathcal{P}^*}\; (\Lambda + \mathcal{T}^{11}). \] Inserting this expression of $\mathcal{T}^{10}$ in \eqref{eq:T(lambda I)} therefore leads to \[ \mathcal{T}(\lambda) = \left[ - \lambda \overline{\mathcal{P}^*}\; (\Lambda + \mathcal{T}^{11}) + \Lambda + \mathcal{T}^{11} \right] \; (\lambda - \mathcal{P}) = (I - \lambda \overline{\mathcal{P}^*})\; (\Lambda + \mathcal{T}^{11})\; (\lambda - \mathcal{P}), \] which is the desired result. \end{dem} \noindent The previous factorization lemma allows one to characterize the spectrum of the propagation operator as follows. \begin{prop}\label{prop:spectrum_P_riccati} For any complex number $\lambda$, one has \begin{equation} \label{eq:spectrum_P_riccati} \displaystyle \lambda \in \sigma(\mathcal{P})\quad \Longleftrightarrow \quad 0 \in \sigma\big[\mathcal{T}(\lambda)\big]\ \ \text{and}\ \ |\lambda| < 1. \end{equation} \end{prop} \begin{dem} Proving \eqref{eq:spectrum_P_riccati} amounts to showing that for any $\lambda \in \C$ such that $|\lambda| < 1$, $\mathcal{P} - \lambda$ is invertible if and only if $\mathcal{T}(\lambda)$ is invertible. To this end, using Lemma \ref{lme:factorization_Riccati_operator}, it is sufficient to prove that $(\lambda \overline{\mathcal{P}^*} - I)\; (\Lambda + \mathcal{T}^{11})$ is an invertible operator. Proposition \ref{prop:pties_DtN} ensures the invertibility of $\Lambda + \mathcal{T}^{11}$ already.
It thus remains to show that $\lambda \overline{\mathcal{P}^*} - I$ is invertible, which is true when $|\lambda| < 1$. Indeed, if $\lambda = 0$, then $\lambda \overline{\mathcal{P}^*} - I = - I$ is obviously invertible. Otherwise, it is not difficult to see that $\mathcal{P}$ and $\overline{\mathcal{P}^*}$ have the same spectrum. Hence, given that $|1/\lambda| > 1 > \rho(\overline{\mathcal{P}^*})$, it follows that $1/\lambda$ does not belong to $\sigma(\overline{\mathcal{P}^*})$. In other words, $\overline{\mathcal{P}^*} - (1/\lambda)\,I$ is an invertible operator. \end{dem} \begin{rmk} Note that the property \eqref{eq:spectrum_P_riccati} can be proved easily (and without Lemma \ref{lme:factorization_Riccati_operator}) for the point spectrum: \begin{equation} \label{eq:point_spectrum_P_riccati} \displaystyle \lambda \in \sigma_p(\mathcal{P})\quad \Longleftrightarrow \quad 0 \in \sigma_p\big[\mathcal{T}(\lambda)\big]\ \ \text{and}\ \ |\lambda| < 1. \end{equation} This property was already proved in \cite{jolyLiFliss} for the Helmholtz equation. In this context, this was sufficient since the operator $\mathcal{P}$ was compact, which is no longer the case here. \end{rmk} \noindent Finally, it is worth noting that the values $\lambda \neq 0$ for which $0 \in \sigma\big[\mathcal{T}(\lambda)\big]$ can be paired in the following way. \begin{prop}\label{prop:pairs_Riccati} For any complex number $\lambda \neq 0$, one has the following equivalence: \begin{equation} \displaystyle 0 \in \sigma\big[\mathcal{T}(\lambda)\big] \quad \Longleftrightarrow \quad 0 \in \sigma\big[\mathcal{T}(1/\lambda)\big]. \end{equation} \end{prop} \begin{dem} Let $\lambda \in \C^*$. From the properties of the local DtN operators (see Proposition \ref{prop:pties_local_DtN}), we deduce that \begin{equation} \overline{\left[\mathcal{T}(\lambda)\right]^*} = \lambda^2\, \mathcal{T}^{01} + \lambda (\mathcal{T}^{00} + \mathcal{T}^{11}) + \mathcal{T}^{10} = \lambda^2\, \mathcal{T}(1/\lambda).
\end{equation} The operators $\mathcal{T}(\lambda)$ and $\overline{\left[\mathcal{T}(\lambda)\right]^*}$ have the same spectrum, hence the result. \end{dem} \begin{rmk} \label{rmk:pairs_Riccati} As Proposition \ref{prop:pairs_Riccati} shows, the values $\lambda \neq 0$ for which \[ 0 \in \sigma\big[\mathcal{T}(\lambda)\big] \] come in pairs $(\lambda,\lambda^{-1})$. From a numerical point of view, it suffices to choose $\lambda$ such that $|\lambda|<1$ and discard $\lambda^{-1}$. \end{rmk} \subsection{Spectral properties of the propagation operator} \label{sec:propagation_operator} \textcolor{surligneur}{This section, contrary to Section \ref{sec:about_Riccati_equation}, is not related to the construction of our numerical method; it is of theoretical interest. On the one hand, the result of this section, that is Proposition \ref{prop:properties_P}, is useful for interpreting some of the numerical results in Section \ref{sec:results:absorption}. On the other hand, it emphasizes the differences between the spectral properties of $\mathcal{P}$, and the ones of the corresponding operator for classical waveguide problems.} For the elliptic Helmholtz equation, $\mathcal{P}$ is compact (see \cite[Theorem 3.1]{jolyLiFliss}) and its spectrum hence consists only of \textcolor{surligneur}{isolated eigenvalues which accumulate to $0$. However, the picture is completely different in this case, because the spectrum has no isolated points}.
\noindent One useful way to study the properties of the propagation operator (especially its spectrum) is through an analytic formula: according to \eqref{eq:concatenation_half_guide}, $\mathcal{P}$ can be expressed for all $\varphi$ in $L^2(\Sigma^\diese_{n, 0})$ and for ${\textit{\textbf{s}}} \in \R^{n-1}$ as \begin{equation} \label{eq:expression_P} \mathcal{P} \varphi({\textit{\textbf{s}}}) = p_{\boldsymbol{\theta}}({\textit{\textbf{s}}})\; \widetilde{\varphi}\big({\textit{\textbf{s}}} - {\boldsymbol{\theta}}slope\big), \quad \textnormal{with} \quad p_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) = u^+_{{\textit{\textbf{s}}} - {\boldsymbol{\theta}}slope, {\boldsymbol{\theta}}} (1/{\boldsymbol{\theta}}i_n) \quad \textnormal{and} \quad {\boldsymbol{\theta}}slope = \hat{{\boldsymbol{\theta}}\,}/{\boldsymbol{\theta}}i_n \in \R^{n-1}. \end{equation} Note that since ${\boldsymbol{\theta}}$ is an irrational vector, ${\boldsymbol{\theta}}slope$ is also an irrational vector. The properties of the mapping ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ stated in Proposition \ref{prop:properties_us} imply that the function $p_{\boldsymbol{\theta}}$ is continuous and $1$-periodic in each direction. \noindent Operators that can be written in the form \eqref{eq:expression_P} are known as \emph{weighted shift operators}, and have been studied for instance in \cite{antonevich2012linear}. In particular, the spectral properties of $\mathcal{P}$ are given by the following result. { \begin{prop} \label{prop:properties_P} Let $p_{\boldsymbol{\theta}}: \Sigma^\diese_{n, 0} \to \C$ be the function defined in \eqref{eq:expression_P}.
Then, $p_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) \neq 0$ for all ${\textit{\textbf{s}}}$ in $\Sigma^\diese_{n, 0}$, and the spectral radius of $\mathcal{P}$ is given by \begin{equation} \label{eq:radius_spectrum} \rho(\mathcal{P})= \exp \left(\int_{\Sigma^\diese_{n, 0}} \log |p_{\boldsymbol{\theta}}({\textit{\textbf{s}}})|\; d {\textit{\textbf{s}}} \right). \end{equation} Moreover, the spectrum of $\mathcal{P}$ is a circle of radius $\rho(\mathcal{P})$. \end{prop} \noindent This result can be found in \cite[Theorem 2.1]{antonevich2012linear} for $n = 2$. We give below the proof for $n > 2$, which requires the following lemma (see Theorem 6.1 and Example 6.1 of \cite{kuipers}), known as a particular case of Birkhoff's ergodic theorem for continuous functions. \begin{lem} \label{lem:mean_value_discrete} Let $\psi : \Sigma^\diese_{n, 0} \to \C$ be continuous and $1$--periodic in each direction. Let $\boldsymbol{\alpha} \in \R^{n-1}$ be an irrational vector. Then, we have the following uniform convergence: \[\displaystyle \lim_{\ell \to +\infty} \Big\|\frac{1}{\ell} \sum_{m = 0}^{\ell-1} \psi(\cdot - m \boldsymbol{\alpha}) - \int_{\Sigma^\diese_{n, 0}} \psi \Big\|_{\infty} = 0. \] \end{lem} } \begin{dem}[of Proposition \ref{prop:properties_P}] Let us first show by contradiction that $p_{\boldsymbol{\theta}}$ or equivalently the function ${\textit{\textbf{s}}} \mapsto u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(1/{\boldsymbol{\theta}}i_n)$ is nowhere vanishing. To do so, we use an argument of unique continuation. In fact, assume that there exists ${\textit{\textbf{s}}} \in \Sigma^\diese_{n, 0}$ such that $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(1/{\boldsymbol{\theta}}i_n) = 0$.
Then $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ satisfies the problem \[ - \frac{d}{d x} \left( \mu_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \frac{d u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}}{d x} \right) - \rho_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} \; \omega^2 \; u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} = 0, \ \textnormal{in} \ (1/{\boldsymbol{\theta}}i_n,+\infty), \quad \textnormal{and} \quad u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(1/{\boldsymbol{\theta}}i_n) = 0. \] From the well-posedness of this problem, it follows that $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} = 0$ in $(1/{\boldsymbol{\theta}}i_n,+\infty)$. Therefore, by unique continuation, one deduces that $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}} = 0$ in $\R_+$, which contradicts the boundary condition $u^+_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}(0) = 1$. \noindent{ We now establish the expression of the spectral radius $\rho(\mathcal{P})$. One has $\smash{\displaystyle \rho(\mathcal{P}) = \lim_{\ell \to +\infty}\|\mathcal{P}^\ell\|^{1/\ell}}$ from Gelfand's formula, and by induction, $\mathcal{P}^\ell$ can be expressed under the form \[\displaystyle \mathcal{P}^\ell \varphi({\textit{\textbf{s}}}) = p^{(\ell)}_{\boldsymbol{\theta}}({\textit{\textbf{s}}})\; \varphi({\textit{\textbf{s}}} - \ell{\boldsymbol{\theta}}slope), \quad \textnormal{with} \quad p^{(\ell)}_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) = \prod_{m = 0}^{\ell-1} p_{\boldsymbol{\theta}}({\textit{\textbf{s}}} - m {\boldsymbol{\theta}}slope). \] Since the translation operator $\varphi \mapsto \varphi(\cdot - \ell{\boldsymbol{\theta}}slope)$ is isometric and bijective, the norm of $\mathcal{P}^\ell$ is equal to the norm of the multiplication operator $\varphi \mapsto p^{(\ell)}_{\boldsymbol{\theta}}\, \varphi$, that is $\|p^{(\ell)}_{\boldsymbol{\theta}}\|_\infty$. 
Hence, given that $p_{\boldsymbol{\theta}}({\textit{\textbf{s}}}) \neq 0$ for all ${\textit{\textbf{s}}}$, one has \[ \rho(\mathcal{P}) = \lim_{\ell \to +\infty} \Big\|\prod_{m = 0}^{\ell-1} p_{\boldsymbol{\theta}}(\cdot - m {\boldsymbol{\theta}}slope) \Big\|^{1/\ell}_\infty = \lim_{\ell \to +\infty} \exp \Big\|\frac{1}{\ell} \sum_{m = 0}^{\ell-1} \log\big(|p_{\boldsymbol{\theta}}(\cdot - m {\boldsymbol{\theta}}slope)|\big) \Big\|_\infty \] Since ${\boldsymbol{\theta}}$ is an irrational vector, ${\boldsymbol{\theta}}slope = \hat{{\boldsymbol{\theta}}} / {\boldsymbol{\theta}}i_n$ is also an irrational vector. Therefore, Lemma \ref{lem:mean_value_discrete} can be applied with $\boldsymbol{\alpha} = {\boldsymbol{\theta}}slope$, and $\psi : {\textit{\textbf{s}}} \mapsto \log |p_{\boldsymbol{\theta}}({\textit{\textbf{s}}} )|$, which is well-defined and continuous. Hence the spectral radius is given by \[ \rho(\mathcal{P}) = M_{\log}(p_{\boldsymbol{\theta}}) := \exp \left(\int_{\Sigma^\diese_{n, 0}} \log |p_{\boldsymbol{\theta}}({\textit{\textbf{s}}})|\; d {\textit{\textbf{s}}} \right). \] Let us now characterize the spectrum. To begin, note that the inverse of $\mathcal{P}$ is well-defined, since $p_{\boldsymbol{\theta}}$ vanishes nowhere: for all $\varphi\in L^2(\Sigma^\diese_{n, 0}),\; \mathcal{P}^{-1} \varphi({\textit{\textbf{s}}}) := p_{\boldsymbol{\theta}}({\textit{\textbf{s}}})^{-1}\; \widetilde{\varphi}\big({\textit{\textbf{s}}} + {\boldsymbol{\theta}}slope\big)$. 
Therefore, all the computations above can be applied to $\mathcal{P}^{-1}$, thus yielding \[ \rho(\mathcal{P}^{-1}) = M_{\log}(p^{-1}_{\boldsymbol{\theta}}) = \frac{1}{M_{\log}(p_{\boldsymbol{\theta}})} = \frac{1}{\rho(\mathcal{P})}. \] Since the spectrum of $\mathcal{P}$ is always included in the annulus $\smash{\displaystyle \rho(\mathcal{P}^{-1})^{-1} \leq |z| \leq \rho(\mathcal{P})}$, it follows that $\sigma(\mathcal{P})$ is included in the circle $|z| = \rho(\mathcal{P}) = M_{\log}(p_{\boldsymbol{\theta}})$. \noindent Conversely, for ${\textit{\textbf{k}}} \in \Z^{n-1}$, let $S_{\textit{\textbf{k}}}$ be the multiplication operator by ${\textit{\textbf{s}}} \in \R^{n-1} \mapsto \exp(2\ensuremath{\mathrm{i}}\pi\, {\textit{\textbf{k}}}\cdot {\textit{\textbf{s}}})$. From the expression \eqref{eq:expression_P} of the propagation operator, we obtain that \[ S_{\textit{\textbf{k}}}\; \mathcal{P}\; S^{-1}_{\textit{\textbf{k}}} = e^{2\ensuremath{\mathrm{i}}\pi\, {\textit{\textbf{k}}}\, \cdot\, {\boldsymbol{\theta}}slope}\, \mathcal{P}. \] The operators $\mathcal{P}$ and $e^{2\ensuremath{\mathrm{i}}\pi {\textit{\textbf{k}}}\, \cdot\, {\boldsymbol{\theta}}slope}\, \mathcal{P}$ are similar, and thus have the same spectrum. Now consider an element $\lambda_0$ of $\sigma(\mathcal{P})$. Then, $|\lambda_0| = M_{\log}(p_{\boldsymbol{\theta}})$, and $\lambda_{\textit{\textbf{k}}} := e^{2\ensuremath{\mathrm{i}}\pi {\textit{\textbf{k}}}\, \cdot\, {\boldsymbol{\theta}}slope}\, \lambda_0$ also belongs to $\sigma(\mathcal{P})$ for all ${\textit{\textbf{k}}} \in \Z^{n-1}$. Since ${\boldsymbol{\theta}}slope$ is irrational, we have from Kronecker's theorem (Theorem \ref{thm:kronecker}) that the set $\{\lambda_{\textit{\textbf{k}}},\ {\textit{\textbf{k}}} \in \Z^{n-1}\}$ is dense in the circle $|z| = |\lambda_0| = M_{\log}(p_{\boldsymbol{\theta}})$. Consequently, this whole circle is included in the spectrum, since the latter is a closed set.
} \end{dem} \section{Resolution algorithm and discretization issues for \texorpdfstring{$n = 2$}{n = 2}} \label{sec:resolution_algorithm} In order to compute the solution of Equation \eqref{eq:whole_line_problem}, the previous sections provide an algorithm which sums up as follows: \begin{enumerate}[label=\arabic*., ref = \arabic*] \item\label{item:step1} \textit{\underline{Compute the solution $u^+_{\boldsymbol{\theta}}$ of \eqref{eq:half_line_problems_0} and the DtN coefficient $\lambda^+$ defined by \eqref{eq:DtN_coefficients}}} by using the following procedure: \begin{enumerate}[label=(\textit{\alph*})., ref = \textit{1.\alph*}] \item\label{item:stepa} for any boundary data $\varphi \in L^2(\Sigma^\diese_{n, 0})$, compute the solutions $E^0(\varphi)$, $E^1(\varphi)$ of the local cell problems \eqref{eq:local_cell_problem}; \item\label{item:stepb} compute the local DtN operators $(\mathcal{T}^{00}, \mathcal{T}^{01}, \mathcal{T}^{10}, \mathcal{T}^{11})$, defined by \eqref{eq:DtNloc}--\eqref{eq:DtNloc_weak_form}; \item\label{item:stepc} compute the propagation operator $\mathcal{P}$ as the unique solution of the constrained Riccati equation \eqref{eq:Riccati}; \item\label{item:stepd} using an arbitrarily chosen boundary data $\varphi \in \mathscr{C}_{\textit{per}}(\R^{n-1})$ which satisfies $\varphi(0) = 1$, \begin{itemize} \item from \eqref{eq:UfromEis}, construct the solution $U^+_{\boldsymbol{\theta}}$ of the half-guide problem cell by cell; \item deduce the half-line solution $u^+_{\boldsymbol{\theta}}$ via the formula \eqref{eq:lien_2D_1D}; \end{itemize} \item\label{item:stepe} compute the DtN operator $\Lambda$ defined by \eqref{eq:DtN_operator}, and deduce $\lambda^+$ from \eqref{eq:DtNcoeff_from_DtNoperator}. 
\end{enumerate} \item\label{item:step2} \textit{\underline{Compute the solution $u^-_{\boldsymbol{\theta}}$ of \eqref{eq:half_line_problems_0} and the DtN coefficient $\lambda^-$ defined by \eqref{eq:DtN_coefficients}}} by \textcolor{surligneur}{using exactly the same procedure as in} Step \ref{item:step1} (but independently from Step \ref{item:step1}). \item Finally, \textit{\underline{solve the interior problem \eqref{eq:interior_problem} in $(-a, a)$, and extend the solution everywhere}} by using \eqref{eq:solution_of_whole_line_problem}, as well as Step \ref{item:step1} and Step \ref{item:step2}. \end{enumerate} \noindent For the sake of convenience, the quasiperiodicity order is set to $n = 2$. The most original aspects of the algorithm are the steps \eqref{item:stepa}--\eqref{item:stepd}, and the rest of this section focuses on the discretization of these four steps. We present in Sections \ref{sec:methode_2D} and \ref{sec:methode_quasi1D} two different methods that are linked to a choice of discretization of the step \eqref{item:stepa}, which influences the implementation of the steps \eqref{item:stepb} and \eqref{item:stepd}. The treatment of the step \eqref{item:stepc} is independent of this choice, and will be presented in Section \ref{sec:discrete_Riccati_equation}. \begin{figure} \caption{Two-dimensional mesh for the $2$D method (left), and family of one-dimensional meshes for the quasi-$1$D method (right).\label{fig:meshes_2D_quasi1D}} \end{figure} \subsection{A fully two-dimensional method} \label{sec:methode_2D} The first method is inspired by the resolution of the elliptic Helmholtz equation (see \cite{flissthese} for instance), and consists in solving directly the local cell problems on an unstructured mesh of the periodicity cell $\mathcal{C}^\diese_0 = (0, 1)^2$ (see Figure \ref{fig:meshes_2D_quasi1D})).
\noindent We start from a triangular mesh $\mathscr{T}_h(\mathcal{C}^\diese_0)$ of $\mathcal{C}^\diese_0 = (0, 1)^2$ with a mesh step $h > 0$. We assume that this mesh is \emph{periodic}, in the sense that one can identify the mesh nodes on the boundary $y_i = 0$ with those on $y_i = 1$, for $1 \leq i \leq 2$. In particular for $i = 1$, this condition allows us to handle the periodic boundary conditions. \noindent Now let $\mathcal{V}_h(\mathcal{C}^\diese_0)$ be the usual $H^1$--conforming approximation by Lagrange finite elements of order $d > 0$. We also introduce \[\displaystyle \mathcal{V}_{h, \textit{per}}(\mathcal{C}^\diese_0) := \big\{V \in \mathcal{V}_h(\mathcal{C}^\diese_0) \ /\ \restr{V}{y_1 = 0} = \restr{V}{y_1 = 1}\big\} \] as an internal approximation of $H^1_{{\boldsymbol{\theta}}, \textit{per}}(\mathcal{C}^\diese_0)$. Finally, to approximate $L^2(\Sigma^\diese_{2, 0})$ and $L^2(\Sigma^\diese_{2, 1})$, we consider the following subspaces: \[\displaystyle \forall\; a \in \{0, 1\}, \quad \mathcal{V}_{h, \textit{per}}(\Sigma^\diese_{2, a}) = \big\{ \restr{V_h}{\Sigma^\diese_{2, a}}\ / \ V_h \in \mathcal{V}_{h, \textit{per}}(\mathcal{C}^\diese_0) \big\}. \] Since the mesh nodes on $\Sigma^\diese_{2, 0}$ and $\Sigma^\diese_{2, 1}$ can be identified to each other by periodicity of $\mathscr{T}_h(\mathcal{C}^\diese_0)$, we can also make the identification $\mathcal{V}_{h, \textit{per}}(\Sigma^\diese_{2, 0}) \equiv \mathcal{V}_{h, \textit{per}}(\Sigma^\diese_{2, 1}) \equiv \mathcal{V}_{h, \textit{per}}(0, 1)$, as in the continuous case. Set $N := \dim \mathcal{V}_{h, \textit{per}}(0, 1)$, and consider a basis $\smash{(\varphi_p)_{1 \leq p \leq N}}$. 
\noindent For any data $\varphi_h \in \mathcal{V}_{h, \textit{per}}(0, 1)$, we denote by $E^0_h(\varphi_h), E^1_h(\varphi_h) \in \mathcal{V}_{h, \textit{per}}(\mathcal{C}^\diese_0)$ the solutions of the discrete counterpart of the local cell problems \eqref{eq:local_cell_problem}--\eqref{eq:local_cell_BC} defined in a weak sense. In practice, one has to compute $\smash{E^j_h(\varphi_p)}$, where $\smash{(\varphi_p)_{1 \leq p \leq N}}$ is a basis of $\mathcal{V}_{h, \textit{per}}(0, 1)$. \noindent Similarly to the weak expression \eqref{eq:DtNloc_weak_form} of the continuous local DtN operators, the discrete local DtN operators $\mathcal{T}^{j k}_h \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$, $j, k = 0, 1$, are defined for any $\varphi_h, \psi_h \in \mathcal{V}_{h, \textit{per}}(0, 1)$ as follows: \[\displaystyle \Big\langle \mathcal{T}^{j k}_h \varphi_h, \; \psi_h \Big\rangle = \int_{\mathcal{C}^\diese_0} \left[ \mu_p\; \Dt{} E^j_h(\varphi_h) \; \Dt{} \overline{E^k_h(\psi_h)} - \rho_p\; \omega^2\; E^j_h(\varphi_h)\; \overline{E^k_h(\psi_h)}\, \right]. \] In practice, these operators are represented as $N \times N$ matrices $\mathbb{T}^{jk}$ whose components are given by $\smash{\mathbb{T}^{jk}_{pq} = \big\langle \mathcal{T}^{j k}_h \varphi_q, \; \varphi_p \big\rangle}$, for $p, q \in \llbracket 1, N \rrbracket$. \noindent Let $\varphi_h \in \mathcal{V}_{h, \textit{per}}(0, 1) \subset \mathscr{C}_{\textit{per}}(\R)$ such that $\varphi_h(0) = 1$. The computation of the propagation operator $\mathcal{P}_h \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$ is presented in Subsection \ref{sec:discrete_Riccati_equation}. 
Once this operator is determined, the solution of the half-guide problem \eqref{eq:half_guide_problem} can be approximated with the function defined cell by cell by \[\displaystyle \forall\; \ell \in \N, \quad \restr{U^+_{{\boldsymbol{\theta}}, h}(\varphi_h)(\cdot + \ell\, \vec{{\textit{\textbf{e}}}}_n)}{\mathcal{C}^\diese_0} = E^0_h(\mathcal{P}^{\ell}_h\, \varphi_h) + E^1_h(\mathcal{P}^{\ell+1}_h \, \varphi_h). \] \noindent Finally, a suitable approximation of the solution of the half-line problem \eqref{eq:half_line_problem} is provided by \[\displaystyle \forall\; x \in \R, \quad u^+_{{\boldsymbol{\theta}}, h}(x) = U^+_{{\boldsymbol{\theta}}, h}(\varphi_h)({\boldsymbol{\theta}}\, x). \] \subsection{A quasi one-dimensional method} \label{sec:methode_quasi1D} Though easy to implement, the two-dimensional approach described in the previous section does not exploit the fibered properties of the directional derivative $\Dt{}$. However, the periodic half-guide problem can be seen as a concatenation, in a certain sense, of one-dimensional half-line problems. This fibered structure is the core of the method presented in this section. \subsubsection{Presentation} \label{sec:a_quasi_one_dimensional_method_presentation} For any $s \in \R$, we consider the one-dimensional cell problems \begin{equation} \left| \begin{array}{r@{\ }c@{\ }l@{\quad}l} \displaystyle - \frac{d}{d x} \Big( \mu_{s, {\boldsymbol{\theta}}} \; \frac{d e^j_{s, {\boldsymbol{\theta}}}}{d x} \Big) - \rho_{s, {\boldsymbol{\theta}}} \; \omega^2 \; e^j_{s, {\boldsymbol{\theta}}} &=& 0, \quad \textnormal{in} & (0, 1/{\boldsymbol{\theta}}i_2) := I_{\boldsymbol{\theta}}, \\[6pt] \displaystyle e^0_{s, {\boldsymbol{\theta}}}(0) = 1\ \quad \textnormal{and}\ \quad e^0_{s, {\boldsymbol{\theta}}}(1/{\boldsymbol{\theta}}i_2) &=& 0, \\[6pt] \displaystyle e^1_{s, {\boldsymbol{\theta}}}(0) = 0\ \quad \textnormal{and}\ \quad e^1_{s, {\boldsymbol{\theta}}}(1/{\boldsymbol{\theta}}i_2) &=& 1. \end{array} \right.
\label{eq:QP_local_cell_problems} \end{equation} Then, by analogy with Proposition \ref{prop:structure_half_guide}, one easily shows that the local cell problems are concatenations of one-dimensional cell problems, in the following sense. \begin{prop}\label{prop:structure_local_cell_problem} For any boundary data $\varphi$ in $L^2(0, 1)$, the solutions $E^0(\varphi)$ and $E^1(\varphi)$ of the local cell problems \eqref{eq:local_cell_problem} are given by \begin{equation} \label{eq:concatenation_local_cell_problems} \operatorname{a.e.}\; {\textit{\textbf{y}}} \in \mathcal{C}^\diese_0, \quad E^j(\varphi)({\textit{\textbf{y}}}) = \widetilde{\varphi}\big( {\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}) + j\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2 \big)\; e^j_{{\textit{\textbf{s}}}_{\boldsymbol{\theta}}({\textit{\textbf{y}}}), {\boldsymbol{\theta}}} \bigg( \frac{y_2}{{\boldsymbol{\theta}}i_2} \bigg), \end{equation} where $e^j_{s, {\boldsymbol{\theta}}}$ denotes the solution of the cell problems \eqref{eq:QP_local_cell_problems}. \end{prop} \noindent Proposition \ref{prop:structure_local_cell_problem} also highlights the structure of the local DtN operators. To see this, let us introduce the \emph{local DtN functions} $t^{jk}_{\boldsymbol{\theta}}$ defined for $j, k = 0, 1$, by \begin{equation}\displaystyle \forall\; s \in \R, \quad t^{jk}_{\boldsymbol{\theta}}(s) = (-1)^{k+1} {\boldsymbol{\theta}}i_2\; \bigg[\mu_{s, {\boldsymbol{\theta}}}\; \frac{d e^j_{s, {\boldsymbol{\theta}}}}{d x}\bigg] \bigg( \frac{j}{{\boldsymbol{\theta}}i_2} \bigg). \label{eq:local_DtN_functions} \end{equation} Note that by periodicity of $\mu_p$ and $\rho_p$, the maps $s \mapsto e^j_{s, {\boldsymbol{\theta}}}$ and $t^{jk}_{\boldsymbol{\theta}}$ are $1$--periodic. 
\noindent By applying the directional derivative operator $\Dt{}$ to \eqref{eq:concatenation_local_cell_problems}, and by using the relationship between $\Dt{} E^j(\varphi)$ and $d e^j_{s, {\boldsymbol{\theta}}} / dx$ given by \eqref{eq:derivee_fonction_2}, it follows that the local DtN operators defined by \eqref{eq:DtNloc} are weighted translation operators, similarly to the propagation operator. \begin{prop} The operators $\mathcal{T}^{jk}$ can be written for $\varphi \in L^2(0, 1)$ and $s \in (0, 1)$ as \begin{equation} \label{eq:DtNloc_wto} \begin{array}{l@{\quad}c@{\quad}l} \mathcal{T}^{00} \varphi(s) = t^{00}_{\boldsymbol{\theta}}(s)\; \widetilde{\varphi}(s) &\textnormal{and}& \mathcal{T}^{10} \varphi(s) = t^{10}_{\boldsymbol{\theta}}(s)\; \widetilde{\varphi}(s + {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2), \\[8pt] \mathcal{T}^{11} \varphi(s) = t^{11}_{\boldsymbol{\theta}}(s - {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; \widetilde{\varphi}(s) &\textnormal{and}& \mathcal{T}^{01} \varphi(s) = t^{01}_{\boldsymbol{\theta}}(s - {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; \widetilde{\varphi}(s - {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2), \end{array} \end{equation} where we recall that $\widetilde{\varphi}$ denotes the periodic extension of $\varphi$ on $\R$, defined by \eqref{eq:per_extension}. \end{prop} \noindent Finally, the solution $u^+_{\boldsymbol{\theta}}$ of the half-line problem \eqref{eq:half_line_problem} can be computed directly from the functions $e^j_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ and from the propagation operator. 
In fact, given a function $\varphi \in \mathscr{C}_{\textit{per}}(\Sigma^\diese_{n, 0})$ such that $\varphi(0) = 1$, taking formally the trace along ${\boldsymbol{\theta}}\, \R$ in \eqref{eq:UfromEis} leads to \begin{equation} \forall\; \ell \in \N, \quad \restr{u^+_{\boldsymbol{\theta}}(\cdot + \ell/{\boldsymbol{\theta}}i_2)}{I_{\boldsymbol{\theta}}} = (\widetilde{\mathcal{P}^{\ell} \varphi})(\ell\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; e^0_{\ell {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2, {\boldsymbol{\theta}}} + (\widetilde{\mathcal{P}^{\ell+1} \varphi})((\ell+1)\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; e^1_{\ell {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2, {\boldsymbol{\theta}}}. \label{eq:ufromQPeis} \end{equation} The proof of this result is similar to those of \eqref{eq:UfromEis} and Proposition \ref{prop:periodic_structure_solution}. \noindent Expressions \eqref{eq:concatenation_local_cell_problems}, \eqref{eq:DtNloc_wto}, and \eqref{eq:ufromQPeis} form the basis of the \emph{quasi one-dimensional} or \emph{quasi-1D} strategy, which consists in approximating the solutions $e^j_{{\textit{\textbf{s}}}, {\boldsymbol{\theta}}}$ as well as the functions $t^{jk}_{\boldsymbol{\theta}}$ and finally the local DtN operators $\mathcal{T}^{jk}$. Then once the propagation operator is computed by solving the constrained Riccati equation \eqref{eq:Riccati}, the solution $u^+_{\boldsymbol{\theta}}$ may be constructed directly cell by cell using \eqref{eq:ufromQPeis}. \subsubsection{Discretization} The quasi-1D approach requires two distinct approximate spaces associated to the transverse and the ${\boldsymbol{\theta}}$--oriented directions (see Figure \ref{fig:meshes_2D_quasi1D}). \paragraph{Transverse direction.} We begin with a one-dimensional mesh $\mathscr{T}_h(0, 1)$ of $\Sigma^\diese_{2, 0} \equiv (0, 1)$ with a mesh step $h > 0$. 
Let $\mathcal{V}_h(0, 1)$ be the approximation space of $H^1(0, 1)$ by Lagrange finite elements of order $d > 0$. We denote by $\smash{(\varphi_p)_{0 \leq p \leq N}}$ the usual nodal basis, which satisfies in particular $\smash{\varphi_p(s_q) = \delta_{p,q}}$, where $(s_p)_{0 \leq p \leq N}$ are points (including the mesh vertices) such that $0 = s_0 < \dots < s_N = 1$. Then an internal approximation of $L^2(0, 1)$ is \[\displaystyle \mathcal{V}_{h, \textit{per}}(0, 1) := \vect\{ \varphi_0 + \varphi_N, \varphi_1, \dots , \varphi_{N-1}\}, \] which is chosen so that $\mathcal{V}_{h, \textit{per}}(0, 1) \subset \mathscr{C}_{\textit{per}}(0, 1)$. In particular, from the definition of the basis functions $\varphi_i$, one has the following decomposition \begin{equation}\displaystyle \forall\;\! \varphi_h \in \mathcal{V}_{h, \textit{per}}(0, 1), \quad \varphi_h = \sum_{p = 0}^{N} \varphi_h(s_p)\, \varphi_p, \quad \textnormal{with} \quad \varphi_h(s_0) = \varphi_h(s_N). \label{eq:decomposition_Sigma0} \end{equation} \paragraph{${\boldsymbol{\theta}}$--oriented direction.} Let $\mathscr{T}_{h_{\boldsymbol{\theta}}}(I_{\boldsymbol{\theta}})$ denote a mesh of the line segment $I_{\boldsymbol{\theta}}$ with a mesh step $h_{\boldsymbol{\theta}} > 0$. Set $\mathcal{V}_{h_{\boldsymbol{\theta}}}(I_{\boldsymbol{\theta}})$ as the approximation space of $H^1(I_{\boldsymbol{\theta}})$ by Lagrange finite elements of order $d_{\boldsymbol{\theta}} > 0$ and define $\mathcal{V}_{h_{\boldsymbol{\theta}}, 0}(I_{\boldsymbol{\theta}}) := \mathcal{V}_{h_{\boldsymbol{\theta}}}(I_{\boldsymbol{\theta}}) \cap H^1_0(I_{\boldsymbol{\theta}})$. \noindent The approximation of $e^0_{s, {\boldsymbol{\theta}}}$ and $e^1_{s, {\boldsymbol{\theta}}}$ can be seen as a two-step process. First, for any $s \in \R$, consider the solution $e^j_{s, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}$ of the discrete variational formulation associated to \eqref{eq:QP_local_cell_problems}. 
\noindent In practice, the solution $e^j_{s, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}$ can only be computed for a finite number of $s \in (0, 1)$. This is where the discretization in the transverse direction comes into play: given $x \in I_{\boldsymbol{\theta}}$, the function $\smash{s \mapsto e^j_{s, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}(x)}$ may be interpolated in $\mathcal{V}_{h, \textit{per}}(0, 1)$. The interpolation process requires computing the discrete solution $e^j_{s, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}$ only for $s = s_p$, $p \in \llbracket 0 , N-1 \rrbracket$. Then, using the decomposition formula \eqref{eq:decomposition_Sigma0}, $e^j_{s, {\boldsymbol{\theta}}}$ shall be approximated by \begin{equation} \displaystyle \forall\; (s, x) \in (0, 1) \times I_{\boldsymbol{\theta}}, \quad e^j_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}(x) = \sum_{p = 0}^N e^j_{s_p, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}(x)\; \varphi_p(s), \quad \textnormal{with} \quad {\underline{\textit{\textbf{h}}}} = (h, h_{\boldsymbol{\theta}}), \end{equation} where $e^j_{0, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}} = e^j_{1, {\boldsymbol{\theta}}, h_{\boldsymbol{\theta}}}$ (because $e^j_{s, {\boldsymbol{\theta}}}$ is $1$--periodic with respect to $s$).
From the solutions $e^j_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}$, we introduce the discrete local DtN functions \[\displaystyle \forall\; s \in (0, 1), \quad t^{jk}_{{\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}(s) = {\boldsymbol{\theta}}i_2\; \int_0^{1/{\boldsymbol{\theta}}i_2} \Big( \mu_{s, {\boldsymbol{\theta}}} \; \frac{d e^j_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}}{d x} \; \overline{\frac{d e^k_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}}{d x}} - \rho_{s, {\boldsymbol{\theta}}} \; \omega^2 \; e^j_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}} \; \overline{e^k_{s, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}} \; \Big), \] which are inspired by the weak expression \eqref{eq:local_DtN_functions} of the local DtN functions $t^{jk}_{\boldsymbol{\theta}}$. Then, by analogy with \eqref{eq:DtNloc_wto}, we define the discrete DtN operators $\mathcal{T}^{jk}_{\underline{\textit{\textbf{h}}}} \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$ for any $\varphi_h$, $\psi_h \in \mathcal{V}_{h, \textit{per}}(0, 1)$ as follows: \begin{equation} \displaystyle \Big\langle \mathcal{T}^{jk}_{\underline{\textit{\textbf{h}}}} \varphi_h,\; \psi_h \Big\rangle = \int_0^1 t^{jk}_{{\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}(s - k\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; \varphi_h(s + (j - k)\,{\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2) \; \overline{\psi_h(s)}\; d s. \label{eq:DtNloc_wto_weak_form} \end{equation} These discrete DtN operators, when computed for $\varphi_h$, $\psi_h$ being the basis functions of $\mathcal{V}_{h, \textit{per}}(0, 1)$, are represented as $N \times N$ matrices, where $N = \dim \mathcal{V}_{h, \textit{per}}(0, 1)$. The integrals which appear in \eqref{eq:DtNloc_wto_weak_form} are evaluated in practice using a specifically designed quadrature rule whose description is omitted here.
\noindent Finally, let $\varphi_h \in \mathcal{V}_{h, \textit{per}}(0, 1) \subset \mathscr{C}_{\textit{per}}(\R)$ such that $\varphi_h(0) = 1$. Then using \eqref{eq:ufromQPeis}, the solution of the half-line problem \eqref{eq:half_line_problem} can be approximated with the function defined cell by cell by \[\displaystyle \forall\; \ell \in \N, \quad \restr{u^+_{{\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}(\cdot + \ell/{\boldsymbol{\theta}}i_2)}{I_{\boldsymbol{\theta}}} = (\mathcal{P}^{\ell}_{\underline{\textit{\textbf{h}}}} \varphi_h)(\ell\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; e^0_{\ell {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}} + (\mathcal{P}^{\ell+1}_{\underline{\textit{\textbf{h}}}} \varphi_h)((\ell+1)\, {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2)\; e^1_{\ell {\boldsymbol{\theta}}i_1/{\boldsymbol{\theta}}i_2, {\boldsymbol{\theta}}, {\underline{\textit{\textbf{h}}}}}. \] where $\mathcal{P}_{\underline{\textit{\textbf{h}}}} \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$ corresponds to a suitable discrete $\R^{N \times N}$ approximation of $\mathcal{P}$. The computation of such an operator is the subject of the next subsection. 
\subsection{Approximation of the propagation operator} \label{sec:discrete_Riccati_equation} In order to find a suitable approximation $\mathcal{P}_h \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$ of the propagation operator $\mathcal{P}$, it is natural to introduce the discrete constrained Riccati equation \begin{equation}\label{eq:Riccati_discrete} \left| \begin{array}{l} \textnormal{\textit{Find $\mathcal{P}_h \in \mathcal{L}(\mathcal{V}_{h, \textit{per}}(0, 1))$ such that $\rho(\mathcal{P}_h) < 1$ and $\displaystyle \mathcal{T}_h(\mathcal{P}_h) = 0$, where}} \\[12pt] \multicolumn{1}{c}{\displaystyle \mathcal{T}_h(\mathcal{P}_h) := \mathcal{T}^{10}_h\mathcal{P}^2_h + (\mathcal{T}^{00}_h + \mathcal{T}^{11}_h)\, \mathcal{P}_h + \mathcal{T}^{01}_h,} \end{array} \right. \end{equation} and where $(\mathcal{T}^{00}_h, \mathcal{T}^{01}_h, \mathcal{T}^{10}_h, \mathcal{T}^{11}_h)$ are obtained via one of the methods described in Sections \ref{sec:methode_2D} and \ref{sec:methode_quasi1D}. Using the same arguments as for the elliptic Helmholtz equation \cite{flissthese}, it can be proved that this discrete equation admits a unique solution. \noindent In order to solve \eqref{eq:Riccati_discrete}, two methods have been proposed in \cite{jolyLiFliss}: a \emph{spectral decomposition method}, and a \emph{modified Newton method}. Here, we only describe the spectral approach. \noindent The spectral decomposition method consists in characterizing $\mathcal{P}_h$ by means of its eigenpairs $(\lambda_i, \psi_i)$. Doing so, however, raises an important question: is $\mathcal{P}_h$ completely defined by its eigenpairs? This is equivalent to asking whether $\mathcal{P}_h$ is diagonalizable.
The diagonalizability of $\mathcal{P}_h$ is an open question, but for the sake of simplicity, we will assume in the sequel that this is the case, namely \[\displaystyle \textnormal{The family of eigenfunctions $(\psi_i)_{1 \leq i \leq N}$ forms a basis of $\mathcal{V}_{h, \textit{per}}(0, 1)$.} \] In practice, this is the situation that we always have encountered. Moreover, in the case where this assumption fails to be true, one can still adapt the method, and recover $\mathcal{P}_h$ through a Jordan decomposition. (See \cite[Section 2.3.2.3]{flissthese} for more details.) \noindent The spectral approach relies on the results presented in Section \ref{sec:about_Riccati_equation}, which remain true for the discrete equation. In particular, by analogy with Proposition \ref{prop:spectrum_P_riccati}, $(\lambda_h, \psi_h) \in \C \times \mathcal{V}_{h, \textit{per}}(0, 1)$ is an eigenpair of $\mathcal{P}_h$ if and only if it satisfies \[\displaystyle \mathcal{T}_h(\lambda_h)\, \psi_h = 0, \quad \textnormal{with} \quad \psi_h \neq 0 \quad \textnormal{and} \quad |\lambda_h| < 1. \] Solving the Riccati equation hence comes down to solving a quadratic eigenvalue problem: \begin{equation}\label{eq:quadratic_EVP} \left| \begin{array}{l} \textnormal{\textit{Find $(\lambda_h, \psi_h) \in \C \times \mathcal{V}_{h, \textit{per}}(0, 1)$ such that $\psi_h \neq 0$, $|\lambda_h| < 1$ and}} \\[12pt] \multicolumn{1}{c}{\displaystyle \lambda^2_h\, \mathcal{T}^{10}_h\psi_h + \lambda_h\, (\mathcal{T}^{00}_h + \mathcal{T}^{11}_h) \psi_h + \mathcal{T}^{01}_h \psi_h = 0.} \end{array} \right. \end{equation} If one sets $N = \dim \mathcal{V}_{h, \textit{per}}(0, 1)$, then \eqref{eq:quadratic_EVP} can be reduced to a $2N \times 2N$ linear eigenvalue problem, thus yielding $2N$ eigenvalues. In order to pick the $N$ eigenvalues of the propagation operator, we need a criterion. 
To do so, note that with the 2D or the quasi-1D method, the properties of the local DtN operators (Proposition \ref{prop:pties_local_DtN}) remain preserved for the discrete operators $\mathcal{T}^{jk}_h$. Hence Proposition \ref{prop:pairs_Riccati} admits the following discrete version: \[ \Ker \mathcal{T}_h(\lambda_h) \neq \{0\} \quad \Longleftrightarrow \quad \Ker \mathcal{T}_h(1/\lambda_h) \neq \{0\}. \] Therefore, as already expected with Remark \ref{rmk:pairs_Riccati}, the solutions of \eqref{eq:quadratic_EVP} can be grouped into pairs $(\lambda_h, 1/\lambda_h)$, where $0 < |\lambda_h| < 1$. Consequently, in order to compute $\mathcal{P}_h$, one can solve \eqref{eq:quadratic_EVP} (using for instance linearization techniques), and choose the $N$ eigenpairs $(\lambda_h, \psi_h)$ which satisfy $|\lambda_h| < 1$. \subsection{The DtN coefficient}\label{sec:discrete_DtN_coefficient} Finally, consider a function $\varphi_h \in \mathcal{V}_{h, \textit{per}}(0, 1) \subset \mathscr{C}_{\textit{per}}(\R)$ which satisfies $\varphi_h(0) = 1$. Then by analogy with \eqref{eq:DtN_operator}, and in the spirit of Proposition \ref{prop:DtNcoeff_from_DtNoperator}, we define the discrete DtN operator and the discrete DtN coefficient as follows: \[\displaystyle \Lambda_h = \mathcal{T}^{10}_h \mathcal{P}_h + \mathcal{T}^{00}_h \quad \textnormal{and} \quad \lambda^+_h = \frac{(\Lambda_h \varphi_h)(0)}{{\boldsymbol{\theta}}i_2}, \] where $\mathcal{T}^{10}_h$ and $\mathcal{T}^{00}_h$ are computed using one of the methods presented in Sections \ref{sec:methode_2D} and \ref{sec:methode_quasi1D}, and where $\mathcal{P}_h$ is the solution of the discrete Riccati equation \eqref{eq:Riccati_discrete}. \subsection{Numerical results} \label{sec:numerical_results} We present some numerical results to validate the method, to illustrate its efficiency, and to compare the multi-dimensional and the quasi one-dimensional methods in the case where the order of quasiperiodicity is set to $n = 2$. 
Simulations will be carried out with the periodic coefficients $\mu_p$ and $\rho_p$, defined for ${\textit{\textbf{y}}} = (y_1, y_2) \in \R^2$ by \[\displaystyle \mu_p({\textit{\textbf{y}}}) = 1.5 + \cos (2\pi y_1)\, \cos (2\pi y_2) \quad \textnormal{and} \quad \rho_p({\textit{\textbf{y}}}) = 1.5 + 0.5\, \sin (2\pi y_1) + 0.5\, \sin (2\pi y_2). \] We set ${\boldsymbol{\theta}} = (\cos\pi/3, \sin\pi/3)$. As the ratio ${\boldsymbol{\theta}}i_2 / {\boldsymbol{\theta}}i_1 = \sqrt{3}$ is irrational, ${\boldsymbol{\theta}}$ is an irrational vector. For $a = 1$, the source term $f$ is the cut-off function \[\displaystyle \forall\; x \in \R, \quad f(x) = \exp \left( 100\, \big(1 - 1 / (1 - x^2)\big) \right)\; \chi_{(-1, 1)}(x), \] and the local perturbations $\mu_i$ and $\rho_i$ are defined as piecewise constants, so that the coefficients $\mu$ and $\rho$ of the model problem \eqref{eq:whole_line_problem} are represented in Figure \ref{fig:coefficients_mu_rho}. \noindent \begin{figure} \caption{The locally perturbed quasiperiodic coefficients $\mu$ and $\rho$, and the source term $f$.} \label{fig:coefficients_mu_rho} \end{figure} \subsubsection{The half-line and the half-guide solutions}\label{sec:results:half_line_guide} The model problem \eqref{eq:whole_line_problem} is solved by computing the solutions of the half-line problems \eqref{eq:half_line_problems_0}, as well as the DtN coefficients $\lambda^\pm$. In this part, only results regarding the numerical resolution of the problem \eqref{eq:half_line_problem} are going to be presented, as the problem set on $(-\infty, -a)$ provides the same overall results. \paragraph{Error analysis} In order to validate the method, we introduce for $L > 0$ the unique function $u^+_{{\boldsymbol{\theta}}, L}$ in $H^1(0, L)$ that satisfies Problem \eqref{eq:half_line_problem} on the truncated domain $(0, L)$, with $u^+_{{\boldsymbol{\theta}}, L}(L) = 0$.
Similarly, define $\Omega_L := (0, 1)^{n-1} \times (0, L)$, and for any $\varphi \in L^2(\Sigma^\diese_{n, 0})$, let $U^+_{{\boldsymbol{\theta}}, L}(\varphi) \in H^1_{{\boldsymbol{\theta}}}(\Omega_L)$ denote the unique function that satisfies \eqref{eq:half_guide_problem} on $\Omega_L$, with $\restr{U^+_{{\boldsymbol{\theta}}, L}(\varphi)}{y_2 = L} = 0$. \noindent In presence of absorption, the solutions $u^+_{\boldsymbol{\theta}}$ and $U^+_{\boldsymbol{\theta}}(\varphi)$ decay exponentially at infinity (see \eqref{eq:exp_decay_halfline_s} and \eqref{eq:exp_decay_halfguide}), and by studying the problems satisfied by $u^+_{{\boldsymbol{\theta}}, L} - u^+_{\boldsymbol{\theta}}$ and $U^+_{{\boldsymbol{\theta}}, L}(\varphi) - U^+_{\boldsymbol{\theta}}(\varphi)$, it can be proved as in \cite{fliss_giovangigli} that there exist constants $\alpha, c > 0$ such that for any $L > 0$, \begin{equation} \label{eq:estimate_reference_solution} \begin{array}{c} \|u^+_{{\boldsymbol{\theta}}, L} - u^+_{\boldsymbol{\theta}}\|_{H^1(0, L)} \leq c \,e^{-\alpha \mathop{\mathfrak{Im}}\nolimits \omega L}\; \|u^+_{\boldsymbol{\theta}}\|_{H^1(0, L)} \\[10pt] \|U^+_{{\boldsymbol{\theta}}, L}(\varphi) - U^+_{\boldsymbol{\theta}}(\varphi)\|_{H^1_{\boldsymbol{\theta}}(\Omega_L)} \leq c \, e^{-\alpha \mathop{\mathfrak{Im}}\nolimits \omega L}\; \|U^+_{\boldsymbol{\theta}}(\varphi)\|_{H^1_{\boldsymbol{\theta}}(\Omega_L)}. \end{array} \end{equation} with $\alpha = \sqrt{\rho_- / \mu_+}$. In particular, if $L$ is chosen large enough, then $u^+_{{\boldsymbol{\theta}}, L}$ and $U^+_{{\boldsymbol{\theta}}, L}(\varphi)$ can be viewed as suitable approximations of $u^+_{\boldsymbol{\theta}}$ and $U^+_{\boldsymbol{\theta}}(\varphi)$, and thus can serve as reference solutions. 
In the upcoming results, to make the truncation errors in \eqref{eq:estimate_reference_solution} negligible with respect to the errors induced by the numerical method, we choose $L$ so that \begin{equation} \label{eq:criterion_length_truncated_domain} \exp\big(-\sqrt{\rho_- / \mu_+}\, \mathop{\mathfrak{Im}}\nolimits \omega\, L\big) \leq 10^{-10}. \end{equation} The corresponding solutions $u^+_{{\boldsymbol{\theta}}, L}$ and $U^+_{{\boldsymbol{\theta}}, L}(\varphi)$, which will be denoted by $u^+_{\textit{ref}}$ and $U^+_{\textit{ref}}(\varphi)$ respectively, are computed via $\mathbb{P}^1$ Lagrange finite elements, with a mesh step $h = 5 \times 10^{-4}$. \noindent In the following, the boundary data is fixed to $\varphi = 1$, and is omitted in the notation of $U^+_{\boldsymbol{\theta}}$ and $U^+_{\textit{ref}}$. Also, we only plot relative errors corresponding to the $1$D solution, as the errors for the $2$D solution behave similarly. In Figure \ref{fig:erreurs}, the relative error \begin{equation} \displaystyle \label{eq:relative_error} \varepsilon(u^+_{\boldsymbol{\theta}}) := \frac{\|u^+_{{\boldsymbol{\theta}}, h} - u^+_{\textit{ref}}\,\|_{H^1(0, 4/{\boldsymbol{\theta}}i_2)}}{\|u^+_{\textit{ref}}\,\|_{H^1(0, 4/{\boldsymbol{\theta}}i_2)}} \end{equation} is represented with respect to the mesh step $h$, and for both the $2$D and the quasi-$1$D method (with $h_{\boldsymbol{\theta}} = h$ for the quasi-1D method). The solutions are computed using Lagrange finite elements of degree $1$. \noindent One sees that the errors tend to $0$ as $h$ at least, as expected for $\mathbb{P}^1$ Lagrange finite elements. With the quasi-$1$D method however, $\varepsilon(u^+_{\boldsymbol{\theta}})$ behaves as $h^2$. This is a special superconvergence phenomenon, which is probably due to the fact that the problems solved in practice with the quasi-1D method are one-dimensional. Note also that in general, the quasi-1D method appears to be more accurate than the 2D method. 
\begin{figure} \caption{Relative error in $H^1$ norm of the half-line solution for different values of $\omega$.} \label{fig:erreurs:a} \label{fig:erreurs:b} \label{fig:erreurs} \end{figure} \noindent For a fixed mesh step, the relative error increases with the real frequency $\mathop{\mathfrak{Re}}\nolimits \omega$. This is a well-known particularity of the Helmholtz equation: since $\mathop{\mathfrak{Re}}\nolimits \omega$ represents the spatial frequency of the time-harmonic waves, the discretization parameter $h$ has to be adapted in order to take their oscillations into account. \paragraph{Representation of the half-guide solution} The half-guide solution is represented in Figure \ref{fig:half_guide_omega} for different values of $\omega$, when $\varphi = 1$. \begin{figure} \caption{Real part of the half-guide solution computed using the quasi-1D approach, with $\mathbb{P}^1$ Lagrange finite elements.} \label{fig:half_guide_solution_omega:a} \label{fig:half_guide_solution_omega:b} \label{fig:half_guide_solution_omega:c} \label{fig:half_guide_omega} \end{figure} \paragraph{Dependence with respect to the boundary data} The goal of this part is to see how the half-line and the half-guide solutions depend on the boundary data $\varphi$. To do so, we choose three different boundary data: \begin{equation} \varphi_1(s) = 1, \quad \varphi_2(s) = \cos (2\pi s), \quad \textnormal{and} \quad \varphi_3(s) = 1 - \mathbbm{1}_{[1/3, 2/3]}(s). \end{equation} We set $\omega = 8 + 0.25\,\ensuremath{\mathrm{i}}$, and we display results obtained with the quasi-1D method, knowing that the 2D method yields the same conclusions. The computations are carried out using $\mathbb{P}^1$ Lagrange finite elements, with mesh steps $h = h_{\boldsymbol{\theta}} = 2 \times 10^{-3}$.
\begin{figure} \caption{Real part of the half-line solution computed using the quasi-1D approach, with $\mathbb{P}^1$ Lagrange finite elements.} \label{fig:half_line_solution_phi} \end{figure} \begin{figure} \caption{Real part of the half-guide solution computed using the quasi-1D approach, with $\mathbb{P}^1$ Lagrange finite elements.} \label{fig:half_guide_solution:a} \label{fig:half_guide_solution:b} \label{fig:half_guide_solution:c} \label{fig:half_guide_solution} \end{figure} \noindent As expected, and as Figures \ref{fig:half_line_solution_phi} and \ref{fig:half_guide_solution:a}--\ref{fig:half_guide_solution:c} show, the aspect of the half-guide solution changes extensively with respect to the boundary data, whereas the half-line solution looks invariant. \subsubsection{The whole line problem}\label{sec:results:whole_line_problem} The solutions $u^\pm_{\boldsymbol{\theta}}$ of the half-line problems \eqref{eq:half_line_problems_0} allow one to compute the DtN coefficients $\lambda^\pm$, to solve \eqref{eq:interior_problem}, and then to compute the solution $u$ of Problem \eqref{eq:whole_line_problem} using \eqref{eq:solution_of_whole_line_problem}. Recall that the coefficients $\mu$, $\rho$, and the source term $f$ are shown in Figure \ref{fig:coefficients_mu_rho}. The solution of \eqref{eq:whole_line_problem} is represented in Figure \ref{fig:whole_line_solution} for different values of $\omega$. \begin{figure} \caption{$\omega = 8 + 0.25\,\ensuremath{\mathrm{i}}$.} \end{figure} \begin{figure} \caption{$\omega = 20 + 0.25\,\ensuremath{\mathrm{i}}$.} \caption{$\omega = 20 + 0.05\,\ensuremath{\mathrm{i}}$.} \caption{Real part of the solution of \eqref{eq:whole_line_problem}.} \label{fig:whole_line_solution} \end{figure} \subsubsection{About the dependence with respect to the absorption}\label{sec:results:absorption} We come back to the numerical resolution of Problem \eqref{eq:half_line_problem}, and we study the convergence of the $2$D and quasi-1D methods depending on the absorption, especially when it tends to $0$.
As in Section \ref{sec:results:half_line_guide}, the solutions are computed with Lagrange finite elements of degree $1$. The relative error $\varepsilon(u^+_{\boldsymbol{\theta}})$ defined in \eqref{eq:relative_error} is represented in Figure \ref{fig:erreurs_imagomega} for both the $2$D and the quasi-1D method, and for different values of $\mathop{\mathfrak{Im}}\nolimits \omega$. \begin{figure} \caption{Relative error in $H^1$ norm of the half-line solution for different values of $\omega$.} \label{fig:erreurs_imagomega:a} \label{fig:erreurs_imagomega:b} \label{fig:erreurs_imagomega:c} \label{fig:erreurs_imagomega} \end{figure} \noindent As Figure \ref{fig:erreurs_imagomega} shows, the error deteriorates as $\mathop{\mathfrak{Im}}\nolimits \omega$ decreases. It would mean that the numerical method becomes less efficient as the absorption decreases. This issue is closely related to the well-posedness of the local cell problems with Dirichlet boundary conditions when $\mathop{\mathfrak{Im}}\nolimits \omega = 0$. In fact, for the elliptic Helmholtz equation, it is known (see \cite[Section 3.2.1.1]{flissthese} for instance) that the local cell problems are well-posed except for a \emph{countable} set of frequencies which correspond to the eigenvalues of the associated differential operator. In our case however, as the differential operator has a non-elliptic principal part, it also has a continuous spectrum, and one can show that when $\mu_p$ and $\rho_p$ are non-constant, the local cell problems are well-posed \emph{only for frequencies in a bounded set} (that can even be empty). An alternative to avoid this problem is to use a Robin-to-Robin operator instead of the DtN operator, which would involve solving well-posed local cell problems with Robin boundary conditions, as it is done in \cite{fliss2010exact} for periodic media. This will be done in a forthcoming paper for quasiperiodic media.
\subsubsection{About the spectral approximation of the propagation operator}\label{sec:results:spectral_approximation_P} As explained in Subsection \ref{sec:discrete_Riccati_equation}, the discrete propagation operator $\mathcal{P}_h$ is computed by means of its eigenpairs. { In this section, the eigenvalues of $\mathcal{P}_h$ are compared with the spectrum of the exact propagation operator which, according to Proposition \ref{prop:properties_P}, is a circle of radius \[\displaystyle M_{\log}(p_{\boldsymbol{\theta}}) = \exp \Big(\int_0^1 \log |p_{\boldsymbol{\theta}}(s)|\; d s \Big), \quad \textnormal{with} \quad p_{\boldsymbol{\theta}}(s) = u^+_{s-\theta_1/\theta_2,\, {\boldsymbol{\theta}}}(1/\theta_2). \] To compute this radius, $u^+_{s, {\boldsymbol{\theta}}}$ is approximated by the unique function $u^+_{s, {\boldsymbol{\theta}}, L}$ that satisfies \eqref{eq:half_line_problems} on a truncated domain $(0, L)$, with $\smash{u^+_{s, {\boldsymbol{\theta}}, L}(L) = 0}$. One can show estimates similar to \eqref{eq:estimate_reference_solution}, and if $L$ is chosen large enough (for instance, if $L$ satisfies \eqref{eq:criterion_length_truncated_domain}), then $\smash{u^+_{s, {\boldsymbol{\theta}}, L}}$ can be used as a reference solution. In practice, $\smash{u^+_{s, {\boldsymbol{\theta}}, L}}$ is computed for several $s$, and finally the integral that defines $M_{\log}(p_{\boldsymbol{\theta}})$ is evaluated using a rectangular quadrature rule. } \noindent The spectra of $\mathcal{P}_h$ and $\mathcal{P}$ are shown in Figure \ref{fig:spectrum_P} for $\omega = 8 + 0.25\, \ensuremath{\mathrm{i}}$, and for different values of the discretization parameter $h$ (with $h_{\boldsymbol{\theta}} = h$ for the quasi-1D method).
Figure \ref{fig:eigenvalues_in_band} represents the number $N_h$ of eigenvalues of $\mathcal{P}_h$ that are close by $5\%$ to $\sigma(\mathcal{P})$, namely \begin{equation} N_h = \# \bigg\{\lambda_h \in \sigma(\mathcal{P}_h)\ \ \Big/\ \ \bigg| \frac{|\lambda_h| - M_{\log}(p_{\boldsymbol{\theta}})}{M_{\log}(p_{\boldsymbol{\theta}})} \bigg| \leq 5\%\bigg\}. \end{equation} In Figure \ref{fig:eigenvalues_in_band}, one sees that $N_h$ increases with $1/h$, which means that more and more eigenvalues of $\mathcal{P}_h$ are close to $\sigma(\mathcal{P})$ when $h$ decreases. In other words, a finer discretization leads to a better approximation of the spectrum. The number $N_h$ of such eigenvalues also seems to increase linearly with $1/h$ (up to a subsequence for the quasi-1D method). Finally, note that $N_h$ is higher with the quasi-1D method than with the 2D method. \begin{figure} \caption{Number of eigenvalues of $\mathcal{P}_h$ that are close by $5\%$ to $\sigma(\mathcal{P})$, as a function of $1/h$.} \label{fig:eigenvalues_in_band} \end{figure} \begin{figure} \caption{Eigenvalues of the discrete propagation operator (circle-shaped markers) compared to the spectrum of the exact propagation operator (circle in dashed line) for $\omega = 8 + 0.25\, \ensuremath{\mathrm{i}}$.} \label{fig:spectrum_P} \end{figure} \noindent As Figure \ref{fig:spectrum_P} shows, the eigenvalues of $\mathcal{P}_h$ are all included in the disk of radius $\rho(\mathcal{P})$, but one observes some spectral pollution. This is a classical phenomenon when one approximates the spectrum of an operator which is neither compact nor self-adjoint. What is striking, however, is that the pollution behaviours are very different depending on the method used. On the one hand, the eigenvalues obtained with the 2D approach tend to accumulate to $0$. A likely explanation for this phenomenon is that solving the local cell problems on 2D meshes does not take their directional structure into account.
Since the location of the eigenvalues of $\mathcal{P}_h$ is similar to the one obtained in the elliptic case, for which $\mathcal{P}$ is compact (see \cite[Theorem 3.1]{jolyLiFliss}), we believe the 2D method somehow regularizes the half-guide problem \eqref{eq:half_guide_problem} by introducing an elliptic (discrete) approximation of the corresponding differential operator. On the other hand, with the quasi-1D approach, the spectrum of $\mathcal{P}_h$ ``oscillates'' as the discretization parameter $h$ tends to $0$. This phenomenon has to do with the particular nature of $\mathcal{P}$ which is a weighted translation operator. We strongly suspect that one can extract a subsequence $(\mathcal{P}_{h'})$ whose spectrum converges towards $\sigma(\mathcal{P})$ in a sense to be defined precisely, as it is suggested by the peaks in Figure \ref{fig:eigenvalues_in_band}. The investigation of this assumption as well as the construction of such a subsequence are subject to ongoing works. \noindent \textcolor{surligneur}{With both approaches, it has been observed numerically that the eigenfunctions associated to the spurious eigenvalues were highly oscillating functions that were badly approximated by the discretization, whereas the components of the half-guide solution with respect to these eigenfunctions are very small. This might explain why the spectral pollution does not have a visible influence on the approximation of the half-guide and the half-line solutions, as the errors in Figure \ref{fig:erreurs} seem to suggest.} \section{Perspectives and ongoing works} A numerical method has been proposed to solve the Helmholtz equation in $1$D unbounded quasiperiodic media. Using the presence of absorption, we justified that this equation could be lifted onto a higher-dimensional problem which, in turn, can be solved using a Dirichlet-to-Neumann approach. For the discretization, we presented a multi-dimensional method, as well as a so-called quasi one-dimensional method.
As shown by numerical simulations, both methods provide a suitable approximation of the solution as long as there is absorption. However, the quasi-1D method proved to be more efficient than the 2D method, as it takes the anisotropy of the problems involved into account. \noindent The method presented opens up numerous perspectives, and raises multiple questions that are subject to ongoing works. For instance, it would be interesting to approximate efficiently the spectrum of the propagation operator, even though the spectral pollution seems to have no major impact on the efficiency of the overall method. Another key extension concerns the case where the absorption tends to $0$. This extension, which will be presented in a subsequent paper, involves replacing the DtN method by a Robin-to-Robin method as explained in Section \ref{sec:results:half_line_guide}, and finding a way to characterize the propagation operator which is no longer uniquely defined. \noindent Finally, an approach which is similar to the one presented in this paper can be used to study the propagation of waves in presence of a $2$D periodic half-space when the interface does not lie in any direction of periodicity, or in presence of two $2$D periodic half-spaces with non-commensurable periods. \printbibliography \end{document}
\begin{document} \cleardoublepage \title{Real structures on minimal ruled surfaces} \author{Jean-Yves Welschinger \\} \maketitle \makeatletter\renewcommand{\@makefnmark}{}\makeatother \footnotetextsans{Keywords : Ruled surface, real algebraic surface.} \footnotetextsans{Classification AMS : 14J26, 14P25.} {\bf Abstract :} In this paper, we give a complete description of the deformation classes of real structures on minimal ruled surfaces. In particular, we show that these classes are determined by the topology of the real structure, which means, using the terminology of \cite{KhDg2}, that real minimal ruled surfaces are quasi-simple. As an intermediate result, we obtain the classification, up to conjugation, of real structures on decomposable ruled surfaces. \section*{Introduction} Let $X$ be a smooth compact complex surface. A {\it real structure} on $X$ is an antiholomorphic involution $c_X : X \to X$. The {\it real part} of $(X , c_X)$ is by definition the fixed point set of $c_X$. If $X$ admits a holomorphic submersion on a smooth compact complex irreducible curve $B$ whose fibers have genus zero, then it is called a {\it minimal ruled surface}. These surfaces are all algebraic, minimal and of kodaira dimension $- \infty$ (see \cite{Beau}). Real minimal ruled surfaces are one of the few examples of real algebraic surfaces of special type whose classification under real deformation is not known, see the recent results \cite{KhDg2}, \cite{KhDg}, \cite{Cat} and the survey \cite{Kh} for detailed history and references. The purpose of this paper is to fill this gap. Since all the ruled surfaces considered in this paper will be minimal, from now on we will call them ``ruled'' rather than ``minimal ruled''. Rational surfaces are well known (see \cite{KhDg2}), so we can restrict ourselves to non-rational ruled surfaces. 
The ruling $p : X \to B$ is then unique and any real structure $c_X$ on $X$ is fibered over a real structure $c_B$ on $B$ in the sense that $c_B \circ p = p \circ c_X$. The topology of the real part of $X$ as well as the topology of the real curve $(B , c_B)$ provide us with a topological invariant under real deformation which we call {\it the topological type} of the surface. This invariant is encoded by a quintuple of integers : the number of tori and Klein bottles of ${\Bbb{R}} X$, the genus of $B$, the number of components of ${\Bbb{R}} B$ and the type of $(B , c_B)$ (see \S \ref{subsectiontoptype}). The main result of this paper is the following (see theorem \ref{theoremdeformation} and proposition \ref{proposexistence}) : \begin{theo} \label{theointro} Two real (minimal) non-rational ruled surfaces are in the same real deformation class if and only if they have the same topological type and homeomorphic quotients. Moreover, any allowable quintuple of integers is realized as the topological type of a real non-rational ruled surface. \end{theo} Note that as soon as the bases of the surfaces have non-empty real parts, the condition on the quotients can be removed. A quintuple of integers is called {\it allowable} when it satisfies the few obvious conditions satisfied by topological types of real non-rational ruled surfaces, see \S \ref{subsectiontoptype} for a precise definition. Remember that any compact complex surface lying in the deformation class of a non-rational ruled surface is itself a non-rational ruled surface (see \cite{BPV} for example). A definition of real deformation classes can be given as follows. Equip the Poincar\'e's disk $\Delta \subset {\Bbb{C}}$ with the complex conjugation $conj$. A {\it real deformation} of surfaces is a proper holomorphic submersion $\pi : Y \to \Delta$ where $Y$ is a complex manifold of dimension $3$ equipped with a real structure $c_Y$ and $\pi$ satisfies $\pi \circ c_Y = conj \circ \pi$. 
Then, when $t \in ]-1 , 1 [ \subset \Delta$, the fibers $Y_t = \pi^{-1} (t)$ are invariant under $c_Y$ and hence are compact real surfaces. Two real surfaces $X'$ and $X''$ are said to be {\it in the same deformation class} if there exists a chain $X'=X_0, \dots , X_k=X''$ of compact real surfaces such that for every $i \in \{ 0 , \dots , k-1 \}$, the surfaces $X_i$ and $X_{i+1}$ are isomorphic to some real fibers of a real deformation. Remember that every ruled surface is the projectivisation $P (E)$ of a rank two complex vector bundle $E$ over $B$ (see \cite{Beau}). Moreover $P (E)$ and $P (E')$ are isomorphic if and only if $E' = E \otimes L$ where $L$ is a complex line bundle over $B$. A ruled surface is said to be {\it decomposable} if $E$ is decomposable, that is if $E$ is the direct sum of two complex line bundles. The paper is organized as follows. In the first section, we give constructions of some particular real structures on decomposable ruled surfaces. In the second section we obtain a classification, up to conjugation, of real structures on decomposable ruled surfaces (see theorem \ref{theoremrealstruct}). This result, of independent interest, plays a crucial r\^ole in the proof of theorem \ref{theointro}. In this section is also given a result independent of real algebraic geometry, which concerns the lifting of automorphisms of the ruled surface $X$ to automorphisms of the rank two vector bundle $E$, see proposition \ref{propaut}. Finally, the third section is devoted to the proof of theorem \ref{theointro}. This gives a complete description of the deformation classes of real structures on ruled surfaces. In particular, it shows that these classes are determined by the topology of the real structure, which means, using the terminology of \cite{KhDg2}, that real ruled surfaces are quasi-simple.
\section{Construction of some particular real structures} \subsection{Meromorphic functions and real structures} Let $B$ be a smooth compact complex irreducible curve. Denote by $\mathop{\rm Pic}\nolimits(B)$ the group of complex line bundles over $B$. This group is identified with the group of divisors modulo principal ones. Let $\phi : B \to B$ be a holomorphic or anti-holomorphic automorphism, and let $D=\sum_{i=1}^k n_i p_i$, $n_i \in {\Bbb{Z}}$, $p_i \in B$, be a divisor on $B$. Then we denote by $\phi^* (D)$ the divisor $\sum_{i=1}^k n_i \phi^{-1} (p_i)$ and by $\phi (D)$ the divisor $\sum_{i=1}^k n_i \phi (p_i)$. The morphism on the quotient $ \mathop{\rm Pic}\nolimits(B)$ of the group of divisors induced by $\phi^*$ will also be denoted by $\phi^*$. We denote by $L_0$ the trivial line bundle over $B$ and by $L^*$ the line bundle dual to $L$, so that $L \otimes L^* = L_0$. Suppose from now on that $B$ is equipped with a {\it real structure} $c_B$, that is an anti-holomorphic involution $c_B$. \begin{lemma} \label{lemmaL} Let $L \in \mathop{\rm Pic}\nolimits (B)$ be a line bundle such that $c_B^* (L)=L$. Then, for every divisor $D$ associated to $L$, there exists a meromorphic function $f_D$ on $B$ such that $\mathop{\rm div}\nolimits (f_D) = c_B (D) - D$ and $f_D \times \overline{f_D \circ c_B} = \pm 1$. \end{lemma} {\bf Proof :} By assumption, $D$ and $c_B (D)$ are linearly equivalent. As a consequence, there exists a meromorphic function $f$ such that $\mathop{\rm div}\nolimits (f) = c_B (D) - D$. Then, $g = \overline{f \circ c_B}$ is a meromorphic function on $B$ satisfying $\mathop{\rm div}\nolimits (g) = D -c_B (D) $. So $fg$ is a holomorphic function on $B$. This means that there exists a constant $\lambda \in {\Bbb{C}}^*$ such that $f \times \overline{f \circ c_B} = \lambda$. 
But for all $x \in B$, $$\lambda = f \times \overline{f \circ c_B} (c_B (x))= f \circ c_B (x) \times \overline{f (x)} = \overline{ f(x) \times \overline{f \circ c_B (x)}} = \overline{\lambda }$$ Thus $\lambda \in {\Bbb{R}}^*$, and we define $f_D = \frac{1}{\sqrt{|\lambda|}} f$. $\square$\\ \begin{reml} \label{remarkL} As soon as ${\Bbb{R}} B$ is non-empty, $f_D \times \overline{f_D \circ c_B} = +1$, since for every $x \in {\Bbb{R}} B$ we have $f_D \times \overline{f_D \circ c_B} (x) = |f (x)|^2 \geq 0$. Nevertheless, when ${\Bbb{R}} B = \emptyset$, there always exists a divisor $D$ on $B$, of degree congruent to $g(B) -1 \mod (2)$ where $g(B)$ is the genus of $B$, such that $f_D \times \overline{f_D \circ c_B} = -1$ (see \cite{GH}, proposition 2.2). Note also that the function $f_D$ given by lemma \ref{lemmaL} is not unique, since for every constant $\lambda \in {\Bbb{C}}$ such that $|\lambda| = 1$, the function $\lambda f_D$ has the same properties. \end{reml} \begin{lemma} \label{lemmaL*} Let $L \in \mathop{\rm Pic}\nolimits (B)$ be a line bundle such that $c_B^* (L)=L^*$. Then, for every divisor $D$ associated to $L$, there exists a meromorphic function $f_D$ on $B$ such that $\mathop{\rm div}\nolimits (f_D) = D + c_B (D)$ and $f_D = \overline{f_D \circ c_B}$. \end{lemma} {\bf Proof :} By assumption, $c_B (D)$ and $-D$ are linearly equivalent. As a consequence, there exists on $B$ a meromorphic function $f$ such that $\mathop{\rm div}\nolimits (f) = D + c_B (D)$. Then, $g = \overline{f \circ c_B}$ is a meromorphic function on $B$ satisfying $\mathop{\rm div}\nolimits (g) = c_B (D) + D = \mathop{\rm div}\nolimits (f)$. Thus there exists a constant $\lambda \in {\Bbb{C}}^*$ such that $g=\lambda f$.
But, $$\lambda = \frac{g \circ c_B }{f \circ c_B } = \frac{\overline{f}}{f \circ c_B } = \overline{ \Big(\frac{f}{\overline{f \circ c_B }} \Big)} = \frac{1}{\overline{\lambda }}$$ Hence there exists $\theta \in {\Bbb{R}}$ such that $\lambda = \exp (2i\theta)$, and we define $f_D = \exp (i\theta) f$. $\square$ \begin{rem} \label{remarksign} The function $f_D$ given by lemma \ref{lemmaL*} is not unique : for every $\lambda \in {\Bbb{R}}^*$, the function $\lambda f_D$ has the same properties. Note that every zero or pole of $f_D$ on ${\Bbb{R}} B$ has even order, so that the sign of $f_D$ is constant on every component of ${\Bbb{R}} B$. \end{rem} \subsection{Some particular real structures} Let $D = \sum_{i=1}^k n_i p_i $ be a divisor on $B$, where $p_i \in B$ and $n_i \in {\Bbb{Z}}$ ($i \in \{1, \dots , k\}$). We can assume that the set $\{ p_i \, | \, 1 \leq i \leq k \}$ is invariant under $c_B$ (add some points with zero coefficients to $D$ if necessary). Denote by $U_0 = B \setminus \{ p_i \, | \, 1 \leq i \leq k \}$ and for every $i \in \{1, \dots , k\}$, choose a holomorphic chart $(U_{p_i} , \phi_{p_i})$ such that $U_{p_i} \cap U_{p_j} = \emptyset$ if $i \neq j$, $c_B (U_{p_i}) = U_{c_B(p_i)}$ and $\phi_{p_i} : U_{p_i} \to \Delta = \{ z \in {\Bbb{C}} \, | \, |z| < 1 \}$ is a biholomorphism. Require in addition that $\phi_{p_i} (p_i) = 0 \in \Delta$ and $\phi_{c_B(p_i)} \circ c_B \circ \phi_{p_i}^{-1} (z) = \overline{z}$ for all $z \in \Delta$ and $i \in \{1, \dots , k\}$ (such charts always exist, see \cite{Nat}). Such an atlas is called {\it compatible} with the divisor $D$ and the group $<c_B>$. For every $i \in \{1, \dots , k\}$, denote by $\psi_i$ the morphism : $$\begin{array}{rcl} (U_{p_i} \setminus p_i) \times {\Bbb{C}} & \to & U_0 \times {\Bbb{C}} \\ (x,z) & \mapsto & (x , \phi_{p_i} (x)^{-n_i} z).
\end{array}$$ The morphisms $\psi_i$ allow to glue together the trivialisations $U_{p_i} \times {\Bbb{C}}$, $i \in \{0, \dots , k\}$, in order to define the line bundle $L$ associated to $D$. Such trivialisations are called {\it compatible} with the divisor $D$ and the group $<c_B>$. Let $L$ (resp. $X$) be a line bundle (resp. a ruled surface) over $B$. The real structure $c_L$ on $L$ (resp. $c_X$ on $X$) is said to be {\it fibered over} $c_B$, or that it {\it lifts} $c_B$, if $p \circ c_L = c_B \circ p$ (resp. $p \circ c_X = c_B \circ p$) where $p$ is the projection $L \to B$ (resp. $X \to B$). \begin{lemma} \label{lemmaRL} There exists a real structure on $L \in \mathop{\rm Pic}\nolimits (B)$ which lifts $c_B$ if and only if $c_B^* (L)=L$ and for every couple $(D,f_D)$ given by lemma \ref{lemmaL}, $f_D \times \overline{f_D \circ c_B} = + 1$. \end{lemma} {\bf Proof :} ${\bf \Longrightarrow :}$ Let $s$ be a meromorphic section of $L$ and $D = \mathop{\rm div}\nolimits (s)$. Let $c_L$ be a real structure on $L$ and $\tilde{s}= c_L \circ s \circ c_B$. Then $\tilde{s}$ is another meromorphic section of $L$. This implies that $\mathop{\rm div}\nolimits (\tilde{s})$ and $\mathop{\rm div}\nolimits (s)$ are linearly equivalent. Since $\mathop{\rm div}\nolimits (\tilde{s}) = c_B (\mathop{\rm div}\nolimits (s))$, we deduce that $c_B^* (L) = L$. Moreover, $\tilde{s} = fs$ where $f$ is a meromorphic function on $B$ satisfying $\mathop{\rm div}\nolimits (f) = c_B (D) - D$. Since $s=c_L \circ \tilde{s} \circ c_B = c_L \circ (fs) \circ c_B = \overline{f \circ c_B} \times \tilde{s} = \overline{f \circ c_B} \times fs$, we have $\overline{f \circ c_B} \times f = +1$. Changing the section $s$, the same is obtained for any couple $(D,f_D)$ given by lemma \ref{lemmaL}. ${\bf \Longleftarrow :}$ Let $L$ be a line bundle such that $c_B^* (L)=L$ and $(D,f_D)$ a couple given by lemma \ref{lemmaL} such that $f_D \times \overline{f_D \circ c_B} = + 1$. 
Denote $D= \sum_{i=1}^k n_i p_i$ and let $U_0 = B \setminus \{ p_i \, | \, 1 \leq i \leq k \}$ and $(U_{p_i} , \phi_{p_i})$, $i \in \{1, \dots , k\}$, be an atlas compatible with the divisor $D$ and the group $<c_B>$. The maps $$\begin{array}{rcl} U_0 \times {\Bbb{C}} & \to & U_0 \times {\Bbb{C}} \\ (x,z) & \mapsto & (c_B (x) , f_D \circ c_B (x) \overline{z}), \end{array}$$ and for every $i \in \{1, \dots , k\}$, $$\begin{array}{rcl} U_{p_i} \times {\Bbb{C}} & \to & U_{c_B(p_i)} \times {\Bbb{C}} \\ (x,z) & \mapsto & (c_B (x) , f_D \circ c_B (x) \overline{\phi_{p_i} (x)}^{n_{c_B(p_i)}-n_{p_i}} \overline{z}) \end{array}$$ glue together to form an antiholomorphic map $c_L$ on $L$. This map lifts $c_B$ and is an involution, hence the result. $\square$ \begin{prop} \label{propRL*} Let $L \in \mathop{\rm Pic}\nolimits (B)$ be a line bundle such that $c_B^* (L) = L^*$. Then to every couple $(D, f_D)$ given by lemma \ref{lemmaL*} is associated a real structure $c_{f_D}$ on the ruled surface $X= P(L \oplus L_0)$ which lifts $c_B$. The real part of $(X , c_{f_D})$ is orientable and consists of $t^+$ tori, where $t^+$ is the number of components of ${\Bbb{R}} B$ on which $f_D$ is non-negative (see remark \ref{remarksign}). \end{prop} \begin{rem} \label{remarkcx+} For the sake of simplicity, when there will not be any ambiguity on the choice of the function $f_D$, we will denote by $c_X^+$ (resp. $c_X^-$) the real structure $c_{f_D}$ (resp. $c_{-f_D}$). The real part of $(X, c_{-f_D})$ consists of $t^-$ tori, where $t^-$ is the number of components of ${\Bbb{R}} B$ on which $f_D \leq 0$. Obviously, $t^+ + t^- = \mu ({\Bbb{R}} B)$, where $\mu ({\Bbb{R}} B)$ is the number of components of ${\Bbb{R}} B$. Thus, when $\mu ({\Bbb{R}} B)$ is odd, the real structures $c_X^+$ and $c_X^-$ on $X$ cannot be conjugated, since the numbers of components of their real parts do not have the same parity. Nevertheless, these two real structures may sometimes be conjugated. 
This situation will be studied in the next section, proposition \ref{propconjcx+}. \end{rem} {\bf Proof :} Let $(D, f_D)$ be a couple given by lemma \ref{lemmaL*}, so that $f_D = \overline{f_D \circ c_B}$ and $\mathop{\rm div}\nolimits (f_D) = D + c_B (D)$. Let $p_i \in B$ and $n_i \in {\Bbb{Z}}$, $i \in \{1, \dots , k\}$, be such that $D = \sum_{i=1}^k n_i p_i $. We can assume that the set $\{ p_i \, | \, 1 \leq i \leq k \}$ is invariant under $c_B$. Let $U_0 = B \setminus \{ p_i \, | \, 1 \leq i \leq k \}$ and $(U_{p_i} , \phi_{p_i})$, $i \in \{1, \dots , k\}$, be an atlas compatible with the divisor $D$ and the group $<c_B>$. The morphisms : $$\begin{array}{rcl} (U_{p_i} \setminus p_i) \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1 \\ (x,(z_1 : z_0)) & \mapsto & (x , (\phi_{p_i} (x)^{-n_i} z_1 : z_0)) \end{array}$$ ($i \in \{1, \dots , k\}$) allow to glue together the trivialisations $U_{p_i} \times {\Bbb{C}} P^1$, $i \in \{0, \dots , k\}$, in order to define the ruled surface $X$. Now, the maps $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1& \to & U_0 \times {\Bbb{C}} P^1 \\ (x,(z_1 : z_0)) & \mapsto & (c_B (x) , (\overline{z_0} : f_D \circ c_B (x) \overline{z_1})), \end{array}$$ and for every $i \in \{1, \dots , k\}$, $$\begin{array}{rcl} U_{p_i} \times {\Bbb{C}} P^1& \to & U_{c_B(p_i)} \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (c_B (x) , (\overline{z_0} : f_D \circ c_B (x)\overline{\phi_{p_i} (x)}^{-n_{c_B(p_i)}-n_{p_i}} \overline{z_1})) \end{array}$$ glue together to form an antiholomorphic map $c_{f_D}$ on $X$. This map lifts $c_B$ and is an involution. The first part of proposition \ref{propRL*} is proved.
Now, the fixed point set of $c_{f_D}$ in $U_0 \times {\Bbb{C}} P^1$ is : $$\{ (x, (\theta : \sqrt{f_D (x)})) \in U_0 \times {\Bbb{C}} P^1 \, | \, x \in {\Bbb{R}} B, f_D (x) \geq 0 \text{ and } \theta \in {\Bbb{C}} , |\theta |=1 \}.$$ The connected components of this fixed point set are then tori or cylinders depending on whether the corresponding component of ${\Bbb{R}} B$ is completely included in $U_0$ or not. Similarly, the fixed point set of $c_{f_D}$ in $U_{p_i} \times {\Bbb{C}} P^1$ is : $$\{ (x, (\theta_i : \sqrt{f_D (x) \times x_i^{-2n_i}})) \in U_{p_i} \times {\Bbb{C}} P^1 \, | \, x \in {\Bbb{R}} B, f_D (x) \geq 0 \text{ and } \theta_i \in {\Bbb{C}} , |\theta_i |=1 \},$$ where $x_i = \phi_{p_i} (x)$. This fixed point set is a cylinder if $p_i \in {\Bbb{R}} B$ and is empty otherwise. The gluing maps between these cylinders are given by $\theta = - \theta_i$ if $x_i = \phi_{p_i} (x) < 0 $ and by $\theta = \theta_i$ if $x_i = \phi_{p_i} (x) > 0 $. Since both $id$ and $-id$ preserve the orientation of the circle $U^1 = \{ z \in {\Bbb{C}} \, | \, |z| =1 \}$, the results of these gluings are always tori. Thus, the real part of $(X, c_{f_D})$ consists only of tori and the number of such tori is the number of components of ${\Bbb{R}} B$ on which $f_D \geq 0$, that is $t^+$. $\square$ \section{Conjugacy classes of real structures on decomposable ruled surfaces} \subsection{Lifting of automorphisms of $X$} I could not find the following proposition in the literature, so I give it here. \begin{prop} \label{propaut} Let $L$ be a complex line bundle over $B$ and $X$ be the ruled surface $P (E)$, where $E=L \oplus L_0$. If $L \neq L^*$ or if $L = L_0$, then every automorphism of $X$ fibered over the identity of $B$ lifts to an automorphism of $E$. If $L = L^*$ and $L \neq L_0$, then the automorphisms of $X$ fibered over the identity of $B$ which lift to automorphisms of $E$ form an index two subgroup of the group of automorphisms of $X$ fibered over the identity. 
In that case, the automorphisms of $X$ which do not lift are of the form $$\phi_\lambda = \left[ \begin{array}{cc} 0&\lambda s\\ s&0 \end{array} \right],$$ where $\lambda \in {\Bbb{C}}^*$ and $s$ is a non-zero meromorphic section of $L$. \end{prop} \begin{rem} The automorphisms $\phi_\lambda$ introduced in proposition \ref{propaut} are holomorphic involutions of $X$. \end{rem} {\bf Proof :} Denote by ${\cal O}_B^*$ the sheaf of holomorphic functions on $B$ which do not vanish and by ${\cal A}ut (E)$ (resp. ${\cal A}ut (X)$) the sheaf of automorphisms of $E$ (resp. of $X$) fibered over the identity. These sheaves satisfy the exact sequence : $$1 \to {\cal O}_B^* \to {\cal A}ut (E) \to {\cal A}ut (X) \to 1$$ We deduce the following long exact sequence : $$1 \to H^0 (B, {\cal O}_B^*) \to H^0 (B, {\cal A}ut (E)) \to H^0 (B, {\cal A}ut (X)) \to H^1 (B, {\cal O}_B^*) \to H^1 (B, {\cal A}ut (E)) \to \dots$$ We are searching for the image of the morphism $H^0 (B, {\cal A}ut (E)) \to H^0 (B, {\cal A}ut (X))$. To compute this image, let us study the kernel of the map $i_* : H^1 (B, {\cal O}_B^*) \to H^1 (B, {\cal A}ut (E))$. Remember that the group $H^1 (B, {\cal O}_B^*)$ is isomorphic to $\mathop{\rm Pic}\nolimits (B)$. Such an isomorphism can be defined as follows : fix a divisor $\sum_{j=1}^t r_j q_j$, where for $j \in \{1, \dots , t\}$, $r_j \in {\Bbb{Z}}$ and $q_j \in B$. Denote by $U_0 = B \setminus \{ q_j \, | \, 1 \leq j \leq t \}$ and for every $j \in \{1, \dots , t\}$, choose a holomorphic chart $(U_{q_j} , \phi_{q_j})$ of $B$ such that $U_{q_j} \cap U_{q_{j'}} = \emptyset$ if $j \neq j'$, $\phi_{q_j} : U_{q_j} \to \Delta = \{ z \in {\Bbb{C}} \, | \, |z| < 1 \}$ is a biholomorphism and $\phi_{q_j} (q_j) = 0 \in \Delta$.
Denote by ${\cal U}$ the covering of $B$ defined by $U_0, \dots, U_t$ and consider the following sections of ${\cal O}_B^*$ ($j \in \{1, \dots , t\}$) : \begin{eqnarray*} l^1_{0j} : U_0 \cap U_j & \to & {\Bbb{C}}^* \\ x & \mapsto & \phi_{q_j} (x)^{r_j} = x_j^{r_j}, \end{eqnarray*} where by definition $x_j = \phi_{q_j} (x) \in \Delta$. These sections define a $1$-cocycle of $B$ with coefficient in ${\cal O}_B^*$ and we denote with the same letter $l^1$ its cohomology class in $H^1 ({\cal U}, {\cal O}_B^*)$ and in $H^1 (B, {\cal O}_B^*)$. This construction defines an isomorphism between $\mathop{\rm Pic}\nolimits (B)$ and $H^1 (B, {\cal O}_B^*)$. So let $l^1 \in H^1 (B, {\cal O}_B^*)$ be associated to the divisor $\sum_{j=1}^t r_j q_j$. Then $m^1 = i_* (l^1)$ is the cohomology class of the $1$-cocycle with coefficient in ${\cal A}ut (E)$ defined by the following sections ($j \in \{1, \dots , t\}$) : \begin{eqnarray*} m^1_{0j} : U_0 \cap U_j & \to & {\cal A}ut (E) \\ x & \mapsto & \left[ \begin{array}{cc} x_j^{r_j}&0\\ 0&x_j^{r_j} \end{array} \right]. \end{eqnarray*} Suppose that $m^1 = 0 \in H^1 (B, {\cal A}ut (E))$. Then $\sum_{j=1}^t r_j q_j$ is of degree zero, since $0=\det (m^1) = 2l^1 \in H^1 (B, {\cal O}_B^*)$. Moreover, since the map $H^1 ({\cal U},{\cal A}ut (E)) \to H^1 (B, {\cal A}ut (E))$ is injective (see \cite{Mir}, lemma $3.11$, p$294$), $m^1$ is the coboundary of a $0$-cochain given in the covering ${\cal U}$ by the following sections ($j \in \{0, \dots , t\}$) : \begin{eqnarray*} m^0_{j} : U_j & \to & {\cal A}ut (E) \\ x & \mapsto & \left[ \begin{array}{cc} a_j (x)&c_j (x)\\ b_j (x)&d_j (x) \end{array} \right], \end{eqnarray*} where $a_j$, $d_j$ are $0$-cochains with coefficients in ${\cal O}_B$, $c_j$ is a $0$-cochain with coefficient in ${\cal O}_B (L)$, $b_j$ is a $0$-cochain with coefficient in ${\cal O}_B (L^*)$ and $a_j d_j - b_jc_j$ does not vanish.
Then, the equality $m^1 = \delta m^0$ can be written : $$\forall j \in \{1, \dots , t\}, \quad m^1_{0j}= m^0_0 (m_j^0)^{-1},$$ which rewrites as $m^0_0 = x_j^{r_j} m_j^0$ ($j \in \{1, \dots , t\}$). Hence, we deduce that for $j \in \{1, \dots , t\}$, $a_0 = x_j^{r_j} a_j$, $d_0 = x_j^{r_j} d_j$, $b_0 = x_j^{r_j} b_j$ and $c_0 = x_j^{r_j} c_j$. As soon as $a_0$ (resp. $d_0$) is non-zero, this implies that $a_0$ (resp. $d_0$) is a meromorphic function over $B$ satisfying $\mathop{\rm div}\nolimits (a_0) \geq \sum_{j=1}^t r_j q_j$ (resp. $\mathop{\rm div}\nolimits (d_0) \geq \sum_{j=1}^t r_j q_j$). Since these two divisors are of degree zero, they are equal. So $\sum_{j=1}^t r_j q_j$ is a principal divisor and $l^1 = 0$. When $a_0 = d_0 = 0$, we deduce that $b_0$ (resp. $c_0$) is a meromorphic section of $L^*$ (resp. of $L$) satisfying $\mathop{\rm div}\nolimits (b_0) \geq \sum_{j=1}^t r_j q_j$ (resp. $\mathop{\rm div}\nolimits (c_0) \geq \sum_{j=1}^t r_j q_j$). Since $\deg (L) = -\deg (L^*)$, these divisors are equal. We then deduce that $L = L^*$ and that this line bundle is associated to the divisor $\sum_{j=1}^t r_j q_j$. In conclusion, when $L \neq L^*$, the morphism $i_*$ is injective and when $L = L^*$, $L \neq L_0$, the kernel of $i_*$ is included into the subgroup of $H^1 (B, {\cal O}_B^*) = \mathop{\rm Pic}\nolimits (B)$ generated by $L$, which is of order two. In that case, it is not difficult to check that the kernel of $i_*$ is exactly this subgroup of order two. Indeed, with the preceding notations, it suffices to let $a_0$ and $d_0$ be equal to $0$ and let $b_0$ and $c_0$ be equal to a same meromorphic section of $L$. This constructs a $0$-cochain $m^0$ such that $\delta m^0 = i_* (L)$. The first part of the proposition is proved. 
To check the second part of the proposition, note that when $L = L^* \neq L_0$, $H^0 (B , L) = H^0 (B , L^*) = 0$, so that the automorphisms of $E = L \oplus L_0$ fibered over the identity of $B$ are of the form $$ \left[ \begin{array}{cc} a&0\\ 0&d \end{array} \right],$$ where $a,d \in {\Bbb{C}}^*$. The automorphisms of $X$ fibered over the identity which lift to $E$ are then of the form $$ \left[ \begin{array}{cc} 1&0\\ 0&\lambda \end{array} \right] \quad (\lambda \in {\Bbb{C}}^*).$$ It follows that the automorphisms $\phi_\lambda$ do not lift to automorphisms of $E$ and that they are the only ones with this property. $\square$ \subsection{The conjugation theorem} Denote by $ c_{L_0}$ the real structure on $L_0$ defined by : $$\begin{array}{rcl} B \times {\Bbb{C}} & \to & B \times {\Bbb{C}} \\ (x,z) & \mapsto & (c_B (x) , \overline{z}) \end{array}$$ This real structure lifts $c_B$. \begin{theo} \label{theoremrealstruct} Let $L$ be a line bundle over a smooth compact complex irreducible curve $B$ equipped with a real structure $c_B$ and let $X=P(L \oplus L_0)$ be the associated decomposable ruled surface. 1. Suppose that $L \neq L^*$ and that there exists a real structure $c_L$ on $L$ which lifts $c_B$. Then there exists, up to conjugation by a biholomorphism of $X$, one and only one real structure on $X$ which lifts $c_B$. It is the real structure induced by $c_L \oplus c_{L_0}$. 2. Suppose that $c_B^* (L) = L^*$. If $L \neq L^*$, then every real structure on $X$ which lifts $c_B$ is conjugated to one of the two structures $c_X^+$ or $c_X^-$ given by proposition \ref{propRL*}. The same result occurs when $L=L_0$ or when $L = L^*$ and there is no real structure on $L$ which lifts $c_B$. 3. Suppose that $c_B^* (L) = L = L^*$, that $L \neq L_0$ and that there exists a real structure $c_L$ on $L$ which lifts $c_B$. 
Then every real structure on $X$ which lifts $c_B$ is conjugated to the real structure $c_L \oplus c_{L_0}$, or to one of the two structures $c_X^+$ or $c_X^-$ given by proposition \ref{propRL*}. In any other case, $X$ does not admit real structures fibered over $c_B$. \end{theo} \begin{rem} It follows from lemma \ref{lemmaRL} and remark \ref{remarkL} that when ${\Bbb{R}} B \neq \emptyset$, there exists a real structure on $L$ which lifts $c_B$ if and only if $c_B^* (L) = L$. Note that in the third case, the real structures $c_X^+$ and $c_X^-$ are not conjugated to $c_L \oplus c_{L_0}$, since they exchange the two disjoint holomorphic sections of zero square of $X$, whereas $c_L \oplus c_{L_0}$ does not. Note also that when $X = B \times {\Bbb{C}} P^1$, or when $\mu ({\Bbb{R}} B)$ is odd, the real structures $c_X^+$ and $c_X^-$ on $X$ are not conjugated (see remark \ref{remarkcx+}). Nevertheless, these two real structures may sometimes be conjugated; see proposition \ref{propconjcx+}. \end{rem} \begin{prop} \label{propexistence} Let $L$ be a line bundle over $(B, c_B)$ and let $X=P(L \oplus L_0)$. Then there exists a real structure on $X$ which lifts $c_B$ if and only if there exists a real structure on $L$ which lifts $c_B$ or $c_B^* (L) = L^*$. \end{prop} {\bf Proof :} ${\bf \Longrightarrow :}$ To begin with, suppose that $\deg (L) \neq 0$. Then, without loss of generality, we can assume that $d= \deg (L) >0$. The holomorphic section $e$ of $X$ associated to $L$ satisfies $e \circ e = -d <0$, since its normal bundle is $L^*$. Any other section $\tilde{e}$ of $X$ is homologous to $e + kv$, where $k \in {\Bbb{Z}}$ and $v$ is the integer homology class of a fiber. When $\tilde{e} \neq e$, we have $\tilde{e} \circ e \geq 0$, which means that $k \geq d$. Then $\tilde{e} \circ \tilde{e} \geq d$ and this proves that $e$ is the only holomorphic section of $X$ with negative square. Thus this section is invariant under the real structure of $X$, and so is its normal bundle. 
This implies that there exists a real structure on $L^*$ which lifts $c_B$. Using duality, there exists one on $L$ which lifts $c_B$. Suppose now that $\deg (L) = 0$. If $L$ is the trivial bundle, then $X = B \times {\Bbb{C}} P^1$ and nothing has to be proved. Otherwise, the sections of $X$ associated to $L$ and $L_0$ are the only ones with zero squares. Indeed, a third holomorphic section with zero square should be disjoint from them and these three sections would give a trivialisation of $X$. This would contradict the assumption that $X \neq B \times {\Bbb{C}} P^1$. As a consequence, we deduce the following alternative : either the real structure $c_X$ preserves these two sections, or it exchanges them. In the first case, $c_X$ preserves the normal bundles and we conclude as before. In the second case, $c_X$ exchanges the normal bundles and so defines a morphism $\hat{c}_X : L^* \to L$, fibered over $c_B$. Let $s$ be a meromorphic section of $L^*$, so that $\mathop{\rm div}\nolimits (s) = -D$ where $D$ is a divisor associated to $L$. Then $\hat{c}_X \circ s \circ c_B$ is a meromorphic section of $L$ and $\mathop{\rm div}\nolimits (\hat{c}_X \circ s \circ c_B) = c_B^* (\mathop{\rm div}\nolimits (s)) = - c_B^* (D)$. Hence $c_B^* (L) = L^*$. ${\bf \Longleftarrow :}$ If there exists a real structure on $L$ which lifts $c_B$, then taking the direct sum with $c_{L_0}$ we get a real structure on $L \oplus L_0$ which lifts $c_B$. This structure induces on $X=P(L \oplus L_0)$ a real structure which lifts $c_B$. If $c_B^* (L) = L^*$, the result follows from proposition \ref{propRL*}. $\square$\\ {\bf Proof of theorem \ref{theoremrealstruct} :} When $X = B \times {\Bbb{C}} P^1$, the second part of theorem \ref{theoremrealstruct} is clear. Indeed, in this case every real structure on $X$ which lifts $c_B$ is the direct sum of $c_B$ and a real structure on ${\Bbb{C}} P^1$. 
Moreover, the group of automorphisms of $X$ fibered over the identity is then equal to $\{ id \} \times Aut ({\Bbb{C}} P^1)$. So the second part of theorem \ref{theoremrealstruct} follows from the well known fact that, up to conjugation, there are two real structures on ${\Bbb{C}} P^1$. Thus, from now on, we can assume that $L \neq L_0$. It follows from proposition \ref{propexistence} that if there exists a real structure on $X$ which lifts $c_B$, then either there exists a real structure $c_L$ on $L$ which lifts $c_B$, or $c_B^* (L) = L^*$. This already proves the last line of theorem \ref{theoremrealstruct}. We will show the theorem in three steps. In the first step, we will prove that if there exists a real structure $c_L$ on $L$ which lifts $c_B$, then every real structure on $X$ of the form $c_X \circ \phi$, where $c_X$ is the real structure of $X$ induced by $c_L \oplus c_{L_0}$ and $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which lifts to an automorphism of $E=L \oplus L_0$, is conjugated to $c_X$. In the second step, we will prove that if $c_B^* (L) = L^*$, then every real structure on $X$ of the form $c_X^+ \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which lifts to an automorphism of $E=L \oplus L_0$, is conjugated either to $c_X^+$ or to $c_X^-$. Finally, in the third step, we will prove that if $c_B^* (L) = L^* = L$, then every real structure on $X$ of the form $c_X^+ \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which does not lift to an automorphism of $E=L \oplus L_0$, is conjugated to a real structure of the form $c_L \oplus c_{L_0}$, where $c_L$ is a real structure on $L$ which lifts $c_B$. Furthermore, this conjugation is given by an automorphism of $X$ fibered over the identity of $B$ which lifts to an automorphism of $E=L \oplus L_0$. 
In particular, when there is no real structure on $L$ which lifts $c_B$, every antiholomorphic map of the form $c_X^+ \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which does not lift to an automorphism of $E=L \oplus L_0$, is not an involution. The theorem follows from these three steps and proposition \ref{propaut}.\\ {\bf First step :} Suppose that there exists a real structure $c_L$ on $L$ which lifts $c_B$ and let $c_X$ be the real structure of $X$ induced by $c_L \oplus c_{L_0}$. Let $\tilde{c}_X$ be another real structure on $X$ which is of the form $c_X \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which lifts to an automorphism of $E=L \oplus L_0$. The aim of this first step is to prove that $c_X$ and $\tilde{c}_X$ are conjugated. Let $\Phi$ be an automorphism of $E= L \oplus L_0$ which lifts $\phi$. Then $\Phi \in End (E) = E \otimes E^* = L \oplus L^* \oplus L_0 \oplus L_0$. Thus there exist $a,d \in {\Bbb{C}}^*$, $b \in H^0 (B , L^*)$ and $c \in H^0 (B , L)$ such that $$\Phi = \left[ \begin{array}{cc} a&c\\ b&d \end{array} \right]$$ By assumption, the line bundle $L$ is not trivial, so that either $L$ or $L^*$ has no non-zero holomorphic section. Without loss of generality, we can assume that it is $L$, so that $c=0$ and $$\Phi = \left[ \begin{array}{cc} a&0\\ b&d \end{array} \right].$$ By assumption, $\tilde{c}_X^2 = id$, which implies that $c_X \circ \phi \circ c_X = \phi^{-1}$. So there exists $\lambda \in {\Bbb{C}}^*$ such that $c_E \circ \Phi \circ c_E = \lambda \Phi^{-1}$. But $$\Phi^{-1} = \frac{1}{ad}\left[ \begin{array}{cc} d&0\\ -b&a \end{array} \right],$$ and $$c_E \circ \Phi \circ c_E = \left[ \begin{array}{cc} \overline{a}&0\\ c_{L_0} \circ b \circ c_L&\overline{d} \end{array} \right].$$ Put $\tilde{\lambda} = \frac{1}{ad} \lambda$, we have $\tilde{\lambda} d=\overline{a}$, $\tilde{\lambda} a=\overline{d}$ and $-\tilde{\lambda} b= c_{L_0} \circ b \circ c_L$. 
The two first conditions imply that $|\tilde{\lambda}| = 1$. Thus there exists $\theta \in {\Bbb{R}}$ such that $\tilde{\lambda} = \exp (2i \theta )$. So the previous conditions can be rewritten as $\exp (i \theta ) d = \overline{\exp (i \theta )a} $, $\exp (i \theta ) a= \overline{\exp (i \theta )d} $ and $- \exp (i \theta )b= c_{L_0} \circ (\exp (i \theta )b) \circ c_L$. Hence we can assume that $$\Phi = \left[ \begin{array}{cc} a&0\\ b&d \end{array} \right],$$ where $d=\overline{a}$ and $b=-c_{L_0} \circ b \circ c_L$ (replace $\Phi$ by $\exp (i \theta ) \Phi$ which also lifts $\phi$). Now, denote by $\Psi$ the automorphism of $E$ defined by $$\Psi = \left[ \begin{array}{cc} 1&0\\ \frac{1}{2}b&\overline{a} \end{array} \right]. $$ Then $$\Psi^{-1} = \frac{1}{\overline{a}} \left[ \begin{array}{cc} \overline{a}&0\\ -\frac{1}{2}b &1 \end{array} \right] \text{, and}$$ \begin{eqnarray*} \Psi^{-1} \circ c_E \circ \Psi &=& \frac{1}{\overline{a}}\left[ \begin{array}{cc} \overline{a}c_L&0\\ -\frac{1}{2}b\circ c_L + \frac{1}{2}c_{L_0} \circ {b} &a c_{L_0} \end{array} \right] \\ &=&\frac{1}{\overline{a}}\left[ \begin{array}{cc} \overline{a}c_L&0\\ c_{L_0} \circ {b} &a c_{L_0} \end{array} \right] \quad \text{since } -b \circ c_L = c_{L_0} \circ {b}\\ &=&\frac{1}{\overline{a}} c_E \circ \Phi. \end{eqnarray*} Denote by $\psi$ the automorphism of $X$ induced by $\Psi$, we then deduce that $\psi^{-1} \circ c_X \circ \psi = \tilde{c}_X$, which was the aim of this first step.\\ {\bf Second step :} Suppose that $c_B^* (L) = L^*$ and fix a real structure $c_X^+$ on $X$ given by proposition \ref{propRL*} (see remark \ref{remarkcx+}). Let $\tilde{c}_X$ be another real structure on $X$ which is of the form $c_X^+ \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which lifts to an automorphism of $E=L \oplus L_0$. The aim of this second step is to prove that $\tilde{c}_X$ is conjugated either to $c_X^+$ or to $c_X^-$. 
Let $\Phi$ be an automorphism of $E= L \oplus L_0$ which lifts $\phi$. Since $\deg (L) = 0$ and $L$ is not trivial, $H^0 (B,L) = H^0 (B , L^*) = 0$. As a consequence, there exist $a,d \in {\Bbb{C}}^*$ such that $$\Phi = \left[ \begin{array}{cc} a&0\\ 0&d \end{array} \right]. $$ Since $\tilde{c}_X^2 = id$, $\frac{a}{d} \in {\Bbb{R}}^*$ and we can assume that $a=1$, $d \in {\Bbb{R}}^*$ (replace $\Phi$ by $\frac{1}{a} \Phi$). Let $\psi$ be the automorphism of $X$ defined by $$\psi = \left[ \begin{array}{cc} 1&0\\ 0&\delta \end{array} \right], $$ where $\delta = \frac{1}{\sqrt{|d|}}$. Then $\psi$ conjugates $\tilde{c}_X$ to one of the two real structures ${c}_X^+$ or ${c}_X^-$. \\ {\bf Third step :} Suppose that $c_B^* (L) = L^*$ and fix a real structure $c_X^+$ on $X$ given by proposition \ref{propRL*}. Let $\tilde{c}_X$ be another real structure on $X$ which is of the form $c_X^+ \circ \phi$, where $\phi$ is an automorphism of $X$ fibered over the identity of $B$ which does not lift to an automorphism of $E=L \oplus L_0$. The aim of this third step is to prove that $\tilde{c}_X$ is conjugated to a real structure of the form $c_L \oplus c_{L_0}$ where $c_L$ is a real structure on $L$ which lifts $c_B$. Note that the automorphism $\phi$ and the involution $c_X^+$ both exchange the sections of $X$ associated to $L$ and $L_0$. Thus $\tilde{c}_X$ preserves these two sections. As a consequence, it also preserves the normal bundles of these sections and so induces a real structure on the line bundle $L$ which lifts $c_B$. Consider then the real structure $c_L \oplus c_{L_0}$ on $X$; it follows from the first and the second step that it is conjugated to $\tilde{c}_X$ by an automorphism of $X$ which lifts to an automorphism of $E$. $\square$ \subsection{When are $c_X^+$ and $c_X^-$ conjugated ?} In this subsection, we give a sufficient condition for $c_X^+$ and $c_X^-$ to be conjugated (see proposition \ref{propconjcx+}). 
One important example where this occurs is given by corollary \ref{corspin}. \begin{prop} \label{propconjcx+} Let $L$ be a line bundle over $(B, c_B)$ such that $c_B^* (L) = L^*$ and let $X=P(L \oplus L_0)$ be the associated ruled surface. Let $(D, f_D)$ be a couple given by lemma \ref{lemmaL*} and $c_{f_D}$, $c_{-f_D}$ be the associated real structures of $X$ (see proposition \ref{propRL*}). Suppose that there exists $\varphi \in Aut (B)$ of finite order such that $\varphi \circ c_B = c_B \circ \varphi$ and : a. either $\varphi^* (L) = L$ and there exists a meromorphic function $g$ on $B$ such that $\mathop{\rm div}\nolimits (g) = \varphi (D) - D$ and $f_D \circ \varphi \times g \circ \varphi \times \overline{g \circ c_B \circ \varphi} = -f_D$, b. or $\varphi^* (L) = L^*$ and there exists a meromorphic function $h$ on $B$ such that $\mathop{\rm div}\nolimits (h) = \varphi (D) + D$ and $h \circ \varphi \times \overline{h \circ c_B \circ \varphi} = -f_D \times f_D \circ \varphi$. Then, the real structures $c_{f_D}$ and $c_{-f_D}$ are conjugated in $X$. \end{prop} \begin{rem} When ${\Bbb{R}} B \neq \emptyset$, the conditions $a.$ and $b.$ can be replaced by $\varphi^* (L) \in \{ L , L^* \}$ and there exists $x \in {\Bbb{R}} B$ such that $f_D \times f_D \circ \varphi (x) <0$. Indeed, it is not difficult to check that in the situation $a$, there always exists a meromorphic function $g$ on $B$ such that $\mathop{\rm div}\nolimits (g) = \varphi (D) - D$ and $f_D \circ \varphi \times g \circ \varphi \times \overline{g \circ c_B \circ \varphi} = \epsilon f_D$ where $\epsilon = \pm 1$. Similarly, in the situation $b$, there always exists a meromorphic function $h$ on $B$ such that $\mathop{\rm div}\nolimits (h) = \varphi (D) + D$ and $h \circ \varphi \times \overline{h \circ c_B \circ \varphi} = \epsilon f_D \times f_D \circ \varphi$, where $\epsilon = \pm 1$. 
Hence, conditions $a$ or $b$ are equivalent to require that $\epsilon = -1$, which is equivalent, when ${\Bbb{R}} B \neq \emptyset$, to require that there exists $x \in {\Bbb{R}} B$ such that $f_D \times f_D \circ \varphi (x) <0$. Note that when $g(B) \geq 2$, the conditions given by proposition \ref{propconjcx+} are in fact necessary and sufficient for $c_{f_D}$ and $c_{-f_D}$ to be conjugated, but this will not be needed in what follows. \end{rem} \begin{cor} \label{corspin} Let $g \geq 1$ be an odd integer. Then there exists a smooth compact irreducible real algebraic curve $(B,c_B)$ of genus $g$ and empty real part together with a complex line bundle $L$ over $B$ satisfying $c_B^* (L) = L^*$, such that the real structures $c_{X}^+$ and $c_{X}^-$ on $X = P(L \oplus L_0)$ are conjugated. \end{cor} {\bf Proof :} Let us consider first the case $g=1$. Let $B$ be the elliptic curve ${\Bbb{C}} / {\Bbb{Z}}[i]$ equipped with the real structure $c_B (z) = \overline{z} + \frac{1}{2}$, so that ${\Bbb{R}} B = \emptyset$. Let $p_0 = 0$, $q_0 = \frac{1}{2}$, $p_1 = \frac{i}{2}$ and $q_1 =\frac{1}{2} + \frac{i}{2}$. $$\vcenter{\hbox{\input{realstr1.pstex_t}}}$$ Let $D = p_1 - p_0$ and denote by $L$ the associated complex line bundle over $B$. Then $c_B^* (L)=L = L^*$. Denote by $\varphi$ the involutive automorphism of $B$ defined by $\varphi (z) = z + \frac{1}{2}$. Then $\varphi \circ c_B = c_B \circ \varphi$ and $\varphi^* (L) = L$. We will prove that $\varphi$ satisfies condition $a$ of proposition \ref{propconjcx+}. For this, let $f$ be a meromorphic function on $B$ given by lemma \ref{lemmaL*}, such that $\overline{f \circ c_B} = f$ and $\mathop{\rm div}\nolimits (f) = D + c_B (D)$. Then $ f \circ \varphi = f$. Indeed, there exists a holomorphic section $s$ of the line bundle $L$ such that $\mathop{\rm div}\nolimits (s) = D$ and $s \otimes (s \circ \varphi) = f$. Thus $f \circ \varphi = (s \circ \varphi) \otimes s = s \otimes (s \circ \varphi) = f$. 
Now let $g$ be a meromorphic function on $B$ such that $\mathop{\rm div}\nolimits (g) = \varphi (D) - D = q_1 - p_1 - q_0 + p_0$ and $g \times \overline{g \circ c_B} = -1$. Such a function is given by lemma \ref{lemmaL} and \cite{GH}, proposition $2.2$, since $D$ belong to the nontrivial component of the real part of $(\mathop{\rm Jac}\nolimits (B) , c_B)$. Then $f \circ \varphi \times g \circ \varphi \times \overline{g \circ c_B \circ \varphi} = -f$, so that the condition $a$ of proposition \ref{propconjcx+} is satisfied. We deduce that the real structures $c_{X}^+$ and $c_{X}^-$ on $X = P(L \oplus L_0)$ defined by $f$ and $-f$ (see proposition \ref{propRL*}) are conjugated. Now, let us consider the case $g=2k+1$, $k \geq 1$. For $j \in \{ 0, \dots , 2k-1 \}$, denote by $\tilde{p}_j = \frac{j}{2k} i \in B$ and $\tilde{q}_j = \frac{1}{2} + \frac{j}{2k} i \in B$ (so that $p_1 = \tilde{p}_k$ and $q_1 = \tilde{q}_k$). Denote by $B_k$ the double covering of $B$ ramified over the $4k$ points $\tilde{p}_j$, $\tilde{q}_j$, $j \in \{0 , \dots, 2k-1 \}$. This covering can be chosen so that its characteristic class in $H^1 (B \setminus \{\tilde{p}_j,\tilde{q}_j \, | \, j \in \{0 , \dots, 2k-1 \} \} ; {\Bbb{Z}} / 2{\Bbb{Z}})$ is Poincar\'e dual to the sum of the $2k$ segments $\{ (0,t) \, | \, t \in ]\frac{2j}{2k} , \frac{2j+1}{2k}[, j \in \{0 , \dots, k-1 \} \}$ and $\{ (\frac{1}{2},t) \, | \, t \in ]\frac{2j}{2k} , \frac{2j+1}{2k}[, j \in \{0 , \dots, k-1 \} \}$. $$\vcenter{\hbox{\input{realstr3.pstex_t}}}$$ Denote by $\pi_k : B_k \to B$ the projection associated to the covering. The automorphism $\varphi $ of $B$ lifts to an automorphism $\varphi_k$ of $B_k$ such that $\varphi \circ \pi_k = \pi_k \circ \varphi_k$. Similarly, the real structure $c_B$ lifts to a real structure $c_{B_k}$ on $B_k$ such that $c_B \circ \pi_k = \pi_k \circ c_{B_k}$ and ${\Bbb{R}} B_k = \emptyset$. Denote by $L_k = \pi_k^* (L)$. 
This bundle satisfies $c_{B_k}^* (L_k) = L_k = L_k^* = \varphi_k^* (L_k)$. Finally, denote by $f_k = f \circ \pi_k$ and $g_k = g \circ \pi_k$. Then $f_k = \overline{f_k \circ c_{B_k}}$ and $g_k \times \overline{g_k \circ c_{B_k}} = -1$. Moreover, $\mathop{\rm div}\nolimits (f_k) =c_{B_k}^* (D_k) + D_k$, where $D_k = \pi_k^* (D) = 2p_1 - 2p_0$, and $\mathop{\rm div}\nolimits (g_k) = \varphi_k^* (D_k) - D_k$. We have, $f_k \circ \varphi_k \times g_k \circ \varphi_k \times \overline{g_k \circ c_{B_k} \circ \varphi_k} = -f_k$, so that the condition $a$ of proposition \ref{propconjcx+} is satisfied. We deduce that the real structure $c_{X_k}^+$ and $c_{X_k}^-$ on $X_k = P(L_k \oplus L_0)$ defined by $f_k$ and $-f_k$ (see proposition \ref{propRL*}) are conjugated. $\square$\\ {\bf Proof of proposition \ref{propconjcx+} :} Denote $D = \sum_{i=1}^k n_i p_i $, where $p_i \in B$ and $n_i \in {\Bbb{Z}}$, $i \in \{1, \dots , k\}$. We can assume that the set $\{ p_i \, | \, 1 \leq i \leq k \}$ is invariant under $\varphi$ (add some points with zero coefficients to $D$ if necessary). Denote by $U_0 = B \setminus \{ p_i \, | \, 1 \leq i \leq k \}$ and for every $i \in \{1, \dots , k\}$, choose some holomorphic chart $(U_{p_i} , \phi_{p_i})$ such that $U_{p_i} \cap U_{p_j} = \emptyset$ if $i \neq j$, $\varphi (U_{p_i}) = U_{\varphi(p_i)}$ and $\phi_{p_i} : U_{p_i} \to \Delta = \{ z \in {\Bbb{C}} \, | \, |z| < 1 \}$ is a biholomorphism. We require in addition that $\phi_{p_i} (p_i) = 0$ and $$\begin{array}{rcl} \phi_{\varphi(p_i)} \circ \varphi \circ \phi_{p_i}^{-1} : \Delta & \to & \Delta\\ x & \mapsto & \exp(\frac{2i\pi}{m_i}) x \text{ if $ p_i$ is a fixed point of order $m_i$ of $\varphi$.} \end{array}$$ (we put $m_i = 1$ if $\varphi (p_i) \neq p_i$. This atlas and these trivialisations are compatible with $D$ and the group $<\varphi>$. It always exists, see \cite{Nat}.) 
For every $i \in \{1, \dots , k\}$, denote by $\psi_i$ the morphism : $$\begin{array}{rcl} (U_{p_i} \setminus p_i) \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (x , (\phi_{p_i} (x)^{-n_i} z_1 : z_0)). \end{array}$$ The morphisms $\psi_i$ allow to glue together the trivialisations $U_{p_i} \times {\Bbb{C}} P^1$, $i \in \{0, \dots , k\}$, in order to define the ruled surface $X$. Now suppose we are in the case a. Let $g$ be the meromorphic function on $B$ such that $\mathop{\rm div}\nolimits (g) = \varphi (D) - D$ and $f_D \circ \varphi \times g \circ \varphi \times \overline{g \circ c_B \circ \varphi} = -f_D$. Consider the maps : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi (x), (g \circ \varphi (x) z_1 : z_0)), \end{array}$$ and for every $i \in \{1, \dots , k\}$, $$\begin{array}{rcl} U_{p_i} \times {\Bbb{C}} P^1& \to & U_{p_j} \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi (x), (g \circ \varphi (x) \phi_{p_i} (x)^{n_j - n_i} \exp(\frac{2i\pi }{m_i})z_1 : z_0)), \end{array}$$ where $p_j$ denotes the point $\varphi (p_i)$. These maps glue together to form an element $\Phi_g \in Aut (X)$ fibered over $\varphi$. The map $\Phi_g^{-1}$ is given by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi^{-1} (x), (z_1 : g(x) z_0)). \end{array}$$ And the map $c_X^-$ is given by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (c_B (x), (\overline{z_0} : -f_D \circ c_B (x) \overline{z_1})). 
\end{array}$$ Thus $\Phi_g^{-1} \circ c_X^- \circ \Phi_g$ is given in this trivialisation by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (c_B (x), (\overline{z_0} : -f_D \circ c_B \circ \varphi (x) \times \overline{g \circ \varphi (x)} \times g \circ c_B \circ \varphi (x) \overline{z_1})). \end{array}$$ Since $f_D \circ \varphi \times g \circ \varphi \times \overline{g \circ c_B \circ \varphi} = -f_D$, we conclude that $\Phi_g^{-1} \circ c_X^- \circ \Phi_g = c_X^+$. Suppose now we are in the case b. Let $h$ be the meromorphic function on $B$ such that $\mathop{\rm div}\nolimits (h) = \varphi (D) + D$ and $h \circ \varphi \times \overline{h \circ c_B \circ \varphi} = -f_D \times f_D \circ \varphi$. Consider then the maps : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi (x), (z_0 : h \circ \varphi (x) z_1)) \end{array}$$ and for all $i \in \{1, \dots , k\}$, $$\begin{array}{rcl} U_{p_i} \times {\Bbb{C}} P^1& \to & U_{p_j} \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi (x), (z_0 : h \circ \varphi (x) \phi_{p_i} (x)^{-n_i - n_j} \exp(-\frac{2i\pi }{m_i})z_1)), \end{array}$$ where $p_j$ denotes the point $\varphi (p_i)$. These maps glue together to form an element $\Phi_h \in Aut (X)$ fibered over $\varphi$. The map $\Phi_h^{-1}$ is given by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (\varphi^{-1} (x), (z_0 : h(x) z_1)). \end{array}$$ And the map $c_X^-$ is given by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (c_B (x), (\overline{z_0} : -f_D \circ c_B (x) \overline{z_1})). 
\end{array}$$ Thus $\Phi_h^{-1} \circ c_X^- \circ \Phi_h$ is given in this trivialisation by : $$\begin{array}{rcl} U_0 \times {\Bbb{C}} P^1 & \to & U_0 \times {\Bbb{C}} P^1\\ (x,(z_1 : z_0)) & \mapsto & (c_B (x), ( -f_D \circ c_B \circ \varphi (x) \overline{z_0} : \overline{h \circ \varphi (x)} \times h \circ c_B \circ \varphi (x) \overline{z_1})). \end{array}$$ Since $h \circ \varphi \times \overline{h \circ c_B \circ \varphi} = -f_D \times f_D \circ \varphi$, we conclude that $\Phi_h^{-1} \circ c_X^- \circ \Phi_h = c_X^+$. $\square$ \section{Deformation classes of real structures on ruled surfaces} \subsection{The real part of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$} \label{subsectionpartition} Remember the following well known result (see \cite{GH}, propositions 3.2 and 3.3 for instance) : \begin{prop} \label{propjac} Let $(B , c_B)$ be a smooth compact irreducible real algebraic curve. The Jacobian $\mathop{\rm Jac}\nolimits (B)$ of $B$ is equipped with the real structure $-c_B^*$. Then if ${\Bbb{R}} B \neq \emptyset$, the real part of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ has $2^{\mu ({\Bbb{R}} B) - 1}$ connected components, where $\mu ({\Bbb{R}} B)$ is the number of components of ${\Bbb{R}} B$. If ${\Bbb{R}} B = \emptyset$, the real part of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ is connected if $g(B)$ is even and consists of two connected components otherwise. $\square$ \end{prop} (Note that multiplication of $c_B^*$ by $-1$ does not change the topology of the real part of $\mathop{\rm Jac}\nolimits (B)$.)\\ Let $L$ be a complex line bundle over $B$ such that $c_B^* (L) = L^*$, that is an element of the real part of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$, where $\mathop{\rm Jac}\nolimits (B)$ is identified with the part of $\mathop{\rm Pic}\nolimits (B)$ of degree zero. Let $(D, f_D)$ be a couple given by lemma \ref{lemmaL*}. 
The function $f_D$ is real and of constant sign on every component of ${\Bbb{R}} B$, thus it induces a partition of ${\Bbb{R}} B$ in two elements ${\Bbb{R}} B \cap \overline{f_D^{-1} ({\Bbb{R}}_+^* )}$ and ${\Bbb{R}} B \cap \overline{f_D^{-1} ({\Bbb{R}}_-^*)}$. It follows from theorem \ref{theoremrealstruct} that this partition only depends on the bundle $L$ and not on the choice of $(D, f_D)$, since it corresponds to the projections on ${\Bbb{R}} B$ of the real parts of $(P (L \oplus L_0) , c_X^+)$ and $(P (L \oplus L_0) , c_X^-)$. For the same reason, this partition actually only depends on the connected component of the real part of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ and hence is an invariant associated to these components. Note that when ${\Bbb{R}} B \neq \emptyset$ has $\mu ({\Bbb{R}} B)$ components, the number of partitions of ${\Bbb{R}} B$ in two elements is $2^{\mu ({\Bbb{R}} B) - 1}$. \begin{lemma} \label{lemmapartition} When ${\Bbb{R}} B \neq \emptyset$, the partitions associated to the real components of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ establish a bijection between the set of these components and the set of partitions of ${\Bbb{R}} B$ in two elements. \end{lemma} {\bf Proof :} Let $L$ and $L'$ be two complex line bundles which belong to ${\Bbb{R}} \mathop{\rm Jac}\nolimits (B)$ and such that their associated partitions of ${\Bbb{R}} B$ are the same. We will prove that they belong to the same component of ${\Bbb{R}} \mathop{\rm Jac}\nolimits (B)$. The result follows, since the ``partition'' map is then injective and hence bijective for cardinality reasons. Let $D$ (resp. $D'$) be a divisor associated to $L$ (resp. $L'$). Let $f_D$ (resp. $f_{D'}$) be a non-zero meromorphic function on $B$ such that $\overline{f_D \circ c_B} = f_D$ (resp. $\overline{f_{D'} \circ c_B} = f_{D'}$) and $\mathop{\rm div}\nolimits (f_D) = D + c_B (D)$ (resp. $\mathop{\rm div}\nolimits (f_{D'}) = D' + c_B (D')$). 
It follows from lemma \ref{lemmaL*} that such meromorphic functions exist. Since the partitions of $L$ and $L'$ are the same, we can assume that $f_D$ and $f_{D'}$ have the same sign on every component of ${\Bbb{R}} B$ (replace $f_{D'}$ by $-f_{D'}$ otherwise). For every $t \in [0,1]$, let $g_t = (1-t)f_D + t f_{D'}$. Then $g_0 = f_D$, $g_1 = f_{D'}$ and for every $t \in [0,1]$, $\overline{g_t \circ c_B} = g_t$. Moreover, for every $t \in [0,1]$, $g_t$ is non-zero and of constant sign on each component of ${\Bbb{R}} B$. Thus every real zero and real pole of $g_t$ is of even order. This implies that there exists a continuous path $(D_t)_{t \in [0,1]}$ of divisors such that $D_0 = D$ and for every $t \in [0,1]$, $\mathop{\rm div}\nolimits (g_t) = D_t + c_B (D_t)$. In particular, $L$ and $L_1$ are in the same component of ${\Bbb{R}} \mathop{\rm Jac}\nolimits (B)$, where $L_1$ is the complex line bundle associated to $D_1$. It suffices then to prove that $L_1$ and $L'$ lie in the same component of ${\Bbb{R}} \mathop{\rm Jac}\nolimits (B)$. But $D_1 + c_B (D_1) = D' + c_B (D') = \mathop{\rm div}\nolimits (g_1)$. So the divisor $E=D_1 - D'$ satisfies $c_B (E) = -E$. Thus there exist $k \in {\Bbb{N}}$, $n_1, \dots , n_k \in {\Bbb{Z}}$ and $p^1, \dots , p^k \in B$ such that $E = \sum_{i=1}^k n_i (p^i - c_B (p^i))$. For every $i \in \{ 1, \dots , k \}$, choose a continuous path $(p^i_\tau)_{\tau \in [0,1]}$ such that $p^i_0 = p^i$ and $p^i_1 \in {\Bbb{R}} B$. For every $\tau \in [0,1]$, let $E_\tau = \sum_{i=1}^k n_i (p^i_\tau - c_B (p^i_\tau))$. Then $E_0 = E$, $E_1 = 0$ and for every $\tau \in [0,1]$, $c_B (E_\tau) = -E_\tau$. The path $F_\tau = D' + E_\tau$ is a continuous path of divisors such that $F_0 = D_1$, $F_1 = D'$ and for every $\tau \in [0,1]$, $F_\tau + c_B (F_\tau) = \mathop{\rm div}\nolimits (g_1)$. This implies that the bundles $L_1$ and $L'$ belong to the same component of ${\Bbb{R}} \mathop{\rm Jac}\nolimits (B)$, hence the result. 
$\square$ \subsection{The topological type of a real ruled surface} \label{subsectiontoptype} Remember that to every smooth compact irreducible real algebraic curve $(B,c_B)$ is associated a triple $(g, \mu , \epsilon)$, called the {\it topological type} of $(B,c_B)$, where $g$ is the genus of $B$, $\mu$ is the number of connected components of ${\Bbb{R}} B$ and $\epsilon = 1$ (resp. $\epsilon = 0$) if $B$ is dividing (resp. if $B$ is non-dividing). Two smooth compact irreducible real algebraic curves are in the same deformation class if and only if they have the same topological type (see \cite{Nat2}). Moreover, there exists a smooth compact irreducible real algebraic curve of topological type $(g, \mu , \epsilon)$ if and only if $\epsilon = 0$ and $0 \leq \mu \leq g$ or $\epsilon = 1$, $1 \leq \mu \leq g+1$ and $\mu = g+1 \mod (2)$. Except from the ellipsoid, that is ${\Bbb{C}} P^1 \times {\Bbb{C}} P^1$ equipped with the real structure $(x,y) \mapsto (\overline{y} , \overline{x})$, for every real structure $c_X$ on a ruled surface $p : X \to B$, there exists a real structure $c_B$ on the base $B$ such that $p \circ c_X = c_B \circ p$. In particular, the connected components of ${\Bbb{R}} X$ are tori or Klein bottles. Note also that in the case of ${\Bbb{C}} P^1 \times {\Bbb{C}} P^1$, the ruling given by the projection $p$ is not unique, whereas it is for any other ruled surface. Since real structures on rational ruled surfaces are well known (see theorem \ref{theodefrat}), we will assume from now on that {\bf the genus of the base is non-zero}. So let $(X, c_X)$ be a real non-rational ruled surface of base $(B,c_B)$. The {\it topological type} of $(X, c_X)$ is by definition the quintuple $(t,k,g, \mu , \epsilon)$, where $(g, \mu , \epsilon)$ is the topological type of $(B,c_B)$, $k$ is the number of Klein bottles of ${\Bbb{R}} X$ and $t$ the number of tori of ${\Bbb{R}} X$. Obviously $t,k \geq 0$ and $t+k \leq \mu$. 
A quintuple $(t,k,g, \mu ,\epsilon)$ is called {\it allowable} if $t,k \geq 0$, $t+k \leq \mu$, $g \geq 1$ and either $\epsilon = 0$ and $0 \leq \mu \leq g$ or $\epsilon = 1$, $1 \leq \mu \leq g+1$ and $\mu = g+1 \mod (2)$. \begin{prop} \label{proposexistence} There exists a real ruled surface of topological type $(t,k,g, \mu ,\epsilon)$ if and only if the quintuple $(t,k,g, \mu ,\epsilon)$ is allowable. \end{prop} {\bf Proof :} If $(t,k,g, \mu ,\epsilon)$ is the topological type of a real ruled surface, then the quintuple $(t,k,g, \mu ,\epsilon)$ is clearly allowable. Now, let $(t,k,g, \mu ,\epsilon)$ be an allowable quintuple. It is well known (see \cite{Nat2} for instance) that there exists a smooth compact connected real algebraic curve $(B,c_B)$ whose topological type is $(g, \mu , \epsilon)$. If $\mu = 0$, the ruled surface $(B \times {\Bbb{C}} P^1 , c_B \times conj)$, where $conj$ is a real structure on ${\Bbb{C}} P^1$, is of topological type $(0,0,g,0,0)$. If $\mu \neq 0$, choose a partition ${\cal P}$ of ${\Bbb{R}} B$ in two elements such that one of them contains $t+k$ components of ${\Bbb{R}} B$ and the other one $\mu - t -k$. It follows from lemma \ref{lemmapartition} that there exists a line bundle $L$ over $B$ such that $c_B^* (L) = L^*$ and the partition associated to $L$ is ${\cal P}$. Thus, it follows from proposition \ref{propRL*} that there exists a real structure $c_X^+$ on the ruled surface $X=P(L \oplus L_0)$ such that the real part of $X$ consists of $t+k$ tori. Choose $k$ of these tori and make an elementary transformation on each of them, that is the composition of the blowing up at one point and the blowing down of the strict transform of the fiber passing through this point. The result is still a real ruled surface of base $(B,c_B)$ and the real part of this ruled surface consists of $t$ tori and $k$ Klein bottles, hence the result. 
$\square$ \subsection{The deformation theorem} \label{subsectiondeformation} Let $\Delta \subset {\Bbb{C}}$ be the Poincar\'e's disk equipped with the complex conjugation $conj$. A {\it real deformation} of surfaces is a proper holomorphic submersion $\pi : Y \to \Delta$ where $(Y,c_Y)$ is a real analytic manifold of dimension $3$ and $\pi$ satisfies $\pi \circ c_Y = conj \circ \pi$. When $t \in ]-1 , 1 [ \subset \Delta$, the fibers $Y_t = \pi^{-1} (t)$ are invariant under $c_Y$ and are then compact real analytic surfaces. Two real analytic surfaces $X'$ and $X''$ are said to be {\it in the same deformation class} if there exists a chain $X'=X_0, \dots , X_k=X''$ of compact real analytic surfaces such that for every $i \in \{ 0 , \dots , k-1 \}$, the surfaces $X_i$ and $X_{i+1}$ are isomorphic to some real fibers of a real deformation. \begin{prop} The topological type of a real non-rational ruled surface is invariant under deformation. \end{prop} {\bf Proof :} Let $(X,c_X) \to (B,c_B)$ be a real ruled surface of topological type $(t,k,g,\mu , \epsilon)$ with $g \geq 1$. Let $\pi : Y \to \Delta$ be a real deformation of surfaces such that $(Y_0 , c_Y|_{Y_0}) = (X,c_X)$. Then every fiber of $\pi$ is a ruled surface with base of genus $g$ (see \cite{BPV} for instance). Now since the deformation is trivial from the differentiable point of view, the topology of the real part and the topology of the involution on the base are invariant under deformation, hence the result. $\square$\\ For the sake of completeness, let us recall the following well known result, see \cite{KhDg2} or \cite{Kh} : \begin{theo} \label{theodefrat} There are four deformation classes of real structures on rational ruled surfaces, one for which the real part is a torus, one for which the real part is a sphere and two for which the real part is empty. These latter two have non-homeomorphic quotients. 
$\square$ \end{theo} Remember that the real structure for which the real part is a sphere is very special. It only exists on ${\Bbb{C}} P^1 \times {\Bbb{C}} P^1$ and is fibered over no real structure on the base ${\Bbb{C}} P^1$. This comes from the existence of two rulings on ${\Bbb{C}} P^1 \times {\Bbb{C}} P^1$ and the involution $(x,y) \mapsto (y,x)$ reversing them. This is the main reason why we do not include the case of rational ruled surfaces in theorem \ref{theoremdeformation}. \begin{theo} \label{theoremdeformation} Two real non-rational ruled surfaces are in the same deformation class if and only if they have the same topological type $(t,k,g, \mu ,\epsilon)$, except when $\mu=0$. There are two deformation classes of real non-rational ruled surfaces of topological type $(0,0,g,0,0)$. For one such class of ruled surfaces $(X, c_X)$, the quotient $X' = X/c_X$ is spin, for the other one it is not. \end{theo} Using the terminology introduced in \cite{KhDg2}, this means that real ruled surfaces are quasi-simple. The definition of the topological type of a real ruled surface is given in \S \ref{subsectiontoptype}. Note that every allowable quintuple is the topological type of a real ruled surface (see proposition \ref{proposexistence}). \begin{rem} If $X = P(E)$ is a real non-rational ruled surface of topological type $(t,k,g, \mu ,\epsilon)$ with $t+k < \mu$ and $k \neq 0$, then $X$ is not decomposable, whereas any other topological type is realized by a decomposable real ruled surface. Remember also that the deformation classes of complex ruled surfaces are described by the genus of the base and by whether the surface is spin or not. Then, real structures for which $k$ is even only exist on spin ruled surfaces and real structures for which $k$ is odd only exist on non-spin ruled surfaces. \end{rem} Let us sketch the proof of theorem \ref{theoremdeformation} : Let $(X , c_X)$ be a real ruled and non-decomposable surface with base $(B, c_B)$. 
If $X$ admits a real holomorphic section, then we will prove that $(X , c_X)$ is in the same deformation class that a real decomposable ruled surface (see proposition \ref{proprealholsect}). If $X$ does not admit a real holomorphic section, then we will prove that there exists a complex line bundle $L \in \mathop{\rm Pic}\nolimits (B)$ satisfying $c_B^* (L) = L^*$, such that $(X , c_X)$ is in the same deformation class that the surface obtained from $(P(L \oplus L_0), c_X^\pm)$ after at most one elementary transformation on each component of its real part (see proposition \ref{propelemtrans}). After these two steps, it is possible to reduce the study of deformation classes of real structures on ruled surfaces to the study of deformation classes of real structures on decomposable ruled surfaces. It suffices then to check theorem \ref{theoremdeformation} for decomposable real ruled surfaces. \begin{prop} \label{proprealholsect} Let $(X , c_X)$ be a real ruled surface of base $(B, c_B)$ which admits a real holomorphic section. Then there exists a real deformation $\pi : Y \to \Delta$ such that for every $t \in {\Bbb{R}}^* \cap \Delta$, $(Y_t , c_Y|_{Y_t})$ is isomorphic to $(X , c_X)$ and such that $(Y_0 , c_Y|_{Y_0})$ is isomorphic to $(P(L \oplus L_0), c_L \oplus c_{L_0})$ where $L \in \mathop{\rm Pic}\nolimits (B)$ and $c_L$ is a real structure on $L$ which lifts $c_B$. \end{prop} The definition of a real deformation has been given in the beginning of \S \ref{subsectiondeformation}.\\ {\bf Proof :} Let $E$ be a rank two complex vector bundle over $B$ such that $X=P(E)$. The real holomorphic section of $X$ is given by a complex sub-line bundle $M$ of $E$. Denote by $N$ the quotient line bundle $E/M$ so that the bundle $E$ is an extension of $N$ by $M$. 
Let $\mu \in H^1 (B , M \otimes N^*)$ be the extension class of this bundle and let $\mu^1$ be a $1$-cocycle with coefficients in the sheaf ${\cal O}_B ( M \otimes N^*)$, defined on a covering ${\cal U}=(U_i)_{i \in I}$ of $B$, realising the cohomology class $\mu \in H^1 (B , M \otimes N^*)$. The bundle $E$ is then obtained as the gluing of the bundles $(M \oplus N)|_{U_i}$ by the gluing maps : \begin{eqnarray*} (M \oplus N)|_{U_i \cap U_j} & \to & (M \oplus N)|_{U_j \cap U_i} \\ (m,n) & \mapsto & \left[ \begin{array}{cc} 1&\mu_{ij}\\ 0&1 \end{array} \right] \left( \begin{array}{c} m\\ n \end{array} \right) =(m+\mu_{ij} n , n). \end{eqnarray*} We can assume that for every open set $U_i$ of ${\cal U}$, there exists $\overline{\i} \in I$ such that $U_{\overline{\i}} = c_B (U_i)$ (add these open sets to ${\cal U}$ if not). We can also assume that there exists $J \subset I$ such that the open sets $(U_i)_{i \in J}$ cover $B$ and such that the real structure $c_X : X|_{U_i} \to X|_{U_{\overline{\i}}}$ lifts to an antiholomorphic map $E|_{U_i} \to E|_{U_{\overline{\i}}}$ (take a refinement of ${\cal U}$ if not). Since by hypothesis the section of $X$ associated to $M$ is real, these antiholomorphic maps are of the form : \begin{eqnarray*} (M \oplus N)|_{U_i} & \to & (M \oplus N)|_{U_{\overline{\i}}} \\ (x,(m,n)) & \mapsto & (c_B (x) , \left[ \begin{array}{cc} a_i&b_i\\ 0&d_i \end{array} \right] \left( \begin{array}{c} m\\ n \end{array} \right) ), \end{eqnarray*} where $a_i$ (resp. $b_i$, resp. $d_i$) is an antiholomorphic morphism $M|_{U_i} \to M|_{U_{\overline{\i}}}$ (resp. $N|_{U_i} \to M|_{U_{\overline{\i}}}$, resp. $N|_{U_i} \to N|_{U_{\overline{\i}}}$) which lifts $c_B$. Since $c_X$ is an involution, we have for every $i \in J$, $a_{\overline{\i}} \circ a_i = d_{\overline{\i}} \circ d_i \in {\cal O}_B^*|_{U_i}$ and $a_{\overline{\i}} \circ b_i + b_{\overline{\i}} \circ d_i =0 \in {\cal O}_B (N^* \otimes M)|_{U_i}$. 
Moreover, for $i,j \in J$ such that $U_i \cap U_j \neq \emptyset$, the gluing conditions are the following : $a_i = \lambda a_j$, $d_i = \lambda d_j$ and $b_i + \mu_{\overline{\i} \overline{\j}} \circ d_i = \lambda (a_j \circ \mu_{ij} + b_j)$ where $\lambda \in {\cal O}_B^*|_{U_i \cap U_j}$. Now let $Y$ be the complex analytic manifold of dimension three defined as the gluing of the charts ${\Bbb{C}} \times P(M \oplus N)|_{U_i}$, $i \in J$, with change of charts given by the maps : \begin{eqnarray*} {\Bbb{C}} \times P(M \oplus N)|_{U_i} & \to & {\Bbb{C}} \times P(M \oplus N)|_{U_j} \\ (t,x,(m:n)) & \mapsto & (t,x,\left[ \begin{array}{cc} 1&t \mu_{ij}\\ 0&1 \end{array} \right] \left( \begin{array}{c} m\\ n \end{array} \right)) =(t,x, (m+t \mu_{ij} n : n)). \end{eqnarray*} The projection on the first coordinate defines a holomorphic submersion $\pi : Y \to {\Bbb{C}}$. The surface $\pi^{-1} (0)$ is isomorphic to the decomposable ruled surface $P(M \oplus N)$, whereas, as soon as $t \in {\Bbb{C}}^*$, the fiber $Y_t = \pi^{-1} (t)$ is isomorphic to the ruled surface $X=P(E)$. Such an isomorphism $\psi_t : Y_t \to X$ is given in the charts $P(M \oplus N)|_{U_i}$, $i \in J$, by : \begin{eqnarray*} P(M \oplus N)|_{U_i} & \to & P(M \oplus N)|_{U_i} \\ (x,(m:n)) & \mapsto & (x, (m : t n)). \end{eqnarray*} Denote by $c_Y$ the real structure on $Y$ defined on charts ${\Bbb{C}} \times P(M \oplus N)|_{U_i}$ by : \begin{eqnarray*} {\Bbb{C}} \times P(M \oplus N)|_{U_i} & \to & {\Bbb{C}} \times P(M \oplus N)|_{U_{\overline{\i}}} \\ (t,x,(m:n)) & \mapsto & (\overline{t},c_B (x),\left[ \begin{array}{cc} a_i&\overline{t} b_i\\ 0&d_i \end{array} \right] \left( \begin{array}{c} m\\ n \end{array} \right)). \end{eqnarray*} This real structure satisfies $\pi \circ c_Y = conj \circ \pi$ where $conj$ is the complex conjugation on ${\Bbb{C}}$. Moreover, when $t \in {\Bbb{R}}^*$, $\psi_t$ gives an isomorphism between the real ruled surfaces $(Y_t , c_Y|_{Y_t})$ and $(X , c_X)$. 
Hence, the restriction of $\pi : Y \to {\Bbb{C}}$ over $\Delta \subset {\Bbb{C}}$ is a real deformation which satisfies proposition \ref{proprealholsect}. $\square$ \begin{prop} \label{propelemtrans} Let $(X , c_X)$ be a real ruled surface of base $(B, c_B)$, which does not admit any real holomorphic section. Then, there exists $L \in \mathop{\rm Pic}\nolimits (B)$ satisfying $c_B^* (L)=L^*$ and a ruled surface $(X' , c_{X'})$ obtained from $(P(L \oplus L_0) , c_X^\pm)$ after at most one elementary transformation on each of its real components, such that $(X , c_X)$ and $(X' , c_{X'})$ are in the same deformation class. \end{prop} Remember that an {\it elementary transformation} on the ruled surface $X$ is by definition the composition of a blowing up of $X$ at one point and the blowing down of the strict transform of the fiber passing through this point. \begin{lemma} \label{lemmaelemtransfsect} Let $X=P(L \oplus L_0)$ be a decomposable ruled surface of base $B$. Let $s : B \to X$ be the section defined by $L$ and $D$ be a divisor associated to $L$. Then the ruled surface obtained from $X$ after an elementary transformation at the point $s (x)$, $x \in B$, is the surface $P(L(x) \oplus L_0)$ where $L(x)$ is the complex line bundle associated to the divisor $D+x$. $\square$ \end{lemma} \begin{lemma} \label{lemmaveryample} Let $(X , c_X)$ be a real ruled surface of base $(B, c_B)$, which does not admit any real holomorphic section. Then $X$ has a very ample holomorphic section $S$ which is transversal to its image under $c_X$. \end{lemma} {\bf Proof :} Let us first construct a very ample section on $X$. Let $E$ be a rank two complex vector bundle over $B$ such that $X=P(E)$, and let $A$ be an ample line bundle over $B$. Then by definition, for sufficiently large $n$, the bundle $E^* \otimes A^n$ is generated by its global sections. Choosing $N$ such global sections, it provides a surjective morphism of bundles $B \times {\Bbb{C}}^N \to E^* \otimes A^n$. 
This induces an injective morphism between the dual bundles $E \otimes (A^*)^n \to B \times {\Bbb{C}}^N$ and thus an embedding $X \to B \times {\Bbb{C}} P^{N-1}$. Fixing an embedding $B \to {\Bbb{C}} P^3$, we deduce an embedding $X \to {\Bbb{C}} P^3 \times {\Bbb{C}} P^{N-1}$. Finally, combining this with the Segre embedding, we obtain an embedding $X \to {\Bbb{C}} P^{4N-1}$ associated to a very ample linear system of sections on $X$. Now, let us prove that in this linear system, there exists a smooth section $S$ transversal to $c_X (S)$. From Bertini's theorem (see \cite{Hart}, theorem $8.18$) there exists, in this linear system, a smooth section $S$ associated to a hyperplane $H$ of ${\Bbb{C}} P^{4N-1}$ transversal to $X$. By hypothesis, $S$ cannot be real, so that the intersection $c_X (S) \cap S$ consists of a finite number of points. We will prove that after a small perturbation of $H$, this intersection can be assumed transversal. Indeed, let $x \in c_X (S) \cap S$. If $x \in {\Bbb{R}} X$, the intersection of $H$ with $T_x X$ is a line, which is the tangent of $S$ at $x$. The section $S$ is transverse to $c_X (S)$ at $x$ if and only if this line is not fixed by the differential $d_x c_X$. Since the fixed point set of this involution is of half dimension, the intersection of $S$ and $c_X (S)$ at $x$ can be made transversal after a small perturbation of $H$, keeping the intersection point $x$. Now, if $x \notin {\Bbb{R}} X$, then since the section $S$ is smooth, the points $x$ and $c_X (x)$ belong to two different fibers of $X$ and in particular to non-real ones. Suppose that the line $D_x \subset {\Bbb{C}} P^{4N-1}$ joining them is transversal to both the planes $T_x X$ and $T_{c_X (x)} X$. Then there exists a pencil of hyperplanes of ${\Bbb{C}} P^{4N-1}$ containing $H$ and parametrised both by the lines of $T_x X \subset {\Bbb{C}} P^{4N-1}$ and the lines of $T_{c_X (x)} X \subset {\Bbb{C}} P^{4N-1}$. 
This means that each line of $T_{x} X$ passing through $x$, and similarly each line of $T_{c_X (x)} X$ passing through $c_X (x)$, is contained in one and only one hyperplane of this pencil. Also, this pencil contains no other hyperplane. $$\vcenter{\hbox{\input{realstr2.pstex_t}}}$$ This pencil thus provides us with a holomorphic identification between the projective lines $P(T_{x} X)$ and $P(T_{c_X (x)} X)$. Under this identification, the differential $d_x c_X$ reads as an anti-holomorphic involution of $T_{x} X$ and once more, the section $S$ is transversal to $c_X (S)$ at $x$ if and only if its tangent line is not fixed by this involution $d_x c_X$. This can always be guaranteed after a small perturbation of $H$. Since small perturbations do not perturb the transversality of transversal points, this process strictly increases the number of transversal points between $S$ and $c_X (S)$ and so gives the result after a finite number of steps. It thus only remains to prove that the line $D_x$ can indeed be assumed transverse to both the planes $T_x X$ and $T_{c_X (x)} X$, after a small perturbation of $H$ if necessary. For this, note that the embedding $B \to {\Bbb{C}} P^3$ can be chosen real. The set of points of $B$ whose tangent is not a real line of ${\Bbb{C}} P^3$ is then a dense open subset $U \subset B$ (for the usual topology, not the Zariski one), invariant under $c_B$. The set $U$ is in fact the complement of the real part of the dual curve. Let $x \in X$ be a point such that $y=p(x) \in U$ where $p$ is the projection $X \to B$. Since the line joining $y$ to $c_B (y)$ is real, it is not tangent to $B$ at $y$ and $c_B (y)$. Let $H_1$ be a hyperplane of ${\Bbb{C}} P^3$ passing through $y$ and $c_B (y)$ and transverse to $B$. Then $H_1 \times {\Bbb{C}} P^{N-1}$ is transverse to $X$ in ${\Bbb{C}} P^3 \times {\Bbb{C}} P^{N-1}$. Let $H_2$ be a hyperplane of ${\Bbb{C}} P^{N-1}$ such that ${\Bbb{C}} P^3 \times H_2$ contains neither $x$ nor $c_X (x)$. 
Then the divisor $(H_1 \times {\Bbb{C}} P^{N-1}) + ({\Bbb{C}} P^3 \times H_2)$ is associated to a hyperplane $H_0$ of ${\Bbb{C}} P^{4N-1}$, which contains both $x$ and $c_X (x)$ and which is transverse to $X$ at these points. Then $H_0$ contains the line $D_x$ and since by construction it also contains the fibers through $x$ and $c_X (x)$, its transversality with $X$ at $x$ and $c_X (x)$ implies that of $D_x$. Hence for any point $x$ belonging to the open set $p^{-1} (U)$ of $X$, the line $D_x$ is transverse to $X$ at $x$ and $c_X (x)$. Since it is not hard to observe that any non-real intersection point of $S$ and $c_X (S)$ can be moved to $p^{-1} (U)$ after a small perturbation of $H$, this completes the proof of lemma \ref{lemmaveryample}. $\square$ \\ {\bf Proof of proposition \ref{propelemtrans} :} Let $S \subset X$ be a very ample holomorphic smooth section, transverse to its image under $c_X$. Such a section is given by lemma \ref{lemmaveryample}. The set $c_X (S) \cap S$ is finite and invariant under $c_X$. Denote by $X_1$ the ruled surface obtained from $X$ after an elementary transformation on every point of this set. Since it is invariant under $c_X$, the real structure $c_X$ induces a real structure $c_{X_1}$ on $X_1$. Moreover, the strict transform $S_1$ of $S$ satisfies $c_{X_1} (S_1) \cap S_1 = \emptyset$. Thus $X_1$ is a decomposable ruled surface, and $c_{X_1}$ exchanges the two holomorphic sections $S_1$ and $c_{X_1} (S_1)$. The inverse of an elementary transformation is still an elementary transformation, so we deduce that $(X, c_X)$ is obtained from the real decomposable ruled surface $(X_1 , c_{X_1})$ after performing elementary transformations on points $\{ x_1 , \dots , x_k , y_1 , \dots , y_l , \overline{y}_1 , \dots , \overline{y}_l \}$ where $c_{X_1} (x_i) = x_i$ and $c_{X_1} (y_j) =\overline{y}_j$. 
Note that all the points $\{ x_1 , \dots , x_k , y_1 , \dots , y_l , \overline{y}_1 , \dots , \overline{y}_l \}$ belong to different fibers of $X_1$. It remains to see that this number of points can be reduced to one at most for each component of ${\Bbb{R}} X_1$, changing the decomposable real ruled surface $X_1$ if necessary. For every $j \in \{1 , \dots , l\}$, choose a piecewise analytic path $y_j (t)$, $t \in [0,1]$, such that $y_j (0) = y_j$, $y_j (1) \in S_1$ and $p(y_j (t))$ is constant, which means that $y_j (t)$ stays in a same fiber of $X_1$. Let $\overline{y}_j (t) = c_{X_1} (y_j (t))$ and denote by $X_2$ the ruled surface obtained from $X_1$ after elementary transformations in the points $y_1 (1) , \dots , y_l (1) , \overline{y}_1 (1) , \dots , \overline{y}_l (1)$. The real structure $c_{X_1}$ induces a real structure $c_{X_2}$ on $X_2$. The surface $(X_2 , c_{X_2})$ is in the same deformation class that $(X_1 , c_{X_1})$. Moreover, $X_2$ is also a decomposable ruled surface. Indeed, the strict transform $S_2$ of $S_1$ is a holomorphic section of $X_2$ satisfying $c_{X_2} (S_2) \cap S_2 = \emptyset$. Thus $(X, c_X)$ is in the same deformation class that the surface obtained from the real decomposable ruled surface $(X_2 , c_{X_2})$ after performing elementary transformations on the strict transforms of the points $x_1 , \dots , x_k \in {\Bbb{R}} X_1$, still denoted by $x_1 , \dots , x_k \in {\Bbb{R}} X_2$. Now for each pair of points $x_1 , x_2$ lying in a same connected component of ${\Bbb{R}} X_2$, we can make the elementary transformation on the point $x_2$. Then, the image of the fiber passing through $x_2$ is a real point $x'_2$ in the new surface $X'_2$ obtained. 
So we can choose an analytic path from $x_1$ to $x'_2$ in the real part of $X'_2$ and we deduce that the surface obtained from $X_2$ after making the elementary transformations on the points $x_1 , x_2$ is in the same deformation class that the one obtained from $X'_2$ after an elementary transformation on $x'_2$, which is $X_2$ itself. Hence each pair of points lying in a same connected component of ${\Bbb{R}} X_2$ can be removed and so $(X, c_X)$ is in the same deformation class that the surface obtained from the real decomposable ruled surface $(X_2 , c_{X_2})$ after performing at most one elementary transformation on each of its real components. Since $c_{X_2}$ exchanges two disjoint holomorphic sections of $X_2$, it follows from theorem \ref{theoremrealstruct} that $(X_2 ,c_{X_2})$ is of the form $(P(L \oplus L_0) , c_X^+)$ where $L \in \mathop{\rm Pic}\nolimits (B)$ and $c_B^* (L) = L^*$. $\square$ \begin{lemma} \label{lemmespin} Let $g \geq 1$ be an odd integer and $(B,c_B)$ be a smooth compact irreducible real algebraic curve of genus $g$ and empty real part. Let $L$ be a complex line bundle over $B$ satisfying $c_B^* (L) = L^*$. Then the real ruled surfaces $(P(L \oplus L_0), c_{X}^+)$ and $(P(L \oplus L_0), c_{X}^-)$ are in the same deformation class. \end{lemma} (In lemma \ref{lemmespin}, the real structures $c_{X}^+$ and $c_{X}^-$ on $X= P(L \oplus L_0)$ are those given by proposition \ref{propRL*}.)\\ {\bf Proof :} Without changing the deformation class of $X= P(L \oplus L_0)$, we can assume that the base of this surface is the real algebraic curve $(B,c_B)$ given by corollary \ref{corspin}. Then, if $L$ belongs to the same real component of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ that the bundle given by corollary \ref{corspin}, we can assume, without changing the deformation class of $X= P(L \oplus L_0)$, that $L$ is exactly this bundle. In that case, the result comes from corollary \ref{corspin}. 
Let $X= P(L \oplus L_0)$ be the ruled surface given by corollary \ref{corspin}, and $\Phi : X \to X$ be the automorphism conjugating $c_{X}^+$ and $c_{X}^-$. Let $x_1$ be a point on the section of $X$ associated to $L$ and $y_1 = c_{X}^+ (x_1) = c_{X}^- (x_1)$. Let $x_2 = \Phi (x_1)$ and $y_2 = \Phi (y_1) = c_{X}^+ (x_2) = c_{X}^- (x_2)$. Denote by $Y_1$ (resp. $Y_2$) the ruled surface obtained from $X$ after one elementary transformation on the points $x_1$ and $y_1$ (resp. $x_2$ and $y_2$). Then the real structures $c_{X}^+$ and $c_{X}^-$ lift to the real structures $c_{Y_1}^\pm$ (resp. $c_{Y_2}^\pm$) on $Y_1$ (resp. $Y_2$), and $\Phi$ lifts to a biholomorphism $\Psi : Y_1 \to Y_2$ such that $c_{Y_1}^+ = \Psi^{-1} \circ c_{Y_2}^- \circ \Psi$ and $c_{Y_1}^- = \Psi^{-1} \circ c_{Y_2}^+ \circ \Psi$. But the real ruled surface $(Y_1 , c_{Y_1}^-)$ is in the same deformation class that $(Y_2 , c_{Y_2}^-)$. Indeed, it suffices to choose an analytic path $x_t$ linking $x_1$ to $x_2$ in the section of $X$ associated to $L$ and to consider the surfaces $(Y_t ,c_{Y_t}^-)$ obtained from $(X, c_{X}^-)$ after an elementary transformation on the points $x_t$ and $c_{X}^- (x_t)$. Hence the real ruled surfaces $(Y_1 , c_{Y_1}^-)$ and $(Y_1 , c_{Y_1}^+)$ are in the same deformation class. To conclude, it remains to see that they do not come from the same connected component of $(\mathop{\rm Jac}\nolimits (B) , -c_B^*)$ that $(X, c_{X}^\pm)$. This follows from the fact that the quotients $Y_1 / c_{Y_1}^\pm$ and $X / c_{X}^\pm$ are not homeomorphic. Indeed, these two quotients are sphere bundles over the non-orientable surface $B' = B / c_B$. But $Y_1 / c_{Y_1}^\pm$ is obtained from $X / c_{X}^\pm$ after one elementary transformation in one point. Thus one of these two quotients is spin, and one is not. Hence the result. 
$\square$ \\ {\bf Proof of theorem \ref{theoremdeformation} :} Let $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ be two real non-rational ruled surfaces of bases $(B_1 ,c_{B_1})$ and $(B_2 ,c_{B_2})$ respectively, which have the same topological type $(t,k,g,\mu , \epsilon)$. We have to prove that they are in the same deformation class, as soon as $\mu \neq 0$. Let us first consider the case of decomposable ruled surfaces, that is let us assume that $X_1$ and $X_2$ are decomposable. If $t+k < \mu$, it follows from theorem \ref{theoremrealstruct} that $X_1 = P(L_1 \oplus L_0)$ (resp. $X_2 = P(L_2 \oplus L_0)$), where $L_1 \in \mathop{\rm Pic}\nolimits (B_1)$ (resp. $L_2 \in \mathop{\rm Pic}\nolimits (B_2)$) and $c_{B_1}^* (L_1) = L_1^*$ (resp. $c_{B_2}^* (L_2) = L_2^*$). Moreover, it follows from proposition \ref{propRL*} that in this case $k=0$. The partition ${\cal P}_1$ (resp. ${\cal P}_2$) in two elements of ${\Bbb{R}} B_1$ (resp. ${\Bbb{R}} B_2$) associated to $L_1$ (resp. $L_2$) consists of one element containing $t$ components of ${\Bbb{R}} B_1$ (resp. ${\Bbb{R}} B_2$) and one element containing $\mu - t$ components of ${\Bbb{R}} B_1$ (resp. ${\Bbb{R}} B_2$) (see \S \ref{subsectionpartition} for the definition of the partition). Since $(B_1 ,c_{B_1})$ and $(B_2 ,c_{B_2})$ have the same topological type $(g,\mu , \epsilon)$, there exists a piecewise analytic path of smooth real algebraic curves connecting them (see \cite{Nat2}). Moreover, this path can be chosen such that the $t$ components of ${\Bbb{R}} B_2$, which form an element of the partition ${\cal P}_2$, deform into the $t$ components of ${\Bbb{R}} B_1$ which form an element of the partition ${\cal P}_1$. This follows from the presentation in \cite{Nat2} of a real algebraic curve as the gluing of a Riemann surface with boundary with its conjugate, the gluing maps being either identity or antipodal. 
Thus $(X_2 ,c_{X_2})$ is in the same deformation class that a ruled surface $(\widetilde{X}_2 ,c_{\widetilde{X}_2})$ of base $(B_1 ,c_{B_1})$. Moreover, $\widetilde{X}_2 = P(\widetilde{L}_2 \oplus L_0)$ where $\widetilde{L}_2 \in \mathop{\rm Pic}\nolimits (B_1)$, $c_{B_1}^* (\widetilde{L}_2) = \widetilde{L}_2^*$ and the partitions associated to $\widetilde{L}_2$ and $L_1$ are the same. It follows from lemma \ref{lemmapartition} that $\widetilde{L}_2$ and $L_1$ are in the same component of the real part of $(\mathop{\rm Jac}\nolimits (B_1) , -c_{B_1}^*)$ and hence the surfaces $(\widetilde{X}_2 , c_{\widetilde{X}_2})$ and $(X_1 ,c_{X_1})$ are in the same deformation class. If $t+k = \mu$, it follows from theorem \ref{theoremrealstruct} that $X_1 = P(L_1 \oplus L_0)$ (resp. $X_2 = P(L_2 \oplus L_0)$), where $L_1 \in \mathop{\rm Pic}\nolimits (B_1)$ (resp. $L_2 \in \mathop{\rm Pic}\nolimits (B_2)$) and either $c_{B_1}^* (L_1) = L_1^*$ (resp. $c_{B_2}^* (L_2) = L_2^*$), or $c_{B_1}^* (L_1) = L_1$ (resp. $c_{B_2}^* (L_2) = L_2$). In the first case, $L_1$ (resp. $L_2$) is in the same component of the real part of $(\mathop{\rm Jac}\nolimits (B_1) , -c_{B_1}^*)$ (resp. $(\mathop{\rm Jac}\nolimits (B_2) , -c_{B_2}^*)$) that $L_0$, since $t+k = \mu$. Thus $(X_1 ,c_{X_1})$ (resp. $(X_2 ,c_{X_2})$) is in the same deformation class that $(B_1 \times {\Bbb{C}} P^1 , c_X^\pm)$ (resp. $(B_2 \times {\Bbb{C}} P^1 , c_X^\pm)$). Moreover, when $\mu \neq 0$, only one of the two real structures $c_X^\pm$, say $c_X^+$, satisfies $t+k = \mu$. In the second case, denote by $D_+ - D_-$ a divisor associated to $L_1$, where $D_+$, $D_-$ are positive divisors and invariant under $c_{B_1}$. Then $X_1 = P(L_{D_+} \oplus L_{D_-})$ and $c_{X_1} = c_{L_{D_+}} \oplus c_{L_{D_-}}$. 
Thus, it follows from lemma \ref{lemmaelemtransfsect} that $(X_1 ,c_{X_1})$ is obtained from $(B_1 \times {\Bbb{C}} P^1 , c_{L_0} \oplus c_{L_0})$ after performing elementary transformations on the points of the section associated to $L_{D_+}$ (resp. $L_{D_-}$) over the locus of $D_+ \in B_1$ (resp. $D_- \in B_1$). Without changing the deformation class of the surface, we can assume that the elementary transformations are only done on real points of $(B_1 \times {\Bbb{C}} P^1 , c_{L_0} \oplus c_{L_0})$ with at most one on each of its real components. Indeed, the extra real points can be removed as in proposition \ref{propelemtrans} and every couple of conjugated imaginary points can be moved to real points following a standard deformation : embed the disk $(\Delta , conj)$ in a real section of $X$, and for every $t \in \Delta$, denote by $Y_t$ the surface obtained from $X$ after an elementary transformation on the points $t$ and $-t$ in $\Delta$ (we still denote by $\Delta$ its image in $X$ by the chosen embedding). The dimension $3$ complex manifold $Y$ obtained gets two real structures, one which lifts $conj$ in $\Delta$ and one which lifts $-conj$. This thus defines two real deformations of ruled surfaces and shows that the real ruled surfaces obtained from $X$ after making elementary transformations on the points $\pm \frac{1}{2} \in \Delta$ or $\pm \frac{i}{2} \in \Delta$ are in the same deformation class. Hence, without changing the deformation class of the surface $(X_1 ,c_{X_1})$, we can assume that the elementary transformations are done only on real points of $(B_1 \times {\Bbb{C}} P^1 , c_{L_0} \oplus c_{L_0})$ with at most one on each of its real components. The total number of such elementary transformations is then $k$ since the topological type of $(X_1 ,c_{X_1})$ is $(t,k,g,\mu , \epsilon)$. 
If $X_1$ and $X_2$ are two such surfaces, there exists a piecewise analytic path of smooth real algebraic curves connecting $(B_1 ,c_{B_1})$ and $(B_2 ,c_{B_2})$, such that the $k$ components of ${\Bbb{R}} B_2$ over which are done the elementary transformations deform on the $k$ components of ${\Bbb{R}} B_1$ over which are done the elementary transformations. Hence in both cases, $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ are in the same deformation class. Since the real structures $c_X^+$ and $c_{L_0} \oplus c_{L_0}$ are conjugated on $B_1 \times {\Bbb{C}} P^1$, which follows from theorem \ref{theoremrealstruct} for instance, we deduce that the real decomposable ruled surfaces $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ are in the same deformation class if and only if they have the same topological type $(t,k,g,\mu , \epsilon)$, except when $\mu = 0$. In that case, if $g$ is even, it follows from proposition \ref{propjac} that the same method as before leads to the fact that $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ are in the same deformation class that $(B \times {\Bbb{C}} P^1 , c_X^+)$ or $(B \times {\Bbb{C}} P^1 , c_X^-)$. But the quotient $(B \times {\Bbb{C}} P^1) / c_X^+$ is spin and $(B \times {\Bbb{C}} P^1) / c_X^-$ is not, so the surfaces $(B \times {\Bbb{C}} P^1 , c_X^+)$ and $(B \times {\Bbb{C}} P^1 , c_X^-)$ are not in the same deformation class. If $g$ is odd, it follows from proposition \ref{propjac} that the same method as before leads to the fact that $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ are in the same deformation class that $(P(L \oplus L_0) , c_X^\pm)$, where $L$ belongs to one of the two components of the real part of $( Jac (B) , -c_B^*)$. But it follows from lemma \ref{lemmespin} that $(P(L \oplus L_0) , c_X^+)$ and $(P(L \oplus L_0) , c_X^-)$ are in a same deformation class. 
The result follows from the fact that $(B \times {\Bbb{C}} P^1) / c_X^\pm$ is spin and $P(L \oplus L_0) / c_X^\pm$ is not when $L$ is not in the same component of the real part of $( Jac (B) , -c_B^*)$ as $L_0$. Now let us prove the theorem in the general case, which means that we no longer assume that $X_1$ and $X_2$ are decomposable. It follows from propositions \ref{proprealholsect} and \ref{propelemtrans} that these surfaces are either in the same deformation class as some real decomposable ruled surface, or in the same deformation class as some ruled surface obtained from a decomposable one of the form $(P(L \oplus L_0) , c_X^\pm)$ after at most one elementary transformation on each of its real components. In this second case, we can assume that $L$ does not belong to the same component of the real part of $( Jac (B) , -c_B^*)$ as $L_0$ (otherwise the surface can be deformed to a decomposable ruled surface). Since the topological types of these surfaces are different from those realized by decomposable ruled surfaces, we can assume that either $X_1$ and $X_2$ are both decomposable, or that they are both from this second class. In the first case, the theorem follows from what we have already done. Let us assume we are in the second case. Then there exists $L_1 \in \mathop{\rm Pic}\nolimits (B_1)$ (resp. $L_2 \in \mathop{\rm Pic}\nolimits (B_2)$) such that $c_{B_1}^* (L_1) = L_1^*$ (resp. $c_{B_2}^* (L_2) = L_2^*$) and $(X_1 , c_{X_1})$ is obtained from $(P(L_1 \oplus L_0) , c_X^+)$ (resp. $(X_2 , c_{X_2})$ is obtained from $(P(L_2 \oplus L_0) , c_X^+)$) after making $k$ elementary transformations in $k$ disjoint real components. The surfaces $(P(L_1 \oplus L_0) , c_X^+)$ and $(P(L_2 \oplus L_0) , c_X^+)$ have the same topological type $(t+k , 0 , g , \mu , \epsilon)$, with $\mu >0$. Thus they are in the same deformation class. 
Moreover, in the same way as before, this deformation can be chosen so that the $k$ marked real components of $(P(L_2 \oplus L_0) , c_X^+)$ deform to the $k$ marked real components of $(P(L_1 \oplus L_0) , c_X^+)$. It follows that $(X_1 ,c_{X_1})$ and $(X_2 ,c_{X_2})$ are in the same deformation class. $\square$ \addcontentsline{toc}{part}{Bibliographie} \nocite{*} \noindent Ecole Normale Sup\'erieure de Lyon\\ Unit\'e de Math\'ematiques pures et appliqu\'ees\\ $46$, all\'ee d'Italie\\ $69364$, Lyon C\'edex $07$ (FRANCE)\\ e-mail: \texttt{[email protected]} \end{document}
\begin{document} \input epsf \begin{abstract} How to find ``best rational approximations'' of maximal commutative subgroups of $GL(n,\r)$? In this paper we pose and make first steps in the study of this problem. It contains both classical problems of Diophantine and simultaneous approximations as particular subcases, but in general it is much wider. We prove estimates for $n=2$ for both totally real and complex cases and present an algorithm to construct best approximations of a fixed size. In addition we introduce a relation between best approximations and sails of cones and interpret the result for totally real subgroups in geometric terms of sails. \end{abstract} \maketitle \tableofcontents \section*{Introduction: the problem and its relationships} We pose and investigate a problem of approximation of maximal commutative subgroups of $GL(n,\r)$ by rational subgroups, or more geometrically in other words a problem of approximation of arbitrary simplicial cones in ${\r}^n$ by rational simplicial cones. This problem is a natural multidimensional generalization of a problem on rational approximations of real numbers that is contained in the case of $n=1$. As a particular example it also contains a simultaneous approximation problem and is closely related to multidimensional generalizations of continued fractions. The problem of approximation of real spectrum maximal commutative subgroups has much in common with the problem of approximations of nondegenerate simplicial cones. This in particular allows one to use methods dealing with multidimensional continued fractions. { \parindent=0cm {\bf Maximal commutative subgroups.} We consider a Cartan subgroup of the group $GL(n,\r)$ or maximal abelian semisimple subgroups of $GL(n,\r)$. Sometimes it is convenient to consider such a subgroup as the set of all matrices commuting with a given semisimple element $A \in GL(n,\r)$, i.e., the centralizer $C_{GL(n,\r)}(A)$. 
The centralizer is commutative if and only if $A$ has distinct eigenvalues. So we work with centralizers of ``generic'' matrices. For the field of real numbers not all Cartan subgroups are mutually conjugate: the general Cartan subgroup in $GL(n,\r)$ has $k$ one-dimensional and $l$ two-dimensional minimal eigenspaces (where $k{+}2l=n$). We will study mainly the Cartan subgroups with only one-dimensional minimal eigenspaces, which we call ``real Cartan subgroups'', but all the definitions are extended to the general Cartan subgroups of $GL(n,\r)$ and can be extended to the case of the Cartan subgroup of $GL(n,\Bbb C)$ or more general semisimple groups. In that case all elements of the Cartan subgroup have real eigenvalues. } We will use the term ``maximal commutative subgroup'', or shortly MCRS-group, and denote the space of such subgroups by ${\frak C}_n$. { \parindent=0cm {\bf The space of simplicial cones.} It is convenient to deal with a geometric analog of MCRS-groups. Let us describe a relation between real maximal commutative subgroups and nondegenerate simplicial cones. } A {\it nondegenerate simplicial cone} in ${\r}^n$ is a conical convex hull of a set of $n$ unordered linearly-independent vectors. Further we omit ``nondegenerate'', since we work only with nondegenerate cones. Together with any simplicial cone $K$ one may study the cone $-K$ symmetric to it with respect to the origin. All further discussions, constructions, notions, and statements are invariant with respect to the map $x \mapsto -x$ of ${\r}^n$, and hence they all deal with both cones $K$ and its symmetric one $-K$. Therefore, we identify the cones $K$ and $-K$ and define $Simpl_n$ as {\it a space of pairs of symmetric cones}. There exists a natural $(2^{n-1})$-fold covering of the space ${\frak C}_n$ of all maximal commutative subgroups by the space $Simpl_n$: $$ Simpl_n \rightarrow {\frak C}_n $$ the cones map to the subgroups whose eigendirections are the extremal rays of the cones. 
So for any element of $Simpl_n$ we have a maximal commutative subgroup. Therefore, approximation problems, which we discuss below and which are local problems, can be studied in terms of the groups as well as in terms of simplicial cones. A space $Simpl_n$ of all simplicial cones in ${\r}^n$ can be defined directly with the coordinates of the cone generators, nevertheless it is very important to understand this space as a {\it homogeneous space of the group $GL(n,\r)$} in the following way. Consider a group $GL(n,\r), n>1$ of all linear invertible transformations in ${\r}^n$ with a fixed basis. Take $D_n$ --- the subgroup of the diagonal matrices in the chosen basis which have positive numbers on the diagonal, i.e. a positive part of the corresponding Cartan subgroup or connected component of the unity of that subgroup. The elements of this subgroup leave invariant each of the $2^n$ coordinate cones. The left homogeneous space $GL(n,\r)/D_n$ can be considered as a space of all connected parts of the Cartan subgroups of the group $GL(n,\r)$. To get a cone (or actually a pair of symmetric cones $K$ and $-K$) we should add the symmetric group of coordinate permutations $S_n$ (Weyl group) which is also contained in the normalizer of $D_n$. Denote by ${\hat D}_n$ the skew-product $S_n\rightthreetimes D_n$ of the symmetric group and the subgroup of diagonal matrices. {\it A homogeneous space $$ GL(n,\r)/{\hat D}_n $$ of left conjugacy classes in $GL(n,\r), n>1$ with respect to the subgroup ${\hat D}_n$ is naturally identified with the space of all $($pairs of$)$ nondegenerate simplicial cones $Simpl_n$.} Indeed, the subgroup of $GL(n,\r)$ preserving the positive coordinate cone $\r_+^n$ as well as its reflection coincides with the group ${\hat D}_n$, and $GL(n,\r)$ transitively acts on $Simpl_n$. 
Notice that it is sometimes convenient to take the group $SL(n,\r)$ instead of $GL(n,\r)$ (factoring the latter by the subgroups of positive scalar matrices and taking ${\hat D}_n$ as the subgroup of positive diagonal matrices with unit determinant in $ SL(n,\r)$): $$ Simpl_n =SL(n,\r)/\{ {\hat D}_n \cap SL(n,\r)\} $$ A homogeneous space $Simpl_n, n>1$ is not compact. This space admits a transitive right action of the whole group $GL(n,\r)$ and it possesses an essential absolutely continuous measure $\mu_n$, that is quasihomogeneous with respect to the action. This measure is called M\"obius measure, it was studied in~\cite{KarMob}. We are mostly interested in the actions of $SL(n,\z)$ and $SL(n,\q)$ on the space $Simpl_n$ but not in the action of the whole group $GL(n,\r), n>1$. These actions are ergodic. \begin{definition} Consider a simplicial cone $C \in Simpl_n$. The boundary of the convex hull of the integer points in this cone without the origin, i.e. $$ \partial \Big(\conv \Big\{ C \cap \z^n\setminus (0,\ldots, 0)\Big\}\Big), $$ is called {\it the sail of the simplicial cone}. \end{definition} The space of the simplicial cones could be identified with the space of the sails of simplicial cones. { \parindent=0cm {\it Remark.} Note that one can consider the sail for other convex bodies, for instance for the interiors of conics. } For the simplest case of $n=2$ a simplicial cone is a convex angle between two rays on the plane, and the space $Simpl_2$ of all cones is a two dimensional torus without a diagonal modulo the involution: $\{S^1 \times S^1\ \smallsetminus Diag\} /\approx$, where $Diag$ is the diagonal in $S^1 \times S^1 $ and $\approx$ is a factorization: $(x,y)\approx (y,x)$. Here the points of the circles $S^1$ are the oriented lines in $\r^2$ that contain the critical rays of the angles, and the quasiinvariant measure is the Lebesgue measure. Actually $Simpl_2$ is a M\"obius strip without a boundary or equivalently a punctured projective plane. 
The geometry of the corresponding cone includes a part of the classical theory of continued fractions. The sail for $n=2$ is the boundary of a noncompact convex polygon. The two-dimensional case is tightly connected with classical continued fractions (see in Section~2). { \parindent=0cm {\bf The problem of approximations.} The described relation between simplicial cones and real spectrum (i.e.~having real eigenvalues, see further) maximal commutative subgroups in $GL(n,\r)$ preserving the corresponding cones is a covering (up to an identification of the cone and its central symmetrical image). Therefore approximations of such subgroups and approximations of simplicial cones (we speak about this further) are the same up to the lifting. Recall that we have fixed a system of coordinates in $\r^n$, and hence we have a special coordinate simplicial cone $K_0=\r^n_+$ (a hyperoctant). } \begin{definition} A {\it rational simplicial cone} $($or respectively a {\it rational commutative subgroup}$)$ is a cone $($a subgroup$)$ all of whose extremal rays $($eigen-directions$)$ contain points distinct from the origin with all rational coordinates; actually this implies the existence of points with all integer coordinates as well. A simplicial cone $($maximal commutative subgroup$)$ is called {\it algebraic} if there exists a matrix $g\in SL(n,\z)$ with distinct eigenvalues whose eigen-directions generate this cone $($respectively an integer matrix whose centralizer in $SL(n,\r)$ coincides with this subgroup$)$. \end{definition} It is clear that the rational cones form the orbit of the coordinate cone $K_0$ with respect to the group $SL(n,\q)$. An example of an algebraic simplicial cone is the conical convex hull of the two eigenvectors of the Fibonacci matrix: $$g= \left( \begin{array}{cc} 1 & 1 \\ 1 & 0 \\ \end{array} \right) $$ \begin{definition} Consider some cone $C \in Simpl_n$ and take nonzero linear forms $L_1, \ldots, L_n$ that vanish on the hyperfaces of the cone. 
A {\it Markoff-Davenport form} is $$ \Phi_C(x)= \frac{\prod\limits_{k=1}^{n}\big( L_k(x_1,\ldots,x_n) \big)} {\Delta(L_1, \ldots, L_n)} $$ where $\Delta(L_1, \ldots, L_n)$ is the volume of the parallelepiped spanned by $L_k$ for $k=1,\ldots, n$ in the dual space. \end{definition} This form is defined by a cone uniquely up to a sign. Now, having the Markoff-Davenport form $\Phi$, one can define distances between two cones. For two cones $C_1$ and $C_2$ consider two forms $$ \Phi_{C_1}(v)+ \Phi_{C_2}(v) \quad \hbox{and} \quad \Phi_{C_1}(v)- \Phi_{C_2}(v). $$ Take the maximal absolute values of the coefficients of these forms separately; the minimal of them is the distance between $C_1$ and $C_2$. Further in Subsection~1.1 we define the Markoff-Davenport form in a more general situation. Now we are ready to formulate the main problem of approximations: { \parindent=0cm \emph{\textbf{For a given simplicial cone $($or maximal commutative subgroup of $SL(n,\r)$$)$ find a rational simplicial cone $($rational maximal commutative real subgroup$)$ that for a chosen Markoff-Davenport metric is the closest rational simplicial cone $($subgroup$)$ in some fixed class of rational cones $($subgroups$)$}.} } Such classes of rational cones can be chosen to be finite classes including only cones having fixed ``sizes'' of integer points on their rays (for more information see below in Section~1). First of all the approximation problem by rational simplicial cones (subgroups) must be considered for algebraic cones (subgroups). The most intriguing things are connected with generalizations of the beautiful theory of Markoff-Lagrange spectra~\cite{Mar} and Markoff-Davenport $n$-ary forms~\cite{Dav1}. 
{ \parindent=0cm {\bf Relations with theory of multidimensional continued fractions.} The problem on approximation of commutative subgroups or simplicial cones formulated above and studied in this work is intimately connected with the theory of multidimensional continued fractions but does not reduce to that. } The recent work by V.~I.~Arnold ~\cite{Arn1} and the following works by him~\cite{Arn2}, E.~I.~Korkina~\cite{Kor2}, G.~Lachaud~\cite{LacBook}, J.-O.~Mussafir~\cite{Mou2}, Karpenkov~\cite{Kar1}, etc., revived the interest to one of classical generalizations of continued fractions theory, considered for the first time by F.~Klein in~\cite{Kle1}. From geometrical point of view the generalization deals with {\it sails}. The classical theory of ordinary continued fractions i.e. theory of Gauss transformations in algebro-dynamical terms related to the case $n=2$ was made by R.~L.~Adler and L.~Flatto in~\cite{Adl}. M.~L.~Kontsevich, Yu.~M.~Suhov in~\cite{Kon} made an improved version admitting an extension to multidimensional case. In the work~\cite{Kon} the authors considered the following approach to these questions: to study the homogeneous space $SL(n,\r)/SL(n, \z)$, i.e. the space of lattices in $SL(n,\r)$, and the action of the Cartan subgroup $D_n$ on it. For $n=2$ this action is reduced to the action of the group $\r^1$ and as it is known from~\cite{Adl} it is a special suspension over the Gauss automorphism that lies in a definition of continued fractions. One can suppose that the solution of the approximation problem reduced to the geometry of the sails in the following sense: in order to find the best approximation of the cone (equivalently maximal commutative subgroup) one must find the appropriate basis of the vectors which belong to the vertices of the sail of this cone or adjacent cone. Up to now {\it this is an open question}. The experiments show that it could be not always the case (see for instance in Example~\ref{antisail}). 
Let us show connections of our problem with this geometry. First of all the space $Simpl_n$ as we had mentioned can be interpreted as the {\it space of sails of simplicial cones}. Let us compare our approach to the geometry of sails with \cite{Kon}. One can think of dynamical systems as of triples: (a space, a group action, an invariant or quasiinvariant measure). Then in~\cite{Kon} the authors study the dynamical system $$ \{SL(n,\r)/SL(n,\z), \quad D_n,\quad \nu_n\}. $$ i.e. in our terms it is multidimensional suspension (time here is a Cartan subgroup) in a given or an arbitrary cone. Our approach to theory of sails is in some sense dual to the approach of~\cite{Kon}. We consider another dynamical system, namely, the action of a discrete (noncommutative) group $SL(n,\z)$ (or $SL(n,\q)$) in the space of sails (or equivalently simplicial cones): $$ \{Simpl_n(= SL(n,\r)/{\hat D_n}) , \quad SL(n,\z),\quad \mu_n\}. $$ Roughly speaking the ``time'' and the subgroup defining the homogeneous space has been transposed. Both approaches have their own advantages and limitations. However the main aim of the current work is not in studying of multidimensional sails, their statistics and other properties, but in their applications to approximations. { \parindent=0cm {\bf More about geometry of sails.} The geometry of sails is very interesting by itself. One of the essential subjects here is a statistical analysis of their geometric characteristics with respect to the measure on the space of the sails $Simpl_n$. For instance, {\it what is the measure of sails with given properties: say with given number of faces of some given combinatorial type} (see~\cite{Kon},~\cite{Avd1}, \cite{Avd2}, \cite{KarZam}, \cite{KarMob}). This would generalize Gauss-Kuzmin theorem (see in~\cite{Kuz}) and some others for ordinary continued fractions. The work in this direction has just started and it is not much known now, first theorems on this subject can be found in~\cite{KarMob}. 
} Faces of different dimensions of a sail were studied in~\cite{LacBook}, \cite{Mou2}, \cite{GL}, \cite{Kor1}, \cite{KarPyr}. In algebraic cases all faces are polyhedra. It is also natural to consider the sails in the adjacent hyperoctants. The important problem here is to study the condition for a polygonal surface to be a sail for some cone. This problem was posed by V.~I.~Arnold and was studied in several papers (\cite{Arn4}, \cite{Arn2}, \cite{Kar1}, \cite{Kar4D}, \cite{KarPyr}, \cite{KarAlg}, \cite{Kor2}, \cite{Kor3}, \cite{LacBook}, \cite{Mou2}). In~\cite{Tsu} H.~Tsuchihashi showed the relation between sails of cones and cusp singularities, introducing a new application to toric geometry. This relation is studied in detail in~\cite{KarTrig} for the two-dimensional case. Actually in the study of $Simpl_n$ the other multidimensional generalizations of continued fractions can be useful. This in particular includes the previously considered convex-geometric (\cite{Kle1}, \cite{Arn2}, \cite{Kor2}, \cite{LacBook}, \cite{Kar1}), local minima type (\cite{Min}, \cite{Byk}), Voronoi (\cite{VAlg}, \cite{Buc}), and algorithmic (\cite{Per}, \cite{Sch}) generalizations of continued fractions. 
By the problem of approximation we mean the problem of finding of best approximation of a simplicial cone by rational cones (similar to the classical problem on best approximations of real numbers by rational numbers). This problem is very complicated already in the case of $n=2$. That is also applied even to the algebraic cones. We give several estimates that suggest an idea that best approximations are not always related to sails or to sails of adjacent cones (see also in Example~\ref{antisail}). } First, we show that the classical case of approximations of real numbers by rational numbers is really one of particular cases of the proposed new approximation model. In addition we also indicate that simultaneous approximations are also covered by our approach. Further we work in general case of $n=2$. We give upper and lower estimates for the discrepancy between best approximations and original simplicial cones in the following important case (Theorem~\ref{Lag}): {\it let $\alpha_1, \alpha_2\in \r$ both have infinite continued fractions with bounded elements, consider a simplicial cone bounded by two lines $y=\alpha_1$ and $y=\alpha_2$, then the growth rate of the best approximation of size $N$ is bounded by $C_1/N^2$ and $C_2/N^2$ while $N$ tends to infinity.} Then we translate this statement to the language of sails and their generalizations (Theorem~\ref{sails}) and finally show an algorithm to construct best approximations of a fixed size. { \parindent=0cm {\it Remark.} In this paper we work in a slightly extended way including commutative subgroups of $SL(n,\r)$ having complex conjugate eigenvectors as well. This is the main reason for our choice to use terminology of commutative subgroups instead of simplicial cones (that are convenient only for the totally real case). } We conclude the paper with several examples of approximations in the three-dimensional case, coming from simultaneous approximations. { \parindent=0cm The paper is organized as follows. 
In Section 1 we give basic notions and definitions of maximal subgroup approximation theory. We introduce sizes and discrepancies for the subgroups and define the notion of ``best approximations'' in our context. In Section~2 we briefly show how the classical theory of Diophantine approximations is embedded into the theory of subgroup approximations. } Further we make first steps to study a general two-dimensional case. It is rather complicated since we need to approximate an object defined by four entries of $2\times 2$ matrices that vary. Hence this case is comparable with a general case of simultaneous approximations of vectors in $\r^4$. Nevertheless it is simpler to find the best approximations in the case of subgroups, especially in the special algebraic case when a certain periodicity of approximations takes place. In Section~3 we write estimates for the quality of best approximations for both hyperbolic and non-hyperbolic cases of rays whose continued fractions have bounded elements. This in particular includes an algebraic case. We also show geometric origins of the bounds in terms of continued fractions for the hyperbolic algebraic case. Finally in Section~4 we study in a couple of examples the case of simultaneous approximations of vectors in $\r^3$ in the framework of subgroup approximations. We test two algebraic examples coming from totally real and non-totally real cases. \section{Rational approximations of MCRS-groups} In this section we give general definitions and formulate basic concepts of maximal commutative subgroup approximations. We recall a definition of a Markoff-Davenport form in Subsection~1.1. Further in Subsection~1.2 we define rational subgroups and choose a ``size'' for them. We define the distance function (discrepancy) between two subgroups in Subsection~1.3. As we have already mentioned we will continue with terminology of maximal commutative subgroups. 
In the case when we deal with real spectra subgroups the statements can be directly translated to the case of simplicial cones. \subsection{Regular subgroups and Markoff-Davenport forms} Consider a real space $\r^n$ and fix some coordinate basis in it. A real operator is called {\it regular} if all its eigenvalues are distinct (but not necessarily real). A maximal commutative subgroup of $GL(n,\r)$ is said to be {\it regular}, or {\it MCRS-group} for short, if it contains regular operators. We say that a one-dimensional complex space is an {\it eigenspace} of an MCRS-group if it is an eigenspace of one of its regular operators. Actually any two regular operators of the same MCRS-group have the same eigenspaces, therefore each MCRS-group has exactly $n$ distinct eigenspaces. Consider an arbitrary MCRS-group $\A$ and denote its eigenspaces by $l_1,\ldots, l_n$. Denote by $L_i$ a nonzero linear form over $\c^n$ that attains zero values at all vectors of the complex lines $l_j$ for $j\ne i$. Let $\Delta(L_1, \ldots, L_n)$ be the determinant of the matrix having in the $k$-th column the coefficients of the form $L_k$ for $k=1,\ldots, n$ in the dual basis. \begin{definition} We say that the form $$ \frac{\prod\limits_{k=1}^{n}\big( L_k(x_1,\ldots,x_n) \big)} {\Delta(L_1, \ldots, L_n)} $$ is the {\it Markoff-Davenport form} for the MCRS-group $\A$ and denote it by $\Phi_{\A}$. \end{definition} \begin{example} Consider an MCRS-group containing a Fibonacci operator $$ \left( \begin{array}{cc} 1& 1\\ 1& 0\\ \end{array} \right). $$ The Fibonacci operator has two eigenlines $$ y=-\theta x \quad \hbox{and}\quad y=\theta^{-1}x, $$ where $\theta$ is the {\it golden ratio} $\frac{1+\sqrt{5}}{2}$. So the Markoff-Davenport form of the Fibonacci operator is $$ \frac{(y+\theta x)(y-\theta^{-1}x)}{\theta+\theta^{-1}}= \frac{1}{\sqrt{5}}(-x^2+xy+y^2). 
$$ \end{example} A Markoff-Davenport form is uniquely defined by an MCRS-group up to a sign, since the linear forms $L_i$ are uniquely defined by the MCRS-group up to multiplication by a scalar and permutations. By definition any MCRS-group contains a real operator with distinct roots, therefore all the coefficients of the Markoff-Davenport form are real. \begin{remark} The minima of the absolute values of such forms on the integer lattice were studied by A.~Markoff in~\cite{Mar} for the two-dimensional case, and further by H.~Davenport in~\cite{Dav1}, \cite{Dav2}, and~\cite{Dav3} for the three-dimensional totally real case. A few three-dimensional totally real examples were exhaustively studied by A.~D.~Bryuno, V.~I.~Parusnikov (see for instance in~\cite{BP}). The first steps in the general multidimensional case were made in the paper~\cite{SL3Z}. \end{remark} \subsection{Rational subgroups and their sizes} We start with the following definition. \begin{definition} An MCRS-group $\A$ is called {\it rational} if all its eigenspaces contain {\it Gaussian} vectors, i.e. vectors whose coordinates are of type $a+Ib$ for integers $a$ and $b$, where $I^2=-1$. Denote the set of all rational MCRS-groups of dimension $n$ by $\rat_n$. \end{definition} \begin{example}\label{ex2} The following two operators $$ \begin{array}{l} \left( \begin{array}{rr} 0& -1\\ 1&0\\ \end{array} \right) \quad \hbox{with eigenvectors $(I, 1)$ and $(-I, 1)$}, \\ \left( \begin{array}{ll} 1& 1\\ 4& 1\\ \end{array} \right) \quad \hbox{with eigenvectors $(1, 2)$ and $(1, -2)$} \end{array} $$ represent rational MCRS-groups (denote them by $\A_i$ and $\A_{ii}$) with complex conjugate and real eigen-directions, respectively. \end{example} For a complex vector $v=(a_1{+}Ib_1,\ldots, a_n{+}Ib_n)$ denote by $|v|$ the norm $$ \max\limits_{i=1,\ldots,n}\left(\sqrt{a_i^2+b_i^2}\right). $$ A Gaussian vector is said to be {\it primitive} if all its coordinates are relatively prime. 
Suppose that a complex one-dimensional space has Gaussian vectors, then the minimal value of the norm $|*|$ for the Gaussian vectors is attained at primitive Gaussian vectors. \begin{definition} Consider a rational MCRS-group $\A$. Let $l_1,\ldots, l_n$ be the eigenspaces of $\A$. The {\it size} of $\A$ is a real number $$ \max\limits_{i=1,\ldots,n}\big\{ |v_i| \big| \hbox{$v_i$ -- is a primitive Gaussian vector in $l_i$} \big\}, $$ we denote it by $\nu(\A)$. \end{definition} The sizes of operators in Example~\ref{ex2} are $1$ and $2$ respectively. \subsection{Discrepancy functional and approximation model} We are focused mostly on the following approximation problem: {\it how to approximate an MCRS-group by rational MCRS-groups $($or even by a certain subset of rational MCRS-groups$)$}? Let us first define a natural distance between MCRF-groups. Let $\A_1$ and $\A_2$ be two MCRS-groups. Consider the following two symmetric bilinear forms $$ \Phi_{\A_1}(v)+ \Phi_{\A_2}(v) \quad \hbox{and} \quad \Phi_{\A_1}(v)- \Phi_{\A_2}(v) $$ for vectors in $\r^n$. Take the maximal absolute values of the coefficients of these forms (separately). The minimal of these two maximal values we consider as a distance between $\A_1$ and $\A_2$, we call it {\it discrepancy} and denote by $\rho(\A_1,\A_2)$. Let us calculate the discrepancy between the MCRS-groups of Example~\ref{ex2}. We have $$ \big|\Phi_{\A_i}(v)\pm\Phi_{\A_{ii}}(v)\big|= \left|I\frac{x^2+y^2}{2} \pm\frac{y^2-4x^2}{4}\right| $$ therefore $ \rho(\A_i,\A_{ii})=\frac{\sqrt{3}}{2}$. \begin{definition} Let $\Omega \subset \rat_n$ for a fixed $n$. The problem of {\it best approximations} of an MCRS-group $\A$ by MCRS-groups in $\Omega$ is as follows. {\it For a given positive integer $N$ find a rational MCRS-group $\A_N$ in $\Omega$ with size not exceeding $N$ such that} $$ \rho(\A,\A_N)=\min\big\{\rho(\A,\A') \big|\A'\in \Omega, \nu(\A')\le N \big\}. 
$$ \end{definition} \remark{There are another important classes of MCRS-groups that contain matrices of $GL(n,\z)$ and $GL(n,\q)$ respectively. The MCRS-group is said to be {\it algebraic} if it contains regular operators of $GL(n,\z)$. It is natural to consider approximations of MCRS-groups by algebraic MCRS-groups, and approximations of algebraic MCRS-groups by rational MCRS-groups. } \section{Diophantine approximations and MCRS-group approximations}\label{classicsubsection} A classical problem of approximating real numbers by rational numbers is a particular case of the problem of best approximations of MCRS-groups. For a real $\alpha$ denote by $\A[\alpha]$ an MCRS-group of $GL(2,\r)$ defined by the two spaces $x=0$ and $y=\alpha x$. Consider any two MCRS-groups $\A[{\alpha_1}]$ and $\A[{\alpha_2}]$ with positive $\alpha_1$ and $\alpha_2$ and calculate a discrepancy between them. $$ \begin{array}{c} \displaystyle \Phi_{\A[{\alpha_1}]}-\Phi_{\A[{\alpha_2}]}= \frac{x(y-\alpha_1x)}{1}-\frac{x(y-\alpha_2x)}{1}= (\alpha_2-\alpha_1)x^2 \\ \displaystyle \Phi_{\A[\alpha_1]}+\Phi_{\A[\alpha_2]}= \frac{x(y-\alpha_1x)}{1}+\frac{x(y-\alpha_2x)}{1}= 2xy-(\alpha_2+\alpha_1)x^2 \end{array} $$ Since $\alpha_1>0$ and $\alpha_2>0$ we have $$ \rho(\A[\alpha_1],\A[\alpha_2])=|\alpha_1-\alpha_2|. $$ Denote by $\Omega_{[0,1]}^\q$ a subset of all $\A[\alpha]$ for rational $\alpha$ in the segment $[0,1]$. For any couple of relatively prime integers $(m,n)$ satisfying $0\le \frac{m}{n}\le 1$ we have $$ \nu\Big(\A\Big[\frac{m}{n}\Big]\Big)=n. $$ A classical problem of approximations of real numbers by rational numbers having bounded denominators in our terminology is as follows. \begin{theorem} Consider a real number $\alpha$, $0\le \alpha \le 1$. Let $[0,a_1,\ldots]$ $($or $[0,a_1,\ldots, a_k]$$)$ be an ordinary infinite $($finite$)$ continued fraction for $\alpha$. 
Then the set of best approximations consists of MCRS-groups $\A[m/n]$ for $m/n=[0,a_1,\ldots, a_{l-1},a_{l}]$ where $l=1,2,\ldots$ $($In the case of a finite continued fraction we additionally have $\A[m/n]$ for $m/n=[0,a_1,\ldots, a_{k-1},a_{k}{-}1]$$)$. \qed \end{theorem} \section{General approximations in the two-dimensional case} In this section we prove estimates on the quality of best approximations for MCRS-groups whose eigen-directions are expressed by continued fractions with bounded elements. We study separately the cases of hyperbolic and non-hyperbolic MCRS-groups. In particular, we study a geometric interpretation of the bounds in terms of geometric continued fractions for the algebraic hyperbolic MCRS-groups. \subsection{Hyperbolic case} An MCRS-group is called {\it hyperbolic} if it contains a hyperbolic operator (all of whose eigenvalues are real and pairwise distinct). \subsubsection{Lagrange estimates for a special case} In this subsection we prove an analog of the Lagrange theorem on the approximation rate for an MCRS-group that has eigenspaces defined by $y=\alpha_1 x$ and $y=\alpha_2 x$ with bounded elements of the continued fractions for $\alpha_1$ and $\alpha_2$. In particular this includes all algebraic MCRS-groups. Here we do not consider the case when one of the eigenspaces is $x=0$; this case was partially studied in Section~\ref{classicsubsection}. \begin{theorem}\label{Lag} Let $\alpha_1$ and $\alpha_2$ be real numbers having infinite continued fractions with bounded elements. Consider an MCRS-group $\A$ with eigenspaces $y=\alpha_1 x$ and $y=\alpha_2 x$. Then there exist positive constants $C_1$ and $C_2$ such that for any positive integer $N$ the best approximation $\A_N$ in $\Omega$ satisfies $$ \frac{C_1}{N^2}< \rho(\A,\A_N) < \frac{C_2}{N^2}. $$ \end{theorem} We will start the proof with the following two lemmas. Denote by $\A_{\delta_1, \delta_2}$ the MCRS-group defined by the lines $y=(\alpha_i+\delta_i) x$ for $i=1,2$. 
\begin{lemma}\label{lemma1} Consider a positive real number $\varepsilon_1$ such than $\varepsilon_1<1/|\alpha_1-\alpha_2|$. Suppose that $\rho(\A,\A_{\delta_1, \delta_2})<\varepsilon_1$ then $$ \begin{array}{l} |\delta_1|< \frac{(1+|\alpha_1|)(\alpha_1-\alpha_2)^2}{|\alpha_2|(1-\varepsilon_1|\alpha_1-\alpha_2|)}\varepsilon_1 \qquad \hbox{and} \qquad |\delta_2|< \frac{(1+|\alpha_2|)(\alpha_1-\alpha_2)^2}{|\alpha_1|(1-\varepsilon_1|\alpha_1-\alpha_2|)}\varepsilon_1. \end{array} $$ \end{lemma} \begin{proof} Let us remind that the Markoff-Davenport form of $\A_{\delta_1,\delta_2}$ is $$ \Phi{\A_{\delta_1, \delta_2}}(x,y)=\frac{\big(y-(\alpha_1 +\delta_1) x\big)\big(y-(\alpha_2+\delta_2) x\big)}{(\alpha_2+\delta_2)-(\alpha_1+\delta_1)}. $$ Consider the absolute values of the coefficients at $y^2$ and at $xy$ for the difference of Markoff-Davenport forms for the MCRS-groups $\A$ and $\A_{\delta_1,\delta_2}$. By the conditions of the lemma these coefficients are less then $\varepsilon_1$: $$ \left| \frac{\delta_2-\delta_1}{(\alpha_1-\alpha_2)(\alpha_1-\alpha_2+\delta_1-\delta_2)}\right|<\varepsilon_1 \quad \hbox{and} \quad \left| \frac{\alpha_1\delta_2-\alpha_2\delta_1}{(\alpha_1-\alpha_2)(\alpha_1-\alpha_2+\delta_1-\delta_2)}\right|<\varepsilon_1. $$ From the first inequality we have: $$ |\delta_1-\delta_2|<\frac{(\alpha_1-\alpha_2)^2}{1-\varepsilon_1|\alpha_1-\alpha_2|}\varepsilon_1. $$ The second inequality implies: $$ |\delta_1|<\frac{|(\alpha_1-\alpha_2)(\alpha_1-\alpha_2+\delta_1-\delta_2)|\varepsilon_1+ |\alpha_1(\delta_1-\delta_2)|}{|\alpha_2|}, $$ and therefore $$ |\delta_1|<\frac{|\alpha_1-\alpha_2|(|\alpha_1-\alpha_2|+\frac{(\alpha_1-\alpha_2)^2} {1-\varepsilon_1|\alpha_1-\alpha_2|}\varepsilon_1)\varepsilon_1+ |\alpha_1|\frac{(\alpha_1-\alpha_2)^2}{1-\varepsilon_1|\alpha_1-\alpha_2|}\varepsilon_1}{|\alpha_2|}= \frac{(1+|\alpha_1|)(\alpha_1-\alpha_2)^2}{|\alpha_2|(1-\varepsilon_1|\alpha_1-\alpha_2|)}\varepsilon_1. 
$$ The inequality for $\delta_2$ is obtained in the same way. \end{proof} \begin{lemma}\label{lemma2} Let $\varepsilon_2$ be a positive real number. Suppose $|\delta_1|<\varepsilon_2$ and $|\delta_2|<\varepsilon_2$, then $$ \rho(\A,\A_{\delta_1,\delta_2}) <\frac{\max\Big(2,2(|\alpha_1|+|\alpha_2|),\alpha_1^2{+}\alpha_2^2+|\alpha_1{-}\alpha_2|\varepsilon_2\Big)} {(|\alpha_1-\alpha_2|)(|\alpha_1-\alpha_2|+2\varepsilon_2)} \varepsilon_2. $$ \end{lemma} \begin{proof} The statement of lemma follows directly form the estimate of the coefficients for the difference of Markoff-Davenport forms for the MCRS-groups $\A$ and $\A_{\delta_1,\delta_2}$. \end{proof} {\it Proof of Theorem~\ref{Lag}.} Let us start with the first inequality. Let $\alpha_1=[a_0,a_1,\ldots]$, and $m_i/n_i=[a_0,a_1,\ldots, a_i]$. Without loss of generality we assume that $N>a_0$. Suppose $k$ is the maximal positive integer for which $m_k\le N$ and $n_k\le N$. Then we have $$ \begin{array}{c} \displaystyle \min\left(\left|\alpha_1-\frac{m}{n}\right|\bigg| |m|{\le} N, |n|{\le} N \right)\ge \left|\alpha_1-\frac{m_{k+1}}{n_{k+1}}\right|\ge \frac{1}{n_{k+1}(n_{k+1}+n_{k+2})}\ge\\ \displaystyle \frac{1}{(a_{k+1}+1)n_k\big((a_{k+1}+1)n_k+(a_{k+1}+1)(a_{k+2}+1)n_k\big)}\ge \frac{1}{(a_{k+1}+1)^2(a_{k+2}+2)}\cdot\frac{1}{N^2}. \end{array} $$ For the second and the third inequalities we refer to~\cite{Khin}. The same calculations are valid for $\alpha_2$. Hence we get $C_1$ from Lemma~\ref{lemma1}. Now we prove the second inequality. $$ \left|\alpha_1-\frac{m_k}{n_k}\right|<\frac{1}{n_kn_{k+1}}<\frac{a_{k+1}+1}{n^2_{k+1}}< \frac{(a_{k+1}+1)}{N^2}\max\big(1,(\alpha_1+1)^2\big). $$ The first inequality is classical and can be found in~\cite{Khin}. We take maximum in the last inequality for the case of $m_{k+1}>N$ and $n_{k+1}<N$. From conditions of the theorem the set of $a_i$'s is bounded. 
Therefore, there exists a constant $C'_{2,1}$ such that for any $N$ there exists an approximation of $\alpha_1$ of quality smaller than $C'_{2,1}/N^2$. The same holds for $\alpha_2$. Therefore, we can apply Lemma~\ref{lemma2} in order to obtain the constant $C_2$. \qed Let us say a few words about the case of unbounded elements of continued fractions for $\alpha_i$. Take any positive $\varepsilon$. If the elements of a continued fraction (say for $\alpha_1$) are growing fast enough than there exists a sequence $N_i$ for which the approximations $\A_{N_i}$ are of a quality $\frac{C}{(N_i)^1+\varepsilon}$. We show this in the following example. \begin{example} Let $M$ be a positive integer. Consider $\alpha_1=[a_0,a_1,\ldots]$, such that $a_0=1$, $a_n=(n_{k-1})^{M-1}$. Denote $\frac{m_k}{n_k}=[a_0,\ldots,a_k]$. Let $\alpha_2=0$. Take $N_k=\frac{n_k+n_{k+1}}{2}$. Then there exists a positive constant $C$ such that for any integer $i$ we have $$ \rho(\A,\A_{N_i})\ge \frac{C}{N_i^{1+1/M}}. $$ \end{example} \begin{proof} For any $i$ we have $$ n_{i+1}\ge a_in_i=n_i^{M-1}n_i=n_i^{M}. $$ Therefore, the best approximation with denominator and numerator less than $N_k$ is not better than $$ \left|\alpha_1-\frac{m_k}{n_k}\right|\ge \frac{1}{n_k(n_{k+1}+n_k)}\ge \frac{1}{n_{k+1}^M(n_{k+1}+n_k)}\ge \frac{2^{1+1/M}}{N_k^{1+1/M}}. $$ Now we apply Lemma~\ref{lemma1} to complete the proof. \end{proof} We suspect the existence of {\it badly approximable} MCRS-group $\A$ and a constant $C$ such that there are only finitely many solutions $N$ of the following equation $$ \rho(\A,\A_N)\le \frac{C}{N}, $$ like in the case of simultaneous approximations of vectors in $\r^3$ (see for instance in~\cite{Laga}). \subsubsection{Periodic sails and best approximations in algebraic case} Let us show one relation between classical geometry of numbers (for example see in~\cite{Arn2}) and best simultaneous approximations. First we recall the notion of sails. 
Consider an arbitrary cone $C$ in $\r^2$ with vertex at the origin and boundary rays $r_1$ and $r_2$. We also suppose that the angle between $r_1$ and $r_2$ is non-zero and less than $\pi$. Denote the set of all integer points in the closure of the cone except the origin by $I_{r_1,r_2}$. The {\it sail} of this cone is the boundary of the convex hull of $I_{r_1,r_2}$. It is homeomorphic to a line and contains rays in case of $r_i$ has an integer point distinct to the origin. \begin{definition} Define inductively the {$n$-sail} for the cone $C$. --- let {\it 1-sail} be the sail of $C$. --- suppose all $k$-sails for $k<k_0$ are defined then let {\it $k_0$-sail} be $$ \partial\Big(\conv\Big(I_{r_1,r_2}\setminus \bigcup\limits_{k=1}^{k_0-1} \hbox{$k$-sail} \Big)\Big), $$ where $\conv(M)$ denote the convex hull of $M$. \end{definition} The $k$-sails have the following interesting property. \begin{proposition} Consider a cone $C$. The $k$-sail of $C$ is homothetic to the $1$-sail of $C$ and the coefficient of homothety is $k$. \qed \end{proposition} Now consider an arbitrary MCRS-group. Let $l_1$ and $l_2$ be the two eigenlines for all the operators of MCRS-group. The union of all four $k$-sails for the cones defined by the lines $l_1$ and $l_2$ is a {\it $k$-geometric continued fraction} of the MCRS-group. Further we proceed with an algebraic case. So a hyperbolic MCRS-group $\A$ contains an $GL(2,\z)$-operator with distinct eigenvalues. In this case the mentioned operator acts on a $k$-geometric continued fraction (for any $k$) as a transitive shift. In addition the values of the function $$ \Phi_\A(m,n), \quad \hbox{for $m,n\in \z$,} $$ are contained in the set $\alpha \z$ where the value $\alpha$ is attained at some point of the 1-geometric continued fraction. The value $\alpha=\alpha(\A)$ is an essential characteristic of $\A$, it is sometimes called {\it Markoff minima} of the form $\Phi_\A$. 
\begin{lemma}\label{1234} Let an integer point $(m,n)$ be in the $k$-geometric continued fraction of $\A$. Then $$ |\Phi_\A(m,n)|\ge k\alpha. $$ \end{lemma} \begin{proof} We use induction. The statement clearly holds for $k=1$. Suppose the statement holds for $k=k_0$ let us prove it for $k=k_0+1$. From the step of induction we have the following: for any cone the convex hull of real points $|\Phi_\A(a,b)|= k_0\alpha$ contains the $k_0$-sail of the cone. From the other hand all integer points with $|\Phi_\A(m,n)|= k_0\alpha$ (if any) are on the boundary of this convex hull. Hence all of them are in $k_0$-sail, and thus they are not contained in $(k_0{+}1)$-sail. \end{proof} \begin{theorem}\label{sails} Let $\A$ be an algebraic MCRS-group. Then there exists a positive constants $C$ such that for any positive integer $N$ the following holds. Let the best approximation $\A_N\in \Omega$ be defined by primitive vectors $v_1$ and $v_2$ contained in $k_1$- and $k_2$-geometric continued fractions respectively, then $k_1,k_2<C$. \end{theorem} \begin{proof} By Lemma~\ref{1234} it is sufficient to prove that the set of values of $|\Phi_\A(v_i)|$ is bounded. Let $\A$ has eigenlines $y=\alpha_i x$, $i=1,2$. Notice that $$ |\Phi_\A(m,n)|=\left|\frac{(m-\alpha_1 n)(m-\alpha_2 n)}{\alpha_1-\alpha_2}\right|= \left|\frac{m}{n}-\alpha_1\right|\cdot\left|\frac{m-\alpha_2n}{\alpha_1-\alpha_2} n\right| $$ Let $v_1=(x_1,y_1)$. By Lemma~\ref{lemma1} (without loss of generality we suppose that $v_1$ corresponds to $\delta_1$ in the lemma) the first multiplicative is bounded by $\tilde C/N^2$ for some constant $\tilde C$ that does not depend on $N$. Hence, $$ |\Phi_\A(x_1,y_1)|\le \tilde C \left|\frac{y_1^2}{N^2}\cdot\frac{\frac{x_1}{y_1}-\alpha_2 }{\alpha_1-\alpha_2}\right|\le \tilde C \left|\frac{\frac{x_1}{y_1}-\alpha_2}{\alpha_1-\alpha_2}\right| $$ Finally, the last expression is uniformly bounded. The same holds for $v_2$. Therefore, the set of values of $|\Phi_\A(v_i)|$ is bounded. 
\end{proof} \begin{conjecture} We conjecture that for almost all $N$ the vectors $v_1$ and $v_2$ defining $\A_N$ are in $1$-geometric continued fraction. \end{conjecture} \subsubsection{Technique of calculation of best approximations in the hyperbolic case} In this subsection we show a general technique of calculation of best approximations for an arbitrary MCRS-group $\A$ with eigenspaces $y=\alpha_1 x$ and $y=\alpha_2 x$ for distinct real numbers $\alpha_1$ and $\alpha_2$. \begin{proposition}\label{lemma3} Let $m$ and $n$ be two integers. Suppose $|\alpha_1-\frac{m}{n}|<\varepsilon_3$ $($or $|\alpha_2-\frac{m}{n}|<\varepsilon_3$ respectively$)$, then the following holds: $$ \left|\alpha_1-\frac{m}{n}\right|> \frac{|\alpha_1-\alpha_2|}{|\alpha_1-\alpha_2|+\varepsilon_3}\frac{|\Phi_\A(m,n)|}{n^2} \quad \left( \left|\alpha_2-\frac{m}{n}\right|>\frac{1}{|\alpha_1-\alpha_2|+\varepsilon_3} \frac{|\Phi_\A(m,n)|}{n^2} \right). $$ \end{proposition} \begin{proof} We have $$ \begin{array}{l} \left|\alpha_1-\frac{m}{n}\right|=\frac{1}{n}|m-\alpha_1 n|=\frac{1}{n} \frac{|m-\alpha_1 n|(m-\alpha_2 n)}{m-\alpha_2 n} =\frac{|\Phi_\A(m,n)|}{n^2}\frac{|\alpha_1-\alpha_2|}{|\alpha_1-\alpha_2+(\frac{m}{n}-\alpha_1)|}> \frac{|\alpha_1-\alpha_2|}{|\alpha_1-\alpha_2|+\varepsilon_3}\frac{|\Phi_\A(m,n)|}{n^2}. \end{array} $$ The same holds for the case of the approximations of $\alpha_2$. \end{proof} {\bf Procedure of best approximation calculation}. {\bf 1).} Find best Diophantine approximations of $\alpha_1$ and $\alpha_2$ using continued fractions in the square $N\times N$. Suppose for $\alpha_i$ it is $m_i/n_i$, and the following best approximation is $m'_i/n'_i$. {\bf 2).} Consider now the MCRS-group $\overline\A$ with invariant lines $y=\frac{m_i}{n_i}x$. By Lemma~\ref{lemma2} we get an upper bound for $\rho(\A,\overline\A)$ (where $\varepsilon_2=\max(1/(n_1n'_1),1/(n_2n'_2))$). 
{\bf 3).} Now having the estimate for discrepancy we use Lemma~\ref{lemma1} to get estimates $C_1$ and $C_2$ for $\big|\alpha_1-\frac{p_1}{q_1}\big|$ and $\big|\alpha_2-\frac{p_2}{q_2} \big|$ for the best approximation of $\A$ with rays $y=\frac{p_1}{q_1}x$ and $y=\frac{p_2}{q_2}x$. {\bf 4).} By Proposition~\ref{lemma3} we write an estimate for $\frac{\Phi_\A(p_i,q_i)}{q_i^2}$ for $i=1,2$. {\bf 5).} Finally we compare the discrepancies for all MCRS-groups that satisfies the estimates for $\frac{\Phi_\A(k_i,l_i)}{l_i^2}$ obtained in 4). \begin{example} Consider an MCRS-group containing Fibonacci matrix: $$ \left( \begin{array}{cc} 0 & 1 \\ 1 & 1 \\ \end{array} \right ). $$ Denote by $F_n$ the $n$-th Fibonacci number. Consider any integer $N\ge 100$. {\bf 1).} Consider a positive integer $k$ such that $F_k\le N<F_{k+1}$ and choose an approximation $\overline\A$ with eigenspaces $F_{k-1}y-F_kx=0$ and $F_ky+F_{k-1}x=0$. Then $$ \left|\alpha_1-\frac{F_k}{F_{k-1}}\right|\le 1/(F_{k-1}F_k), \quad \left|\alpha_1+\frac{F_{k-1}}{F_k}\right|\le 1/(F_{k}F_{k+1}) $$ {\bf 2).} So, $\varepsilon_2=1/(F_{k-1}F_k)<1/(55\cdot 89)$. Therefore, $$ \rho(\A_,\A_{\delta_1,\delta_2}) <\frac{\max\Big(2,2\sqrt{5},3+\sqrt{5}/4895\Big)} {5+\frac{2\sqrt{5}}{4895}} \frac{1}{F_{k-1}F_k}<\frac{2\sqrt{5}} {5+\frac{2\sqrt{5}}{4895}}\frac{(89/55)^3}{N^2}<\frac{3.79}{N^2}. $$ {\bf 3).} Hence, by Lemma~\ref{lemma1} we get ($\varepsilon_1<3.79/100^2$): $$ \begin{array}{l} |\delta_1|< \frac{80.35}{N^2} \qquad \hbox{and} \qquad |\delta_2|< \frac{18.97}{N^2}. \end{array} $$ {\bf 4).} The estimates for $\frac{\Phi_\A(p_1,q_1)}{q_1^2}$ and $\frac{\Phi_\A(p_2,q_2)}{q_2^2}$ for the corresponding rays of best approximation are as follows. $$ \frac{|\Phi_\A(m_1,n_1)|}{n_1^2}<\frac{80.65}{N^2}, \quad \frac{|\Phi_\A(m_2,n_2)|}{n_2^2}<\frac{18.99}{N^2}. $$ {\bf 5).} Notice that the number of approximations whose discrepancies we compare in this step is bounded by some constant not depending on $N$. 
We have completed the computations for $N=10^6$, the answer in this case is the matrix with eigenspaces: $F_{29}y-F_{30}x=0$ and $F_{30}y+F_{29}x=0$. We conjecture that for the Fibonacci matrix we always get the best approximation with eigenspaces $F_{k-1}y-F_kx=0$ and $F_ky+F_{k-1}x=0$. \end{example} We conclude this subsection with an example showing that the continued fractions do not always give best approximations. \begin{example}\label{antisail} Consider an operator $A$ with eigenvectors: $$ v_1=(1,2) \qquad \hbox{and} \qquad v_2=(2,3), $$ and the corresponding maximal subgroup $\A$. Then there are four different best approximations of size 1, they have invariant lines defined by the following couples of vectors: $$ \begin{array}{c} \Big(w_1=(1,0), w_2=(1,1)\Big), \quad \Big(w_1=(1,0), w_2=(1,-1)\Big), \\ \Big(w_1=(1,0), w_2=(0,1)\Big), \quad \hbox{and} \quad \Big(w_1=(0,1), w_2=(1,1)\Big). \end{array} $$ (the discrepancy between $\A$ and any of them equals $6$). The continued fraction (or the union of sails) of $A$ contains only four integer points $$ (1,2), \quad (2,3), \quad (-1,-2), \quad \hbox{and} \quad (-2,-3). $$ Therefore the invariant lines of all four best approximations do not contain vectors of the sail of $A$. \end{example} \begin{remark} Actually, for a generic MCRS-group the best approximation of any size $N>0$ is unique. In the previous example we have four best approximations since we are approximating MCRS-group defined by vectors with integer coefficients. \end{remark} \subsection{Non-hyperbolic case} Now we prove similar statements for the complex case. \subsubsection{Lagrange estimates for a special case} In this subsection we prove an analog of Lagrange theorem on the approximation rate for an MCRS-groups that has complex conjugate eigenspaces defined by $y=(\alpha+I\beta) x$ and $y=(\alpha-I\beta) x$ with bounded elements of the continued fractions for $\alpha$ and $\beta$. In particular this includes all complex algebraic MCRS-groups. 
\begin{theorem}\label{Lag2} Let $\alpha$ and $\beta$ be real numbers having infinite continued fractions with bounded elements. Consider an MCRS-group $\A$ with eigenspaces $y=(\alpha+I\beta) x$ and $y=(\alpha-I\beta) x$. Then there exist positive constants $C_1$ and $C_2$ such that for any positive integer $N$ the best approximation $\A_N$ in $\Omega$ satisfies $$ \frac{C_1}{N^2}< \rho(\A,\A_N) < \frac{C_2}{N^2}. $$ \end{theorem} We will start the proof with the following two lemmas. Denote by $\A_{\delta_1, \delta_2}$ the MCRS-group defined by the lines $y=\big((\alpha +\delta_1)\pm I(\beta+\delta_2)\big) x$ for $i=1,2$. \begin{lemma}\label{lemma2_1} Consider a positive real number $\varepsilon_1$ such than $\varepsilon_1<\frac{1}{2(1+|\beta|)}$. Suppose that $\rho(\A,\A_{\delta_1, \delta_2})<\varepsilon_1$ then $$ \begin{array}{l} |\delta_1|< \frac{2|\alpha-\beta|\beta^2}{|\alpha -\beta|-2\varepsilon_1|\beta|(1+|\beta|)}\varepsilon_1 \qquad \hbox{and} \qquad |\delta_2|< \frac{2(1+|\beta|+|\alpha-\beta|)\beta^2}{|\alpha -\beta|-2\varepsilon_1|\beta|(1+|\beta|)}\varepsilon_1. \end{array} $$ \end{lemma} \begin{proof} Consider the absolute values of the coefficients at $y^2$ and at $xy$ for the difference of Markoff-Davenport forms for the MCRS-groups $\A$ and $\A_{\delta_1,\delta_2}$. By the conditions of the lemma these coefficients are less then $\varepsilon_1$: $$ \left| \frac{\delta_2-\delta_1}{2\beta(\beta+\delta_2)}\right|<\varepsilon_1 \quad \hbox{and} \quad \left| \frac{\alpha\delta_2-\beta\delta_1}{2\beta(\beta+\delta_2)}\right|<\varepsilon_1. $$ Hence we have $$ \left|\frac{(\alpha-\beta)\delta_2}{2\beta(\beta+\delta_2)}\right| \le +\left| \frac{\alpha\delta_2-\beta\delta_1}{2\beta(\beta+\delta_2)}\right| +|\beta|\left|\frac{\delta_2-\delta_1}{2\beta(\beta+\delta_2)}\right| <(1+|\beta|)\varepsilon_1. $$ This gives us the estimate for $\delta_2$. 
For $\delta_1$ we have $$ \begin{array}{l} |\delta_1|< 2|\beta|\left||\beta|+\frac{2(1+|\beta|)\beta^2}{|\alpha -\beta|-2\varepsilon_1|\beta|(1+|\beta|)}\varepsilon_1\right| \varepsilon_1+ \frac{2(1+|\beta|)\beta^2}{|\alpha -\beta|-2\varepsilon_1|\beta|(1+|\beta|)}\varepsilon_1 = \frac{2(1+|\beta|+|\alpha-\beta|)\beta^2}{|\alpha -\beta|-2\varepsilon_1|\beta|(1+|\beta|)}\varepsilon_1. \end{array} $$ The proof is completed. \end{proof} \begin{lemma}\label{lemma2_2} Let $\varepsilon_2$ be a positive real number. Suppose $|\delta_1|<\varepsilon_2$ and $|\delta_2|<\varepsilon_2$, then $$ \rho(\A,\A_{\delta_1,\delta_2}) <\frac{\max\Big(2,2(|\alpha|+|\beta|),|\alpha^2{-}\beta^2|+2|\alpha\beta|+2|\beta|\varepsilon_2\Big)} {|\beta|(|\beta|+\varepsilon_2)} \varepsilon_2. $$ \end{lemma} \begin{proof} The statement of lemma follows directly form the estimate of the coefficients for the difference of Markoff-Davenport forms for the MCRS-groups $\A$ and $\A_{\delta_1,\delta_2}$. \end{proof} {\it Proof of Theorem~\ref{Lag2}.} The remaining part of the proof almost completely repeats the end of the proof of Theorem~\ref{Lag}, so we omit it here. \qed \subsubsection{Technique of calculation of best approximations in the hyperbolic case} Here we show a general technique of calculation of best approximations for an arbitrary MCRS-group $\A$ with eigenspaces $y=(\alpha\pm I\beta)x$ for real number $\alpha$ and positive real $\beta$. \begin{proposition}\label{lemma2_3} Let $a$ satisfy $|\alpha+I\beta|<\varepsilon_3$, then the following holds: $$ \left|(\alpha+I\beta)-a\right|> \frac{2\beta|\Phi_\A(1,a)|}{2\beta+\varepsilon_3}. $$ \end{proposition} \begin{proof} We have $ \begin{array}{l} \left|(\alpha+I\beta)-a\right|= \frac{\left|(\alpha+I\beta)-a\right|((\alpha-I\beta)-a)}{(\alpha-I\beta)-a}= \frac{2\beta|\Phi_\A(1,a)|}{|((\alpha+I\beta)-a)-2I\beta|}> \frac{2\beta|\Phi_\A(1,a)|}{2\beta+\varepsilon_3}. \end{array} $ \end{proof} {\bf Procedure of best approximation calculation}. 
{\bf 1).} Find best Diophantine approximations of $\alpha$ and $\beta$ using continued fractions in the square $N\times N$. Suppose for $\alpha$ and $\beta$ it are $m_1/n_1$, and $m_2/n_2$, and the next best approximation are $m'_1/n'_1$, and $m'_2/n'_2$. {\bf 2).} Consider the MCRS-group $\overline\A$ with invariant lines $y=\big(\frac{m_1}{n_1}\pm I\frac{m_2}{n_2}\big)x$. By Lemma~\ref{lemma2_2} we get an upper bound for $\rho(\A,\overline\A)$ (where $\varepsilon_2=\max(1/(n_1n'_1),1/(n_2n'_2))$). {\bf 3).} Now having the estimate on discrepancy we use Lemma~\ref{lemma2_1} to get estimates $C_1$ and $C_2$ for the best approximation of $\A$: $\big|\alpha-\frac{p_1}{q_1}\big|$ and $\big|\beta-\frac{p_2}{q_2}\big|$ respectively. {\bf 4).} By Proposition~\ref{lemma2_3} we write an estimate for $\big|\Phi_\A\big(1,\frac{p_1}{q_1}+I\frac{p_2}{q_2}\big)\big|$. {\bf 5).} Finally we compare the discrepancies for all MCRS-groups that satisfies the estimates obtained in 4). \section{Simultaneous approximations in $\r^3$ and MCRS-group approximations} Theory of simultaneous approximation of a real vector by vectors with rational coefficients can be considered as a special case of MCRS-group approximations similarly to the Diophantine case. In this section we study several examples of simultaneous approximations in frames of MCRS-group approximations. The first example is an eigen-direction of a hyperbolic operator (see in Subsection~3.2) and the second is an eigen-direction of a nonhyperbolic operator (see in Subsection~3.3). \subsection{General construction} Let $[a,b,c]$ be a vector in $\r^3$. Consider the maximal commutative subgroup $\A[a,b,c]$ defined by three vectors $$ (a,b,c), \quad (0,1,I), \quad (0,1,-I). $$ The problem of approximation here is in approximation of the subgroup $\A[a,b,c]$ by $\A[a',b',c']$ for integer vectors $(a',b',c')$. 
For this case we have: $$ \Phi_{\A[a,b,c]}(x,y,z)=I\left( -\frac{b^2+c^2}{2a^2}x^3+\frac{b}{a}x^2y+\frac{c}{a}x^2z-\frac{1}{2} xy^2-\frac{1}{2}xz^2 \right). $$ Therefore, $$ \begin{array}{r} \rho\big(\A[a,b,c],\A[a',b',c']\big)=\min\left(\max\left(\left|\frac{b}{a}-\frac{b'}{a'}\right|, \left|\frac{c}{a}-\frac{c'}{a'}\right|, \left|\frac{b^2+c^2}{2a^2}-\frac{{b'}^2+{c'}^2}{2{a'}^2}\right|\right), \right.\\ \left. \max \left(\left|\frac{b}{a}+\frac{b'}{a'}\right|, \left|\frac{c}{a}+\frac{c'}{a'}\right|, \left|\frac{b^2+c^2}{2a^2}+\frac{{b'}^2+{c'}^2}{2{a'}^2}\right|\right) \right). \end{array} $$ \subsection{A ray of non-hyperbolic operator} Consider the non-hyperbolic algebraic ope\-rator $$ B= \left( \begin{array}{ccc} 0&1&1\\ 0&0&1\\ 1&0&0\\ \end{array} \right). $$ This operator is in some sense the simplest non-hyperbolic operator we can have (see for more information~\cite{SL3Z}). Denote the eigenvalues of $E_1$ by $\xi_1$, $\xi_2$, and $\xi_3$ such that $\xi_1$ is real, $\xi_2$ and $\xi_3$ are complex conjugate. Notice also that $$ |\xi_1|>|\xi_2|=|\xi_3|. $$ We approximate the eigenspace corresponding to $\xi_1$. Let $v_{\xi_1}$ be the vector in this eigenspace having the first coordinate equal to 1. Note that $$ \xi_1\approx 1.3247179573 \quad \hbox{and} \quad v_{\xi_1}\approx(1, .5698402911, .7548776662). $$ The set of best approximations $\A_N$ with $N\le 10^6$ contains of 48 elements. These elements are of type $B^{n_i}(1,0,0)$ where $n_1=4$, and for $2\le i \le 48$ we have $n_i=i+4$. We conjecture that all the set of best approximations coincide with the set of points $B^{k}(1,0,0)$ where $k=4$, or $k\ge 6$, the approximation rate in this case is $CN^{-3/2}$. \subsection{Two-dimensional golden ratio} Let us consider an algebraic operator $$ G= \left( \begin{array}{ccc} 3&2&1\\ 2&2&1\\ 1&1&1\\ \end{array} \right). $$ This operator is usually called {\it two-dimensional golden ratio}. 
It is the simplest hyperbolic operator from many points of view, his two-dimensional continued fraction in the sense of Klein was studied in details by E.~I.~Korkina in~\cite{Kor2} and~\cite{Kor3}. The group of all integer operators of $GL(3,\z)$ commuting with $G$ is generated by the following two operators: $$ E_1= \left( \begin{array}{ccc} 1&1&1\\ 1&1&0\\ 1&0&0\\ \end{array} \right) \quad \hbox{and} \quad E_2= \left( \begin{array}{ccc} 0&1&1\\ 1&0&0\\ 1&0&-1\\ \end{array} \right). $$ Note that $G=E_1^2$ and $E_2=(E_1-Id)^{-1}$, where $Id$ is an identity operator. Operator $E_1$ is a {\it three-dimensional Fibonacci operator}. Denote the eigenvalues of $E_1$ by $\xi_1$, $\xi_2$, and $\xi_3$ in such a way that the following holds: $$ |\xi_1|>|\xi_2|>|\xi_3|. $$ Let us approximate the eigenspace corresponding to $\xi_1$. Denote by $v_{\xi_1}$ the vector of this eigenspace having the last coordinate equal to 1. Note that $$ \xi_1\approx 2.2469796037 \quad \hbox{and} \quad v_{\xi_1}\approx(2.2469796037, 1.8019377358, 1). $$ The set of best approximations $\A_N$ with $N\le 10^6$ contains 40 elements. These elements are in the set $$ \Big\{E_1^mE_2^n(1,0,0)\Big|m,n\in\z\Big\}. $$ All the points of the sequence can be found from the next table. In the column $c$ we get $m=m_c$, $n=n_c$ for the approximation $E_1^{m_c}E_2^{n_c}(1,0,0)$. 
\begin{center} \begin{tabular}{|l||c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|} \hline {\bf $i$} & 1 & 2 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15 & 16 & 17 & 18 & 19 & 20 & 21 & 22 \\ \hline \hline {\bf m}& 1 & 2 & 3 & 3 & 4 & 4 & 5 & 5 & 6 & 6 & 6 & 7 & 7 & 8 & 8 & 9 & 9 & 10 & 10 & 11 & 11 \\ \hline {\bf n}& 1 & 1 & 2 & 1 & 2 & 1 & 3 & 2 & 3 & 2 & 1 & 3 & 2 & 3 & 2 & 4 & 3 & 4 & 3 & 5 & 4 \\ \hline \end{tabular} \begin{tabular}{|l||c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|} \hline {\bf $i$} & 23 & 24 & 25 & 26 & 27 & 28 & 29 & 30 & 31 & 32 & 33 & 34 & 35 & 36 & 37 & 38 & 39 & 40 & 41 \\ \hline \hline {\bf m}& 11 & 12 & 12 & 13 & 13 & 14 & 14 & 15 & 15 & 15 & 16 & 16 & 17 & 17 & 18 & 18 & 19 & 19 & 19 \\ \hline {\bf n} & 3 & 4 & 3 & 5 & 4 & 5 & 4 & 6 & 5 & 4 & 5 & 4 & 6 & 5 & 6 & 5 & 7 & 6 & 5 \\ \hline \end{tabular} \end{center} In addition to this table we have $\A_3=(3,2,1)$ as best approximation. We conjecture that all the set of best approximations except $\A_3$ is contained in the set of all points of type $E_1^{m}E_2^{n}(1,0,0)$, the approximation rate in this case is $CN^{-3/2}$. \end{document}
\begin{document} \title{Fault-Tolerant Modular Reconstruction of Rational Numbers} \begin{abstract} In this paper we present two efficient methods for reconstructing a rational number from several residue-modulus pairs, some of which may be incorrect. One method is a natural generalization of that presented by Wang, Guy and Davenport in~\cite{WGD1982} (for reconstructing a rational number from \textit{correct} modular images), and also of an algorithm presented by Abbott in~\cite{Abb1991} for reconstructing an \textit{integer} value from several residue-modulus pairs, some of which may be incorrect. We compare our heuristic method with that of B\"ohm, Decker, Fieker and Pfister~\cite{BDFP2012}. \end{abstract} \noindent \textbf{Keywords:} fault-tolerant rational reconstruction, chinese remaindering \section{Introduction} The problem of intermediate expression swell is well-known in computer algebra, but has been greatly mitigated in many cases by the use of modular methods. There are two principal techniques: those based on the \textit{Chinese Remainder Theorem}, and those based on \textit{Hensel's Lemma}. In this paper we consider only the former approach. Initially modular methods were used in cases where integer values were sought (\eg~for computing GCDs of polynomials with integer coefficients); the answer was obtained by a direct application of the Chinese Remainder Theorem. Then in 1981 Wang presented a method allowing the reconstruction of \textit{rational numbers}~\cite{Wan1981} from their modular images: the original context was the computation of partial fraction decompositions. Wang's idea was justified in a later paper~\cite{WGD1982} which isolated the rational number reconstruction algorithm from the earlier paper. More recently, Collins and Encarnaci\'on~\cite{CoEn1994} corrected a mistake in Wang's paper, and described how to obtain an especially efficient implementation. 
Wang's method presupposes that all residue-modulus pairs are correct; consequently, the moduli used must all be coprime to the denominator of the rational to be reconstructed. A well-known problem of modular methods is that of \textbf{bad reduction}: this means that the modular result is not correct for some reason. Sometimes it will be obvious when the modular result is bad (and these can be discarded), but other times it can be hard to tell. The \textit{Continued Fraction Method} for the fault-tolerant reconstruction of \textit{integer values} when some of the modular images may be bad was presented in~\cite{Abb1991}. In this paper we consider the problem of reconstructing a rational number from its modular images allowing for some of the modular images to be erroneous. We combine the corrected version of Wang's algorithm with the \textit{Continued Fraction Method}. Our resulting new \textit{FTRR} Algorithm (see section~\ref{FTRR}) reconstructs rational numbers from several modular images allowing some of them to be bad. The \textit{FTRR} Algorithm contains both old methods as special cases: when it is known that all residues are correct we obtain Wang's method (as corrected in~\cite{CoEn1994}), and if the denominator is restricted to being~$1$ then we obtain the original \textit{Continued Fraction Method}. Finally, we note that the correction highlighted in~\cite{CoEn1994} is a natural and integral part of our method. Our \textit{FTRR} Algorithm gives a strong guarantee on its result: if a suitable rational exists then it is unique and the algorithm will find it; conversely if no valid rational exists then the algorithm says so. However, the uniqueness depends on bounds which must be given in input, including an upper bound for the number of incorrect residues. 
Since this information is often not known in advance, we present also the \textit{HRR} Algorithm (see section~\ref{HRR})~---~it is a heuristic reconstruction technique based on the sample principles as \textit{FTRR}. This heuristic variant is much simpler to apply since it requires only the residue-modulus pairs as input. It will find the correct rational provided the correct modular images sufficiently outnumber the incorrect ones; if this is not the case then \textit{HRR} will usually return an \textit{indication of failure} but it may sometimes reconstruct an incorrect rational. In section~\ref{comparison} we briefly compare our \textit{HRR} algorithm with the \textit{Error Tolerant Lifting} Algorithm presented in~\cite{BDFP2012} which is based on lattice reduction, and which serves much the same purpose as \textit{HRR}. We mention also some \textit{combinatorial} reconstruction schemes (presented in~\cite{Abb1991}) which can be readily adapted to perform fault tolerant rational reconstruction. \subsection{Envisaged Setting} \label{envisaged-setting} We envisage the computation of one or more rational numbers (\eg~coefficients of a polynomial) by \textit{chinese remainder style} modular computations where not all cases of bad reduction can be detected. If we know upper bounds for numerator and denominator, and also for the number of bad residue-modulus pairs then we can apply the \textit{FTRR} algorithm of section~\ref{FTRR}. Otherwise we apply the \textit{HRR} algorithm of section~\ref{HRR}. Naturally, in either case we require that the bad residue-modulus pairs are not too common. When using \textit{FTRR} we use the sufficient precondition (inequality~(\ref{FTRR-precondition})) to decide whether more residue-modulus pairs are needed; when we have enough pairs we simply apply the reconstruction algorithm to obtain the answer. When using \textit{HRR}, we envisage that the computation is organized as follows. 
Many modular computations are made iteratively, and every so often an attempt is made to reconstruct the sought after rational number(s). If the attempt fails, further iterations are made. If the attempt succeeds then a check is made of the ``convincing correctness'' of the reconstructed rational (see step~(4) of Algorithm \textit{HRR}); if the rational is not ``convincing'' then again further iterations are made. The perfect reconstruction algorithm would require only the minimum number of residue-modulus pairs (thus not wasting ``redundant'' iterations), and never reconstructs an incorrect rational (thus not wasting time checking ``false positives''). Our \textit{HRR} algorithm comes close to having both characteristics. \section{Notation and Assumptions} We are trying to reconstruct a rational number, $p/q$, from many residue-modulus pairs: $x_i \bmod m_i$ for $i=1,2,\ldots,s$. For each index~$i$ satisfying $q x_i \equiv p \bmod{m_i}$ we say that $x_i$ is a \textbf{good residue} and $m_i$ is a \textbf{good modulus}; otherwise, if the equivalence does not hold, we call them a \textbf{bad residue} and a \textbf{bad modulus}. For simplicity, we assume that the moduli $m_i$ are pairwise coprime: this assumption should be valid in almost all applications. For clarity of presentation, it will be convenient to suppose that the moduli are labelled in increasing order so that $m_1 < m_2 < \cdots < m_s$. For our algorithms to work well it is best if the moduli are all of roughly similar size; otherwise, in an extreme situation where there is one modulus which is larger than the product of all the other moduli, reconstruction cannot succeed if that one large modulus is bad. We say that a rational $p/q$ is \textbf{normalized} if $q > 0$ and $\gcd(p,q)=1$. \subsection{Continued Fractions} Here we recall a few facts about continued fractions; proofs and further properties may be found in~\cite{HW1979}, for instance. 
Let $x \in {\mathbb R}$; then~$x$ has a unique representation as a \textbf{continued fraction}: \begin{equation} x = [ a_0, a_1, a_2, \ldots ] = a_0 + \frac{1}{a_1+\frac{1}{a_2+\frac{1}{\ldots}}} \end{equation} where all $a_j \in {\mathbb Z}$; for $j > 0$ the integers $a_j$ are positive, and are called \textbf{partial quotients}. If $x \in {\mathbb Q}$ then there are only finitely many partial quotients; otherwise there are infinitely many. We define the $k$-th \textbf{continued fraction approximant to~$x$} to be the rational $r_k/s_k$ whose continued fraction is $[a_0, a_1, \ldots, a_k]$. These \textit{approximants} give ever closer approximations to $x$, that is the sequence $|x - r_k/s_k|$ is strictly decreasing. We also have that: \begin{equation} \label{cfa-num-den-growth} \begin{array}{l} \phantom{\sum} a_k \, r_{k-1} \quad \le \quad r_k \quad < \quad (a_k+1)\, r_{k-1} \\ \phantom{\sum} a_k \, s_{k-1} \quad \le \quad s_k \quad < \quad (a_k+1)\, s_{k-1} \end{array} \end{equation} We recall here Theorem~184 from~\cite{HW1979} which will play a crucial role. \begin{theorem} Let $x \in {\mathbb R}$ and $\frac{r}{s} \in {\mathbb Q}$. If $|x - \frac{r}{s}| < \frac{1}{2s^2}$ then $\frac{r}{s}$ appears as a continued fraction approximant to~$x$. \end{theorem} \section{Main Proposition} Our main proposition provides the key to reconstructing a rational from a \textit{single} residue-modulus pair, $X \bmod M$. \begin{theorem} \label{main-thm} Let $X \bmod M$ be a residue-modulus pair; thus $X,M \in {\mathbb Z}$ with $M \ge 2$. Let $P, Q \in {\mathbb N}$ be positive bounds for numerator and denominator respectively. Suppose there exists a factorization $M = M_{\mathrm{good}} \, M_{\mathrm{bad}} \in {\mathbb N}$ such that $2 P Q M_{\mathrm{bad}}^2 < M$, and suppose also that there exists a rational $p/q \in {\mathbb Q}$ with $|p| \le P$ and $1 \le q \le Q$ which satisfies $p \equiv qX \bmod M_{\mathrm{good}}$. 
Then $\frac{p}{q}$ is unique, and is given by $$\frac{p}{q} = X - M \cdot \frac{R}{S}$$ where $\frac{R}{S}$ is the last continued fraction approximant to $\frac{X}{M}$ with denominator $\le Q M_{\mathrm{bad}}$; moreover, the next approximant has denominator $> M_{\mathrm{good}}/2 |p|$. \end{theorem} \begin{proof} By hypothesis we have $p = qX - k M_{\mathrm{good}}$ for some $k \in {\mathbb Z}$. Dividing by $qM$ we obtain: \begin{equation}\label{eq1} \frac{p}{qM} = \frac{X}{M} - \frac{k}{q M_{\mathrm{bad}}} \end{equation} We shall write $\frac{R}{S}$ for the normalized form of $\frac{k}{q M_{\mathrm{bad}}}$; thus $\gcd(R,S)=1$ and $0 < S \le q M_{\mathrm{bad}} \le Q M_{\mathrm{bad}}$. The condition $2 P Q M_{\mathrm{bad}}^2 < M$ implies that $|p| < \frac{M_{\mathrm{good}}}{2 Q M_{\mathrm{bad}}}$. We use this to estimate how well $\frac{R}{S}$ approximates $\frac{X}{M}$: $$ \left| \frac{X}{M} - \frac{R}{S} \right| \quad=\quad \frac{|p|}{qM} \quad<\quad \frac{1}{2 q Q M_{\mathrm{bad}}^2} \quad\le\quad \frac{1}{2S^2} $$ Applying Theorem~184 from~\cite{HW1979} we see that $\frac{R}{S}$ is indeed one of the continued fraction approximants for $\frac{X}{M}$. Next we show that $\frac{R}{S}$ is the \textit{last} approximant with denominator $\le Q M_{\mathrm{bad}}$. We start by showing that if $\frac{r}{s}$ is any rational number with $1 \le s \le \frac{M_{\mathrm{good}}}{2 |p|}$ and different from $\frac{R}{S}$ then $\left| \frac{X}{M} - \frac{r}{s} \right| \ge \left| \frac{X}{M} - \frac{R}{S} \right|$. First note that: $$ \left| \frac{r}{s} - \frac{R}{S} \right| \quad\ge\quad \frac{1}{sS} \quad\ge\quad \frac{2 |p|}{M_{\mathrm{good}}} \cdot \frac{1}{q M_{\mathrm{bad}}} \quad=\quad \frac{2 |p|}{q M} $$ Whence $\left| \frac{X}{M} - \frac{r}{s} \right| \ge \left| \frac{r}{s} - \frac{R}{S} \right| - \left| \frac{R}{S} - \frac{X}{M} \right| \ge \frac{|p|}{q M} = \left| \frac{X}{M} - \frac{R}{S} \right|$. 
Therefore, any approximant coming after $\frac{R}{S}$, and hence closer to $\frac{X}{M}$, must have denominator $> M_{\mathrm{good}}/2 |p|$. The claim that $\frac{p}{q} = X-M \cdot \frac{R}{S}$ follows immediately from equation~(\ref{eq1}). \end{proof} \begin{corollary} \label{largest-partial-quotient} Let $j$ be the index of the approximant $\frac{R}{S}$ in Theorem~\ref{main-thm}. Let $M_{\mathrm{good}}^* = \gcd(p - qX, M)$, and $M_{\mathrm{bad}}^* = \frac{M}{M_{\mathrm{good}}^*}$. Let $Q_{\mathrm{max}}$ be the greatest integer strictly less than $\frac{M_{\mathrm{good}}^*}{2 |p| M_{\mathrm{bad}}^*}$. Then the $(j+1)$-th partial quotient is at least $\frac{Q_{\mathrm{max}}}{q}-1$. If $M_{\mathrm{good}}^* \ge 2 |p| q M_{\mathrm{bad}}^* \bigl(\max(2 |p|,q) M_{\mathrm{bad}}^* + 2 \bigr)$ then the $(j+1)$-th partial quotient is the largest of all. \end{corollary} \begin{proof} Observe that $M_{\mathrm{good}}^* \ge M_{\mathrm{good}}$ and $M_{\mathrm{bad}}^* \le M_{\mathrm{bad}}$ regardless of the original factorization $M = M_{\mathrm{good}} M_{\mathrm{bad}}$ used in the theorem. By applying the theorem with $P = |p|$ and $Q = q$, and using the factorization $M=M_{\mathrm{good}}^* M_{\mathrm{bad}}^*$ we see that $S \le q M_{\mathrm{bad}}^*$; furthermore the $(j+1)$-th approximant has denominator greater than $M_{\mathrm{good}}^*/2 |p| > Q_{\mathrm{max}} M_{\mathrm{bad}}^*$. Thus by the final inequality of formula~(\ref{cfa-num-den-growth}) the $(j+1)$-th partial quotient must be at least $\frac{Q_{\mathrm{max}}}{q} -1$. Since~$S$, the denominator of the $j$-th approximant, is at most $q M_{\mathrm{bad}}^*$ no partial quotient with index less than or equal to~$j$ can exceed $q M_{\mathrm{bad}}^*$. Also, since the denominator of the $(j+1)$-th approximant is greater than $M_{\mathrm{good}}^*/2 |p|$ and the denominator of the final approximant is at most~$M$, every partial quotient with index greater than $j+1$ is less than $2 |p| M_{\mathrm{bad}}^*$. 
The hypothesis relating $M_{\mathrm{good}}^*$ to $M_{\mathrm{bad}}^*$ thus guarantees that the $(j+1)$-th partial quotient is the largest. \end{proof} \begin{example} Let $X=7213578109$ and $M=101 \times 103 \times 105 \times 107 \times 109$. Let $P=Q=100$. By magic we know that $M_{\mathrm{bad}} = 101$, so we seek the last approximant to $\frac{X}{M}$ with denominator at most $Q M_{\mathrm{bad}} = 10100$. It is the $10$-th approximant and has value $\frac{R}{S} = 2116/3737$. Hence the candidate rational is $\frac{p}{q} = X - M \cdot \frac{R}{S} = \frac{13}{37}$ which does indeed satisfy the numerator and denominator bounds. The next approximant has denominator $9701939$, and as predicted by the theorem this is greater than $M_{\mathrm{good}}/2|p| \approx 4851359$. The next partial quotient is $2596 > \frac{Q_{\mathrm{max}}}{q} -1 \approx 1297$ as predicted by the corollary. \end{example} \section{The Fault Tolerant Rational Reconstruction Algorithm} \label{FTRR} We present our first algorithm for reconstructing rational numbers based on Theorem~\ref{main-thm}. The algorithm expects as inputs: \begin{itemize} \setlength{\itemsep}{-3pt} \item a set of residue-modulus pairs $\{ x_i \bmod m_i : i = 1,\ldots,s \}$, \item upper bounds $P$ (for the numerator), and $Q$ (for the denominator) of the rational to be reconstructed, \item an upper bound $e$ for the number of bad residue-modulus pairs. \end{itemize} We recall that the moduli $m_i$ are coprime, and are in increasing order so that $m_1 < m_2 < \cdots < m_s$. We define $M_{\mathrm{max}} = m_{s-e+1} m_{s-e+2} \cdots m_s$, the product of the~$e$ largest moduli; this implies that $M_{\mathrm{bad}} \le M_{\mathrm{max}}$ and $M_{\mathrm{good}} \ge m_1 m_2 \cdots m_{s-e}$.
Thus to be able to apply Theorem~\ref{main-thm} we require that \begin{equation} \label{FTRR-precondition} M=m_1 m_2 \cdots m_s > 2 P Q M_{\mathrm{max}}^2 \end{equation} Comparing this with the condition given in~\cite{WGD1982} we see that an extra factor of $M_{\mathrm{max}}^2$ appears: this is to allow for a loss of information ``up to $M_{\mathrm{max}}$'', and to allow for an equivalent amount of redundancy requisite for proper reconstruction. If the denominator bound $Q = 1$ then the precondition~(\ref{FTRR-precondition}) simplifies to that for the \textit{Continued Fraction Method}~\cite{Abb1991}. \subsection{The FTRR Algorithm} The main loop of this algorithm is quite similar to that in~\cite{WGD1982}: it just runs through the continued fraction approximants for $X/M$, and selects the last one with ``small denominator''; there is a simple final computation to produce the answer. \begin{description} \setlength{\itemsep}{0pt} \item [1] Input $e$, $P$, $Q$, and $\{ x_i \bmod m_i : i=1,\ldots,s \}$ \item [2] If $x_i \equiv 0 \bmod m_i$ for at least $s-e$ indices~$i$ then return 0. \item [3] Set $M = \prod_i m_i$. Compute integer $X$ satisfying $X \equiv x_i \bmod m_i$ for each~$i$ (via Chinese remaindering). \item [4] Compute $M_{\mathrm{max}} = m_{s-e+1} m_{s-e+2} \cdots m_s$. \item [5] If $\gcd(X,M) > P M_{\mathrm{max}}$ then return \textsc{failure}. \item [6] Put $u = (1, 0, M) \in {\mathbb Z}^3$ and $v = (0, 1, X) \in {\mathbb Z}^3$. \item [7] While $|v_2| \le Q M_{\mathrm{max}}$ do \item [7.1] $q = \lfloor u_3/v_3 \rfloor$ \item [7.2] $u = u-qv$; swap $u \longleftrightarrow v$ \item [8] Set $r = X+M \cdot \frac{u_1}{u_2}$ as a normalized rational. \item [9] Check whether $r$ is a valid answer:\\ \ie~$|\mathop{\mathrm{num}}\nolimits(r)| \le P$ and $\mathop{\mathrm{den}}\nolimits(r) \le Q$ and at most~$e$ bad moduli. \item [10] If $r$ is valid, return $r$; otherwise return \textsc{failure}. 
\end{description} \noindent \textbf{Note} that in the algorithm the successive values of $-\frac{u_1}{u_2}$ at the end of each iteration around the main loop are just the continued fraction approximants to $X/M$. \begin{example} For some inputs to algorithm \textit{FTRR} there is no valid answer. If the input parameters are $e=0$, $P=Q=1$ and $x_1=2$ with modulus $m_1 = 5$ then with the given bounds the only possible valid answers are $\{-1,0,1\}$ but $2 \bmod 5$ does not correspond to any of these~---~the check in step~(9) detects this. \end{example} \subsection{Correctness of FTRR Algorithm} We show that \textit{FTRR} finds the right answer if it exists, and otherwise it produces \textsc{failure}. We first observe that if the correct result is~$0$ and at most~$e$ residue-modulus pairs are faulty then step~(2) detects this, and rightly returns~$0$. We may henceforth assume that the correct answer, if it exists, is a non-zero rational $\frac{p}{q} \in {\mathbb Q}$ with $|p| \le P$ and $1 \le q \le Q$. \begin{lemma} If there is a valid non-zero solution $p/q$ then $\gcd(X,M) \le P M_{\mathrm{max}}$. \end{lemma} \begin{proof} As the $m_i$ are coprime $\gcd(X,M) = \prod_i \gcd(x_i,m_i)$. If the modulus $m_i$ is good then $\gcd(x_i,m_i) \,|\, p$; conversely, if $\gcd(x_i,m_i) \nmid p$ then $m_i$ is a bad modulus. Hence $\prod_{m_i \hbox{\scriptsize good}} \gcd(x_i,m_i) \,|\, p$; while $\prod_{m_i \hbox{\scriptsize bad}} \gcd(x_i,m_i) \le \prod_{m_i \hbox{\scriptsize bad}} m_i \le M_{\mathrm{max}}$. It is now immediate that $\gcd(X,M) \le P M_{\mathrm{max}}$. \end{proof} From the lemma we deduce that the check in step~(5) eliminates only $(X,M)$ pairs which do not correspond to a valid answer. We also observe that for all $(X,M)$ pairs which pass the check in step~(5) the denominator of the normalized form of $X/M$ is at least $2 Q M_{\mathrm{max}}$, so the loop exit condition in step~(7) will eventually trigger.
The values~$X$ and~$M$ computed in step~(3) are precisely the corresponding values in the statement of Theorem~\ref{main-thm}. However, we do not know the correct factorization $M = M_{\mathrm{good}} M_{\mathrm{bad}}$; but since there are at most~$e$ bad residue-modulus pairs we do know that $M_{\mathrm{bad}} \le M_{\mathrm{max}}$, and this inequality combined with the requirement~(\ref{FTRR-precondition}) together imply that $2 P Q M_{\mathrm{bad}}^2 < M$ so we may apply the proposition. Thus the algorithm simply has to find the last continued fraction approximant $\frac{R}{S}$ with denominator not exceeding $Q M_{\mathrm{max}}$, which is precisely what the main loop does: at the end of each iteration $-\frac{u_1}{u_2}$ and $-\frac{v_1}{v_2}$ are successive approximants to $\frac{X}{M}$, and the loop exits when $|v_2| > Q M_{\mathrm{max}}$. So when execution reaches step~(8), the fraction $-\frac{u_1}{u_2}$ is precisely the approximant $\frac{R}{S}$ of the proposition. Thus step~(8) computes the candidate answer in $r$, and step~(9) checks that the numerator and denominator lie below the bounds~$P$ and~$Q$, and that there are no more than~$e$ bad moduli. If the checks pass, the result is valid and is returned; otherwise the algorithm reports \textsc{failure}. \subsection{Which Residues were Faulty?} \label{identify-bad-moduli} Assume the algorithm produced a normalized rational $p/q$, and we want to determine which moduli (if any) were faulty. We could simply check which images of $p/q$ modulo each $m_i$ are correct. However, there is another, more direct way of identifying the bad moduli: we show that the bad $m_i$ are exactly those which have a common factor with~$S$, that is the final value of $u_2$. If $m_i$ is a good modulus then we have $\gcd(m_i, q)=1$ because otherwise if the gcd were greater than~$1$ then $p \equiv q x_i \bmod m_i$ implies that the gcd divides~$p$ too, contradicting the assumption that~$p$ and~$q$ are coprime. 
Multiplying equation~(\ref{eq1}) from the proof of Theorem~\ref{main-thm} by $q M$ we obtain $p = q X - qM \cdot \frac{R}{S}$ whence $qM \cdot \frac{R}{S}$ is an integer. By definition of a bad modulus $m_i$ we must have $qM\cdot \frac{R}{S} \not\equiv 0 \bmod m_i$. Since $m_i \,|\, M$, we must have $\gcd(m_i, S) > 1$. \section{The Heuristic Algorithm} \label{HRR} The main problem with the \textit{FTRR} Algorithm is that we do not generally know good values for the input bounds $P,Q$ and~$e$. In this \textit{heuristic} variant the only inputs are the residue-modulus pairs; the result is either a rational number or an indication of \textsc{failure}. The algorithm is heuristic in that it may (rarely) produce an incorrect result, though if sufficiently many residue-pairs are input (with fewer than $\frac{1}{3}$ of them being bad) then the result will be correct. \subsection{Algorithm HRR: Heuristic Rational Reconstruction} \begin{description} \item [1] Input $x_i \bmod m_i$ for $i=1,\ldots,s$. Set $A_{\mathrm{crit}} = 10^{6}$ (see note below). \item [2] Put $M = \prod_i m_i$. Compute $X \in {\mathbb Z}$ such that $|X| < M$ and $X \equiv x_i \bmod m_i$ via Chinese remaindering. \item [3] If $\gcd(X,M)^2 > A_{\mathrm{crit}} M$ then return $0$. \item [4] Let $A_{\mathrm{max}}$ be the largest partial quotient in the continued fraction of $X/M$.\\ If $A_{\mathrm{max}} < A_{\mathrm{crit}}$ then return \textsc{failure}. \item [5] Put $u = (1, 0, M) \in {\mathbb Z}^3$ and $v = (0, 1, X) \in {\mathbb Z}^3$, and set $q=0$. \item [6] While $q \not= A_{\mathrm{max}}$ do \item [6.1] $q = \lfloor u_3/v_3 \rfloor$ \item [6.2] $u = u-qv$; swap $u \longleftrightarrow v$ \item [7] Return $N/D$ the normalized form of $X + M u_1/u_2$; we could also return $M_{\mathrm{bad}} = \gcd(M,u_2)$. \end{description} The idea behind the algorithm is simply to exploit Corollary~\ref{largest-partial-quotient} algorithmically.
This corollary tells us that, provided $M_{\mathrm{good}}$ is large enough relative to $M_{\mathrm{bad}}$, we can reconstruct the correct rational from the last approximant before the largest partial quotient. Moreover, if the proportion of residue-modulus pairs which are bad is less than $\frac{1}{3}$ then $M_{\mathrm{good}}$ will eventually become large enough (provided the moduli are all roughly the same size). Since zero requires special handling, there is a special check in step~(3) for this case. The heuristic will produce zero if ``significantly more than half of the residues'' are zero~---~strictly this is true only if all the moduli are prime and of about the same magnitude. \subsubsection*{The role of $A_{\mathrm{crit}}$} To avoid producing too many \textit{false positives} we demand that the largest partial quotient be greater than a certain threshold, namely $A_{\mathrm{crit}}$. The greater the threshold, the less likely we will get a false positive; but too great a value will delay the final recognition of the correct value. The suggested value $A_{\mathrm{crit}} = 10^6$ worked well in our trials. \subsubsection*{Alternative criterion for avoiding false positives} Our implementation in CoCoALib~\cite{CoCoALib} actually uses a slightly different ``convincingness criterion'' in step~(4). Let $A_{\mathrm{max}}$ be the largest partial quotient, and $A_{\mathrm{next}}$ the second largest. Our alternative criterion is to report \textsc{failure} if $A_{\mathrm{max}}/A_{\mathrm{next}}$ is smaller than a given threshold~---~in our trials a threshold value of $4096$ worked well, but our implementation also lets the user specify a different threshold. \subsubsection{Complexity of HRR} Under the natural assumption that each residue satisfies $| x_i | \le m_i$, we see that the overall complexity of algorithm \textit{HRR} is $O \bigl( (\log M)^2 \bigr)$, the same as for Euclid's algorithm.
Indeed the chinese remaindering in step~(2) can be done with a modular inversion (via Euclid's algorithm) and two products for each modulus. The computation of the partial quotients in step~(4) is Euclid's algorithm once again. And the main loop in step~(6) is just the extended Euclidean algorithm. We note that the overall computational cost depends on how often \textit{HRR} is called in the envisaged lifting loop (see subsection~\ref{envisaged-setting}). Assuming that the moduli chosen are all about the same size, a reasonable compromise approach is a ``geometrical strategy'' where \textit{HRR} is called whenever the number of main iterations reaches the next value in a geometrical progression. This compromise avoids excessive overlifting and also avoids calling \textit{HRR} prematurely too often. The overall cost of \textit{HRR} with such a strategy remains $O \bigl( (\log M)^2 \bigr)$ where~$M$ here denotes the combined modulus in the final, successful call to \textit{HRR}. In practice, if the cost of calling \textit{HRR} is low compared to the cost of one modular computation in the main loop then it makes sense to call \textit{HRR} more frequently. The geometrical strategy should begin only when (if ever) the cost of a call to \textit{HRR} is no longer relatively insignificant. \subsection{Simultaneous Rational Reconstruction} In~\cite{BS2011} the authors presented an interesting algorithm for the simultaneous reconstruction of several rational numbers having a ``small common denominator''; moreover, in certain cases the algorithm would require a remarkably low combined modulus~---~smaller than the product of numerator and denominator of some of the reconstructed rationals. Nevertheless, it is not obvious how that algorithm could be modified to handle bad moduli. The case of simultaneous reconstruction arises, for instance, when the final result is a vector or polynomial.
While each component of the vector or each coefficient of the polynomial could be reconstructed separately, we can do slightly better: for example, we normally expect to find the same bad moduli for each component/coefficient, and it often happens that there is a ``small common denominator''. We outline how \textit{HRR} can be used to reconstruct several rationals simultaneously; of course, \textit{HRR} can be replaced by another ``single rational'' reconstruction algorithm. For simplicity we assume that the Chinese remaindering has already been done, so we have a single common modulus~$M$ and several residues $X_1, \ldots, X_k$ each one corresponding to a rational number to be reconstructed. \begin{description} \setlength{\itemsep}{0pt} \item [1] Input $X_1, \ldots, X_k \in {\mathbb Z}$ and the common modulus $M \in {\mathbb Z}$ with $M>2$. \item [2] Set $D=1$; this will be our common denominator. \item [3] For $i=1,\ldots,k$ do \item [3.1] Apply \textit{HRR} to $D\,X_i$ and~$M$; if this fails, return \textsc{failure}. \item [3.2] Let $M_{\mathrm{bad}}$ be the bad modulus factor; replace $M = M/M_{\mathrm{bad}}$. \item [3.3] Let $R/S$ be the reconstructed rational, and set $d = \gcd(R,D)$. \item [3.4] Set $q_i = \frac{R/d}{SD/d}$. \item [3.5] Set $D = SD$, the new common denominator. \item [4] Return $q_1,\ldots,q_k$. \end{description} \noindent Notes: \begin{itemize} \setlength{\itemsep}{0pt} \item Step~(3.2) is useful only if the bad moduli for each coefficient are essentially the same; if this is not the case, it can be skipped. \item The order of the $X_i$ is potentially important; if some of the coefficients are expected to be simpler than others then the simpler ones should appear at the start~---~\eg~it often happens that the coefficients of the highest and lowest degree terms in a polynomial are simpler than the ``central'' coefficients.
\item Though not strictly necessary, it is probably worth reducing the $X_i$ modulo the updated value of $M$ in step~(3.2). \item In practice it may be useful to return the bad modulus factors found (even in the case of \textsc{failure}). \end{itemize} \begin{example} Let $M=12739669845=101 \times 103 \times 105 \times 107 \times 109$, and let $X_1 = -5790759020$, $X_2 = -2410207808$ and $X_3 = -9484324233$. We start with the common denominator $D=1$. On iteration $i=1$, we compute $\mathop{\mathrm{HRR}}\nolimits(X_1,M) = 5/11$ with no bad moduli. So we set $q_1=5/11$ and update $D = 11$. On iteration $i=2$, we compute $\mathop{\mathrm{HRR}}\nolimits(D\, X_2, M) = 209/37$ with no bad moduli. So we set $q_2 = \frac{209/37}{D} = 19/37$, and update $D = 407$. On iteration $i=3$, we compute $\mathop{\mathrm{HRR}}\nolimits(D \, X_3, M) = 204$ with no bad moduli. So we set $q_3 = \frac{204}{D} = 204/407$; there is no need to update $D$ since \textit{HRR} produced an integer. The final answer is $(q_1, q_2, q_3) = (5/11, 19/37, 204/407)$. Note that attempting to compute $q_3$ directly by calling $\mathop{\mathrm{HRR}}\nolimits(X_3,M)$ fails; indeed, multiplying by the common denominator when we computed $\mathop{\mathrm{HRR}}\nolimits(D \, X_3, M)$ has let us reconstruct a more complex rational than we could obtain by direct reconstruction. This also highlights the fact that the success of the reconstruction can depend on the order of the residues $X_i$; had $X_3$ been the first residue the algorithm would have failed (because the modulus $M$ is ``too small''). \end{example} \section{Comparison with Other Methods} \label{comparison} \subsection{Reconstruction via Lattice Reduction} A reconstruction technique based on 2-dimensional lattice reduction is presented as Algorithm~6 \textit{Error Tolerant Lifting} (abbr.~\textit{ETL}) in~\cite{BDFP2012}. 
This algorithm is similar in scope to our \textit{HRR}, and not really comparable to our \textit{FTRR} algorithm (which needs extra inputs from the user). In practice there are two evident differences between \textit{ETL} and our \textit{HRR}. The first is that \textit{ETL} produces many more \textit{false positives} than \textit{HRR}; our refinement (\textbf{B}) below proposes a way to rectify this. The second is that \textit{ETL} finds balanced rationals more easily than unbalanced ones, \ie~it works best if the numerator and denominator contain roughly the same number of digits. For balanced rationals, \textit{ETL} and \textit{HRR} need about the same number of residue-modulus pairs; for unbalanced rationals \textit{ETL} usually needs noticeably more residue-modulus pairs than \textit{HRR}. \subsubsection{Practical Refinements to \textit{ETL}} We propose two useful refinements to \textit{ETL} as it is described in~\cite{BDFP2012}. \begin{description} \item[A] We believe that a final checking step should be added to the \textit{ETL} algorithm so that it rejects results where half or more of the moduli are bad. Consider the following example: the moduli are $11,13,15,17,19$ and the corresponding residues are $-4,-4,-4,1,1$. The rather surprising result produced by \textit{ETL} is~$1$; it seems difficult to justify this result as being correct. Here we see explicitly the innate tendency of \textit{ETL} to favour ``trusting'' larger moduli over smaller ones. \item [B] The aim of this second refinement is to reduce the number of false positives which \textit{ETL} produces. We suggest replacing their acceptance criterion $a_{i+1}^2 + b_{i+2}^2 < N$ by a stricter condition such as $a_{i+1}^2 + b_{i+2}^2 < N/100$. This change may require one or two more ``redundant'' residue-modulus pairs before \textit{ETL} finds the correct answer, but it does indeed eliminate most of the false positives.
\end{description} \subsubsection{Comparison of Efficiency} We define the \textbf{efficiency} of a reconstruction method to be the logarithm of the combined modulus when reconstruction first succeeds. Our trials involved reconstructing rationals from a succession of residue-modulus pairs where the moduli were all about the same size; so the efficiency is essentially proportional to the number of pairs required for reconstruction to succeed. For simplicity, we shall use the number of pairs as our measure here. We claim that the \textit{efficiency} is the most appropriate measure of how well the algorithm performs because the computational cost of obtaining a new residue-modulus pair (potentially the result of a lengthy computation such as a Gr\"obner basis) generally far exceeds the cost of attempting reconstruction, so counting the number of pairs needed gives a good estimate of actual total computational cost. This point of view is valid provided the rational to be reconstructed does not have especially large numerator or denominator. We have implemented \textit{HRR} and \textit{ETL} in CoCoALib~\cite{CoCoALib} and CoCoA-5~\cite{CoCoA}. Using these implementations we compared the efficiency of \textit{HRR} and \textit{ETL} by generating a random rational $N/D$ (with a specified number of bits each for the numerator and denominator), and then generating the modular images $x_i \bmod m_i$ where the $m_i$ run through successive primes starting from~$1013$. Note that in this first trial there are no bad residue-modulus pairs. We then counted how many residue-modulus pairs were needed by the algorithms before they were able to reconstruct the original rational. We then repeated the experiment but this time, with probability 10\%, each residue was replaced by a random value to simulate the presence of bad residues. As expected, the number of residue-modulus pairs needed for successful reconstruction increased by about 25\%. 
In each case the successful reconstruction took less than $0.1$ seconds. \begin{center} \begin{tabular}{|l|c|c|c|c|} \hline & $\phantom{\int_q^b}\frac{2000}{0}$ bits & $\frac{1600}{400}$ bits & $\frac{1200}{800}$ bits & $\frac{1000}{1000}$ bits \\ \hline \textit{HRR} 0\% bad & 190 & 191 & 190 & 190 \\ \textit{ETL} 0\% bad & 361 & 293 & 224 & 189 \\ \hline \textit{HRR} 10\% bad & 244 & 236 & 246 & 244 \\ \textit{ETL} 10\% bad & 457 & 375 & 283 & 242 \\ \hline \end{tabular} \end{center} Observe that the number of pairs needed by \textit{HRR} is essentially constant, while \textit{ETL} matches the efficiency of \textit{HRR} only for perfectly balanced rationals; as soon as there is any disparity between the sizes of numerator and denominator \textit{HRR} becomes significantly more efficient. \subsection{Combinatorial Methods} \label{combinatorial} It is shown in~\cite{Sto1963} that reconstruction of integers by Chinese Remaindering is possible provided no more than half of the \textit{redundant residues} are faulty. The correct value is identified using a \textit{voting system} (see~\cite{Sto1963} for details). We can extend the idea of a voting system to allow it to perform fault tolerant rational reconstruction: the only difference is that for each subset of residue-modulus pairs we effect an exact rational reconstruction (rather than an exact integer reconstruction). However the problem of poor computational efficiency remains. An elegant and efficient scheme for fault-tolerant chinese remaindering for integers was given in~\cite{Ram1983}; however the method is valid only for at most one bad modulus. Several generalizations of Ramachandran's scheme were given in~\cite{Abb1991}; however, these are practical really only for at most~$2$ bad moduli. 
Like the voting system, these schemes could be easily adapted to perform fault-tolerant rational reconstruction, but in the end the \textit{Continued Fraction Method} (upon which \textit{FTRR} is based) is more flexible and more efficient. \section{Conclusion} We have presented two new algorithms for solving the problem of fault tolerant rational reconstruction, \textit{FTRR} and \textit{HRR}. The former is a natural generalization both of the original rational reconstruction algorithm~\cite{WGD1982} and of the fault tolerant integer reconstruction algorithm~\cite{Abb1991}. The latter is a heuristic variant which is easier to use in practice since it does not require certain bounds as input. Our \textit{HRR} algorithm and the \textit{ETL} algorithm from~\cite{BDFP2012} offer two quite distinct (yet simple) approaches to the same problem. They have comparable practical efficiency when reconstructing balanced rationals, whereas \textit{HRR} is usefully more efficient when reconstructing unbalanced rationals. \end{document}
\begin{document} \title[Approximate Herbrand and Definable Functions]{An Approximate Herbrand's Theorem and Definable Functions in metric structures} \author{Isaac Goldbring} \thanks{The author's work was partially supported by NSF grant DMS-1007144.} \address {University of California, Los Angeles, Department of Mathematics, 520 Portola Plaza, Box 951555, Los Angeles, CA 90095-1555, USA} \email{[email protected]} \urladdr{www.math.ucla.edu/~isaac} \begin{abstract} We develop a version of Herbrand's theorem for continuous logic and use it to prove that definable functions in infinite-dimensional Hilbert spaces are piecewise approximable by affine functions. We obtain similar results for definable functions in Hilbert spaces expanded by a group of generic unitary operators and Hilbert spaces expanded by a generic subspace. We also show how Herbrand's theorem can be used to characterize definable functions in some absolutely ubiquitous structures from classical logic. \end{abstract} \maketitle \section{Introduction} The main motivation for this paper comes from the study of definable functions in metric structures; this study was initiated by the author in \cite{GoldUry}, where a study of the definable functions in Urysohn's metric space was undertaken, and continued in \cite{Gold}, where the definable linear operators in (infinite-dimensional) Hilbert spaces were characterized. However, lacking any understanding of arbitrary definable functions in Hilbert spaces, we conjectured that they were, in some sense, ``piecewise affine'' in analogy with the classical case of an infinite vector space over a division ring.
In unpublished lecture notes by van den Dries on motivic integration \cite{vdD}, we came upon a proof of the piecewise affineness of definable functions in such vector spaces using the following classical theorem of Herbrand: \begin{thm}[Herbrand \cite{herb}]\label{classicalherb} Suppose that $\mathcal{L}$ is a first-order signature and $T$ is a universal $\mathcal{L}$-theory with quantifier elimination. Let $\varphi(\vec x,\vec y)$ be a formula, where $\vec x=(x_1,\ldots,x_m)$, $\vec y=(y_1,\ldots,y_n)$, $m\geq 1$. Then there are $\mathcal{L}$-terms $$t_{11}(\vec x),\ldots,t_{1n}(\vec x),\ldots,t_{k1}(\vec x),\ldots,t_{kn}(\vec x), \quad (k\in \mathbb{N}^{>0})$$ such that $$T\models \forall \vec x\forall \vec y\left(\varphi(\vec x,\vec y)\to \bigvee_{i=1}^k \varphi(\vec x,t_{i1}(\vec x),\ldots,t_{in}(\vec x))\right).$$ \end{thm} Although this theorem is not immediately applicable to the case of an infinite vector space $V$ over a division ring (for the axioms expressing that $V$ is infinite are existential), Herbrand's theorem does apply to the theory of $V$ with constants added for names of elements of $V$. Since terms in this extended language name affine functions, we get the aforementioned characterization of definable functions in $V$. (According to van den Dries, this use of Herbrand's theorem is well-known and often used.) Although Theorem \ref{classicalherb} has an easy model-theoretic proof using compactness, we should remark that the result was first established using proof-theoretic techniques; see \cite{Buss1} and \cite{Buss2} for more on the history of Herbrand's result.
In this paper, we prove a version of Herbrand's theorem for continuous logic (Theorem \ref{contherb} and Corollary \ref{herbT} below) and use it to characterize definable functions in Hilbert spaces and some of their generic expansions, proving, in the case of pure Hilbert spaces, that definable functions are ``piecewise approximable by affine functions.'' Along the way, we note that this method works whenever $T$ is a $\exists \forall$-axiomatizable theory with quantifier elimination. In particular, we show that one can use Herbrand's theorem to understand definable functions in some absolutely ubiquitous structures from classical logic. We assume that the reader is familiar with the basic definitions of continuous logic; otherwise, they can consult the survey article \cite{BBHU}. The author would like to thank Vinicius C.L., Aleksander Ivanov, and Dugald Macpherson for helpful discussions concerning this work and Matthias Aschenbrenner for pointing out the paper \cite{Lac} on absolutely ubiquitous structures. \section{Herbrand's Theorem in Continuous Logic} \ \noindent In this section, we let $\curly{L}$ denote an arbitrary continuous signature. We will use the following abuse of notation: whenever $\curly{D}elta$ is a set of closed $\curly{L}$-conditions and $\sigma$ is an $\curly{L}$-sentence, we write $\sigma \in \curly{D}elta$ to indicate that the condition ``$\sigma=0$'' belongs to $\curly{D}elta$. \begin{df} Suppose that $\curly{D}elta$ is a set of closed $\curly{L}$-conditions. \begin{enumerate} \item We say that $\curly{D}elta$ is \emph{closed under min} if whenever $\sigma_1,\ldots,\sigma_n$ are sentences with $\sigma_i\in \curly{D}elta$ for each $i$, then $\mathcal{M}in_{1\leq i\leq n}\sigma_i\in \curly{D}elta$. \item We say that $\curly{D}elta$ is \emph{closed under weakening} if whenever $\sigma\in \curly{D}elta$, then $\sigma\mathbin{\mathpalette\dotminussym{}} r\in \curly{D}elta$ for every $r\in [0,1]$. 
\end{enumerate} \end{df} The following lemma is in a similar spirit to Lemma 3.4 of \cite{Usvy}; the classical version, whose proof we mimic, can be found in \cite{CK}. \begin{lemma}\curly{L}bel{axiom} Suppose that $T$ is a satisfiable $\curly{L}$-theory and $\curly{D}elta$ is a set of closed $\curly{L}$-conditions that is closed under min and weakening. Then the following are equivalent: \begin{enumerate} \item $T$ is axiomatizable by a collection of conditions $\Gamma\subseteq \curly{D}elta$; \item For all $\curly{L}$-structures $\mathcal{M}$ and $\mathcal{N}$ satisfying $\mathcal{M}\mathcal{M}odels T$ and $\sigma^\mathcal{N}=0$ for all $\sigma\in \curly{D}elta$ with $\sigma^\mathcal{M}=0$, we have $\mathcal{N}\mathcal{M}odels T$. \end{enumerate} \end{lemma} \begin{proof} Clearly $(1)\mathcal{M}athbb{R}ightarrow (2)$, so we need to prove $(2)\mathcal{M}athbb{R}ightarrow(1)$. Consider the set $\Gamma=\{``\sigma=0\text{''} \ : \ \sigma\in \curly{D}elta \text{ and }T\mathcal{M}odels \sigma=0\}$. We claim that $\Gamma$ axiomatizes $T$. Suppose $\mathcal{N}\mathcal{M}odels \Gamma$. Let $$\Sigma=\{``\delta \geq \frac{r}{2}\text{''} \ : \ \mathcal{N}\mathcal{M}odels \delta=r, \ r>0, \ \delta\in \curly{D}elta\}.$$ We claim that $T\cup \Sigma$ is consistent. Suppose otherwise. Then there are $\delta_1,\ldots,\delta_k$, $r_1,\ldots,r_k$ such that $T\mathcal{M}odels \mathcal{M}in_{1\leq i\leq k} (\delta_i\mathbin{\mathpalette\dotminussym{}} \frac{r_i}{2})=0$. Since $\curly{D}elta$ is closed under min and weakening, we have that $\mathcal{M}in_{1\leq i\leq k} (\delta_i\mathbin{\mathpalette\dotminussym{}} \frac{r_i}{2})\in \Gamma$, so $\mathcal{N}\mathcal{M}odels \mathcal{M}in_{1\leq i\leq k} (\delta_i\mathbin{\mathpalette\dotminussym{}} \frac{r_i}{2})=0$, which is a contradiction to the fact that $\delta_i^\mathcal{N}=r_i$ for each $i$. Let $\mathcal{M}\mathcal{M}odels T\cup\Sigma$. Now suppose that $\sigma\in \curly{D}elta$ and $\sigma^\mathcal{M}=0$. 
Then $\sigma^\mathcal{N}=0$, else $``\sigma\geq \frac{r}{2}\text{''}\in \Sigma$ for some $r>0$, contradicting $\sigma^\mathcal{M}=0$. By (2), we have $\mathcal{N}\models T$. \end{proof} Given an $\mathcal{L}$-structure $\mathcal{M}$, let $\mathcal{D}(\mathcal{M})$ be the set of closed $\mathcal{L}(\mathcal{M})$-conditions of the form $\sigma=0$, where $\sigma$ is an atomic $\mathcal{L}(\mathcal{M})$-sentence and $\sigma^\mathcal{M}=0$; this is just the \emph{atomic diagram of $\mathcal{M}$}. The following lemma is proved just as in classical logic. \begin{lemma} If $\mathcal{N}\models \mathcal{D}(\mathcal{M})$, then the $\mathcal{L}$-reduct of $\mathcal{N}$ contains a substructure isomorphic to $\mathcal{M}$. \end{lemma} Let us call a sentence $\sigma$ \emph{universal} if it is of the form $\sup_{\vec x}\varphi(\vec x)$, where $\varphi$ is quantifier-free. Let us call a closed condition $``\sigma=0$'' \emph{universal} if $\sigma$ is universal. We call a closed condition ``$\sigma=0$'' \emph{almost universal} if there is a universal sentence $\tau$ such that, in every $\mathcal{L}$-structure $\mathcal{M}$, we have $\sigma^\mathcal{M}=0$ if and only if $\tau^\mathcal{M}=0$. \begin{lemma} The set of almost universal conditions is closed under min and weakening. \end{lemma} \begin{proof} Suppose that $\sigma=0$ and $\tau=0$ are almost universal conditions. Suppose that $\sigma=0$ is equivalent to $\sup_{\vec x}\sigma'(\vec x)=0$ and $\tau=0$ is equivalent to $\sup_{\vec y}\tau'(\vec y)=0$, with $\sigma'$, $\tau'$ quantifier-free and $\vec x$, $\vec y$ disjoint tuples of distinct variables. Then $\min(\sigma,\tau)=0$ is equivalent to $\sup_{\vec x}\sup_{\vec y}(\min(\sigma'(\vec x),\tau'(\vec y)))=0$. Similarly, the condition $\sigma\mathbin{\mathpalette\dotminussym{}} r=0$ is equivalent to $\sup_{\vec x}(\sigma'(\vec x)\mathbin{\mathpalette\dotminussym{}} r)=0$.
\end{proof} \noindent If $\Gamma$ is a set of closed $\curly{L}$-conditions, we set $$\Gamma^+:=\{``\sigma\leq \frac{1}{n}\text{''} \ : \ \sigma\in \Gamma, n\geq 1\}.$$ We say that $T$ has a \emph{universal axiomatization} if $T$ is axiomatizable by a set of universal conditions. Clearly if $T$ is axiomatizable by a set of almost universal conditions, then $T$ has a universal axiomatization. \begin{cor}\curly{L}bel{univ} The following are equivalent: \begin{enumerate} \item $T$ has a universal axiomatization; \item For any $\mathcal{M}\mathcal{M}odels T$ and substructure $\mathcal{N}$ of $\mathcal{M}$, we have $\mathcal{N}\mathcal{M}odels T$. \end{enumerate} \end{cor} \begin{proof} Clearly (1) implies (2), so we prove that (2) implies (1). We use the criterion developed in Lemma \ref{axiom} applied to the set of almost universal conditions. Suppose that $\mathcal{M}\mathcal{M}odels T$ and for all almost universal conditions $``\sigma=0$'', we have $\sigma^\mathcal{M}=0$ implies $\sigma^\mathcal{N}=0$. We want $\mathcal{N}\mathcal{M}odels T$. Let $T'=T\cup \curly{D}(\mathcal{N})^+$. We claim that $T'$ is satisfiable. Fix atomic $\curly{L}(\mathcal{N})$-sentences $\sigma_1(\vec b),\ldots,\sigma_n(\vec b)$ such that $\sigma_i^\mathcal{N}(\vec b)=0$. Then $\mathcal{N}\mathcal{M}odels \inf_{\vec x}\mathcal{M}ax(\sigma_i(\vec x))=0$. Suppose, towards a contradiction, that $\mathcal{M}\not\mathcal{M}odels \inf_{\vec x}\mathcal{M}ax(\sigma_i(\vec x))=0.$ Then there is $r\in (0,1]$ such that $\mathcal{M}\mathcal{M}odels \sup_{\vec x}(r\mathbin{\mathpalette\dotminussym{}} \mathcal{M}ax(\sigma_i(\vec x)))=0$. By assumption, we have $\mathcal{N}\mathcal{M}odels \sup_{\vec x}(r\mathbin{\mathpalette\dotminussym{}} \mathcal{M}ax(\sigma_i(\vec x)))=0$, which is a contradiction. Consequently, for any $k\geq 1$, there is $\vec a\in M$ such that $\mathcal{M}\mathcal{M}odels \mathcal{M}ax(\sigma_i(\vec a))\leq \frac{1}{k}$. It follows by compactness that $T'$ is satisfiable. 
Let $\curly{A}'\mathcal{M}odels T'$ and let $\curly{A}$ be the $\curly{L}$-reduct of $\curly{A}'$. Then $\curly{A}\mathcal{M}odels T$ and $\mathcal{N}$ is (isomorphic to) a substructure of $\curly{A}$, whence $\mathcal{N}\mathcal{M}odels T$. \end{proof} \begin{df} Suppose that $\mathcal{M}$ is an $\curly{L}$-structure and $A\subseteq M$. Let $\curly{L}ngle A\rangle_0$ be the $\curly{L}$-prestructure generated by $A$. Then the closure of $\curly{L}ngle A\rangle_0$ in $M$ is the completion of $\curly{L}ngle A\rangle_0$, whence a substructure of $\mathcal{M}$, called the \emph{substructure of $\mathcal{M}$ generated by $A$}. \end{df} \noindent By Theorem 3.5 of \cite{BBHU}, any $\curly{L}$-formula $\varphi(\vec x)$ has a modulus of uniform continuity $\curly{D}elta_\varphi:(0,1]\to (0,1]$, that is, for any $\curly{L}$-structure $\mathcal{M}$, any $\epsilon>0$, and any tuples $\vec a,\vec b$ from $M$, if $d(\vec a,\vec b)<\curly{D}elta_\varphi(\epsilon)$, then $|\varphi^\mathcal{M}(\vec a)-\varphi^\mathcal{M}(\vec b)|\leq \epsilon$. \begin{thm}[Continuous Herbrand Theorem]\curly{L}bel{contherb} Suppose that $T$ is a complete $\curly{L}$-theory with quantifier elimination that admits a universal axiomatization. Let $\vec x=(x_1,\ldots,x_m)$ and $\vec y=(y_1,\ldots,y_n)$. 
Then for any formula $\varphi(\vec x,\vec y)$ and any $\epsilon>0$, there are $\curly{L}$-terms $$t_{11}(\vec x),\ldots,t_{1n}(\vec x),\ldots,t_{k1}(\vec x),\ldots,t_{kn}(\vec x) \quad (k\in \n^{>0})$$ such that, for any $\mathcal{M}\mathcal{M}odels T$ and any $\vec a\in M^m$, if $\mathcal{M}\mathcal{M}odels \inf_{\vec y} \varphi(\vec a,\vec y)=0$, then $$\mathcal{M} \mathcal{M}odels \mathcal{M}in_{1\leq i \leq k}\varphi(\vec a,t_{i1}(\vec a),\ldots,t_{in}(\vec a))\leq \epsilon.$$ \end{thm} \begin{proof} Consider the set of closed $\curly{L}$-conditions $\Gamma(\vec x)$ given by $$\{\inf_{\vec y}\varphi(\vec x,\vec y)=0\}\cup \{\varphi(\vec x,t_1(\vec x),\ldots,t_n(\vec x))\geq 2\epsilon \ : \ t_1(\vec x),\ldots,t_n(\vec x)\ \curly{L}\text{-terms}\}.$$ By compactness, it is enough to prove that $\Gamma$ is unsatisfiable. Suppose, towards a contradiction, that $\mathcal{M}\mathcal{M}odels \Gamma(\vec a)$, where $\vec a=(a_1,\ldots,a_m)\in M^m$. Fix $\delta\in (0,1]$ such that $\delta<\frac{\epsilon}{3}$. Let $\chi(\vec x)$ be a quantifier-free $\curly{L}$-formula such that $T\mathcal{M}odels \sup_{\vec x}\left (|\inf_{\vec y}\varphi(\vec x,\vec y)-\chi(\vec x)|\mathbin{\mathpalette\dotminussym{}} \delta\right)=0$. Then $\chi^\mathcal{M}(\vec a)\leq \delta$. Let $\mathcal{N}$ be the substructure of $\mathcal{M}$ generated by $\{a_1,\ldots,a_m\}$. Then since $\chi(\vec x)$ is quantifier-free, we have $\chi^\mathcal{N}(\vec a)\leq \delta$. Since $\mathcal{N}\mathcal{M}odels T$, we have $\mathcal{N}\mathcal{M}odels \inf_{\vec y}\varphi(\vec a,\vec y)\leq 2\delta$. Thus, there is $\vec c\in N^n$ such that $\varphi^\mathcal{N}(\vec a,\vec c)\leq 3\delta$. Now let $t_i(\vec x)$ be a term so that $d(t_i(\vec a),\vec c_i)<\curly{D}elta_\varphi(\delta)$, whence $\varphi^\mathcal{N}(\vec a,t_1(\vec a),\ldots,t_n(\vec a))\leq 4\delta$. 
Let $\theta(\vec x,\vec y)$ be a quantifier-free $\mathcal{L}$-formula so that $T\models \sup_{\vec x, \vec y}\left(|\varphi(\vec x,\vec y)-\theta(\vec x,\vec y)|\mathbin{\mathpalette\dotminussym{}} \delta\right)=0$. Then $\theta^\mathcal{N}(\vec a,t_1(\vec a),\ldots,t_n(\vec a))\leq 5\delta$, whence $\theta^\mathcal{M}(\vec a,t_1(\vec a),\ldots,t_n(\vec a))\leq 5\delta$ and hence $\varphi^\mathcal{M}(\vec a,t_1(\vec a),\ldots,t_n(\vec a))\leq 6\delta$. Since $6\delta<2\epsilon$, this is a contradiction to the fact that $\mathcal{M}\models \Gamma(\vec a)$. \end{proof} The following rephrasing of the previous theorem more closely resembles the usual statement of Herbrand's theorem. \begin{cor}\label{herbT} Suppose that $T$ is a complete $\mathcal{L}$-theory with quantifier elimination that admits a universal axiomatization. Let $\vec x=(x_1,\ldots,x_m)$ and $\vec y=(y_1,\ldots,y_n)$. Then for any formula $\varphi(\vec x,\vec y)$ and any $\epsilon>0$, there are $\mathcal{L}$-terms \[t_{11}(\vec x),\ldots,t_{1n}(\vec x),\ldots,t_{k1}(\vec x),\ldots,t_{kn}(\vec x) \quad (k\in \mathbb{N}^{>0})\] and an increasing continuous function $\alpha:[0,1]\to [0,1]$ satisfying $\alpha(0)=0$ such that \[T\models \sup_{\vec x}\left(\left(\min_{1\leq i \leq k}\varphi(\vec x,t_{i1}(\vec x),\ldots,t_{in}(\vec x))\mathbin{\mathpalette\dotminussym{}} \epsilon\right)\mathbin{\mathpalette\dotminussym{}} \alpha\left(\inf_{\vec y}\varphi(\vec x,\vec y)\right)\right)=0.\] \end{cor} \begin{proof} This is immediate from the preceding theorem and Proposition 7.15 of \cite{BBHU}. \end{proof} \section{Primitive theories with QE} In this short section, $\mathcal{L}$ continues to denote an arbitrary (continuous) signature and $T$ denotes an $\mathcal{L}$-theory.
\begin{df} Following \cite{Lac} (in the classical setting), we say that $T$ is \emph{primitive} if there exist sets of closed $\mathcal{L}$-conditions $\Gamma$ and $\Delta$, where $\Gamma$ consists of universal conditions and $\Delta$ consists of existential conditions, such that $\Gamma\cup \Delta$ axiomatizes $T$. \end{df} \begin{rmk} In classical logic, it is mentioned in \cite{Lac} that $T$ is primitive if and only if: whenever $\mathcal{M}_0,\mathcal{M}_1\models T$ and $\mathcal{M}_0\subseteq \mathcal{N}\subseteq \mathcal{M}_1$, then $\mathcal{N}\models T$. It is also mentioned in \cite{Lac} that $T$ is $\exists \forall$-axiomatizable if and only if: whenever $\mathcal{M}_0,\mathcal{M}_1\models T$, $\mathcal{M}_0\preceq \mathcal{M}_1$, and $\mathcal{M}_0\subseteq \mathcal{N}\subseteq \mathcal{M}_1$, then $\mathcal{N}\models T$. It follows that for model-complete theories $T$, $T$ is primitive if and only if $T$ is $\exists \forall$-axiomatizable. An interesting example of a model-complete $\exists \forall$-theory is Example 3 of \cite{Lac2}. \end{rmk} \begin{prop}\label{prim} Suppose that $T$ is a complete, model-complete primitive $\mathcal{L}$-theory. Let $\mathcal{M}\models T$ and let $T_\mathcal{M}$ be the $\mathcal{L}(\mathcal{M})$-theory of $\mathcal{M}$. Then $T_\mathcal{M}$ is universally axiomatizable. Moreover, $T_\mathcal{M}$ has quantifier elimination if $T$ does. \end{prop} \begin{proof} Let $\Gamma$ be a set of universal sentences and $\Delta$ a set of existential sentences such that $\Gamma\cup \Delta$ axiomatizes $T$. In order to prove that $T_\mathcal{M}$ has a universal axiomatization, it suffices to prove that $T_\mathcal{M}$ is axiomatized by $\Gamma\cup \mathcal{D}(\mathcal{M})$. Suppose that $\mathcal{N}\models \Gamma\cup \mathcal{D}(\mathcal{M})$. Then $\mathcal{M}$ is a substructure of $\mathcal{N}$.
Now any axiom from $\curly{D}elta$ is true in $\mathcal{N}$ since it is witnessed by things in $\mathcal{M}$. Consequently, $\mathcal{N}\mathcal{M}odels T$, whence $\mathcal{N}\mathcal{M}odels T_\mathcal{M}$ by model-completeness of $T$. The moreover statement is clear. \end{proof} We will meet some examples of (classical and continuous) primitive theories with quantifier elimination in the next section. The following proposition explains how we use Herbrand's theorem in connection with definable functions. \begin{prop}\curly{L}bel{deffunc} Suppose that $T$ is primitive and admits quantifier elimination. Suppose $\mathcal{M}\mathcal{M}odels T$ and $f:M^n\to M$ is a definable function. Then for any $\epsilon>0$, there are $\curly{L}(M)$-terms $t_1(\vec x),\ldots,t_k(\vec x)$ such that: for all $\vec a\in M^n$, there is $i\in \{1,\ldots,k\}$ with $d(f(\vec a),t_i(\vec a))\leq \epsilon$. \end{prop} \begin{proof} Fix $\epsilon>0$. Let $\varphi(\vec x,y)$ be an $\curly{L}(M)$-formula such that $$|d(f(\vec a),b)-\varphi^\mathcal{M}(\vec a,b)|\leq \frac{\epsilon}{3}$$ for all $\vec a\in M^n$ and $b\in M$. By Herbrand's theorem applied to $T_\mathcal{M}$ (which is applicable by Proposition \ref{prim}), there are $\curly{L}(M)$-terms $t_1(\vec x),\ldots,t_k(\vec x)$ such that, for all $\vec a\in M^n$, if $\mathcal{M}\mathcal{M}odels \inf_y(\varphi(\vec a,y)\mathbin{\mathpalette\dotminussym{}}\frac{\epsilon}{3})=0$, then $$\mathcal{M}\mathcal{M}odels (\varphi(\vec a,t_i(\vec a))\mathbin{\mathpalette\dotminussym{}} \frac{\epsilon}{3})\leq \frac{\epsilon}{3}$$ for some $i\in \{1,\ldots,k\}$. Notice that the antecedent of the preceding conditional statement holds since $\varphi^\mathcal{M}(\vec a,f(\vec a))\leq \frac{\epsilon}{3}$. Consequently, for every $\vec a\in M^n$, there is $i\in \{1,\ldots,k\}$ such that $d(f(\vec a),t_i(\vec a))\leq \epsilon$. \end{proof} \begin{rmk} Fix a definable function $f:M^n\to M$. 
Fix $\epsilon>0$ and let the $\curly{L}(\mathcal{M})$-terms $t_1(\vec x),\ldots,t_k(\vec x)$ be as in the conclusion of the previous proposition. Suppose that $\mathcal{M}\preceq \mathcal{N}$ and $f:N^n\to N$ is the natural extension of $f$ to a definable function in $\mathcal{N}$. Then, for every $\vec a\in N^n$, there is $i\in \{1,\ldots,k\}$ such that $d(f(\vec a),t_i(\vec a))\leq \epsilon$. Indeed, repeat the proof of the preceding proposition, using Corollary \ref{herbT} instead of Theorem \ref{contherb}. \end{rmk} \section{Applications} In this section, we present some (classical and continuous) primitive theories with quantifier-elimination and use Proposition \ref{deffunc} above to understand the definable functions in models of these theories. \subsection{Infinite-dimensional Hilbert spaces and some of their generic expansions} \ \ \noindent In this subsection, we suppose that $\mathcal{M}athbb{K}\in \{\r,\c\}$ and we set $$\d:=\{\curly{L}mbda \in \mathcal{M}athbb{K} \ : \ |\curly{L}mbda|\leq 1\}.$$ Also, $\curly{L}$ denotes the (1-sorted) continuous signature for unit balls of $\mathcal{M}athbb{K}$-Hilbert spaces. More specifically, $\curly{L}$ contains: \begin{itemize} \item a constant symbol $0$; \item a binary function symbol $f_{\alpha,\beta}$ for every $\alpha,\beta\in \d$ with $|\alpha|+|\beta|\leq 1$; \item a binary predicate symbol $\curly{L}ngle \cdot,\cdot \rangle$ that takes values in $[-1,1]$. \end{itemize} If $H$ is a $\mathcal{M}athbb{K}$-Hilbert space, the unit ball of $H$, $B_1(H)$, is naturally an $\curly{L}$-structure, where $0$ is interpreted as the zero vector of $H$, $f_{\alpha,\beta}$ is interpreted as the function $(x,y)\mathcal{M}apsto \alpha x+\beta y$, and $\curly{L}ngle \cdot ,\cdot\rangle$ is interpreted as the inner product of $H$. For sake of readability, we often write $H$ instead of $B_1(H)$ when speaking of this way of treating $B_1(H)$ as an $\curly{L}$-structure. 
Let $T$ be the $\curly{L}$-theory of (the unit ball of) an infinite-dimensional $\mathcal{M}athbb{K}$-Hilbert space. Then $T$ is primitive as the Hilbert space axioms are universal and the axioms for infinite-dimensionality are existential. We must remark that we cannot work in the many-sorted setting for Hilbert spaces (as in \cite{Gold}) because the axioms for the inclusion mappings are $\forall \exists$; indeed, for $n\leq m$, one must declare that the inclusion mapping $I_{n,m}:B_n(H)\to B_m(H)$ is onto the set of elements of $B_m(H)$ of norm at most $n$. In the rest of this subsection, $H\mathcal{M}odels T$ and $H^*$ is an elementary extension of $H$. In order to make any sense of Proposition \ref{deffunc} in this context, we must first understand $\curly{L}(H)$-terms. \begin{lemma}\curly{L}bel{term} If $t(x)$ is an $\curly{L}(H)$-term, then there are $\curly{L}mbda\in \d$ and $v\in B_1(H)$ so that $t(a)=\curly{L}mbda a+v$ for all $a\in B_1(H)$. \end{lemma} \begin{proof} One proves this by induction on the complexity of $t(x)$, the base case being immediate. Now suppose that $t_i(x)=\curly{L}mbda_ix+v_i$ for $i=1,2$ and $\alpha,\beta$ are so that $|\alpha|+|\beta|\leq 1$. Then $$f_{\alpha,\beta}(t_1(a),t_2(a))=\alpha t_1(a)+\beta t_2(a)=(\alpha \curly{L}mbda_1+\beta \curly{L}mbda_2)a+(\alpha v_1+\beta v_2).$$ It remains to observe that $|\alpha \curly{L}mbda_1+\beta\curly{L}mbda_2|\leq 1$. \end{proof} \begin{cor}\curly{L}bel{affine} Let $f:H\to H$ be definable. Then given $\epsilon>0$, there are $\curly{L}mbda_1,\ldots,\curly{L}mbda_k\in \mathcal{M}athbb{D}$ and $v_1,\ldots,v_k\in B_1(H)$ such that, for all $a\in B_1(H^*)$, there is $i\in \{1,\ldots,k\}$ with $d(f(a),\curly{L}mbda_ia+v_i)\leq \epsilon$. \end{cor} Fix $a\in B_1(H^*)$. Then there are sequences $(\curly{L}mbda_n)$ from $ \mathcal{M}athbb{D}$ and $(v_n)$ from $B_1(H)$ with $\curly{L}mbda_na+v_n\to f(a)$ as $n\to \infty$. 
By taking subsequences, we may suppose that $\curly{L}mbda_n\to \curly{L}mbda\in \d$. It then follows that $(v_n)$ is a Cauchy sequence in $B_1(H)$, whence $v_n\to v\in B_1(H)$. It follows that $f(a)=\curly{L}mbda a+v$. We have just proven the following result: \begin{cor} For any $a\in B_1(H^*)$, there are $\curly{L}mbda\in \mathcal{M}athbb{D}$ and $v\in B_1(H)$ such that $f(a)=\curly{L}mbda a+v$. \end{cor} \begin{cor} Suppose that $H^*$ is $\omegamega_1$-saturated and $f(H^\perp)\subseteq H^\perp$. Fix $\epsilon>0$ and let $\curly{L}mbda_1,\ldots,\curly{L}mbda_m$ be a finite $\epsilon$-net for $\d$. Then there is a finite-dimensional subspace $K$ of $H$ such that, for all $a\in B_1(H^*)\cap K^\perp$, there is $i\in\{1,\ldots,m\}$ such that $d(f(a),\curly{L}mbda_ia)<\epsilon$. \end{cor} \begin{proof} Let $a\in B_1(H^*)\cap H^\perp$. Take $\curly{L}mbda\in \d$ and $v\in B_1(H)$ such that $f(a)=\curly{L}mbda a+v$. Then $$0=\curly{L}ngle f(a),v\rangle =\curly{L}ngle \curly{L}mbda a+v,v\rangle=\curly{L}ngle v,v\rangle.$$ Thus, $f(a)=\curly{L}mbda a$. Let $(a_n)$ be an orthonormal basis for $H$. Then the following set of conditions is unsatisfiable in $H^*$: $$\{\curly{L}ngle x,a_n\rangle=0 \ : \ n<\omegamega\} \cup \{d(f(x),\curly{L}mbda_i x)\geq \epsilon \ : \ i=1,\ldots,m\}.$$ By saturation, there is $n<\omegamega$ such that, setting $K:=\omegaperatorname{span}(a_1,\ldots,a_n)$, we have $d(f(x),\curly{L}mbda_ix)<\epsilon$ for all $x\in B_1(H^*)\cap K^\perp$. \end{proof} How does Corollary \ref{affine} relate to functions definable in the many-sorted language for Hilbert spaces considered in \cite{Gold}? In order to elucidate this, we first clarify how the syntax of continuous logic works in the case that the predicates take values in intervals other than $[0,1]$. (This is omitted in the survey \cite{BBHU} and was communicated to me by Ward Henson.) Let $\curly{L}'$ be a many-sorted (continuous) signature with sort set $S$. 
In particular, one associates to each predicate symbol $P$ of $\curly{L}$ a closed, bounded interval $I_P$ in $\r$. Then one also associates to each formula $\varphi$ a closed, bounded interval $I_\varphi$ in $\r$ as follows: \begin{itemize} \item Given two terms $t_1(\vec x)$ and $t_2(\vec x)$ of arity $(s_1,\ldots,s_n,s_{n+1})$, the formula $\varphi(\vec x)=d(t_1(\vec x),t_2(\vec x))$ is an atomic formula with $I_\varphi:=[0,N]$, where $N$ is the bound on the metric of sort $s_{n+1}$. \item If $P$ is a predicate symbol of arity $(s_1,\ldots,s_n)$ and $t_1(\vec x),\ldots,t_n(\vec x)$ are terms such that $t_i$ takes values in sort $s_i$, then the formula $\varphi(\vec x)=P(t_1(\vec x),\ldots,t_n(\vec x))$ is an atomic formula with $I_\varphi:=I_P$. \item Suppose that $\varphi_1(\vec x),\ldots,\varphi_n(\vec x)$ are formulae with associated intervals $I_{\varphi_1},\ldots,I_{\varphi_n}$. Suppose that $u$ is a continuous function with domain $I_{\varphi_1}\times \cdots \times I_{\varphi_n}$ and range $I$, a closed, bounded interval in $\r$. Then $\varphi(\vec x)=u(\varphi_1(\vec x),\ldots,\varphi_n(\vec x))$ is a formula with $I_\varphi:=I$. \item If $\varphi$ is a formula with associated interval $I_\varphi$, then $\psi=\sup_x \varphi$ is a formula with $I_\psi:=I_\varphi$. Similarly for $\inf_x\varphi$. \end{itemize} For an interval $I=[a,b]\subseteq \r$ with $a<b$, define $u_I:I\to [0,1]$ by $u_I(x):=\frac{1}{b-a}(x-a)$. Note that $u_I$ is a homeomorphism with inverse $u_I^{-1}(x)=a+(b-a)x$. We let $\curly{L}_{\mathcal{M}s}$ denotes the many-sorted theory of Hilbert spaces used in \cite{Gold}. 
\begin{lemma} For any quantifier-free $\mathcal{L}_{\operatorname{ms}}$-formula $\varphi(\vec x)$, where $\vec x$ is a tuple of variables of sort $B_1(H)$, there is a quantifier-free $\mathcal{L}$-formula $\psi(\vec x)$ with $I_{\psi}=[0,1]$ such that \[H\models \sup_{\vec x}|u_{I_\varphi}(\varphi(\vec x))-\psi(\vec x)|=0.\] In particular, when $I_\varphi=[0,1]$, we have $H\models \sup_{\vec x} |\varphi(\vec x)-\psi(\vec x)|=0$. \end{lemma} \begin{proof} The proof goes by induction on the complexity of $\varphi$, the main work taking place in the case when $\varphi$ is atomic, which involves a painful case distinction. Let us illustrate the idea by considering terms $t_i(x,y)=\lambda_ix+\mu_iy$ ($i=1,2$) where $|\lambda_i|,|\mu_i|\leq n$. (In the general situation, terms can be much more complicated due to the number of variables and the inclusion maps.) First suppose that $\varphi(x,y)=d(t_1(x,y),t_2(x,y))$. Since each $t_i$ takes values in $B_{2n}$, we have $I_{\varphi}=[0,4n]$. Then $u_{I_{\varphi}}(\varphi(x,y))=\frac{1}{4n}d(t_1(x,y),t_2(x,y))$. Let $\psi(x,y)=\|\frac{\lambda_1-\lambda_2}{4n}x+\frac{\mu_1-\mu_2}{4n}y\|$. Since $|\frac{\lambda_1-\lambda_2}{4n}|+|\frac{\mu_1-\mu_2}{4n}|\leq 1$, we have that $\psi$ is an $\mathcal{L}$-formula with $I_{\psi}=[0,1]$. Clearly $\psi$ is as desired. Now suppose that $\varphi(x,y)=\langle t_1(x,y),t_2(x,y)\rangle$. Now $I_{\varphi}=[-4n^2,4n^2]$, so $u_{I_{\varphi}}(\varphi(x,y))=\frac{1}{8n^2}(\langle t_1(x,y),t_2(x,y)\rangle +4n^2)$. This time, let \[\psi(x,y)=\frac{1}{2}\langle \frac{\lambda_1}{2n}x+\frac{\mu_1}{2n}y,\frac{\lambda_2}{2n}x+\frac{\mu_2}{2n}y\rangle+\frac{1}{2}.\] It is easily verified that this $\psi$ is as desired.
For the induction step, suppose that $\varphi=u(\varphi_1,\ldots,\varphi_n)$, where \[u:I_{\varphi_1}\times \cdots \times I_{\varphi_n}\to I_{\varphi}\] is a surjective continuous function. By the induction hypothesis, there are $\mathcal{L}$-formulae $\psi_i(\vec x)$ ($i=1,\ldots,n$) with each $I_{\psi_i}=[0,1]$ such that $H\models \sup_{\vec x}|u_{I_{\varphi_i}}(\varphi_i(\vec x))-\psi_i(\vec x)|=0$. Consider the $\mathcal{L}$-formula \[\psi(\vec x)=u_{I_\varphi}(u(u_{I_{\varphi_1}}^{-1}(\psi_1(\vec x)),\ldots,u_{I_{\varphi_n}}^{-1}(\psi_n(\vec x)))).\] It is clear that $H\models \sup_{\vec x}|u_{I_\varphi}(\varphi(\vec x))-\psi(\vec x)|=0$. \end{proof} \begin{cor} If $P:B_1(H)^n\to [0,1]$ is a uniformly continuous function, then $P$ is an $\mathcal{L}$-definable predicate if and only if $P$ is an $\mathcal{L}_{\operatorname{ms}}$-definable predicate. \end{cor} \begin{proof} This follows from the preceding lemma and the fact that the $\mathcal{L}_{\operatorname{ms}}$-theory of $H$ admits quantifier elimination. \end{proof} \begin{cor} Suppose that $f:H\to H$ is an $\mathcal{L}_{\operatorname{ms}}$-definable function such that $f(B_1(H))\subseteq B_1(H)$. Then $f|B_1(H)$ is an $\mathcal{L}$-definable function. \end{cor} \noindent The definition of an $\mathcal{L}_{\operatorname{ms}}$-definable function is given in \cite{Gold}. \begin{rmk} It follows from the preceding corollary and Corollary \ref{affine} that for any $\mathcal{L}_{\operatorname{ms}}$-definable function $f:H\to H$, any $n\geq 1$, and any $\epsilon >0$, there are scalars $\lambda_1,\ldots,\lambda_k$ and vectors $v_1,\ldots,v_k \in B_{m(n,f)}(H)$ such that, for all $x\in B_n(H)$, there is $i\in \{1,\ldots,k\}$ with $d(f(x),\lambda_i x+v_i)\leq \epsilon$. Using the main result of \cite{Gold}, we can give a different proof of this fact in the case that $f$ is linear. Indeed, write $f=\lambda I+K$, where $K$ is a compact operator.
Let $\{v_1,\ldots,v_k\}$ be a finite $\epsilon$-net for $K(B_n(H))$. Then for $a\in B_1(H)$, we have $d(K(a),v_i)\leq \epsilon$ for some $i\in \{1,\ldots,k\}$, whence $d(f(a),\curly{L}mbda a+v_i)\leq \epsilon$. (Notice here that $\curly{L}mbda_i=\curly{L}mbda$ for all $i$.) \end{rmk} \ We now suppose that $\mathcal{M}athbb{K}=\c$ and set $\mathcal{M}athbb{S}^1:=\{\curly{L}mbda\in \c \ : \ |\curly{L}mbda|=1\}$. We let $\curly{L}_U:=\curly{L}\cup \{U,U^{-1}\}$, where $U$ and $U^{-1}$ are both unary function symbols. We let $T_U^\forall$ denote the $\curly{L}$-theory obtained from $T$ by adding (universal) axioms saying that $U$ is linear, preserves the inner product, and $U$ and $U^{-1}$ are inverses. ($T_U$ axiomatizes the theory of an infinite-dimensional Hilbert space equipped with a unitary operator; one adds a symbol for $U^{-1}$ so as to avoid the $\forall \exists$ axiom stating that $U$ is onto.) We add to $T^\forall_U$ the following axioms: $$\inf_x[|\curly{L}ngle x,x\rangle -1|\dotplus d(Ux,\sigma x)|]=0,$$ where $\sigma$ ranges over a countable dense subset of $\mathcal{M}athbb{S}^1$. (These axioms assert that the spectrum of $U$ is $\mathcal{M}athbb{S}^1$.) Then $T_U$ is complete and admits quantifier elimination (see \cite{BUZ}); $T_U$ is the theory of infinite-dimensional Hilbert spaces equipped with a generic automorphism. Since $T_U$ is primitive, we can once again apply Proposition \ref{deffunc}. \begin{lemma} If $t(x)$ is an $\curly{L}_U(H)$-term, then there are $l,m\in\mathcal{M}athbb{Z}$, $l\leq m$, $\alpha_l,\ldots,\alpha_m\in \mathcal{M}athbb{D}$ and a vector $v\in B_1(H)$ such that, for all $a\in B_1(H)$, we have $$t(a)=v+\sum_{j=l}^m\alpha_jU^j(a).$$ \end{lemma} \begin{proof} This is proved by induction on the complexity of $t(x)$ exactly as in Lemma \ref{term}. \end{proof} Suppose that $(H^*,U^*)$ is an elementary extension of $(H,U)$. \begin{cor} Suppose that $f:H\to H$ is an $\curly{L}_U$-definable function and $\epsilon>0$. 
Then there are $l,m\in \mathbb{Z}$, $l\leq m$, $\lambda^1_l,\ldots,\lambda^1_m,\ldots,\lambda^k_l,\ldots,\lambda^k_m\in \mathbb{D}$, and $v_1,\ldots,v_k\in B_1(H)$, such that, for all $a\in B_1(H^*)$, there is $i\in \{1,\ldots,k\}$ such that \[d(f(a),v_i+\sum_{j=l}^m \lambda^i_jU^j(a))<\epsilon.\] \end{cor} One can generalize this situation as follows: Let $G$ be a countable (discrete) group and let $\mathcal{L}_G$ be the language for Hilbert spaces as above augmented by unary function symbols $\tau_g$ for $g\in G$. Let $T_G$ be the universal $\mathcal{L}_G$-theory of a unitary representation of $G$ on an infinite-dimensional Hilbert space. (As above, the axiom $\sup_x d(\tau_g(\tau_{g^{-1}}(x)),x)=0$ allows us to assert that $\tau_g$ is onto without using a $\forall\exists$ axiom.) Let $\pi:G\to U(H)$ be a unitary representation of $G$ on an (infinite-dimensional) Hilbert space $H$ such that $(H,\pi)$ is an existentially closed model of $T_G$ (such an existentially closed model exists because $T_G$ is an inductive theory). Let $\Sigma$ be the set of existential consequences of $(H,\pi)$. Then it is shown in \cite{Ber} that $T_{GA}:=T_G\cup \Sigma$ axiomatizes the class of existentially closed models of $T_G$, whence is the model companion of $T_G$. Moreover, since $T_G$ has the amalgamation property (see \cite{Ber}), it follows that $T_{GA}$ admits quantifier elimination. As above, one can show that any $\mathcal{L}_G$-term $t(x)$ has the form $v+\sum_{i=1}^n\lambda_i g_ix$ for some $v\in B_1(H)$, some $\lambda_1,\ldots,\lambda_n\in \mathbb{D}$, and some $g_1,\ldots,g_n\in G$. (Here we abuse notation and write $gx$ instead of $\tau_g(x)$.) Consequently, we have: \begin{cor} Let $(H,\pi)$ be any model of $T_{GA}$ and let $f:H\to H$ be an $\mathcal{L}_G$-definable function.
Then, for any $\epsilon>0$, there are $v_1,\ldots,v_k\in B_1(H)$, scalars $\lambda^1_1,\ldots,\lambda^1_m,\ldots,\lambda^k_1,\ldots, \lambda^k_m\in \mathbb{D}$, and group elements $g_1,\ldots,g_m\in G$ such that, for all $a\in B_1(H^*)$, there is $i\in \{1,\ldots,k\}$ such that \[d(f(a),v_i+\sum_{j=1}^m\lambda^i_jg_j a)<\epsilon.\] \end{cor} There is yet another expansion of Hilbert spaces that fits into this context. Let $\mathcal{L}_P:=\mathcal{L}\cup \{P\}$, where $P$ is a new unary predicate symbol. We consider the theory $T_P$ obtained from the theory of infinite-dimensional Hilbert spaces by adding the following axioms (the latter two are axiom schemes, including one such axiom for every $n\geq 1$): \begin{itemize} \item $P$ is linear; \item $\sup_x d(P^2(x),P(x))=0$; \item $\sup_{x,y}|\langle P(x),y\rangle-\langle x,P(y)\rangle|=0$; \item $\inf_{v_1}\cdots \inf_{v_n} \max (\max_{i,j}|\langle v_i,v_j\rangle\mathbin{\mathpalette\dotminussym{}} \delta_{ij}|,\max_i d(P(v_i),v_i))=0$; \item $\inf_{v_1}\cdots \inf_{v_n} \max (\max_{i,j}|\langle v_i,v_j\rangle\mathbin{\mathpalette\dotminussym{}} \delta_{ij}|,\max_i d(P(v_i),0))=0$. \end{itemize} The first three axioms say that $P$ is a projection operator on $H$ and the latter two axiom schemes say that $P(H)$ and $P(H)^\perp$ are infinite-dimensional. Then $T_P$ is a complete theory with quantifier elimination (\cite{BV}); in fact, it is the theory of beautiful pairs of Hilbert spaces and its unique separable model is the Fra\"iss\'e limit of the family of finite-dimensional Hilbert spaces equipped with projection operators. Since $T_P$ is a primitive theory with quantifier elimination, we may use Proposition \ref{deffunc}. Let $(H,P)$ be a model of $T_P$.
Then in $(H,P)$, all $\curly{L}_P$-terms $t(x)$ are easily seen to be equivalent to terms of the form $\alpha x+\beta P(x)+v$, where $\alpha,\beta\in \d$ and $v\in B_1(H)$. Thus: \begin{prop} Let $f:B_1(H)\to B_1(H)$ be an $\curly{L}_P$-definable function. Then for any $\epsilon>0$, there are $v_1,\ldots,v_k\in B_1(H)$ and $\alpha_1,\ldots,\alpha_k,\beta_1,\ldots,\beta_k\in \d$ such that, for all $a\in B_1(H)$, there is $i\in\{1,\ldots,k\}$ such that $$d(f(a),\alpha_ia+\beta_iP(a)+v_i)<\epsilon.$$ Consequently, for any elementary extension $(H^*,P^*)$ of $(H,P)$ and any $a\in B_1(H^*)$, there are $\alpha,\beta\in \d$ and $v\in B_1(H)$ such that $f(a)=\alpha a+\beta P^*(a)+v$. \end{prop} \subsection{Absolutely ubiquitous structures} \ \ \noindent A source of primitive theories in classical logic comes from the notion of an \emph{absolutely ubiquitous} structure. Suppose that $\curly{L}$ is a finite first-order signature and $\mathcal{M}$ is a countable $\curly{L}$-structure. Recall that $\mathcal{M}$ is said to be \emph{locally finite} if every finitely generated substructure of $\mathcal{M}$ is finite and $\mathcal{M}$ is said to be \emph{uniformly locally finite} if there is a function $g:\n^{>0}\to\n^{>0}$ such that, for all $n\in\n^{>0}$ and all $A\subseteq M$, if $|A|\leq n$, then $|\langle A\rangle|\leq g(n)$, where $\langle A\rangle$ denotes the substructure of $\mathcal{M}$ generated by $A$. Also recall that the \emph{age of $\mathcal{M}$}, denoted $\curly{A}ge(\mathcal{M})$, is the set of isomorphism classes of finitely generated substructures of $\mathcal{M}$. Finally, we say that $\mathcal{M}$ is absolutely ubiquitous if: \begin{enumerate} \item $\mathcal{M}$ is uniformly locally finite, and \item whenever $\mathcal{N}$ is a countable, locally finite $\curly{L}$-structure with $\curly{A}ge(\mathcal{M})=\curly{A}ge(\mathcal{N})$, then $\mathcal{M}\cong \mathcal{N}$. 
\end{enumerate} It follows immediately from the definition that if $\mathcal{M}$ is an absolutely ubiquitous $\curly{L}$-structure and $T:=\operatorname{Th}(\mathcal{M})$, then $T$ is primitive and $\aleph_0$-categorical, whence model-complete (see also Lemma 2.1 of \cite{M2}). Consequently, if $T$ has quantifier elimination, then $T$ meets the hypothesis of Proposition \ref{deffunc}. It is interesting to ask when an absolutely ubiquitous structure has quantifier elimination. Note that an absolutely ubiquitous structure admits quantifier elimination if and only if it is ultrahomogeneous. Thus, we can use the classifications of absolutely ubiquitous graphs \cite{M} and ultrahomogeneous countable graphs \cite{LW} to see that there are only two situations when a countable ultrahomogeneous graph is absolutely ubiquitous: \begin{itemize} \item a disjoint union of finitely many copies of the complete graph on $\aleph_0$ many vertices; \item a $k$-partite graph, where each part is of size $\aleph_0$. \end{itemize} It follows from Proposition \ref{deffunc} that if $G$ is such a graph and $f:G^n\to G$ is a definable function, then there are vertices $g_1,\ldots,g_k\in G$ so that, for any $\vec a\in G^n$, we have $f(\vec a)=a_i$ for some $i$ or $f(\vec a)=g_j$ for some $j$. It is interesting to note that in the case of absolutely ubiquitous structures in finite \emph{relational} signatures, we can always expand the language to ensure that we have quantifier elimination while maintaining absolute ubiquity. To see this, suppose that $\mathcal{M}$ is an $\curly{L}$-structure, where $\curly{L}$ is a finite relational (classical) signature. We say that $\mathcal{M}$ is \emph{finitely partitioned} if there is a finite partition $X_1,\ldots,X_n$ of $M$ such that $\Sym(X_1)\times\cdots \times \Sym(X_n)$ is a subgroup of $\curly{A}ut(\mathcal{M})$. The main result of \cite{HM} states that $\mathcal{M}$ is absolutely ubiquitous if and only if $\mathcal{M}$ is finitely partitioned. 
Suppose now that $\mathcal{M}$ is absolutely ubiquitous. Let $X_1,\ldots,X_n$ be a finite partition of $\mathcal{M}$ witnessing that $\mathcal{M}$ is finitely partitioned. Consider the signature $\curly{L}':=\curly{L}\cup\{R_1,\ldots,R_n\}$, where $R_1,\ldots,R_n$ are new unary relation symbols, and consider the expansion $\mathcal{M}':=(\mathcal{M};X_1,\ldots,X_n)$ of $\mathcal{M}$ to an $\curly{L}'$-structure. Then $X_1,\ldots,X_n$ witness that $\mathcal{M}'$ is finitely partitioned, whence $\mathcal{M}'$ is absolutely ubiquitous. However, we now have: \begin{lemma} $\mathcal{M}'$ is ultrahomogeneous, whence $\operatorname{Th}(\mathcal{M}')$ admits quantifier elimination. \end{lemma} \begin{proof} Suppose that $A,B\subseteq M$ are finite and $f:A\to B$ is a partial automorphism of $\mathcal{M}'$. Then for any $i\in \{1,\ldots,n\}$, $f(A\cap X_i)\subseteq X_i$. Extend $f$ to $\tilde{f}:M\to M$ so that $\tilde{f}|X_i\in \operatorname{Sym}(X_i)$ for each $i\in\{1,\ldots,n\}$. Then by assumption, $\tilde{f}\in \curly{A}ut(\mathcal{M}')$. \end{proof} \begin{cor} Given any definable (in $\mathcal{M}'$) function $f:M^n\to M$, there are elements $b_1,\ldots,b_m\in M$ so that, for all $\vec a\in M^n$, we have either $f(\vec a)=a_i$ for some $i\in \{1,\ldots,n\}$ or $f(\vec a)=b_j$ for some $j\in \{1,\ldots,m\}$. \end{cor} What about when the language has function symbols? Here is an example from \cite{V}: Let $\mathcal{M}:=(\n^n,E_1,\ldots,E_n,f)$, where $E_i$ is the binary relation on $\n^n$ given by $E_i(\vec a,\vec b)\Leftrightarrow a_i=b_i$, and $f$ is the $n$-ary function on $\n^n$ given by $f(\vec a_1,\ldots, \vec a_n)=(a_{11},\ldots,a_{nn})$. It is argued in \cite{V} that $\mathcal{M}$ is an absolutely ubiquitous structure with quantifier elimination. 
It is shown in \cite{M2} that if $G$ is an absolutely ubiquitous group (considered as a structure in the pure group language), then $G$ has a characteristic subgroup $A$ of finite index such that $A$ is a finite direct product of elementary abelian groups of infinite rank. Conversely, if $G$ is a countable group with a characteristic subgroup $A$ of index $m<\infty$ which is a finite direct product of elementary abelian groups of infinite rank such that either $G=A\times F$ for some group $F$ of cardinality $m$ or $m$ is relatively prime to the orders of elements of $A$, then $G$ is absolutely ubiquitous. If the absolutely ubiquitous group $G$ admits quantifier elimination, then given any definable function $f:G^n\to G$, there is a tuple $\vec b$ from $G$ and words $w_1(\vec x,\vec b),\ldots,w_k(\vec x,\vec b)$, such that, for all $\vec a\in G^n$, there is $i\in \{1,\ldots,k\}$ such that $f(\vec a)=w_i(\vec a,\vec b)$. The question remains: which absolutely ubiquitous groups admit quantifier elimination? It is easy to see that if $G$ itself is a finite direct product of elementary abelian groups of infinite rank, then $G$ is ultrahomogeneous, so admits quantifier elimination. More generally: \begin{prop} If $G=A\times F$, where $A$ is a finite direct product of elementary abelian groups of infinite rank, $F$ is a finite ultrahomogeneous group, and $\gcd(|a|,|b|)=1$ for all $a\in A$ and $b\in F$, then $G$ is ultrahomogeneous. \end{prop} \begin{proof} Suppose that $\phi:B\to C$ is an isomorphism, where $B$ and $C$ are finite subgroups of $G$. Let $A_1,F_1$ denote the projections of $B$ onto $A$ and $F$ respectively; note that $A_1$ and $F_1$ are finite subgroups of $A$ and $F$ respectively. Next note that, for each $a\in A_1$, we have that $(a,1)\in B$. Indeed, if $(a,b)\in B$, then choosing $n\in \n$ such that $|b|$ divides $n$ and $n\equiv 1\pmod{|a|}$, we see that $(a,1)=(a,b)^n\in B$. Likewise, for every $b\in F_1$, we have $(1,b)\in B$. 
Now observe that, for all $(a,1)\in B$, there is $a'\in A$ such that $\phi(a,1)=(a',1)$. Indeed, writing $\phi(a,1)=(a',b)$, we have $(1,1)=\phi(a,1)^{|a|}=(1,b^{|a|})$, whence $b=1$. Similarly, for every $b\in F_1$, there is $b'\in F$ such that $\phi(1,b)=(1,b')$. We can thus define $\phi':A_1\to A$ by $\phi'(a)=a'$, where $\phi(a,1)=(a',1)$; note that $\phi'$ is an isomorphism between finite subgroups of $A$, so can be lifted to an automorphism $\tilde{\phi}':A\to A$. Likewise, one obtains a partial automorphism $\phi'':F_1\to F$ that can be lifted to an automorphism $\tilde{\phi}'':F\to F$. Finally, $\tilde{\phi}:G\to G$ defined by $\tilde{\phi}(a,b)=(\tilde{\phi}'(a),\tilde{\phi}''(b))$ is an automorphism of $G$ extending $\phi$. \end{proof} \begin{rmk} The ultrahomogeneous finite groups are characterized in \cite{CF}. \end{rmk} \begin{question} Given an absolutely ubiquitous group $G$, is there an extension $\curly{L}'$ of the language of groups by relation symbols and an expansion $\mathcal{G}$ of $G$ to an $\curly{L}'$-structure so that $\mathcal{G}$ admits quantifier elimination and is still absolutely ubiquitous (or at least has a primitive theory)? If the answer to this question is positive, then definable functions in absolutely ubiquitous groups are piecewise given by words as mentioned above. \end{question} \end{document}
\begin{document} \title{The Scenario Approach for Stochastic Model Predictive Control with Bounds on Closed-Loop Constraint Violations\thanks{This manuscript is the preprint of a paper submitted to Automatica and it is subject to Elsevier copyright. Elsevier maintains the sole rights of distribution or publication of the work in all forms and media. If accepted, the copy of record will be available at {\tt http://www.journals.elsevier.com/automatica/}}} \maketitle \begin{abstract} Many practical applications of control require that constraints on the inputs and states of the system be respected, while optimizing some performance criterion. In the presence of model uncertainties or disturbances, for many control applications it suffices to keep the state constraints at least for a prescribed share of the time, as \eg in building climate control or load mitigation for wind turbines. For such systems, a new control method of Scenario-Based Model Predictive Control (SCMPC) is presented in this paper. It optimizes the control inputs over a finite horizon, subject to robust constraint satisfaction under a finite number of random scenarios of the uncertainty and/or disturbances. While previous approaches have been shown to be conservative (\ie to stay far below the specified rate of constraint violations), the new method is the first to account for the special structure of the MPC problem in order to significantly reduce the number of scenarios. In combination with a new framework for interpreting the probabilistic constraints as average-in-time, rather than pointwise-in-time, the conservatism is eliminated. The presented method retains the essential advantages of SCMPC, namely the reduced computational complexity and the handling of arbitrary probability distributions. It also allows for adopting sample-and-remove strategies, in order to trade performance against computational complexity. 
\end{abstract} \section{Introduction}\label{Sec:Intro} Model Predictive Control (MPC) is a powerful approach for handling multi-variable control problems with constraints on the states and inputs. Its feedback control law can also incorporate feedforward information, \eg about the future course of references and/or disturbances, and the optimization of a performance criterion of interest. Over the past two decades, the theory of linear and robust MPC has matured considerably \cite{Mayne:2000}. There are also widespread practical applications in diverse fields \cite{QinBadg:2003}. Yet many potentials of MPC are still not fully uncovered. One active line of research is Stochastic MPC (SMPC), where the system dynamics are of a stochastic nature. They may be affected by additive disturbances \cite{BatinaEtAl:2002,CannonEtAl:2011,ChatEtAl:2011,CinqEtAl:2011,KouvEtAl:2010,LiEtAl:2002}, by random uncertainty in the system matrices \cite{CannonEtAl:2009a}, or both \cite{CannonEtAl:2009b,MunozEtAl:2005,PrimSung:2009,SchwNik:1999}. In this framework, a common objective is to minimize a cost function, while the system state is subject to chance constraints, \ie constraints that have to be satisfied only with a given probability. Stochastic systems with chance constraints arise naturally in some applications, such as building climate control \cite{OldeEtAl:2012}, wind turbine control \cite{CannonEtAl:2009b}, or network traffic control \cite{YanBit:2005}. Alternatively, they can be considered as relaxations of robust control problems, in which the robust satisfaction of state constraints can be traded for an improved cost performance. A major challenge in SMPC is the solution to chance-constrained finite-horizon optimal control problems (FHOCPs) in each sample time step. These correspond to non-convex stochastic programs, for which finding an exact solution is computationally intractable, except for very special cases \cite{KallMay:2011,Shapiro:2009}. 
Moreover, due to the multi-stage nature of these problems, it generally involves the computation of multi-variate convolution integrals \cite{CannonEtAl:2011}. In order to obtain a tractable solution, various sample-based approximation approaches have been considered, \eg \cite{Batina:2004,BlackEtAl:2010,SkafBoyd:2009}. They share the significant advantage of coping with generic probability distributions, as long as a sufficient number of random samples (or `scenarios') can be obtained. The open-loop control laws can be approximated by sums of basis functions, as in the Q-design procedure proposed by \cite{SkafBoyd:2009}. However, these early approaches of Scenario-Based MPC (SCMPC) remain computationally demanding \cite{Batina:2004} and/or of a heuristic nature, \ie without specific guarantees on the satisfaction of the chance constraints \cite{BlackEtAl:2010,SkafBoyd:2009}. More recent approaches \cite{CalFag:2013a,CalFag:2013b,Matusko:2012,PrandEtAl:2012,Schildi:2012,VayaEtAl:2012} are based on advances in the field of scenario-based optimization. However, these approaches share the drawback of being \emph{conservative} when applied in a receding horizon fashion, \ie the focus is either on obtaining a robust solution \cite{CalFag:2013a,CalFag:2013b,VayaEtAl:2012} or the chance constraints are over-satisfied by the closed loop system \cite{Matusko:2012,PrandEtAl:2012,Schildi:2012}. This conservatism of SCMPC represents a major practical issue, that is resolved by the contributions of this paper. In contrast to the previous results, the novel approach interprets the chance constraints as a time average, rather than pointwise-in-time with a high confidence, which is much less restrictive. Furthermore, the sample size is reduced by exploiting the structural properties of the finite-horizon optimal control problem \cite{SchildEtAl:2014}. 
The approach also allows for the presence of multiple simultaneous chance constraints on the state, and an a-posteriori removal of adverse samples for improving the controller performance \cite{Matusko:2012}. In the most general setting, this paper considers linear systems with stochastic additive disturbances and uncertainty in the system matrices, which may only be known through a sufficient number of random samples. The computational complexity can be traded against performance of the controller by removing samples a-posteriori, starting from a simple convex linear or quadratic program and converging to the optimal SMPC solution in the limit. The paper is organized as follows: Section \ref{Sec:ProbStat} presents a rigorous formulation of the optimal control problem that one would like to solve; Section \ref{Sec:SCMPC} describes how an approximated solution is obtained by SCMPC; Section \ref{Sec:Samples} develops the theoretical details, including the technical background and closed-loop properties; Section \ref{Sec:Exam} demonstrates the application of the method to a numerical example; and Section \ref{Sec:Conc} presents the main conclusions. \section{Optimal Control Problem}\label{Sec:ProbStat} Consider a discrete-time control system with a linear stochastic transition map \begin{equation}\label{Equ:DynSystem} x_{t+1}=A(\de_{t})x_{t}+B(\de_{t})u_{t}+w(\de_{t})\ec\quad x_{0}=\bar{x}_{0}\ec \end{equation} for some fixed initial condition $\bar{x}_{0}\in\BRn$. The \emph{system matrix} $A(\de_{t})\in\BR^{n\times n}$ and the \emph{input matrix} $B(\de_{t})\in\BR^{n\times m}$ as well as the additive disturbance $w(\de_{t})\in\BRn$ are random, as they are (known) functions of a primal uncertainty $\de_{t}$. For notational simplicity, $\de_{t}$ comprises all uncertain influences on the system at time $t$. \begin{assumption}[Uncertainty]\label{Ass:Uncertainty} (a) The uncertainties $\{\de_{0},\de_{1},...\}$, are independent and identically distributed (i.i.d.) 
random variables on a probability space $(\Delta,\Pb)$. (b) A `sufficient number' of \iid samples from $\de_{t}$ can be obtained, either empirically or by a random number generator. \end{assumption} The support set $\Delta$ of $\de_{t}$ and the probability measure $\Pb$ on $\Delta$ are entirely generic. In fact, $\Delta$ and $\Pb$ need not be known explicitly. The `sufficient number' of samples, which is required instead, will become concrete in later sections of the paper. Note that any issues arising from the definition of a $\sigma$-algebra on $(\Delta,\Pb)$ are glossed over in this paper, as they are unnecessarily technical. Instead, every relevant subset of $\Delta$ is assumed to be measurable. The system \eqref{Equ:DynSystem} can be controlled by inputs $\{u_{0},u_{1},...\}$, to be chosen from a set of feasible inputs $\BU\subset\BRm$. Since the future evolution of the system \eqref{Equ:DynSystem} is uncertain, it is generally impractical to indicate all future inputs explicitly. Instead, each $u_{t}$ should be determined by a static feedback law \begin{equation*} \psi:\BRn\to\BU\qquad\text{with}\qquad u_{t}=\psi(x_{t})\ec \end{equation*} based only on the current state of the system. The optimal state feedback law $\psi$ should be determined in order to minimize the time-average of expected stage costs $\ell:\BRn\times\BRm\to\BR_{0+}$, \begin{equation}\label{Equ:AvgCost} \frac{1}{T}\sum_{t=0}^{T-1}\E\bigl[\ell\bigl(x_{t},u_{t}\bigr)\bigr]\ef \end{equation} Each stage cost is taken in expectation $\E\bigl[\,\cdot\,\bigr]$, since its arguments $x_{t}$ and $u_{t}$ are random variables, being functions of $\{\de_{0},...,\de_{t-1}\}$. The time horizon $T$ is considered to be very large, yet it may not be precisely known at the point of the controller design. The minimization of the cost is subject to keeping the state inside a state constraint set $\BX$ for a given fraction of all time steps. 
For many applications, the robust satisfaction of the state constraint (\ie $x_{t}\in\BX$ at all times $t$) is too restrictive for the choice of $\psi$, and results in a poor performance in terms of the cost function. This is especially true in cases where the lowest values of the cost function are achieved close to the boundary of $\BX$. Moreover, it may be impossible to enforce if the support of $w(\de_{t})$ is unknown and possibly unbounded. In order to make this more precise, let $M_{t}:=\If_{\BX\co}(x_{t+1})$ denote the random variable indicating that $x_{t+1}\notin\BX$, \ie $\If_{\BX\co}:\BRn\to\{0,1\}$ is the indicator function on the complement $\BX\co$ of $\BX$. The expected time-average of constraint violations should be upper bounded by some $\ep\in(0,0.5)$, \begin{equation}\label{Equ:AvgViol} \E\bigl[\frac{1}{T}\sum_{t=0}^{T-1}M_{t}\bigr]\leq\ep\ef \end{equation} \begin{assumption}[Control Problem]\label{Ass:Control} (a) The state of the system can be measured at each time step $t$. (b) The set of \emph{feasible inputs} $\BU$ is bounded and convex. (c) The \emph{state constrained set} $\BX$ is convex. (d) The stage cost $\ell(\cdot,\cdot)$ is a convex function. \end{assumption} Assumption \ref{Ass:Control}(b) holds for most practical applications, and very large artificial bounds can always be introduced for input channels without natural bounds. Typical choices for the stage cost $\ell$ include \begin{subequations}\label{Equ:StageCost}\begin{align} &\ell(\xi,\upsilon):=\bigl\|Q_\ell\xi\bigr\|_{1}+\bigl\|R_\ell\upsilon\bigr\|_{1}\ec\\ \text{or}\quad &\ell(\xi,\upsilon):=\bigl\|Q_\ell\xi\bigr\|_\infty+\bigl\|R_\ell\upsilon\bigr\|_\infty\ec\\ \text{or}\quad &\ell(\xi,\upsilon):=\bigl\|Q_\ell\xi\bigr\|_{2}^{2}+\bigl\|R_\ell\upsilon\bigr\|_{2}^{2}\ec \end{align}\end{subequations} where $Q_\ell\in\BR^{n\times n}$ and $R_\ell\in\BR^{m\times m}$ are positive semi-definite weighting matrices. 
Typical choices for the constraints $\BU$ and $\BX$ are polytopic or ellipsoidal sets. Combining the previous discussions, the \emph{optimal control problem (OCP)} can be stated as follows: \begin{subequations}\label{Equ:OCP} \begin{align} \min_{\psi}\quad&\frac{1}{T}\sum_{t=0}^{T-1}\E\bigl[\ell\bigl(x_{t},u_{t}\bigr)\bigr]\ec\\ \st\quad& x_{t+1}=A(\de_{t})x_{t}+B(\de_{t})u_{t}+w(\de_{t})\ec\enspace x_{0}=\bar{x}_{0} \quad \fa t=0,...,T-1\ec\\ \pst\quad&\E\bigl[\frac{1}{T}\sum_{t=0}^{T-1}\If_{\BX\co}(x_{t})\bigr]\leq\ep\ec\\ \pst\quad&\hspace*{0.1cm}u_{t}=\psi(x_{t})\quad\fa t=0,...,T-1\ef\hspace*{1.0cm} \end{align} \end{subequations} The equality constraints (\ref{Equ:OCP}b) are understood to be substituted recursively to eliminate all state variables $x_{0},x_{1},...,x_{T-1}$ from the problem. Thus only the state feedback law $\psi$ remains as a free variable in \eqref{Equ:OCP}. \begin{remark}[Alternative Formulations]\label{Rem:Formulation} (a) Instead of the sum of expected values, the cost function (\ref{Equ:OCP}a) can also be defined as a desired quantile of the sum of discounted stage costs. Then the problem formulation corresponds to a minimization of the `value-at-risk', see \eg \cite{Shapiro:2009}. (b) Multiple chance constraints on the state $\BX_{j}$, each with an individual probability level $\ep_{j}$, can be included without further complications. A single chance constraint is considered here for notational simplicity. \end{remark} Many practical control problems can be cast in the general form of \eqref{Equ:OCP}. For example in building climate control \cite{OldeEtAl:2012}, the energy consumption of a building should be minimized, while its internal climate is subject to uncertain weather conditions and the occupancy of the building. The comfort range for the room temperatures may occasionally be violated without major harm to the system. 
Another example is wind turbine control \cite{CannonEtAl:2009b}, where the power efficiency of a wind turbine should be maximized, while its dynamics are subject to uncertain wind conditions. High stress levels in the blades must not occur too often, in order to achieve a desired fatigue life of the turbine. \section{Scenario-Based Model Predictive Control}\label{Sec:SCMPC} The OCP is generally intractable, as it involves an infinite-dimensional decision variable $\psi$ (the state feedback law) and a large number of constraints (growing with $T$). Therefore it is common to approximate it by various approaches, such as \emph{Model Predictive Control (MPC)}. \subsection{Stochastic Model Predictive Control (SMPC)} The basic concept of MPC is to solve a tractable counterpart of \eqref{Equ:OCP} over a small horizon $N$ repeatedly at each time step. Only the first input of this solution is applied to the system \eqref{Equ:DynSystem}. In Stochastic MPC (SMPC), a \emph{Finite Horizon Optimal Control Problem (FHOCP)} is formulated by introducing chance constraints on the state: \begin{subequations}\label{Equ:FHOCP} \begin{align} \min_{u_{0|t},...,u_{N-1|t}}\quad &\sum_{i=0}^{N-1} \E\bigl[\ell\bigl(x_{i|t},u_{i|t}\bigr)\bigr]\ec\\ \st\quad & x_{i+1|t}=A(\de_{t+i})x_{i|t}+B(\de_{t+i})u_{i|t}+w(\de_{t+i})\ec\,\, x_{0|t}=x_{t}\quad\fa i=0,...,N-1\ec\\ \pst\quad&\hspace*{0.24cm}\Pb\bigl[x_{i+1|t}\notin\BX\bigr]\leq\ep_{i}\quad\fa i=0,...,N-1\ec\\ \pst\quad&\hspace*{0.32cm}u_{i|t}\in\BU\quad\fa i=0,...,N-1\ef \end{align} \end{subequations} Here $x_{i|t}$ and $u_{i|t}$ denote predictions and plans of the state and input variables made at time $t$, for $i$ steps into the future. The current measured state $x_{t}$ is introduced as an initial condition for the dynamics. The predicted states $x_{1|t},...,x_{N|t}$ are understood to be eliminated by recursive substitution of (\ref{Equ:FHOCP}b). 
Note that the predicted states are random by the influence of the uncertainties $\de_{t},...,\de_{t+N-1}$. The \emph{probability levels} $\ep_{i}$ in the \emph{chance constraints} (\ref{Equ:FHOCP}c) usually coincide with $\ep$ from the OCP \cite{CinqEtAl:2011,OldeEtAl:2012,SchwNik:1999}, but they may generally differ \cite{YanBit:2005}. Some formulations also involve chance constraints over the entire horizon \cite{LiEtAl:2002,CannonEtAl:2009b}, or as a combination with robust constraints \cite{KouvEtAl:2010,CannonEtAl:2011}. Other alternatives of SMPC consider integrated chance constraints \cite{ChatEtAl:2011}, or constraints on the expectation of the state \cite{PrimSung:2009}. \begin{remark}[Terminal Cost]\label{Rem:TermCost} An optional (convex) terminal cost $\ell_{f}:\BRn\to\BR_{0+}$ can be included in the FHOCP \cite{Macie:2002,Mayne:2009}. In this case the term \begin{equation*} \E\bigl[\ell_{f}\bigl(x_{N|t}\bigr)\bigr] \end{equation*} would be added to the cost function (\ref{Equ:FHOCP}a). \end{remark} The state feedback law provided by SMPC is given by a receding horizon policy: the current state $x_{t}$ is substituted into (\ref{Equ:FHOCP}b), then the FHOCP is solved for an input sequence $\{u\op_{0|t},...,u\op_{N-1|t}\}$, and the current input is set to $u_{t}:=u\op_{0|t}$. This means that the FHOCP must be solved online at each time step $t$, using the current measurement of the state $x_{t}$. However, the FHOCP is a stochastic program that remains difficult to solve, except for very special cases. In particular, the feasible set described by chance constraints is generally non-convex, despite the convexity of $\BX$, and hard to determine explicitly. Hence a further approximation shall be made by scenario-based optimization. 
\subsection{Scenario-Based Model Predictive Control (SCMPC)} The basic idea of Scenario-Based MPC (SCMPC) is to compute an optimal finite-horizon input trajectory $\{u'_{0|t},...,u'_{N-1|t}\}$ that is feasible under $K$ sampled `scenarios' of the uncertainty. Clearly, the scenario number $K$ has to be selected carefully in order to attain the desired properties of the controller. In this section, the basic setup of SCMPC is discussed, while the selection of a value for $K$ is deferred until Section \ref{Sec:Samples}. More concretely, let $\de^{(1)}_{i|t},...,\de^{(K)}_{i|t}$ be \iid samples of $\de_{t+i}$, drawn at time $t\in\BN$ for the prediction steps $i=0,...,N-1$. For convenience, they are combined into \emph{full-horizon samples} $\om^{(k)}_{t}:=\{\de^{(k)}_{0|t},...,\de^{(k)}_{N-1|t}\}$, also called \emph{scenarios}. The \emph{Finite-Horizon Scenario Program (FHSCP)} then reads as follows: \begin{subequations}\label{Equ:FHSCP} \begin{align} \min_{u_{0|t},...,u_{N-1|t}}\quad & \sum_{k=1}^{K}\sum_{i=0}^{N-1}\ell\bigl(x^{(k)}_{i|t},u_{i|t}\bigr)\ec\\ \st\quad & x^{(k)}_{i+1|t}=A(\de^{(k)}_{i|t})x^{(k)}_{i|t}+B(\de^{(k)}_{i|t})u_{i|t} +w(\de^{(k)}_{i|t})\ec\,\, x^{(k)}_{0|t}=x_{t}\quad\fa i=0,...,N-1,\; k=1,...,K,\\ \pst\quad & x^{(k)}_{i+1|t}\in\BX\;\;\fa i=0,...,N-1,\; k=1,...,K,\\ \pst\quad & u_{i|t}\in\BU\;\;\fa i=0,...,N-1\ef \end{align}\end{subequations} The dynamics (\ref{Equ:FHSCP}b) provide $K$ different state trajectories over the prediction horizon, each corresponding to one sequence of affine transition maps defined by a particular scenario $\om^{(k)}_{t}$. Note that these $K$ state trajectories are not fixed, as they are still subject to the inputs $u_{0|t},...,u_{N-1|t}$. The cost function (\ref{Equ:FHSCP}a) approximates (\ref{Equ:FHOCP}a) as an average over all $K$ scenarios. The state constraints (\ref{Equ:FHSCP}c) are required to hold for $K$ sampled state trajectories over the prediction horizon. 
Applying a receding horizon policy, the SCMPC feedback law is defined as follows (see also Figure \ref{Fig:Algorithm}, for $R=0$). At each time step $t\in\BN$ the current state measurement $x_{t}$ is substituted into (\ref{Equ:FHSCP}b), and the current input $u_{t}:=u'_{0|t}$ is set to the first of the optimal FHSCP solution $\{u'_{0|t},...,u'_{N-1|t}\}$, which is called the \emph{scenario solution}. Unlike many MPC approaches, SCMPC does not have an inherent guarantee of \emph{recursive feasibility}, in the sense of \cite[Sec.\,4]{Mayne:2000}. Hence for a proper analysis of the closed-loop system, the following is assumed. \begin{assumption}[Resolvability]\label{Ass:Resolvability} Under the SCMPC regime, each FHSCP admits a feasible solution at every time step $t$ almost surely. \end{assumption} While Assumption \ref{Ass:Resolvability} appears to be restrictive from a theoretical point of view, it is often reasonable from a practical point of view. For some applications, such as buildings \cite{OldeEtAl:2012}, recursive feasibility may hold by intuition, or it may be ensured by the use of \emph{soft constraints} \cite[Sec.\,2]{QinBadg:2003}. All in all, MPC remains a useful tool in practice, even for difficult stochastic systems \eqref{Equ:DynSystem} without the possibility of an explicit guarantee of recursive feasibility. The following are possible alternatives and also convex formulations of \eqref{Equ:FHSCP}. The reasoning in each case is based on the theory in \cite{SchildEtAl:2014} and omitted for brevity. \begin{remark}[Alternative Formulations] (a) Instead of the average cost in (\ref{Equ:FHSCP}a), the minimization may concern the cost of a nominal trajectory, as \eg in \cite{Schildi:2012,PrandEtAl:2012}; or the average may be taken over any sample size other than $K$. (b) The inclusion of additional chance constraints into \eqref{Equ:FHSCP}, as mentioned in Remark \ref{Rem:Formulation}(b), is straightforward. 
The number of scenarios $K_{j}$ may generally differ between multiple chance constraints. (c) In case of a value-at-risk formulation, as in Remark \ref{Rem:Formulation}(a), the average cost in (\ref{Equ:FHSCP}a) is replaced by the maximum: \vspace*{-0.3cm} \begin{equation*} \text{``}\sum_{k=1}^{K}\text{''}\qquad\longrightarrow\qquad \text{``}\max_{k=1,...,K}\text{''}\ec\vspace*{-0.3cm} \end{equation*} where the sample size $K$ must be selected according to the desired risk level. \end{remark} \begin{remark}[Control Parameterization]\label{Rem:ContrParam} In the FHSCP, the predicted control inputs $u_{0|t},...,u_{N-1|t}$ may also be parameterized as a weighted sum of basis functions of the uncertainty, as proposed in \cite{SkafBoyd:2009, VayaEtAl:2012}. In particular, let $e_{1},...,e_{m}$ be the $J_{0}:=m$ unit vectors in $\BRm$, and for each time step $i=1,...,N$ let $q^{(j)}_{i|t}:\Delta^{i}\to\BRm$ be a finite set $j\in\{1,...,J_{i}\}$ of pre-selected basis functions. Then \begin{align*} &u_{0|t} := \sum_{j=1}^{J_{0}}\phi_{0}^{(j)}e_{j}\,,\\ &u_{i|t} := \sum_{j=1}^{J_{i}}\phi_{i}^{(j)} q^{(j)}_{i|t}\bigl(\de^{(k)}_{0|t},...,\de^{(k)}_{i-1|t}\bigr)\quad\fa i=1,...,N-1\,, \end{align*} can be substituted into problem \eqref{Equ:FHSCP}, so that the weights $\phi_{i}^{(j)}\in\BR$ for $i=0,...,N-1$ and $j=1,...,J_{i}$ become the new decision variables. \end{remark} A control parameterization with an increasing number of basis functions $J_{1},...,J_{N-1}$ generally improves the quality of the SCMPC feedback, while increasing the number of decision variables and hence the computational complexity; see \cite{SkafBoyd:2009,VayaEtAl:2012} for more details. Given the sampled scenarios, \eqref{Equ:FHSCP} is a convex optimization program for which efficient solution algorithms exist, depending on its structure \cite{BoydVan:2004}. In particular, if $\BX$ and $\BU$ are polytopic (respectively ellipsoidal) sets, then the FHSCP has linear (second-order cone) constraints. 
If the stage cost is either (\ref{Equ:StageCost}a,b), then the FHSCP has a reformulation with a linear objective function, using auxiliary variables. If the stage cost is (\ref{Equ:StageCost}c), then the FHSCP can be expressed as a quadratic program. More details on these formulation procedures are found in \cite[pp.\,154\,f.]{Macie:2002}. \subsection{A-Posteriori Scenario Removal} A key merit of SCMPC is that it renders the uncertain control system (\ref{Equ:FHOCP}b) into multiple deterministic affine systems (\ref{Equ:FHSCP}b) by substituting particular scenarios. This significantly simplifies the solution to the FHSCP, as compared to the FHOCP. However, by introducing these random scenarios, a randomizing element is added to the SCMPC feedback law. In particular, the closed-loop system may occasionally show an erratic behavior due to highly unlikely outliers in the sampled scenarios. This effect can be mitigated by a-posteriori scenario removal, see \cite{CampGar:2011}. This allows for the \emph{state constraints} (\ref{Equ:FHSCP}c) corresponding to $R>0$ scenarios to be removed \emph{after} the outcomes of all samples have been observed. In exchange, the original sample size $K$ must be (appropriately) increased over its value for $R=0$. Any appropriate combination $(K,R)$ is called a \emph{sample-removal pair}. The choice of appropriate values for $K$ and $R$ is deferred to Section \ref{Sec:Samples}. The selection of removed scenarios is performed by a \emph{(scenario) removal algorithm} \cite[Def.\,2.1]{CampGar:2011}. \begin{definition}[Removal Algorithm]\label{Def:RemAlg} (a) For each $\xi\in\BRn$, the \emph{(scenario) removal algorithm} $\CA_{\xi}:\Delta^{NK}\to \Delta^{N(K-R)}$ is a deterministic function selecting $(K-R)$ out of $K$ scenarios $\{\om^{(1)}_{t},...,\om^{(K)}_{t}\}$. 
(b) The selected scenarios at time step $t$ shall be denoted by \begin{equation*} \Om_{t}:=\CA_{x_{t}}\bigl(\om^{(1)}_{t},...,\om^{(K)}_{t}\bigr)\ef \end{equation*} \end{definition} Definition \ref{Def:RemAlg} is very general, in the sense that it covers a great variety of possible scenario removal algorithms. However, the most common and practical algorithms are described below: \begin{description} \item[\emph{\textmd{Optimal Removal:}}] The FHSCP is solved for all possible combinations of choosing $R$ out of $K$ scenarios. Then the combination that yields the lowest cost function value of all the solutions is selected. This requires the solution to $K$ choose $R$ instances of the FHSCP, a complexity that is usually prohibitive for larger values of $R$.\vspace*{0.2cm} \item[\emph{\textmd{Greedy Removal:}}] The FHSCP is first solved with all $K$ scenarios. Then, in each of $R$ consecutive steps, the state constraints of a single scenario are removed that yields the biggest improvement, either in the total cost or in the first stage cost. Thus the procedure terminates after solving $KR-R(R-1)/2$ instances of FHSCP.\vspace*{0.2cm} \item[\emph{\textmd{Marginal Removal:}}] The FHSCP is first solved with the state constraints of all $K$ scenarios. Then, in each of $R$ consecutive steps, the state constraints of a single scenario are removed based on the highest Lagrange multiplier. Hence the procedure requires the solution to $K$ instances of FHSCP. \end{description} Figure \ref{Fig:Algorithm} depicts an algorithmic overview of SCMPC, for the general case with scenario removal $R>0$. For the case without scenario removal, consider $R=0$ and the selected scenarios $\Om_{t}:=\{\om^{(1)}_{t},...,\om^{(K)}_{t}\}$. 
\vspace*{0.3cm} \begin{figure} \caption{Schematic overview of the SCMPC algorithm, for the case with scenario removal ($R>0$) and without scenario removal ($R=0$).}\label{Fig:Algorithm} \end{figure} \section{Problem Structure and Sample Complexity}\label{Sec:Samples} For the SCMPC algorithm described in Section \ref{Sec:SCMPC}, the sample-removal pair $(K,R)$ remains to be specified. Appropriate values for $K$ and $R$ are theoretically derived in this section. Their values generally depend on the control system and the constraints, and $K$ is referred to as the \emph{sample complexity} of the SCMPC problem. For some intuition about this problem, suppose that $R\geq 0$ is fixed and the sample size $K$ is increased. This means that the solution to the FHSCP becomes robust to more scenarios, with the following consequences. First, the average-in-time state constraint violations \eqref{Equ:AvgViol} decrease, in general. Therefore the state constraint will translate into a lower bound on $K$. Second, the computational complexity increases as well as the average-in-time closed-loop cost \eqref{Equ:AvgCost}, in general. Therefore the objective is to choose $K$ as small as possible, and ideally equal to its lower bound. The higher the number of removed constraints $R\geq 0$, the higher will be the lower bound on $K$, in order for the state constraints \eqref{Equ:AvgViol} to be satisfied. Now consider pairs $(R,K)$ of removed constraints $R$ together with their corresponding lower bounds $K$, which equally satisfy the state constraints \eqref{Equ:AvgViol}. For the intuition, suppose $R$ is increased, so $K$ increases as well. Then the computational complexity grows, due to more constraints in the FHSCP and the removal algorithm. At the same time, the solution quality of the FHSCP improves, in general, and hence the average-in-time closed-loop cost \eqref{Equ:AvgCost} decreases. 
Therefore $R$ is usually fixed to a value that is as high as admitted by the available computational resources. \subsection{Support Rank}\label{Sec:SRank} According to the classic scenario approach \cite{CampGar:2008,CampGar:2011}, the relevant quantity for determining the sample size $K$ for a single chance constraint (with a fixed $R$) is the number of \emph{support constraints} \cite[Def.\,2.1]{CampGar:2008}. In fact, $K$ grows with the (unknown) number of support constraints, so the goal is to obtain a tight upper bound. For the classic scenario approach, this upper bound is given by the dimension of the decision space \cite[Prop.\,2.2]{CampGar:2008}, \ie $Nm$ in the case of the FHSCP. The FHSCP is a multi-stage stochastic program, with multiple chance constraints (namely $N$, one per stage). This requires an extension to the classic scenario approach; the reader is referred to \cite{SchildEtAl:2014} for more details. Now each chance constraint contributes an individual number of support constraints, to which an upper bound must be obtained. These individual upper bounds are provided by the \emph{support rank} of each chance constraint \cite[Def.\,3.6]{SchildEtAl:2014}. \begin{definition}[Support Rank]\label{Def:SuppRank} (a) The \emph{unconstrained subspace} $\CL_{i}$ of a constraint $i\in\{0,...,N-1\}$ in (\ref{Equ:FHSCP}c) is the largest (in the set inclusion sense) linear subspace of the search space $\BR^{Nm}$ that remains unconstrained by all sampled instances of $i$, almost surely. (b) The \emph{support rank} of a constraint $i\in\{0,...,N-1\}$ in (\ref{Equ:FHSCP}c) is \begin{equation*} \rho_{i}:=Nm-\dim\CL_{i}\ec \end{equation*} where $\dim\CL_{i}$ represents the dimension of the unconstrained subspace $\CL_{i}$. \end{definition} Note that the support rank is an inherent property of a particular chance constraint and it is not affected by the simultaneous presence of other constraints. 
Hence the set of constraints of the FHSCP may change, for instance, due to the reformulations of Remark \ref{Rem:Formulation}. Besides the extension to multiple chance constraints, the support rank has the merit of a significant reduction of the upper bound on the number of support constraints. Indeed, the following two lemmas replace the classic upper bound $Nm$ with much lower values, such as $l\leq n$ or $m$, depending on the problem structure. For systems affected by \emph{additive} disturbances only, the support rank of any state constraint in the FHSCP is given by the support rank $l\leq n$ of $\BX$ in $\BRn$ (\ie the co-dimension of the largest linear subspace that is unconstrained by $\BX$). \begin{lemma}[Pure Additive Disturbances]\label{The:AddDist} Let $l\leq n$ be the support rank of $\BX$ and suppose that $A\bigl(\de^{(k)}_{i|t}\bigr)\equiv A$ and $B\bigl(\de^{(k)}_{i|t}\bigr)\equiv B$ are constant and the control is not parameterized (as in Remark \ref{Rem:ContrParam}). Then the support rank of any state constraint $i\in\{0,...,N-1\}$ in (\ref{Equ:FHSCP}c) is at most $l$. \end{lemma} For systems affected by \emph{additive and multiplicative} disturbances, Lemma \ref{The:AddDist} no longer holds. However, it will be seen that for the desired closed-loop properties, the relevant quantity for selecting the sample size $K$ is the support rank $\rho_{1}$ of the state constraint on $x_{1|t}$ only. For this first predicted step, the support rank is restricted to at most $m$, under both additive and multiplicative disturbances. \begin{lemma}[Additive and Multiplicative Disturbances]\label{The:FirstStep} The support rank $\rho_{1}$ of constraint $i=1$ in (\ref{Equ:FHSCP}c) is at most $m$. \end{lemma} For the sake of readability, the proofs of Lemmas \ref{The:AddDist} and \ref{The:FirstStep} are deferred to Appendix \ref{Sec:LemProof}. They effectively decouple the support rank, and hence the sample size $K$, from the horizon length $N$. 
Note that the result of Lemma \ref{The:FirstStep} holds also for the parameterized control laws of Remark \ref{Rem:ContrParam}. In this case, it decouples the sample size $K$ from the number of basis functions $J_{i}$ for all stages $i=1,...,N-1$. Tighter bounds of $\rho_{1}$ than those in Lemmas \ref{The:AddDist} and \ref{The:FirstStep} may exist, resulting from a special structure of the system \eqref{Equ:DynSystem} and/or the state constraint set $\BX$. The basic insights to exploit this can be found in the Appendix \ref{Sec:LemProof} and \cite{SchildEtAl:2014}. \subsection{Sample Complexity}\label{Sec:SampComp} This section describes the selection of the sample-removal pair $(K,R)$, based on a bound of the support rank $\rho_{1}$. Throughout this subsection, the initial state $x_{t}$ is considered to be fixed to an arbitrary value. Let $\Vm_{t}|x_{t}$ denote the \emph{(first step) violation probability}, \ie the probability with which the first predicted state falls outside of $\BX$: \begin{equation}\label{Equ:DefViol} \Vm_{t}|x_{t}:= \Pb\bigl[A(\de_{t})x_{t}+B(\de_{t})u'_{0|t}+w(\de_{t})\notin\BX\,\big|\,x_{t}\bigr]\ef \end{equation} Recall that $u'_{0|t}$ denotes the first input of the scenario solution $\{u'_{0|t},...,u'_{N-1|t}\}$. Clearly, $u'_{0|t}$ and $\Vm_{t}|x_{t}$ depend on the scenarios $\Om_{t}$ that are substituted into the FHSCP at time $t$. The notation $u'_{0|t}(\Om_{t})$ and $\Vm_{t}|x_{t}(\Om_{t})$ shall be used occasionally to emphasize this fact. The violation probability $\Vm_{t}|x_{t}(\Om_{t})$ can be considered as a random variable on the probability space $(\Delta^{KN},\Pb^{KN})$, with support in $[0,1]$. Here $\Delta^{KN}$ and $\Pb^{KN}$ denote the $KN$-th product of the set $\Delta$ and the measure $\Pb$, respectively. For distinction, the expectation operator on $(\Delta,\Pb)$ is denoted $\E$, and that on $(\Delta^{KN},\Pb^{KN})$ is denoted $\E^{KN}$. 
The distribution of $\Vm_{t}|x_{t}(\Om_{t})$ is unknown, being a complicated function of the entire control problem \eqref{Equ:FHOCP} and the removal algorithm $\CA_{x_{t}}$. However, it is possible to derive the following upper bound on this distribution. \begin{lemma}[Upper Bound on Distribution]\label{The:DistrBound} Let Assumptions \ref{Ass:Uncertainty}, \ref{Ass:Control}, \ref{Ass:Resolvability} hold and $x_{t}\in\BRn$ be an arbitrary initial state. For any violation level $\nu\in[0,1]$, \begin{subequations} \begin{equation}\label{Equ:DistrBound} \Pb^{KN}\bigl[\Vm_{t}|x_{t}(\Om_{t})>\nu\bigr]\leq U_{K,R,\rho_{1}}(\nu)\:, \hspace*{2.03cm} \end{equation} \begin{equation} U_{K,R,\rho_{1}}(\nu):= \min\Bigl\{1,\binom{R+\rho_{1}-1}{R}\B\bigl(\nu;K,R+\rho_{1}-1\bigr)\Bigr\}\:, \end{equation} \end{subequations} where $\B(\,\cdot\,;\,\cdot\,,\,\cdot\,)$ represents the beta distribution function \cite[frm.\,26.5.3,\,26.5.7]{Abramowitz:1970}, \begin{equation*} \B\bigl(\nu;K,R+\rho_{1}-1\bigr):= \sum_{j=0}^{R+\rho_{1}-1}\binom{K}{j}\nu^{j}(1-\nu)^{K-j}\:. \end{equation*} \end{lemma} \begin{proof} The proof is a straightforward extension of \cite[Thm.\,6.7]{SchildEtAl:2014}, where the bound on $\Vm_{t}|x_{t}(\Om_{t})$ is saturated at $1$. 
\end{proof} This paper exploits the result of Lemma \ref{The:DistrBound} to obtain an upper bound on the expectation \begin{equation} \E^{KN}\bigl[\Vm_{t}\,\big|\,x_{t}\bigr]:=\int_{\Delta^{KN}}\Vm_{t}|x_{t}(\Om_{t})\dm\Pb^{KN}\ef \end{equation} A reformulation via the indicator function $\If:\Delta^{KN}\to\{0,1\}$ yields that \begin{align}\label{Equ:NumInt} \E^{KN}\bigl[\Vm_{t}\,\big|\,x_{t}\bigr] &=\int_{[0,1]}\int_{\Delta^{KN}}\If\bigl(\Vm_{t}|x_{t}(\Om_{t})>\nu\bigr)\dm\Pb^{KN}d\nu \nonumber\\ &=\int_{[0,1]}\Pb^{KN}\bigl[\Vm_{t}|x_{t}(\Om_{t})>\nu\bigr]\dm\nu\nonumber\\ &\leq\int_{[0,1]}U_{K,R,\rho_{1}}(\nu)\dm\nu\ef \end{align} \begin{definition}[Admissible Sample-Removal Pair]\label{Def:AdmPair} A sample-removal pair $(K,R)$ is \emph{admissible} if its substitution into \eqref{Equ:NumInt} yields $\E^{KN}\bigl[\Vm_{t}\,\big|\,x_{t}\bigr]\leq\ep$. \end{definition} Whether a given sample-removal pair $(K,R)$ is admissible can be tested by performing the one-dimensional numerical integration \eqref{Equ:NumInt}. It can easily be seen that the integral value \eqref{Equ:NumInt} monotonically decreases with $K$ and monotonically increases with $R$. Hence, if either $K$ or $R$ is fixed, an admissible sample-removal pair $(K,R)$ can be determined \eg by a bisection method. Moreover, if $R$ is fixed, there always exist $K$ large enough to generate an admissible pair $(K,R)$. \begin{remark}[No Scenario Removal]\label{Rem:NoRemoval} If $R=0$, the integration \eqref{Equ:NumInt} can be replaced by the exact analytic formula \begin{equation}\label{Equ:SimpNumInt} \E^{KN}\bigl[\Vm_{t}\,\big|\,x_{t}\bigr]\leq\frac{\rho_{1}}{K+1}\ef \end{equation} \end{remark} Figure \ref{Fig:ViolProb} illustrates the monotonic relationship of the upper bound \eqref{Equ:NumInt} in $K$ and $R$. Supposing that $R=0,30,100$ is fixed, the corresponding admissible pair $(K,R)$ can be found by moving along the graphs until the desired violation level $\ep$ is reached. 
The solid and the dashed line correspond to different support dimensions $\rho_{1}=2$ and $\rho_{1}=5$. \begin{figure} \caption{Upper bound on the expected violation probability $\E^{KN}\bigl[\Vm_{t}\,\big|\,x_{t}\bigr]$ as a function of the sample size $K$, for removal sizes $R=0,30,100$ and support dimensions $\rho_{1}=2$ (solid) and $\rho_{1}=5$ (dashed).}\label{Fig:ViolProb} \end{figure} \subsection{Closed-Loop Properties} This section analyzes the closed-loop properties of the control system under the SCMPC law for an admissible sample-removal pair $(K,R)$. To this end, the underlying stochastic process is first described. Recall that \begin{itemize} \item $x_{0},...,x_{T-1}$ is the closed-loop trajectory, where $x_{t}$ depends on all past uncertainties $\de_{0},...,\de_{t-1}$ as well as all past scenarios $\Om_{0},...,\Om_{t-1}$; \item $\Vm_{0},...,\Vm_{T-1}$ are the violation probabilities, where $\Vm_{t}$ depends on $x_{t}$ and $\Om_{t}$, and hence on $\Om_{0},...,\Om_{t}$ and $\de_{0},...,\de_{t-1}$; \item $M_{0},...,M_{T-1}$ indicate the actual violation of the constraints, where $M_{t}$ depends on $x_{t+1}$, and hence on $\Om_{0},...,\Om_{t}$ and $\de_{0},...,\de_{t}$. \end{itemize} At each time step $t$, there are a total of $D:=(KN+1)$ random variables, namely the scenarios together with the disturbance $\{\de_{t},\Om_{t}\}\in\Delta^{(KN+1)}=\Delta^{D}$. In order to simplify notations, define \begin{equation*} \CF_{t}:=\{\de_{0},\Om_{0},...,\de_{t},\Om_{t}\}\in\Delta^{(t+1)D}\ec \end{equation*} for any $t\in\{0,...,T-1\}$. These auxiliary variables allow for the random variables $x_{t}(\CF_{t-1})$, $\Vm_{t}(\CF_{t-1},\Om_{t})$, $M_{t}(\CF_{t})$ to be expressed in terms of their elementary uncertainties. Moreover, let $\Pb^{(t+1)D}$ denote the probability measure and $\E^{(t+1)D}$ the expectation operator on $\Delta^{(t+1)D}$, for any $t\in\{0,...,T-1\}$. 
Observe that $M_{t}\in\{0,1\}$ is a Bernoulli random variable with (random) parameter $\Vm_{t}$, because \begin{align}\label{Equ:BinVar} \E\bigl[M_{t}\,\big|\,\CF_{t-1},\Om_{t}\bigr] &=\int_{\Delta}M_{t}(\CF_{t})\dm\Pb(\de_{t})\nonumber\\ &=\Vm_{t}(\CF_{t-1},\Om_{t}) \end{align} for any values of $\CF_{t-1},\Om_{t}$. \begin{theorem}\label{The:ConvExp} Let Assumptions \ref{Ass:Uncertainty}, \ref{Ass:Control}, \ref{Ass:Resolvability} hold and $(K,R)$ be an admissible sample-removal pair. Then the expected time-average of closed-loop constraint violations \eqref{Equ:AvgViol} remains below the specified level $\ep$, \vspace*{-0.3cm} \begin{equation}\label{Equ:AsymExp} \E^{TD}\bigl[\frac{1}{T}\sum_{t=0}^{T-1}M_{t}\bigr]\leq\ep\ef\vspace*{-0.3cm} \end{equation} for any $T\in\BN$. \end{theorem} \begin{proof} By linearity of the expectation operator, \begin{align*} &\E^{TD}\bigl[\frac{1}{T}\bigl(M_{0}+M_{1}+...+M_{T-1}\bigr)\bigr]\\ &\,\,=\frac{1}{T}\bigl(\E^{D}\bigl[M_{0}\bigr]+\E^{2D}\bigl[M_{1}\bigr]+...+\E^{TD}\bigl[M_{T-1}\bigr]\bigr)\nonumber\\ &\,\,=\frac{1}{T}\bigl(\E^{D-1}\bigl[\Vm_{0}\bigr]+\E^{2D-1}\bigl[\Vm_{1}\bigr]+...+\E^{TD-1}\bigl[\Vm_{T-1}\bigr]\bigr)\,, \end{align*} by virtue of \eqref{Equ:BinVar}. Moreover, for any $t\in\{0,...,T-1\}$, \begin{equation*} \E^{(t+1)D-1}\bigl[\Vm_{t}\bigr] =\int_{\Delta^{tD}}\underbrace{\E^{D-1}\bigl[\Vm_{t}\,\big|\,\CF_{t-1}\bigr]}_{\leq\ep} \dm\Pb^{tD}\leq\ep\,, \end{equation*} where the integrand is pointwise upper bounded by $\ep$ because $(K,R)$ is an admissible sample-removal pair. \end{proof} Theorem \ref{The:ConvExp} shows that the chance constraints of the OCP can be expected to be satisfied over any finite time horizon $T$. The next Lemma \ref{The:AsymProb} sets the stage for an even stronger result, Theorem \ref{The:ConvSure}, showing that the chance constraints are satisfied almost surely as $T\to\infty$. 
\begin{lemma}\label{The:AsymProb} If Assumptions \ref{Ass:Uncertainty}, \ref{Ass:Control}, \ref{Ass:Resolvability} hold, then \vspace*{-0.3cm} \begin{equation}\label{Equ:AsymProb} \lim_{T\to\infty}\frac{1}{T}\sum_{t=0}^{T-1} \Bigl(M_{t}-\E^{D-1}\bigl[\Vm_{t}\big|\CF_{t-1}\bigr]\Bigr)=0\vspace*{-0.3cm} \end{equation} almost surely. \end{lemma} \begin{proof} For any $t\in\BN$, define $Z_{t}:=M_{t}-\E^{D-1}\bigl[\Vm_{t}\big|\CF_{t-1}\bigr]$ and observe that \begin{align}\label{Equ:AsymProb1} &\E^{D}\bigl[Z_{t}\big|\CF_{t-1}\bigr]\\ &\,\,\,=\E^{D}\bigl[M_{t}\big|\CF_{t-1}\bigr]-\E^{D}\bigl[\E^{D-1}\bigl[\Vm_{t}\big| \CF_{t-1}\bigr]\big|\CF_{t-1}\bigr]\nonumber\\ &\,\,\,=\E^{D}\bigl[M_{t}\big|\CF_{t-1}\bigr]-\E^{D-1}\bigl[\Vm_{t}\big|\CF_{t-1}\bigr] \nonumber\\ &\,\,\,=0\ec \end{align} by virtue of \eqref{Equ:BinVar}. In probabilistic terms, this says that $\{Z_{t}\}_{t\in\BN}$ is a sequence of martingale differences. Moreover, \begin{equation}\label{Equ:AsymProb2} \sum_{t=0}^{\infty}\frac{1}{(t+1)^2}\E^{D}\bigl[Z_{t}^{2}\big|\CF_{t-1}\bigr]<\infty \end{equation} almost surely, because $|Z_{t}|\leq 1$ is bounded for $t\in\BN$. Therefore \cite[Thm.\,2.17]{HallHeyde:1980} can be applied, which yields that \begin{equation}\label{Equ:AsymProb4} \sum_{t=0}^{T-1}\frac{1}{t+1}Z_{t} \end{equation} converges almost surely as $T\to\infty$. The result \eqref{Equ:AsymProb} now follows by use of Kronecker's Lemma, \cite[p.\,31]{HallHeyde:1980}. \end{proof} Note that Lemma \ref{The:AsymProb} does not imply that \begin{equation}\label{Equ:AsymProb3} \lim_{T\to\infty}\frac{1}{T}\sum_{t=0}^{T-1}M_{t}= \lim_{T\to\infty}\frac{1}{T}\sum_{t=0}^{T-1}\E^{D-1}\bigl[\Vm_{t}\big|\CF_{t-1}\bigr] \end{equation} almost surely, because it is not clear that the right-hand side converges almost surely. However, if it converges almost surely, then \eqref{Equ:AsymProb3} holds. 
\begin{theorem}\label{The:ConvSure} Let Assumptions \ref{Ass:Uncertainty}, \ref{Ass:Control}, \ref{Ass:Resolvability} hold and $(K,R)$ be an admissible sample-removal pair. Then \vspace*{-0.3cm} \begin{equation}\label{Equ:ConvSure} \underset{T\to\infty}{\lims}\,\frac{1}{T}\sum_{t=0}^{T-1}M_{t}\leq\ep\vspace*{-0.3cm} \end{equation} almost surely. \end{theorem} \begin{proof} From Lemma \ref{The:AsymProb}, \begin{align}\label{Equ:ConvSure1} 0=&\lim_{T\to\infty}\frac{1}{T}\sum_{t=0}^{T-1} \Bigl(M_{t}-\E^{D-1}\bigl[\Vm_{t}\big|\CF_{t-1}\bigr]\Bigr)\nonumber\\ \geq&\underset{T\to\infty}{\lims}\,\frac{1}{T}\sum_{t=0}^{T-1}\bigl(M_t-\ep\bigr)\nonumber\\ =&\underset{T\to\infty}{\lims}\,\frac{1}{T}\sum_{t=0}^{T-1}M_{t}-\ep \end{align} almost surely, where the second line follows from Definition \ref{Def:AdmPair}. \end{proof} \section{Numerical Example}\label{Sec:Exam} \subsection{System Data} Consider the stochastic linear system \begin{equation*}\label{Equ:ExSystem} x_{t+1}= \begin{bmatrix} 0.7 &-0.1(2+\theta_t) \\ -0.1(3+2\theta_t) &0.9 \end{bmatrix} x_{t}+ \begin{bmatrix} 1 &0 \\ 0 &1 \end{bmatrix} u_{t}+ \begin{bmatrix} w_{t}^{(1)}\\ w_{t}^{(2)} \end{bmatrix}, \end{equation*} where $x_{0}=[1\; 1]\tp$. Here $\theta_{t}\sim\CU\bigl([0,1]\bigr)$ is uniformly distributed on the interval $[0,1]$ and $w_{t}^{(1)},w_{t}^{(2)}\sim\CN(0,0.1)$ are normally distributed with mean $0$ and variance $0.1$. The inputs are confined to \begin{equation*} \BU:=\bigl\{\upsilon\in\BR^{2}\,\big|\,|\upsilon^{(1)}|\leq 5\,\wedge\,|\upsilon^{(2)}|\leq 5\bigr\}, \end{equation*} and two state constraints are considered: \begin{equation*} \BX_{1}:=\bigl\{\xi\in\BR^{2}\,\big|\,\xi^{(1)}\geq 1\bigr\}\ec\enspace \BX_{2}:=\bigl\{\xi\in\BR^{2}\,\big|\,\xi^{(2)}\geq 1\bigr\}, \end{equation*} either individually or in combination $\BX:=\BX_{1}\cap\BX_{2}$. The stage cost function is chosen to be of the quadratic form (\ref{Equ:StageCost}c), with the weights $Q_\ell:=I$ and $R_\ell:=I$. 
The MPC horizon is set to $N:=5$. \subsection{Joint Chance Constraint} The support rank of the joint chance constraint $\BX$ is bounded by $\rho_{1}=2$. Figure \ref{Fig:PhaseSingle} depicts a phase plot of the closed-loop system trajectory, for two admissible sample-removal pairs (a) $(19,0)$ and (b) $(1295,100)$, corresponding to $\ep=10\%$. Instances in which the state trajectory leaves $\BX$ are indicated in red. Note that the distributions are centered around a similar mean in both cases; however, the case $R=0$ features stronger outliers than $R=100$. \begin{figure} \caption{Phase plot of closed-loop system trajectory (red: violating states; black: other states). The axis lines mark the boundary of the feasible set $\BX$.}\label{Fig:PhaseSingle} \end{figure} Table \ref{Tab:Joint} shows the empirical results of a simulation of the closed-loop system over $T=10,000$ time steps. Note that there is essentially no conservatism in the case of no removals ($R=0$). Some minor conservatism is present for small removal sizes, disappearing asymptotically as $R\to\infty$. At the same time, the reduction of the average closed-loop cost $\ell_{\textrm{avg}}$ is minor for this example, while the standard deviation $\ell_{\textrm{std}}$ is affected significantly. 
\renewcommand\arraystretch{1.5} \begin{table}[H] \begin{center} \begin{tabular}{c||rrrr} $\ep=10\%$ & $R=0$ & $R=50$ & $R=100$ & $R=500$\\ \hline\hline K & $19$ & $702$ & $1,295$ & $5,723$ \\ $V_{\textrm{avg}}$ & $9.87\%$ & $7.37\%$ & $8.06\%$ & $8.74\%$ \\ $\ell_{\textrm{avg}}$ & 3.78 & 3.75 & 3.72 & 3.68 \\ $\ell_{\textrm{std}}$ & 0.54 & 0.44 & 0.42 & 0.37 \end{tabular} \end{center} \vspace*{0.1cm} \caption{Joint chance constraint: closed-loop results for mean violations $V_{\textrm{avg}}$, mean stage cost $\ell_{\textrm{avg}}$, and standard deviation of stage costs $\ell_{\textrm{std}}$.\label{Tab:Joint}} \end{table} \renewcommand\arraystretch{1.0} To highlight the impact of the presented SCMPC approach, the results of Table \ref{Tab:Joint} can be compared to those of previous SCMPC approaches \cite{Schildi:2012,CalFag:2013a}. The sample size is $19$ (compared to about $400$), and the empirical share of constraint violations in closed-loop is $9.87\%$ (compared to about $0.05\%$). These figures become even worse when longer horizons are considered; \eg for $N=20$, previous approaches require about 900 samples and yield about $0.2\%$ violations. \subsection{Individual Chance Constraints} For the same example, the two chance constraints $\BX_{1}$ and $\BX_{2}$ are now considered separately, with the individual probability levels $\ep_{1}=5\%$ and $\ep_{2}=10\%$. Each support rank is bounded by $\rho_{1}=1$. Figure \ref{Fig:PhaseDouble} depicts a phase plot of the closed-loop system trajectory, for the admissible sample-removal pairs (a) $(19,0)$, $(9,0)$ and (b) $(2020,100)$, $(1010,100)$. \begin{figure} \caption{Phase plot of closed-loop system trajectory (blue, red, purple: violating states of $\BX_1$, $\BX_2$, $\BX_1$ and $\BX_2$; black: other states). 
The axis lines mark the boundaries of the feasible sets $\BX_1$ and $\BX_2$, respectively.}\label{Fig:PhaseDouble} \end{figure} Table \ref{Tab:Ind} shows the empirical results of a simulation of the closed-loop system over $T=10,000$ time steps. Note that there is very little conservatism in all cases. As in the previous example, the reduction of the average closed-loop cost $\ell_{\textrm{avg}}$ is minor, while the standard deviation $\ell_{\textrm{std}}$ is affected significantly. \renewcommand\arraystretch{1.5} \begin{table}[H] \begin{center} \begin{tabular}{c||rrr} $\ep_{1}=5\%,$ & $R_1=R_2$ & $R_1=R_2$ & $R_1=R_2$ \\ $\ep_{2}=10\%$ & $=0$ & $=50$ & $=100$ \\ \hline\hline $K_1$ & $19$ & $1,020$ & $2,020$ \\ $K_2$ & $9$ & $510$ & $1,010$ \\ $V_{\textrm{avg},1}$ & $5.14\%$ & $4.84\%$ & $4.95\%$ \\ $V_{\textrm{avg},2}$ & $9.94\%$ & $9.81\%$ & $9.93\%$ \\ $\ell_{\textrm{avg}}$ & 3.67 & 3.62 & 3.51 \\ $\ell_{\textrm{std}}$ & 0.54 & 0.46 & 0.42 \end{tabular} \end{center} \vspace*{0.1cm} \caption{Individual chance constraints: closed-loop results for mean violations $V_{\textrm{avg},1}$ and $V_{\textrm{avg},2}$ of $\BX_{1}$ and $\BX_{2}$, mean stage cost $\ell_{\textrm{avg}}$, and standard deviation of stage costs $\ell_{\textrm{std}}$. \label{Tab:Ind}} \end{table} \renewcommand\arraystretch{1.0} \section{Conclusion}\label{Sec:Conc} The paper has presented new results on Scenario-Based Model Predictive Control (SCMPC). By focusing on the average-in-time probability of constraint violations and by exploiting the multi-stage structure of the finite-horizon optimal control problem (FHOCP), the number of scenarios has been greatly reduced compared to previous approaches. Moreover, the possibility to adopt a-posteriori constraint removal strategies is also accommodated. 
Due to its computational efficiency, the presented approach paves the way for a tractable application of Stochastic Model Predictive Control (SMPC) to large-scale problems with hundreds of decision variables. \begin{appendix} \section{Proof of Lemmas \ref{The:AddDist} and \ref{The:FirstStep}}\label{Sec:LemProof} The particular bounding arguments follow rather easily after some general observations on the support rank. Pick any state constraint $i\in\{1,...,N\}$ from (\ref{Equ:FHSCP}c). Recursively substituting the dynamics (\ref{Equ:FHSCP}b), the constrained state can be expressed as \begin{subequations} \begin{align}\label{Equ:StateCon} & x^{(k)}_{i|t}=\bigl(A^{(k)}_{i|t}\cdot ...\cdot A^{(k)}_{0|t}\bigr)x_{t} +\Ab^{(k)}_{i|t}\Bb^{(k)}_{i|t} \begin{bmatrix} u_{0|t}\\ \vdots\\ u_{N-1|t} \end{bmatrix} +\Ab^{(k)}_{i|t} \begin{bmatrix} w^{(k)}_{0|t}\\ \vdots\\ w^{(k)}_{i-1|t} \end{bmatrix},\\ &\Ab^{(k)}_{i|t}:= \begin{bmatrix} A^{(k)}_{i|t}\cdot ...\cdot A^{(k)}_{1|t}\\ \vdots\\ A^{(k)}_{1|t}\\ \Id \end{bmatrix}\tp,\\ &\Bb^{(k)}_{i|t}:= \begin{bmatrix} B^{(k)}_{0|t} & 0 &\hdots &0 &0 &\hdots &0\\ 0 & B^{(k)}_{1|t} &\hdots &0 &0 &\hdots &0\\ \vdots &\vdots &\ddots &\vdots &0 &\hdots &0\\ 0 & 0 &\hdots &B^{(k)}_{i|t} &0 &\hdots &0\\ \end{bmatrix}, \end{align}\end{subequations} where $I\in\BR^{n\times n}$ denotes the identity matrix, and for any $i=0,...,N-1$ the following abbreviations are used: \begin{equation*}\label{Equ:AbbrVar} A^{(k)}_{i|t}:=A\bigl(\de^{(k)}_{i|t}\bigr),\enspace B^{(k)}_{i|t}:=B\bigl(\de^{(k)}_{i|t}\bigr),\enspace w^{(k)}_{i|t}:=w\bigl(\de^{(k)}_{i|t}\bigr)\ef \end{equation*} Let $l\leq n$ be the support rank of $\BX$, \ie the co-dimension of the largest linear subspace that is unconstrained by $\BX$. 
Then there exists a projection matrix $P\in\BR^{l\times n}$ such that for each $x\in\BRn$ \begin{equation*} x\in\BX\quad\Longleftrightarrow\quad Px\in P\BX:=\bigl\{P\xi\:\big|\:\xi\in\BX\bigr\}\ef \end{equation*} For example, if the state constraint concerns only the first two elements of the state vector, then $l=2$ and $P\in\BR^{2\times n}$ may contain the first two unit vectors $e_{1},e_{2}\in\BRn$ as its rows. \subsection*{Proof of Lemma \ref{The:AddDist}} If $A\bigl(\de^{(k)}_{i|t}\bigr)\equiv A$ and $B\bigl(\de^{(k)}_{i|t}\bigr)\equiv B$ are constant for all $i\in\{0,...,N-1\}$, then \eqref{Equ:StateCon} reduces to \begin{equation}\label{Equ:AddDist} \underbrace{ \begin{bmatrix} PA^{i-1}B &\hdots & PB & 0 &\hdots \end{bmatrix}}_{\rnk(\cdot)\,\leq l} \begin{bmatrix} u_{0|t}\\ \vdots\\ u_{N-1|t} \end{bmatrix} +PA^{i}x_{t}+ \begin{bmatrix} PA^{i-1} &\hdots & P \end{bmatrix} \begin{bmatrix} w^{(k)}_{0|t}\\ \vdots\\ w^{(k)}_{i-1|t} \end{bmatrix}\in P\BX\ec \end{equation} for any $i\in\{1,...,N\}$. The rank of the first matrix of dimension $l\times Nm$ can be at most $l$, and therefore it has a null space of dimension at least $Nm-l$. The disturbance has no effect on this null space, because it enters only through the third, additive term in \eqref{Equ:AddDist}. Hence this null space is clearly an unconstrained subspace of the constraint and $\rho_{i}\leq l\leq n$ for all $i\in\{1,...,N\}$, proving Lemma \ref{The:AddDist}. \subsection*{Proof of Lemma \ref{The:FirstStep}} Consider the first state constraint $i=1$ of (\ref{Equ:FHSCP}c). 
Here \eqref{Equ:StateCon} reduces to \begin{equation}\label{Equ:FirstState} \underbrace{ \begin{bmatrix} PB^{(k)}_{0|t} &0 &\hdots &0 \end{bmatrix}}_{\rnk(\cdot)\,\leq m} \begin{bmatrix} u_{0|t}\\ \vdots\\ u_{N-1|t} \end{bmatrix} +PA^{(k)}_{0|t}x_{t}+ Pw^{(k)}_{0|t}\in P\BX\ef \end{equation} The rank of the first matrix can here be at most $m$ for all outcomes of $B^{(k)}_{0|t}$, because the last $(N-1)m$ variables in the decision vector are always in its null space. Hence $\rho_{1}\leq m$ in all cases, proving Lemma \ref{The:FirstStep}. \subsection*{Parameterized Control Laws} For the case of parameterized control laws as in Remark \ref{Rem:ContrParam}, it will be shown that the argument of Lemma \ref{The:FirstStep} continues to apply. Define for any $i=1,...,N-1$ \begin{align*} &Q_{0|t}:=\Id\ec\qquad &\Phi_{0|t}:=\phi_{0|t}\ec\quad\,\,\,\,\\ &Q_{i|t}^{(k)}:= \underbrace{ \begin{bmatrix} q_{i|t}^{(1)} & q_{i|t}^{(2)} & \hdots & q_{i|t}^{(J_{i})} \end{bmatrix}}_{\in\BR^{m\times J_{i}}}\ec\quad &\Phi_{i|t}:= \underbrace{ \begin{bmatrix} \phi_{i|t}^{(1)}\\ \vdots\\ \phi_{i|t}^{(J_{i})}\\ \end{bmatrix}}_{\in\BR^{J_{i}}}\ec \end{align*} where $q_{i|t}^{(j)}:=q_{i|t}^{(j)}\bigl(\de^{(k)}_{0|t},...,\de^{(k)}_{i|t}\bigr)$ is used as an abbreviation and $\Id\in\BR^{m\times m}$ denotes the identity matrix. Then the vector of control inputs under scenario $k=1,...,K$ can be expressed as the matrix-vector product \begin{equation*} \begin{bmatrix} u_{0|t}\\ u_{1|t}^{(k)}\\ \vdots\\ u_{N-1|t}^{(k)} \end{bmatrix} = \underbrace{ \begin{bmatrix} Q_{0|t} & 0 & \hdots & 0 \\ 0 & Q_{1|t}^{(k)} & \hdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \hdots & Q_{N-1|t}^{(k)} \end{bmatrix}}_{=:\Qb_{t}^{(k)}} \underbrace{ \begin{bmatrix} \Phi_{0|t}\\ \Phi_{1|t} \\ \vdots \\ \Phi_{N-1|t} \end{bmatrix}}_{=:\Phib_{t}}\ef \end{equation*} Substitute this in place of the original decision vector in \eqref{Equ:FirstState} to see that the same rank argument as before applies. 
\end{appendix} \end{document}
\begin{document} \markboth{Hannah Larson and Geoffrey Smith} {Congruence properties of Taylor coefficients of modular forms} \title{Congruence properties of Taylor coefficients of modular forms} \author{Hannah Larson} \address{Department of Mathematics, Harvard University, 1 Oxford Street\\ Cambridge, Massachusetts 02138, United States} \email{[email protected]} \author{Geoffrey Smith} \address{Department of Mathematics, Yale University, 10 Hillhouse Avenue\\ New Haven, Connecticut 06511} \email{[email protected]} \maketitle \begin{abstract} In their work, Serre and Swinnerton-Dyer study the congruence properties of the Fourier coefficients of modular forms. We examine similar congruence properties, but for the coefficients of a modified Taylor expansion about a CM point $\tau$. These coefficients can be shown to be the product of a power of a constant transcendental factor and an algebraic integer. In our work, we give conditions on $\tau$ and a prime number $p$ that, if satisfied, imply that $p^m$ divides the algebraic part of all the Taylor coefficients of $f$ of sufficiently high degree. We also give effective bounds on the largest $n$ such that $p^m$ does not divide the algebraic part of the $n^{\text{th}}$ Taylor coefficient of $f$ at $\tau$ that are sharp under certain additional hypotheses. \end{abstract} \section{Introduction and statement of results} Let $f = \sum{a_nq^n}$ be a (holomorphic) modular form of weight $k$ on $\text{SL}_2(\mathbb{Z})$ with integral Fourier coefficients, where $q = e^{2\pi i z}$. It is well known that the derivative of a modular form is not generally a modular form. However, it is possible to define a \emph{non-holomorphic} derivative $\partial$ which preserves modularity but not holomorphicity. 
Furthermore, this derivative gives rise to a Taylor series expansion, \begin{equation} \label{tayseries} (1-w)^{-k}f\left(\frac{z - \bar{z}w}{1-w}\right) = \sum_{n=0}^{\infty}{(\partial^nf)(z)\frac{(4\pi \text{Im}(z)w)^n}{n!}} \quad (|w| < 1), \end{equation} that converges for $|w| < 1$ and thereby gives a well-defined description of $f$ on the upper half of the complex plane (see, for example, Section 5.1 of \cite{Z}). \begin{remark} In this last respect, equation \eqref{tayseries} is a more useful expansion than the standard Taylor series $\sum f^{(n)}(z)\frac{(w-z)^{n}}{n!}$, which only converges in a disk. \end{remark} Congruences of Fourier coefficients have been studied extensively. Ramanujan famously observed that $\sigma_{11}(n) \equiv \tau(n) \pmod {691}$, and since then Deligne and others have constructed a deep theory of congruence properties of Fourier series using Galois representations \cite{L76}, \cite{S72}, \cite{SD73}. In fact, these ideas play a central role in Wiles' proof of Fermat's Last Theorem \cite{W95}. We will instead study the congruence properties of the Taylor coefficients, relying on the theory of differential operators mod $p$ as explored by Swinnerton-Dyer in \cite{SD73}, rather than Galois representations. In general, the Taylor series coefficients are transcendental. However, for a modular form with integral Fourier coefficients and a CM point $\tau$, we can express $(\partial^nf)(\tau)$ as \begin{equation} \label{deft} (\partial^n f)(\tau) = t_f(\tau;n) \Omega_{\tau}^{2n + k}, \end{equation} where $t_f(\tau; n)$ is integral over $\mathbb{Z}_{\frac{1}{6}}:= \mathbb{Z}[\frac{1}{6}]$ and $\Omega_{\tau}$ is a transcendental factor depending only on $\tau$. The choice of $\Omega_\tau$ is not canonical.
However, all choices for $\Omega_{\tau}$ are algebraic multiples of the canonical $\Omega_{-d}^*$ which is given by the Chowla-Selberg formula (\cite{Z}, Section 6.3): \begin{equation} \label{chowsel} \Omega_{-d}^* := \frac{1}{\sqrt{2\pi |d|}}\left(\prod_{j = 1}^{|d| - 1}{\Gamma \left(\frac{j}{|d|}\right)^{\chi_{-d}(j)}}\right)^{h(-d)/w(-d)}, \end{equation} where $-d$ is the discriminant of the quadratic extension containing $\tau$, $\Gamma$ is the Gamma-function, $\chi_{-d}(\cdot)=\left(\frac{-d}{\cdot}\right)$ is the Kronecker character of $\mathbb{Q}(\sqrt{-d})$, $h(-d)$ is the class number, and $w(-d)$ is the number of units in the ring of integers $\mathcal{O}_{-d}$. \begin{remark} In general, choosing $\Omega_{-d}^*$ for $\Omega_\tau$ is not ideal for our purposes because $f(\tau)/(\Omega_{-d}^*)^k$ is not in general an algebraic integer. However, since the ring of almost holomorphic modular forms, within which the image of the ring of modular forms under $\partial^n$ is a subspace, is a finitely generated ring, there is some algebraic number $a$ such that we can set $\Omega_\tau=\Omega^*_{-d}/a$. This process can produce infinitely many different $\Omega_\tau$, and our results are true for all of them; however, our results are most interesting for those $\Omega_\tau$ such that the algebraic integer $t_g(\tau;0)$ has zero $p$-adic valuation for some almost holomorphic modular form $g$. We give an example of this at the start of Section 2. \end{remark} In view of \eqref{tayseries} and \eqref{deft}, congruences of the $t_f(\tau; n)$ translate into meaningful statements about the Taylor coefficients. Our first result shows that such Taylor coefficients become increasingly divisible by powers of $p$ for half of the primes $p$. \begin{theorem} \label{weak} Suppose that $f$ is a holomorphic modular form of weight $k$ with integer Fourier coefficients, and suppose $\tau$ is a CM point in $\mathbb{Q}(\sqrt{-d})$. 
If $p \geq 5$ is a prime such that $\left(\frac{-d}{p}\right) \in \{0, -1\}$, then \[t_f(\tau; n) \equiv 0 \pmod {p^m}\] for all integers $m > 1$ and $n \geq (m-1)p^2$. \end{theorem} It turns out that when $m \leq k - 2$, we have the following better bound. \begin{theorem} \label{sharp} Assume the hypotheses in Theorem \ref{weak}. If $m \leq k -2$ and $p\geq 2k-2$, then \[t_f(\tau; n) \equiv 0 \pmod {p^{m}} \] for all $n \geq \left\lceil \frac{m}{2} \right\rceil p^2$. \end{theorem} We conjecture that some additional hypotheses of Theorem \ref{sharp} are unnecessary. More specifically, we conjecture the following: \begin{conjecture} Suppose that $f$ is a holomorphic modular form of weight $k$ with integer Fourier coefficients, and suppose $\tau$ is a CM point in $\mathbb{Q}(\sqrt{-d})$. If $p$ is a prime satisfying $\left(\frac{-d}{p}\right) \in \{0, -1\}$ and $p\geq k$, then \[t_f(\tau; n) \equiv 0 \pmod {p^{m}}\] for all integers $m > 1$ and $n \geq \left\lceil \frac{m}{2} \right\rceil p^2$. \end{conjecture} \begin{remark} In \cite{DG08}, another work about the $p$-adic behavior of the Taylor coefficients of modular forms, Datskovsky and Guerzhoy give interesting relations between the Taylor series coefficients of a modular form about a CM point $\zeta\in\mathbb{Q}(\sqrt{-d})$ and the coefficients about $\zeta/p$ and $\zeta/p^2$. Their relations, however, require that $\zeta$ be a so-called \emph{suitable point} with respect to $p$, which in turn requires that $\left(\frac{-d}{p}\right)=1$, whereas our results require that $\left(\frac{-d}{p}\right)\neq1$. \end{remark} \begin{example} Consider the Eisenstein series of weight $4$, $E_4$. Using equation \eqref{chowsel}, we find that $\Omega_{-4}^* = 0.590170299508048 \ldots$ and $E_4(i)/(\Omega_{-4}^*)^4 = 12$. 
The Taylor series expansion is then given by \begin{align*} (1-w)^{-4}E_4\left(\frac{i + iw}{1-w}\right) &= 12(\Omega_{-4}^*)^{4} + 20(\Omega_{-4}^*)^{8}\frac{(4\pi w)^{2}}{2!}+\ldots \\ &\quad + t_{E_{4}}(i;50)(\Omega_{-4}^*)^{104}\frac{(4 \pi w)^{50}}{50!}+\ldots. \end{align*} Computation shows $t_{E_{4}}(i;50)=3^{10} \cdot 5 \cdot 7^4 \cdot 85382194794899 \cdot 2049349304689849$ is a multiple of $7^{2}$ as expected by our conjecture. Also, we have that \[t_{E_4}(i; 170) = \frac{3^{43} \cdot 5 \cdot 7^6 \cdot 11^2 \cdot 31 \cdot 43}{2} \cdot 7713094 \ldots 4732307\] is a multiple of $7^6$, giving an example of the conjecture with $m = 3$ and $p = 7$. It is also divisible by $11^2$, and so is an example of the conjecture with $m = 1$ and $p = 11$. Now consider $p = 13$, a prime which does not satisfy $\left(\frac{-d}{p}\right) \in \{0, -1\}$. We observe that $t_{E_4}(i; 170)$ is not divisible by $13$ even though $170 > 13^2$. \end{example} This paper is organized as follows. In Section 2, we introduce the machinery that will be needed to prove our results. In Section 3, we prove a number of lemmas about differential operators mod $p$ and mod $p^2$. In Section 4, we introduce and prove several properties of a new ``valuation'' $v$ that encodes certain useful divisibility properties of a modular form. The key to proving Theorems \ref{weak} and \ref{sharp} depends on the results in Sections 2, 3, and 4 in an indirect way. We accumulate powers of $p$ by keeping careful track of the powers of $E_{p-1}$---the Eisenstein series of weight $p-1$---that factor into $\partial^nf$. Lemma \ref{ZK} is the main device that allows us to translate these factors of $E_{p-1}$ to factors of $p$ dividing the $t_f(\tau; n)$. Section 5 includes this lemma and its proof and concludes with the proof of Theorems \ref{weak} and \ref{sharp}.
\section{Preliminaries} The Eisenstein series $E_k$ of weight $k$ are defined by \begin{equation} E_k := 1 - \frac{2k}{B_k}\sum_{n=1}^{\infty}{\sigma_{k-1}(n)q^n}, \end{equation} where $B_k$ is the $k^{\text{th}}$ Bernoulli number and $\sigma_{k-1}$ is the $(k-1)^{\text{th}}$ divisor function. For even $k \geq 4$, the $E_k$ are modular forms of weight $k$. Following Ramanujan, we write $P=E_{2}$, $Q=E_{4}$, and $R=E_{6}$. Note that $P$ is not a modular form, but \begin{equation} P^* := P - \frac{3}{\pi \text{Im}(z)} \end{equation} transforms like a modular form of weight $2$; that is, it satisfies $P^*(-1/z) = z^2P^*(z)$ and $P^*(z + 1) = P^*(z)$. It is well known (see, for instance, \cite {Z} Proposition 4) that $f$ is expressible as a polynomial in $Q$ and $R$ with coefficients in $\mathbb{Q}$. Since $f$ has weight $k$, every term $a_{b, c}Q^bR^c$ of this polynomial will satisfy $k=4b+6c$, which we call the \emph{weight} of the monomial. We then can consider modular forms to be polynomials in $Q$, $R$ all of whose monomials have the same weight. By declaring the weight of $P$ and $P^*$ to be 2, \emph{quasimodular forms} are defined as those holomorphic functions on the upper half plane expressible as polynomials in $P,Q,R$ in which every monomial has the same weight. In addition, the \emph{almost holomorphic modular forms} are defined as those functions expressible as polynomials in $P^*,Q,R$ in which every monomial has the same weight. Now that we have fixed our notation, we give an example of when the canonical transcendental factor $\Omega_{-d}^*$ is not an ideal choice of $\Omega_{\tau}$ and find a suitable algebraic multiple of $\Omega_{-d}^*$. \begin{example} Let $\tau = \frac{1 + \sqrt{-7}}{2}$. Consider when $p = 7$. Since $\frac{P^*(\tau)}{(\Omega_{-7}^*)^2} = \frac{3}{\sqrt{7}}$ is not an algebraic integer, the canonical transcendental factor $\Omega_{-7}^*$ is not an ideal choice of $\Omega_{\tau}$. 
However, choosing $\Omega_{\tau} = \frac{\Omega_{-7}^*}{7^{1/4}}$ ensures that the $t_f(\tau; n) = (\partial^nf)(\tau)/\Omega_{\tau}^{2n+k}$ are algebraic integers for all $n$ because $\frac{P^*(\tau)}{\Omega_{\tau}^2} = 3$, $\frac{Q(\tau)}{\Omega_{\tau}^4} = 105$ and $\frac{R(\tau)}{\Omega_{\tau}^6} = 1323$ are algebraic---in fact, rational---integers. For example, the Taylor series of the discriminant $\Delta$ at $\tau$ is given by \begin{align*} (1 - w)^{-12}\Delta\left(\frac{\tau - \overline{\tau}w}{1 - w}\right) =& -343\Omega_{\tau}^{12} - 1029\Omega_{\tau}^{14}(2\pi \sqrt{7} w) - 343\Omega_{\tau}^{16}\frac{(2 \pi \sqrt{7} w)^{2}}{2!} \\ &+ 7203\Omega_{\tau}^{18}\frac{(2 \pi \sqrt{7} w)^3}{3!} + \ldots + t_{f}(\tau; 50)\Omega_{\tau}^{112}\frac{(2 \pi \sqrt{7} w)^{50}}{50!}. \end{align*} Computation shows that \[t_f(\tau; 50) = -3^{11} \cdot 5^5 \cdot 7^{11} \cdot 31 \cdot 113 \cdot 184997 \cdot 265541063 \cdot 46132277325870502334416643.\] As predicted by Theorem \ref{weak}, $t_f(\tau; 50) \equiv 0 \pmod {7^2}$. \end{example} \subsection{Modular forms mod $p^m$} For the rest of the paper, we will take $p$ to be a fixed prime number satisfying $p \geq 5$, and fix $f$ to be a modular form of weight $k$ with integral Fourier coefficients. Given a quasimodular form $g$ with integer Fourier coefficients, we let $\overline{g} \in (\mathbb{Z}/p\mathbb{Z})[[q]]$ be the image of its Fourier series under reduction mod $p$. By the famous results of von Staudt--Clausen and Kummer, we have the following congruences (see \cite{L76}, Chapter 10, Theorem 7.1). \begin{lemma} \label{cong} We have that \begin{equation*} \overline{E}_{p-1} \equiv 1 \pmod {p} \end{equation*} and \begin{equation*} \overline{E}_{p+1} \equiv \overline{P} \pmod {p}. \end{equation*} \end{lemma} Let $G \in \mathbb{Z}_{(p)}[P, Q, R]$ be the expression for $g$ as a polynomial in $P, Q, R$, where $\mathbb{Z}_{(p)}$ is the ring of integers localized at $p$.
We denote by $\overline{G}$ the image of $G$ in $(\mathbb{Z}/p^m\mathbb{Z})[P,Q,R]$, well-defined since there is a canonical map $\mathbb{Z}_{(p)}\rightarrow \mathbb{Z}/p^m\mathbb{Z}$ for all $m$. \begin{remark} While it is true that $\overline{G}=\overline{H}$ implies that $\overline{g}=\overline{h}$, it is not the case that $\overline{g}=\overline{h}$ implies that $\overline{G}=\overline{H}$. For example, $Q$ and $QR$ have power series that are congruent mod $7$, but $Q \neq QR$ as polynomials. Because of this important distinction, throughout this paper, we will be careful to keep track of which ring we are working in. \end{remark} Let $A_p(Q, R) = E_{p-1}$ and $B_p(Q, R) = E_{p+1}$ in the polynomial ring $\mathbb{Z}_{(p)}[Q, R]$. Because we have fixed $p$, we will drop the subscripts and write $A=A_{p}$ and $B=B_{p}$. For $\overline{f} \in (\mathbb{Z}/p^m\mathbb{Z})[[q]]$, we define the \textit{filtration} $w(\overline{f})$ to be the least integer $k'$ such that there exists a modular form $g$ of weight $k'$ with $\overline{f} = \overline{g}$. By a result of Swinnerton-Dyer (see \cite{SD73}, Theorem 2), we have the following lemma. \begin{lemma} \label{Ser1} We have that $w(\overline{f}) < k$ if and only if $\overline{A}^{p^{m-1}}$ divides $\overline{F}$. \end{lemma} \begin{example} In the previous remark, we saw that $QR$ has power series congruent to $Q$ mod $7$, so it has filtration less than its weight. This is implied by the above lemma as $A = R$ divides $QR$. \end{example} \subsection{Differential Operators mod $p^m$} The derivative $D$ is defined by \begin{equation} Df := \frac{1}{2\pi i} \frac{d}{dz}f = q \frac{d}{dq}f = \sum{na_nq^n}, \end{equation} where the factor of $\frac{1}{2 \pi i}$ is used to maintain integrality of Fourier coefficients. \begin{remark} A key property of $D$ that we will use is the following: as power series, $D^{p^m}f \equiv D^{p^{m-1}}f \pmod {p^m}$ for all positive integers $m$. 
This follows immediately from Euler's theorem, for if $p \nmid n$ then the $n^{\text{th}}$ Fourier coefficient is multiplied by $n^{\phi(p^m)} = n^{p^{m-1}(p-1)} \equiv 1 \pmod {p^m}$ between $D^{p^{m-1}}f$ and $D^{p^m}f$, and if $p \mid n$ then the $n^{\text{th}}$ Fourier coefficient of $D^{p^{m-1}}f$ is a multiple of $p^{p^{m-1}}$ and hence vanishes mod $p^m$. \end{remark} By a result of Ramanujan, $D$ is a derivation on the ring of quasimodular forms, $\mathbb{Z}_{(p)}[P, Q, R]$, that satisfies \begin{equation} \label{deriv} DP = \frac{P^2 - Q}{12}, \quad DQ = \frac{PQ - R}{3}, \quad DR = \frac{PR - Q^2}{2}. \end{equation} That is, $\mathbb{Z}_{(p)}[P, Q, R]$ is closed under differentiation by $D$. The non-holomorphic derivative is defined by \begin{equation} \partial_k f := Df - \frac{k}{4\pi \text{Im}(z)}f, \end{equation} and sends almost holomorphic modular forms of weight $k$ to almost holomorphic modular forms of weight $k + 2$. The following lemma gives information about the relationship between these two differential operators. \begin{lemma} \label{relate} If $F(P,Q,R)=D^{n}f$ is a polynomial for $D^nf$ in $P,Q,R$, then $\partial^{n}f=F(P^{*},Q,R)$. \end{lemma} \begin{proof} We induct on $n$. When $n=0$ there is nothing to prove. It is easy to show that the differential operator $\partial$ is a derivation that sends $Q$ to $\frac{P^{*}Q-R}{3}$, $R$ to $\frac{P^{*}R-Q^{2}}{2}$, and $P^{*}$ to $\frac{P^{*2}-Q}{12}$. Let $\phi$ be the map that sends $P$ to $P^{*}$. By \eqref{deriv}, we have $\phi \circ D = \partial \circ \phi$. Now suppose $\partial^{n}f = \phi D^nf$. This implies that $\partial^{n+1}f=\partial \partial^{n}f=\partial \phi D^{n}f=\phi D^{n+1}f$. Hence, the lemma is valid for $n+1$, and by induction it is true for all $n$.
\end{proof} Another important relationship between $D$ and $\partial$ is given by the following equation (see \cite{Z}, Section 5.1, Equation 56): For all nonnegative integers $n$, we have \begin{equation} \label{zag.eq56} \partial^nf = \sum_{r = 0}^n{\left(\frac{-1}{4\pi \text{Im}(z)}\right)^{r}{n \choose r}\frac{(k + n -1)!}{(k + n - r - 1)!}D^{n - r}f}. \end{equation} We define a differential operator $\theta$ in the ring of modular forms by: \[\theta f := \frac{BQ-AR}{3}\cdot\frac{\partial f}{\partial Q} + \frac{BR - AQ^2}{2}\cdot\frac{\partial f}{\partial R},\] where $\partial$ is the (formal) partial derivative in the polynomial ring of quasimodular forms. It sends modular forms of weight $k$ to modular forms of weight $k + p + 1$. Lemma \ref{cong} and \eqref{deriv} together show that $\theta f$ has power series congruent to $D f$ mod $p$. The following result of Serre (see \cite{S72}, Section 2.2, Lemme 1) describes the filtration of modular forms mod $p$ under the action of $\theta$. \begin{lemma} For $\overline{f}$ in the ring of modular forms mod $p$, we have the following results: \begin{enumerate} \item If $w(\overline{f})\not\equiv 0 \pmod p$, then $w(\theta(\overline{f}))=w(\overline{f})+p+1$. \item If $w(\overline{f})\equiv 0 \pmod p$, then $w(\theta(\overline{f})) \leq w(\overline{f})+2$. \end{enumerate} \end{lemma} \subsection{Rankin-Cohen brackets} Although the derivative of a modular form is not generally modular, we have seen that the obstruction to modularity can be corrected by the non-holomorphic derivative operator. More generally, in Section 7 of \cite{C75} Cohen defines the \emph{Rankin-Cohen brackets} $[\cdot, \cdot]_{n}$ as bilinear forms on the space of modular forms that, given modular forms of weight $k$ and $k'$, return modular forms of weight $k+k'+2n$. 
More precisely, he proves the following theorem as Corollary 7.2: \begin{theorem}[Rankin-Cohen] \label{RC} If $f$ and $g$ are modular forms of weight $k$ and $k'$ respectively, then the $n^{th}$ Rankin-Cohen bracket $[f,g]_{n}$, defined by \begin{equation*} [f, g]_n = \sum_{\substack{r, s \geq 0 \\ r+s = n}}{(-1)^r {k+n-1 \choose s} {k' + n - 1 \choose r}(D^rf)(D^sg)}, \end{equation*} is a modular form of weight $k+k'+2n$. \end{theorem} \section{Differential operators mod $p$ and mod $p^2$} \subsection{Differential operators mod $p$} We now develop several results about the action of differential operators on modular forms and quasimodular forms mod $p$. In doing so, we will connect our two notions of derivative, first as a formal derivation on our polynomial rings in $P,Q,R$, and second as an operation on formal power series mod $p$. The important results in this section are Lemma \ref{D^pf is modular}, which shows that the operator $D^p$ preserves modularity mod $p$, and Lemma \ref{ideallemma}, which gives useful divisibility properties of modular forms under certain repeated applications of $D^p$. Except where otherwise noted, all of the following lemmas apply in the ring $(\mathbb{Z}/p\mathbb{Z})[P, Q, R]$. \begin{proposition} \label{D^{np-k+1}} Given $p$ and $k$, let $n$ be the unique integer such that $0 \leq np - k + 1 < p$. Then $D^{np - k + 1}f$ is congruent mod $p$ to a modular form of weight $2np - k + 2$. \end{proposition} \begin{proof} Evaluating the Rankin-Cohen bracket, we find \begin{align*} [f, B]_{np - k + 1} &= \! \! \! \! \! \! \sum_{\substack{r, s \geq 0 \\ r+s = np - k + 1}} \! \! \! \! \! \! \! {(-1)^r {np \choose s} {(n+ 1)p - k + 1 \choose r }}(D^rf)(D^sB)\\ &\equiv {np \choose 0}{(n + 1)p - k + 1 \choose np - k + 1}(D^{np - k + 1}f)B \pmod p. \end{align*} The left-hand side is a modular form and $B$ is a modular form.
Since the second binomial coefficient is a unit mod $p$ (the upstairs term is between $p$ and $2p$, so it is not divisible by $p$), we must have that $D^{np - k + 1}f$ is congruent mod $p$ to a modular form. \end{proof} \begin{corollary} \label{D^2A} We have $D^2A \equiv 0 \pmod p$. \end{corollary} \begin{proof} The second derivative $D^2A$ is a modular form mod $p$ by Proposition \ref{D^{np-k+1}} and has power series congruent to zero mod $p$ by Lemma \ref{cong}. \end{proof} \begin{lemma} \label{D^pf is modular} The $p^{\text{th}}$ derivative $D^{p}f$ is a modular form mod $p$. \end{lemma} \begin{proof} We consider the cases when $k \equiv 1 \pmod p$ and when $k \not \equiv 1 \pmod p$ separately. First suppose $k \not \equiv 1 \pmod p$. Pick $n$ such that $0 \leq np - k + 1 < p$. By Proposition \ref{D^{np-k+1}}, we have $D^{np - k + 1}f$ is congruent mod $p$ to a modular form of weight $2np - k + 2$. Now pick $m$ such that $0 \leq mp - (2np - k + 2) + 1 < p$. Proposition \ref{D^{np-k+1}} implies $D^{mp - (2np - k + 2) + 1}(D^{np - k + 1}f) = D^pf$ is a modular form mod $p$. Now suppose $k \equiv 1 \pmod p$. We evaluate the Rankin-Cohen bracket \begin{align*} [f, A]_{p} &= \sum_{\substack{r, s \geq 0 \\ r+s = p}}{(-1)^r {k+p-1 \choose s} {2p - 2 \choose r}(D^rf)(D^sA)} \\ &\equiv {k+p-1 \choose 0}{2p-2 \choose p}(D^pf)A \pmod {p}. \end{align*} The left-hand side is a modular form and $A$ is a modular form, so $D^pf$ must be a modular form mod $p$. \end{proof} We now use the modularity of $D^pf$ mod $p$ and $D^{np - k + 1}f$ mod $p$ to prove results about divisibility by $A$ mod $p$ by finding modular forms of different weights with congruent power series (see Lemma \ref{Ser1}). In the following propositions and subsequent lemma, these modular forms of different weights will come from applying the $\theta$ operator. \begin{proposition} \label{Abar} We have $D^{rp}f \equiv A^r \theta^r f \pmod p$. \end{proposition} \begin{proof} We use induction on $r$. Let $r = 1$. 
We have $D^pf \equiv Df$ as power series by Fermat's Little Theorem. Furthermore, $Df \equiv \theta f$ as power series by the definition of $\theta$ and the fact that $A$ has power series congruent to $1$ mod $p$. Since $D^pf$ has weight $2p + k$ and $\theta f$ has weight $k + p + 1$, we have that $A$ divides $D^pf$ mod $p$, and in fact, $D^pf \equiv A \theta f \pmod p$. Now suppose $D^{p(r-1)}f \equiv A^{r-1}\theta^{r-1}f \pmod p$. Then \begin{equation*} D^{rp}f \equiv D^p(A^{r-1}\theta^{r-1}f) \equiv A^{r-1}D^p(\theta^{r-1}f) \equiv A^{r}\theta^rf \pmod p. \end{equation*} \end{proof} \begin{proposition} \label{moredivisibility} Given $p$ and $k$, let $n$ be the unique integer such that $0 \leq np - k + 1 < p$. Then $\theta^{np-k+1} f$ is an element of the ideal $(A^{np-k+1}, p)$ in $\mathbb{Z}_{(p)}[P,Q,R]$. \end{proposition} \begin{proof} We have that the weight of $\theta^{np-k+1}f$ is $k + (p+1)(np-k + 1)$. Because $D^{np - k + 1}f$ is a modular form mod $p$, we have $D^{np - k + 1}f \equiv \theta^{np-k + 1}f$ mod $p$. That is, there exists a modular form of weight $2np - k + 2$ which is congruent to $\theta^{np - k + 1}f$ mod $p$. Since $2np - k + 2 < k + (p+1)(np-k+ 1)$, we have that $\overline{A}$ divides $\theta^{np-k+1}f$ with multiplicity given by \begin{equation*} \frac{k + (p+1)(np-k + 1) - (2np - k + 2)}{p - 1} = np - k + 1. \end{equation*} \end{proof} \begin{lemma} \label{ideallemma} For all $i \leq k - 1$, we have $D^{p^2 - ip}f$ is in the ideal $(A^{2p - k + 1 - i}, p)$ within $\mathbb{Z}_{(p)}[P,Q,R]$. \end{lemma} \begin{proof} Working mod $p$, Proposition \ref{Abar} gives \begin{align*} D^{p^2 - ip}f \equiv A^{p-i}\theta^{p-i}f \equiv A^{p-i}\theta^{k-i-1}\theta^{p-k+1}f \pmod p, \end{align*} and Proposition \ref{moredivisibility} implies $A^{2p-k + 1 - i}$ divides $D^{p^2 - ip}f$ mod $p$. \end{proof} Our above lemmas are only concerned with modular forms.
The following proposition and corollary instead prove some useful results about differentiation of our simplest quasimodular form, $P$. \begin{proposition} \label{D^pP modular mod p} The form $D^{p}P$ is modular mod $p$. \end{proposition} \begin{proof} Recall (see \eqref{deriv}) that $Q=P^{2}-12DP$. So, working mod $p$, we have $D^{p}Q=2PD^{p}P-12D^{p+1}P$ is a modular form. Let $XP^{i}$ be the leading term in $D^{p} P$ mod $p$ as a polynomial in $P$. Then the leading term of $D^{p}Q$ as a polynomial in $P$ is $2XP^{i+1}-(2p+2-i)XP^{i+1}\equiv iXP^{i+1} \pmod p$. Because $D^{p}Q$ is a modular form mod $p$ (by Lemma \ref{D^pf is modular}), we have $i\equiv 0 \pmod p$. Since we cannot have $i\geq p$---the weight of $X$ would then be at most $2$---we must have $i=0$, so $D^{p}P$ is a modular form mod $p$. \end{proof} \begin{corollary} \label{D^{p^2}P} We have $D^{p^2}P$ is in the ideal $(A^{2p}, p)$ within $\mathbb{Z}_{(p)}[P,Q,R]$. \end{corollary} \begin{proof} As a power series, $D^{p^2}P \equiv D^pP \pmod p$, and by Proposition \ref{D^pP modular mod p}, both are modular forms mod $p$. Since $D^{p^2}P$ has weight $2p^2 + 2$ and $D^pP$ has weight $2p + 2$, we have $A$ must divide $D^{p^2}P$ with multiplicity at least $2p$. \end{proof} \subsection{Differential operators mod $p^2$} Having developed the necessary machinery mod $p$, we now turn our attention to some results about modular forms mod ${p^2}$, that is, in the ring $(\mathbb{Z}/p^2\mathbb{Z})[P, Q, R]$. The following proposition and the consequent Lemma \ref{D^{p^2}modular} are analogous to Proposition \ref{D^{np-k+1}} and Lemma \ref{D^pf is modular} respectively. \begin{proposition} \label{D^{np^2-k+1}} Given $p$ and $k$, let $n$ be the unique integer such that $0 \leq np^2 - k + 1 < p^2$. Then $D^{np^2 - k + 1}f$ is congruent to a modular form mod ${p^2}$.
\end{proposition} \begin{proof} We evaluate the Rankin-Cohen bracket: \begin{align*} &[f, BA^p]_{np^2 - k + 1} = \sum_{r+s=np^2 - k + 1}{(-1)^r{np^2 \choose s}{(n+1)p^2 - k + 1 \choose r}(D^rf)(D^s(BA^p))} \\ &\equiv \sum_{0 < i < np}{(-1)^{(np - i)p - k + 1}{np^2 \choose ip}{(n+1)p^2 - k + 1 \choose (np - i)p - k + 1}(D^{(np-i)p - k + 1}f)(D^{ip}(BA^p))} \\ &\qquad + (-1)^{np^2 - k + 1} {(n+1)p^2 - k + 1 \choose np^2 - k + 1}(D^{np^2 - k + 1}f)(BA^p) \pmod {p^2}. \end{align*} By Proposition \ref{D^{np-k+1}} and Lemma \ref{D^pf is modular}, the derivatives $D^{(np-i)p - k + 1}f$ and $D^{ip}(BA^p)$ are modular mod $p$. Since every term accumulates a factor of $p$ from its first binomial coefficient except when $i=0$, every term after the first is a modular form mod $p^2$. By Theorem \ref{RC}, we have $[f, BA^p]_{np^2-k+1}$ is a modular form, so $D^{np^2 - k + 1}f$ must be a modular form mod $p^2$. \end{proof} \begin{lemma} \label{D^{p^2}modular} The form $D^{p^2}f$ is modular mod ${p^2}$. \end{lemma} \begin{proof} We consider the cases when $k \equiv 1 \pmod {p^2}$ and when $k \not \equiv 1 \pmod {p^2}$ separately. First suppose $k \not \equiv 1 \pmod {p^2}$. Let $n$ be the unique integer such that $0 \leq np^2 - k + 1 < p^2$. By Proposition \ref{D^{np^2-k+1}}, we have $D^{np^2 - k + 1}f$ is congruent to a modular form of weight $2np^2 - k + 2$ mod ${p^2}$. Now let $m$ be the unique integer such that $0 \leq mp^2 - (2np^2 - k + 2) + 1 < p^2$. Then Proposition \ref{D^{np^2-k+1}} implies $D^{mp^2 - (2np^2 - k + 2) + 1}(D^{np^2 - k + 1}f) = D^{p^2}f$ is a modular form mod ${p^2}$. Now suppose $k \equiv 1 \pmod {p^2}$. We have the following expansion of the Rankin-Cohen bracket: \begin{align*} [f, A^p]_{p^2} &= \sum_{\substack{r, s \geq 0 \\ r+s = p^2}}{(-1)^r {k+p^2-1 \choose s} {2p^2-p-1\choose r}(D^rf)(D^sA^p)}. \intertext{When $s > 0$, the first binomial coefficient is divisible by $p$ and $D^sA^p$ is also divisible by $p$.
So, working mod $p^2$, we have} [f, A^p]_{p^2} &\equiv {k+p^2-1 \choose 0}{2p^2 - p - 1 \choose p^2}(D^{p^2}f)A^p \pmod {p^2}. \end{align*} By Theorem \ref{RC}, the left-hand side is a modular form, and since $A^p$ is a modular form and $p$ does not divide ${2p^2 - p - 1 \choose p^2}$, we have $D^{p^2}f$ is a modular form mod ${p^2}$. \end{proof} \section{The ``valuation'' $v$ } In this section, we define a function $v$ which behaves like a valuation with respect to the ideal $(A^p, p)$. The goal of this section is to understand the behavior of $v$ under repeated applications of the differential operator $D^{p^2}$. The important results are Lemma \ref{weakprop}, which gives a lower bound on $v(D^{mp^2}f)$ in terms of $m$, and Lemma \ref{sharpprop}, which gives a stronger lower bound under additional assumptions on $p, k$ and $m$. We define the function $v:\mathbb{Z}_{(p)}[P,Q,R]\rightarrow \mathbb{Z}$ by \begin{equation} \label{defv} v(f)=\mathrm{sup}\{n \ | \ f\in(A^p,p)^n\}. \end{equation} In other words, $v(f)$ is the sum of the $p$-adic valuation of $f$ and the supremum of the set of all nonnegative integers $i$ such that $f$ is expressible as $f = A^{pi}G$ for some quasimodular form $G$. Note that $v(Df) \geq v(f) \geq 0$ for all quasimodular forms $f$ and $v(fg) \geq v(f) + v(g)$ for all quasimodular forms $f$ and $g$. \begin{remark} We have used quotation marks around the word ``valuation'' because in general we do not have the equality $v(fg) = v(f) + v(g)$, so $v$ is not technically a valuation. \end{remark} \begin{example} Let $p = 5$. Then $A = E_4 = Q$. Consider $D^5A=\frac{35}{1296}P^5Q + \frac{175}{648}P^3Q^2 - \frac{175}{1296}P^4R + \frac{25}{432}PQ^3 - \frac{175}{648}P^2QR - \frac{35}{1296}Q^2R + \frac{25}{324}PR^2$. It is a multiple of $5$, as expected from Corollary \ref{D^2A}, but is neither a multiple of $A^5 = Q^5$ nor of $25$. So $v(D^5A)=1 \geq 0 = v(A)$.
\end{example} \subsection{Important Facts About $v$} Before we can prove our key lemmas, we need several facts about $v$ under a single application of the differential operator $D^{p^2}$. Proposition \ref{p^2sharp} gives a lower bound (independent of $f$) for $v$ under the differential operator $D^{p^2}$ and Proposition \ref{v(D^{p^2}A^p)geq3} gives a lower bound on $v(D^{p^2} A^p)$. These then allow us to bound $v(D^{p^2}f)$ in terms of $v(f)$ in Proposition \ref{key}, which will be important in proving Lemma \ref{weakprop}. \begin{proposition} \label{p^2sharp} We have $v(D^{p^2}f) \geq 2$. \end{proposition} \begin{proof} It suffices to show that $D^{p^2}f\equiv A^{2p}M+pA^pN \pmod{p^2}$ for some modular forms $M$ and $N$. By Lemma \ref{D^pf is modular}, we can write $D^pf = M + pG(P)$, where $M$ is a polynomial in $Q$ and $R$ and $G(P) = \sum_i{X_iP^i}$ is a polynomial in $P$ with coefficients $X_i$ that are polynomials in $Q$ and $R$. We claim that $A^pM + p \sum_i{X_iA^{p-i}B^i}$ is a modular form with power series congruent to $D^pf$. Since $A^p \equiv 1 \pmod {p^2}$, the first term has power series congruent to $M$; recalling that $B \equiv P \pmod p$ and $A \equiv 1 \pmod p$ as power series, it is clear that $pX_iA^{p-i}B^i \equiv pX_iP^i \pmod {p^2}$. Hence, in the ring of power series, $D^{p^2}f \equiv D^pf \equiv A^pM + p \sum_i{X_iA^{p-i}B^i} \pmod {p^2}$, showing that $D^{p^2}f$ has power series congruent to a modular form of weight $k + 2p + p(p-1)$. Since $D^{p^2}f$ has weight $k + 2p^2$ and is a modular form mod $p$ by Lemma \ref{D^{p^2}modular}, we have that $A^p$ divides $D^{p^2}f$ by Lemma \ref{Ser1}. \end{proof} \begin{proposition} \label{v(D^{p^2}A^p)geq3} We have $v(D^{p^2}A^p) \geq 3$. \end{proposition} \begin{proof} Using the product rule we expand \begin{equation} D^{p^2}A^p = \! \! \! \sum_{j_1 + \ldots + j_p = p^2}{\frac{p^2!}{j_1! \cdots j_p!}(D^{j_1}A) \cdots (D^{j_p}A)}. \end{equation} We proceed in cases.
\\ \textbf{Case 1:} $j_r = p^2$ for some $r$. In this case, $v((D^{j_1}A) \cdots (D^{j_p}A)) = v(A^{p-1}D^{p^2}A) \geq 2$ by Proposition \ref{p^2sharp}. Because there are exactly $p$ terms of this type, $v$ of their sum will be at least $3$. \\ \textbf{Case 2:} there exists some $r$ such that $j_r \not \equiv 0 \pmod p$. We have $p^2$ divides $\frac{p^2!}{j_1! \cdots j_p!}$ and there exists some $r$ such that $j_r > 1$, so by Corollary \ref{D^2A}, the terms $\frac{p^2!}{j_1! \cdots j_p!}(D^{j_1}A) \cdots (D^{j_p}A)$ are divisible by $p^3$. \\ \textbf{Case 3:} $j_r \equiv 0 \pmod p$ for all $r$ and $j_r \neq p^2$ for all $r$. We have $p$ divides $\frac{p^2!}{j_1! \cdots j_p!}$, and $p$ divides any term $D^{j_r}A$ where $j_r$ is nonzero. Since there are at least two nonzero $j_r$ we have $p^3 \mid \frac{p^2!}{j_1! \cdots j_p!}(D^{j_1}A) \cdots (D^{j_p}A)$. In each case, $v$ is at least $3$, so $v(D^{p^2}A^p)$ is at least $3$, as desired. \end{proof} \begin{proposition} \label{key} We have $v(D^{p^2}f) \geq v(f) + 1$. \end{proposition} \begin{proof} Write $f = \sum_{i}{p^{v(f)-i}A^{ip}M_i}$ where the $M_i$ are modular forms. To prove the proposition, it suffices to show that $v(D^{p^2}(A^{ip}M_i)) \geq i + 1$. We expand $D^{p^2}(A^{ip}M_i)$ using the product rule: \begin{equation} D^{p^2}(A^{ip}M_i) = \sum_{j = 0}^{p^2}{{p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)}. \end{equation} When $j = 0$, Proposition \ref{p^2sharp} implies $v(D^{p^2}M_i) \geq 2$, so $v(A^{ip}D^{p^2}M_i) \geq i + 2$. When $1 \leq j \leq p^2 - 1$, we have the inequalities: $v\left({p^2 \choose j}\right) \geq 1$ and $v(D^jA^{ip}) \geq v(A^{ip}) \geq i$, from which we conclude $v({p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)) \geq i + 1$. Finally, when $j = p^2$, we have the following equation: \[D^{p^2}A^{ip} = \! \! \!\sum_{r_1 + \ldots + r_i = p^2}{\frac{p^2! }{r_1! 
\cdots r_i!}(D^{r_1}A^p) \cdots (D^{r_i}A^p)}.\] If some $r_{\bullet} = p^2$, Proposition \ref{v(D^{p^2}A^p)geq3} implies $v(A^{(i-1)p}D^{p^2}A^p) \geq i + 2$. Otherwise, $p$ divides $\frac{p^2! }{r_1! \cdots r_i!}$, and we have $v\left(\frac{p^2! }{r_1! \cdots r_i!}(D^{r_1}A^p) \cdots (D^{r_i}A^p)\right) \geq i + 1$. Hence, for all $j$, we have $v\left({p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)\right) \geq i + 1$, which implies the proposition. \end{proof} Making some additional assumptions on $v(f)$ and $p$, we give stronger analogues of the previous proposition, which will be necessary to prove Lemma \ref{sharpprop}. \begin{proposition} \label{key2} If $v(f) \leq k - 1$ and $p \geq 2k - 2$, then $v(D^{p^2}f) \geq v(f) + 2$. \end{proposition} \begin{proof} As in the proof of Proposition \ref{key}, it suffices to show that $v(D^{p^2}A^{ip}M_i) \geq i + 2$. By the product rule, we have \begin{equation} D^{p^2}(A^{ip}M_i) = \sum_{j = 0}^{p^2}{p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i). \end{equation} We consider the following cases: \\ \textbf{Case 1:} $j = 0$. When $j = 0$, Proposition \ref{p^2sharp} implies $v(A^{ip}D^{p^2}M_i) \geq i + 2$. \\ \textbf{Case 2:} $j \not \equiv 0 \pmod p$. In this case, $p^2$ divides ${p^2 \choose j}$ so $v$ increases by at least $2$. \\ \textbf{Case 3:} $j \equiv 0 \pmod p$ and $j \leq p(k-1)$. We have $p$ divides ${p^2 \choose j}$, and Lemma \ref{ideallemma} says $A^p$ divides $D^{p^2 - j}M_i \pmod p$; that is, $v(D^{p^2 - j}M_i) \geq 1$. Therefore, $v({p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)) \geq i + 2$. \\ \textbf{Case 4:} $j \equiv 0 \pmod p$ and $p(k-1) < j < p^2$. Since $i \leq k - 1$, we have $ip < j$. Hence, $v(D^jA^{ip}) \geq i + 1$ because $p^2$ divides $D^{rp}A^p$ with $r > 1$. Because we also have a factor of $p$ from the binomial, $v({p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)) \geq i + 2$. \\ \textbf{Case 5:} $j = p^2$. We have the following equation: \[D^{p^2}A^{ip} = \sum_{r_1 + \ldots + r_i = p^2}{\frac{p^2!}{r_1!
\cdots r_i!}(D^{r_1}A^p) \cdots (D^{r_i}A^p)}.\] If there exists an $s$ such that $r_{s} \not \equiv 0 \pmod p$, then $p^2$ divides $\frac{p^2!}{r_1! \cdots r_i!}$ and we are done. If there exists an $s$ such that $r_s = p^2$, the result is immediate from Proposition \ref{v(D^{p^2}A^p)geq3}. Otherwise, $r_{s} \equiv 0 \pmod p$ and $r_s \neq p^2$ for all $s$. Therefore, since $i < p$, there exists an $s$ such that $r_s > p$, and hence $p^2$ divides $D^{r_s}A^p$. In addition, $p$ divides $\frac{p^2!}{r_1! \cdots r_i!}$, so $v(\frac{p^2!}{r_1! \cdots r_i!}(D^{r_1}A^p) \cdots (D^{r_i}A^p)) \geq i + 2$, establishing the case. Hence, for all $j$ we have $v({p^2 \choose j}(D^jA^{ip})(D^{p^2 - j}M_i)) \geq i + 2$, proving the proposition. \end{proof} \begin{proposition} \label{increaseby1} If $v(f) \leq k - 2$ and $p \geq 2k - 2$, then $v(D^{p^2 - p}f) \geq v(f) + 1$. \end{proposition} \begin{proof} It suffices to show that $v(D^{p^2-p}A^{ip}M_i) \geq i + 1$. From the product rule we have \[D^{p^2 - p} (A^{ip}M_i) = \sum_{j}{{p^2 - p \choose j}(D^{j}A^{ip})(D^{p^2 - p - j}M_i)}.\] If $j \not \equiv 0 \pmod p$, then $p$ divides the binomial coefficient and we are done. It remains to consider terms in which $j \equiv 0 \pmod p$. If $j \leq (k - 2)p$, then Lemma \ref{ideallemma} says $v(D^{p^2 - p - j}M_i) \geq 1$. If $j > (k-2)p$, then we have $j > ip$, so $v(D^{j}A^{ip}) \geq i + 1$. Thus, $v\left({p^2 - p \choose j}(D^{j}A^{ip})(D^{p^2 - p - j}M_i)\right) \geq i + 1$ for all $j$, proving the proposition. \end{proof} \subsection{Key lemmas} Using the above facts, we now prove two important lemmas which make precise the increasing behavior of $v$ under repeated applications of the differential operator $D^{p^2}$. Lemma \ref{weakprop} is applicable in the context of Theorem \ref{weak}. Lemma \ref{sharpprop} is a stronger version of Lemma \ref{weakprop} under additional assumptions which translate to the additional assumptions of Theorem \ref{sharp}.
\begin{lemma} \label{weakprop} If $m \geq 1$ is an integer, then $v(D^{mp^2}f) \geq m + 1$. \end{lemma} \begin{proof} When $m = 1$, we have $v(D^{p^2}f) \geq 2$ by Proposition \ref{p^2sharp}. We proceed by induction on $m$. Suppose that for all $n \leq m$, we have $v(D^{np^2}f) \geq n + 1$. By equation \eqref{zag.eq56} we have \begin{equation} \partial^{mp^2}f = \sum_{r=0}^{mp^2}a_rT^rD^{mp^2 - r}f, \end{equation} where $T = \frac{-1}{4\pi y}$ and $a_r = {mp^2 \choose r}\frac{(k + mp^2 - 1)!}{(k + mp^2 - r -1)!}$. We claim that $v(a_rD^{mp^2 - r}f) \geq m + 2$ for all $r > 0$. Let $j$ be such that $(j-1)p^2 < r \leq jp^2$. We will show that $p^{j + 1}$ divides $a_r$. Suppose $j = 1$. If $r < p$, then $p^2$ divides the binomial coefficient. If $p \leq r < p^2$, then $p$ divides the binomial once and divides the complementary factor at least once. When $r = p^2$, we have $p$ divides the complementary factor at least twice. Now suppose $j \geq 2$. The complementary factor is divisible by $p^{(j-1)p}$. Since $(j-1)p > j + 1$, we have $p^{j + 1}$ divides $a_r$. Hence we have shown that $p^{j + 1}$ divides $a_r$. Now we see $v(a_rD^{mp^2 - r}f) = v(a_rD^{jp^2 - r}(D^{(m - j)p^2}f)) \geq m - j + 1 + j + 1 \geq m + 2$, proving the claim. Since $P^* = P + 12T$, the mapping $P \mapsto 0$ followed by $T \mapsto P/12$ sends $\partial^{mp^2}f$ to $D^{mp^2}f$ by Lemma \ref{relate}. We write $C_r$ for the coefficient of $P^r$ in $D^{mp^2}f$. By the induction hypothesis, we have $v(C_0) \geq m + 1$ so Proposition \ref{key} implies $v(D^{p^2}C_0) \geq m + 2$. The claim above proves that $v(C_r) \geq m + 2$ for all $r > 0$, so we have $v(D^{p^2}P^rC_r) \geq m + 2$ for all $r$. Hence, we have shown that $v(D^{(m+1)p^2}f) \geq m + 2$. Inducting, we have $v(D^{mp^2}f) \geq m + 1$ for all $m$, as desired. \end{proof} \begin{lemma} \label{sharpprop} Suppose that $p \geq 2k - 2$. 
If $m$ is an integer satisfying $1 \leq 2m \leq k - 2$, then we have the following: \begin{enumerate} \item $v(D^{mp^2 - p}f) \geq 2m - 1$. \item $v(D^{mp^2}f) \geq 2m$. \end{enumerate} \end{lemma} \begin{proof} When $m = 1$, the results follow from Propositions \ref{increaseby1} and \ref{p^2sharp} respectively. We proceed by induction on $m$. Suppose that for all $n \leq m$, we have $v(D^{np^2-p}f) \geq 2n - 1$ and $v(D^{np^2}f) \geq 2n$. We will show that $v(D^{(m+1)p^2 - p}f) \geq 2m + 1$ and $v(D^{(m+1)p^2}f) \geq 2m + 2$. As in Lemma \ref{weakprop}, we write \begin{equation} \partial^{mp^2}f = \sum_{r=0}^{mp^2}a_rT^rD^{mp^2 - r}f, \end{equation} where $T = \frac{-1}{4\pi y}$ and $a_r = {mp^2 \choose r}\frac{(k + mp^2 - 1)!}{(k + mp^2 - r -1)!}$. We convert the above expression for $\partial^{mp^2}f$ into an expression for $D^{mp^2}f$ as a polynomial in $P$ using the map $P \mapsto 0$ and $T \mapsto \frac{P}{12}$ and write $C_r$ for the coefficient of $P^r$ in $D^{mp^2}f$. Note that $v(C_r) \geq v(a_rD^{mp^2 - r}f)$. We first give lower bounds for the $v(C_r)$. When $r = 0$, the induction hypothesis implies $v(C_0) = v(a_0D^{mp^2}f) \geq 2m.$ For $1 \leq r \leq k -1$, it is easy to see that $p^2$ divides $a_r$ and the induction hypothesis implies $v(C_r) \geq v(a_rD^{p-r}D^{mp^2 - p}f) \geq 2m + 1$. For $r > k - 1$, we claim that $v(C_r) \geq 2m + 2$. We consider the following cases: \\ \textbf{Case 1:} $k-1 < r \leq p$. We have that $p^3 \mid a_r$, and so $v(C_r) \geq v(a_rD^{p-r}D^{mp^2 - p}f) \geq 3 + 2m - 1 = 2m + 2$. \\ \textbf{Case 2:} $p < r \leq p^2$. We have $p^4 \mid a_r$, and so $v(C_r) \geq v(a_rD^{p^2 - r}D^{(m-1)p^2}f) \geq 4 + 2(m-1) = 2m + 2$. \\ \textbf{Case 3:} $r > p^2$. Let $j$ be an integer such that $(j-1)p^2 < r \leq jp^2$. It is easy to see that $p^{(p+1)(j-1)}$ divides $a_r$. Then $v(C_r) \geq v(a_rD^{jp^2 - r}D^{(m-j)p^2}f) \geq (p+1)(j - 1) + 2m - 2j \geq 2m + 2$. Hence we have shown $v(C_r) \geq 2m + 2$ for all $r > k - 1$.
We next establish $v(D^{(m+1)p^2 - p}f) \geq 2m + 1$. We have $D^{(m+1)p^2 - p}f = \sum_{r}D^{p^2 - p}(P^rC_r).$ Since $v(C_0) = 2m$, Proposition \ref{increaseby1} implies $v(D^{p^2 - p}C_0) \geq 2m + 1$. For all $r > 0$, we have $v(C_r) \geq 2m + 1$, so $v(D^{(m+1)p^2 - p}f) \geq 2m + 1$. Finally, we show that $v(D^{(m+1)p^2}f) \geq 2m + 2$. We have $D^{(m+1)p^2 }f = \sum_{r}D^{p^2}(P^rC_r)$. By Proposition \ref{key2}, we have $v(D^{p^2}C_0) \geq 2m + 2$. For $1\leq r \leq k-1$, we have $v(C_r) \geq 2m + 1$ and we claim that $v(D^{p^2}(P^rC_r)) \geq 2m + 2$. We have the following expansion for $D^{p^2}(P^rC_r)$: \begin{equation} \label{claim} D^{p^2}(P^rC_r) = \sum_{j_1 + \ldots + j_{r+1} = p^2}{\frac{p^2!}{j_1! \cdots j_{r+1}!}(D^{j_1}P) \cdots (D^{j_r}P)(D^{j_{r+1}}C_r)}. \end{equation} We have $p$ divides $\frac{p^2!}{j_1! \cdots j_{r+1}!}$, and have hence established our claim, except for the terms in which there exists an $s$ such that $j_s = p^2$. Consider a term in which $j_s = p^2$. If $s = r+1$, then we have $v(D^{p^2}C_r) \geq v(C_r) + 1 \geq 2m + 2$ by Proposition \ref{key}. Otherwise $s \leq r$, and since Corollary \ref{D^{p^2}P} implies $v(D^{p^2}P) \geq 1$, we have \[v\left(\frac{p^2!}{j_1! \cdots j_{r+1}!}(D^{j_1}P) \cdots (D^{j_r}P)(D^{j_{r+1}}C_r)\right) \geq 2m + 2.\] Finally, for $r > k -1$, we have $v(C_r) \geq 2m + 2$, so $v(D^{p^2}(P^rC_r)) \geq 2m + 2$. We have shown that $v(D^{(m+1)p^2 - p}f) \geq 2m + 1$ and $v(D^{(m+1)p^2}f) \geq 2m + 2$. Inducting, we have $v(D^{mp^2 - p}f) \geq 2m - 1$ and $v(D^{mp^2}f) \geq 2m$ for all $m$, as desired. \end{proof} \section{Main results} At this point, we have a number of results about various derivatives of a modular form lying in some ideal $(A^p,p)^n$. To translate these results to the form of Theorems \ref{weak} and \ref{sharp}, for which we desire the divisibility of $(\partial^nf)(\tau)$ by powers of $p$, we prove the following lemma. \begin{lemma} \label{ZK} Let $\tau$ be a CM point with discriminant $-d$.
If $p \geq 5$ is a prime such that $\left(\frac{-d}{p}\right) \in \{0, -1\}$, where $\left(\frac{-d}{p}\right)$ is the Legendre symbol, we have $t_{E_{p-1}}(\tau;0) \equiv 0 \pmod p$, where $E_{p-1}$ is the normalized Eisenstein series defined in Section 2.1. \end{lemma} \begin{proof} From equation 2 of \cite{KZ97} we have that $A=E_{p-1}$ can be expressed uniquely as \[A(\tau) = \Delta(\tau)^nQ(\tau)^{\delta}R(\tau)^{\epsilon}\tilde{f}(j(\tau)),\] where $p-1 = 12n + 4\delta + 6\epsilon$ and $\tilde{f}$ is a polynomial. The supersingular polynomial is defined as \[ss_{p}(j) := \prod_{\substack{E/\overline{\mathbb{F}}_p \\ E \, \text{supersingular}}}{(j - j(E))}.\] By Theorem 1 of \cite{KZ97}, it satisfies the equivalence \[ss_{p}(j(\tau)) \equiv \pm j(\tau)^{\delta} (j(\tau)-1728)^{\epsilon}\tilde{f}(j(\tau)) \pmod p.\] From Theorem 7.25 of \cite{O03} we know that $j(\tau)$ is a root of $ss_{p}(j)$ whenever $(\frac{-d}{p})=0,-1$, where $-d$ is the discriminant of $\mathbb{Q}(\tau)/\mathbb{Q}$. This implies $j(\tau)$ is a root of $\tilde{f}$, so long as $j(\tau) \neq 0, 1728$; that is, $\tau \neq i$ and $\tau \neq \rho = e^{2 \pi i/3}$. Since $R(i) = Q(\rho) = 0$, we have $A(\tau) \equiv 0 \pmod p$. \end{proof} Now, using this lemma and the properties of $v$, we can prove our main results. \subsection{Proofs of main results} \begin{proof}[Proof of Theorem \ref{weak}] Fix $p,n$ and $m$ satisfying the hypotheses. Lemma \ref{weakprop} implies $D^{mp^{2}}f\in(A^p,p)^{m+1}$ within the ring of quasimodular forms, so we can write $D^nf$ as a sum of the form \[D^{n}f = \sum_{0 \leq i \leq m}{p^{m-i}A^{ip}H_{i}},\] for some quasimodular forms $H_{i}$. Since $A$ is fixed under the map $\phi: \mathbb{Z}_{(p)}[P, Q, R] \to \mathbb{Z}_{(p)}[P^*, Q, R]$ that sends $P$ to $P^*$, Lemma \ref{relate} implies $\partial^nf=\sum{p^{m-i}A^{ip}\phi(H_{i})}$. 
In particular, we have that \[t_f(\tau;n) = \frac{\partial^nf(\tau)}{\Omega_{\tau}^{2n + k}} = \sum_{0 \leq i \leq m}{p^{m-i}\frac{A(\tau)^{ip}\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k}}} = \sum_{0 \leq i \leq m}{p^{m-i} \frac{A(\tau)^{ip}}{\Omega_{\tau}^{ip(p-1)}} \frac{\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k - ip(p-1)}}}. \] By Lemma \ref{ZK}, we have $\frac{A(\tau)^{ip}}{\Omega^{ip(p-1)}}$ is an algebraic integer multiple of $p^{ip}$, and by \eqref{deft} we have $\frac{\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k - ip(p-1)}}$ is an algebraic integer, so $t_f(\tau; n) \equiv 0 \pmod {p^m}$. \end{proof} Theorem \ref{sharp} follows from a comparable argument, relying on Lemma \ref{sharpprop} rather than Lemma \ref{weakprop}. \begin{proof}[Proof of Theorem \ref{sharp}] Fix $p,n$ and $m$ satisfying the hypotheses. By Lemma \ref{sharpprop}, we can write \[D^{n}f = \sum_{0 \leq i \leq m}{p^{m-i}(A^p)^{i}H_{i}},\] for some quasimodular forms $H_{i}$. Since $A$ is fixed under the map sending $P \mapsto P^*$, Lemma \ref{relate} implies $\partial^nf$ can be written as $\sum{p^{m-i}(A^p)^{i}\phi(H_{i})}$, where $\phi: \mathbb{Z}_{(p)}[P, Q, R] \to \mathbb{Z}_{(p)}[P^*, Q, R]$ is the map that sends $P \mapsto P^*$. In particular, we have that \[t_f(\tau;n) = \frac{\partial^nf(\tau)}{\Omega_{\tau}^{2n + k}} = \sum_{0 \leq i \leq m}{p^{m-i}\frac{A(\tau)^{ip}\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k}}} = \sum_{0 \leq i \leq m}{p^{m-i} \frac{A(\tau)^{ip}}{\Omega_{\tau}^{ip(p-1)}} \frac{\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k - ip(p-1)}}}. \] By Lemma \ref{ZK}, we have $\frac{A(\tau)^{ip}}{\Omega^{ip(p-1)}}$ is an algebraic integer multiple of $p^{ip}$, and by \eqref{deft} we have $\frac{\phi(H_{i})(\tau)}{\Omega_{\tau}^{2n + k - ip(p-1)}}$ is an algebraic integer, so $t_f(\tau; n) \equiv 0 \pmod {p^m}$. 
\end{proof} \section{Acknowledgments} The authors would like to thank Professor Ken Ono for suggesting the topic and for advice and guidance throughout the process, and an anonymous referee for useful comments on a draft of this paper. We also would like to thank Jesse Silliman, Isabel Vogt, the other participants at the 2013 Emory REU, and Alexander Smith for useful conversations. Both authors are also grateful to the NSF for its support. \end{document}
\begin{document} \title{Recursion relations for chromatic coefficients\ for graphs and hypergraphs} \begin{abstract} We establish a set of recursion relations for the coefficients in the chromatic polynomial of a graph or a hypergraph. As an application we provide a generalization of Whitney's broken cycle theorem for hypergraphs, as well as deriving an explicit formula for the linear coefficient of the chromatic polynomial of the $r$-complete hypergraph in terms of roots of the Taylor polynomials for the exponential function. \end{abstract} \section{Introduction} The chromatic polynomial $\chi_G$ associated to a graph $G$, introduced by Birkhoff \cite{birkhoff}, is determined by defining $\chi_G(\lambda)$, for $\lambda\in \mathbb N$, to be the number of colourings of the vertices of $G$ with at most $\lambda$ colours, such that no adjacent vertices are attributed the same colour~\cite{read,Dong2005}. The definition extends to hypergraphs~\cite{bujitas2015}, by considering colourings such that each hyperedge contains at least two vertices with different colours. In the case of graphs, Whitney's broken cycle theorem \cite{whitney,Dohmen1999,blass-sagan,dohmen2} provides a combinatorial interpretation to the coefficients of the chromatic polynomial $\chi_G(\lambda)$: if a graph $G$ has $n$ vertices, then the coefficient of $\lambda^i$ is given, up to the sign $(-1)^{n-i}$, by the number of spanning subgraphs of $G$ with $n-i$ edges with the property of not containing as a subset any of a particular list of special subgraphs of $G$, known as \emph{broken cycles}\footnote{Whitney's original theorem mentions \emph{broken circuits} instead, but the distinction between circuits and cycles is not relevant in this context.}.
In the present article, we establish a set of recursion relations for the coefficients of the chromatic polynomial of a graph or hypergraph, which allow us to express the $i$-th order coefficient in terms of products of linear coefficients of certain subgraphs. We similarly show that the combinatorial quantities appearing in Whitney's theorem (as well as a natural generalization of them which covers the case of hypergraphs) also satisfy the same recursion relations (up to a sign factor). Since the two sequences are recursively defined by the same relations and it can be easily verified that they coincide on empty graphs, we obtain as a consequence a generalization of the broken cycle theorem for hypergraphs. There are a number of different extensions of Whitney's theorem to hypergraphs already present in the literature \cite{dohmen1, dohmen2, trinks, dohmen-trinks}. The one we present here encompasses those known to us. As a second application of the recursion relations, we derive an explicit formula for the linear chromatic coefficient of the $r$-complete hypergraph in terms of the roots of the $(r-1)$-th Taylor polynomial of the exponential function (where the $r$-complete hypergraph is the hypergraph containing all possible hyperedges of cardinality $r$). Whitney's theorem implies that the coefficients of the chromatic polynomial of a graph are always integers with alternating signs. Moreover, applying the deletion-contraction principle for the chromatic polynomial \cite{birkhoff,whitney,Dong2005}, one can also show that they are numerically upper bounded by the corresponding coefficient for the complete graph of the same order. We show that both these facts can be obtained in a simple way as a consequence of the recursion relations we present, without using either Whitney's theorem or the deletion-contraction principle. The paper is organized as follows.
In Section~\ref{sec:graphs}, we start by presenting the simpler case of the recursion relations for graphs, together with a new proof of Whitney's theorem in its original form. The section follows the same approach we will use for the general case; since it is arguably easier, we present it here as an illustration of the method, but it can safely be skipped. In Section~\ref{sec:hypergraphs} we present the general case of hypergraphs, and the generalization of Whitney's theorem. Finally in Section~\ref{sec:complete-hypergraphs} we apply the recursion relations to obtain the formula for the linear coefficient of the $r$-complete hypergraph. \section{The recursion relations for graphs} \label{sec:graphs} In this section $G=(V,E)$ denotes a simple graph, where $V$ is a non-empty finite set and $E$ is a set of unordered pairs of elements in $V$. The members in $V$ and $E$ are called the \emph{vertices} and \emph{edges} in $G$, respectively. The order of $G$, i.e.\ the number of vertices $|V|$, will be denoted by $n$. By $k(G)$ we shall denote the number of connected components of $G$. If $F\subseteq E$, the graph $\bar G\langle F\rangle \equiv (V,F)$ is called the \emph{spanning subgraph} of $G$ induced by $F$, and we shall write $k(F)$ for $k(\bar G\langle F\rangle)$. If $V'\subset V$, the graph $(V',E')$ where $E' = \{\{x,y\}\in E \mid x,y\in V'\}$ is called the \emph{subgraph of $G$ induced by $V'$}. It will be denoted by $G[V']$. \begin{defn} Let $\lambda\in\mathbb N$. A $\lambda$-colouring of a graph $G=(V,E)$ is a map $\pi: V\to \{1,2,\dots,\lambda\}$. A $\lambda$-colouring is called \emph{proper} if for each edge $e=\{x,y\}\in E$ it holds that $\pi(x)\neq \pi(y)$. We define $\chrom{G}(\lambda)$ to be the number of proper $\lambda$-colourings of $G$. \end{defn} It is well known that $\chi_G(\lambda)$ is a polynomial in $\lambda$.
\begin{thm}\label{chrom1} The function $\chrom{G}$ is a polynomial, called the \emph{chromatic polynomial} of $G$, given by \[ \chrom{G}(\lambda) = \sum_{i=1}^n a_i(G) \lambda^{i}\,, \] where \begin{equation}\label{coeff} a_i(G) = \sum_{\substack{F\subseteq E \\ k(F) = i}} (-1)^{|F|}\,. \end{equation} \end{thm} \begin{proof} Define for any edge $e\in E$ the function $f_e$ on the set of colourings of $G$ by \[ f_e(\pi) = \begin{cases} 0\;\mbox{if $\pi$ is constant on $e$}\\ 1\;\mbox{otherwise}\end{cases}\,. \] Then \begin{align*} \chrom{G}(\lambda) = \sum_\pi \prod_{e\in E} f_e(\pi) = \sum_\pi \prod_{e\in E} (1-(1-f_e(\pi))) &= \sum_\pi \sum_{F\subseteq E} (-1)^{|F|}\prod_{e\in F} (1-f_e(\pi))\\ &= \sum_{F\subseteq E} (-1)^{|F|}\lambda^{k(F)}\,. \end{align*} \end{proof} Whitney refined this result in what is known as his \emph{broken-cycle theorem} \cite{whitney}. Let $\leq$ be an arbitrary linear ordering of the edge set $E$. A \emph{broken cycle} of $G$ is then a set of edges $F\subseteq E$ obtained by removing the maximal edge from a cycle of $G$. \begin{thm}[Whitney 1932]\label{thm:whitney} For $i=1,\dots, n$ we have that \begin{equation} a_i(G) = (-1)^{n-i} h_i(G)\,, \end{equation} where $h_i(G)$ is the number of spanning subgraphs of $G$ with $n-i$ edges and containing no broken cycle. \end{thm} We will establish, in the next three lemmas, a set of recursion relations for the coefficients $a_i$ and for the coefficients $h_i$, respectively. Up to a sign factor, both sets of coefficients will be shown to satisfy the same recursion relations, and by observing that they coincide on the empty graph we will obtain as a consequence an inductive proof of Theorem~\ref{thm:whitney}. Recall that an edge $e\in E$ is called a \emph{bridge} in $G=(V,E)$ if $k(E) < k(E\setminus\{e\})$ (i.e.\ removing $e$ increases the number of connected components of the graph), in which case we must have $k(E\setminus\{e\}) = k(E)+1$.
If $F\subseteq E$ we say that $e\in F$ is a bridge in $F$ if it is a bridge in ${\bar G}\langle F\rangle$. We denote by ${\cal B}_e^i$ the collection of $F\subseteq E$ such that $e$ is a bridge in $F$ and $k(F)=i$. \begin{lem}\label{lemma:subgraph} Let $G=(V,E)$ be a graph with $E\neq \emptyset$ and fix $e\in E$. We have that \begin{equation}\label{eq:decomposition-a} a_i(G) = b_e^{i}(G) - b_e^{i-1}(G)\,, \end{equation} where $b_e^0(G) = 0$ and \[ b^i_e(G) = \sum_{F\in {\cal B}_e^i} (-1)^{|F|}\,, \quad \forall i\ge 1.\] \end{lem} \begin{proof} For each subset $F$ of $E$ exactly one of the following holds: \[ 1)\; e\notin F,\qquad 2)\; e\; \mbox{is a bridge in $F$},\qquad 3)\; e\in F,\, \mbox{but $e$ is not a bridge in $F$}\,. \] We therefore have a decomposition of the collection $\{ F \subseteq E \mid k(F) = i \}$ into the three disjoint classes: \begin{align} {\cal A}_e^i &= \{ F\subseteq E \,|\, e \not \in F,\, k(F) = i\},\nonumber \\ {\cal B}_e^i &= \{ F\subseteq E \,|\, e \in F,\, k(F) = i,\, k(F\setminus \{e\}) = k(F) +1 \},\label{def:Bei} \\ {\cal C}_e^i &= \{ F\subseteq E \,|\, e \in F,\, k(F) = i,\, k(F\setminus \{e\}) = k(F) \}\,.\nonumber \end{align} Hence, for each $i=1,\dots,n-1$ we have \[ a_i = \sum_{F\in{\cal A}_e^i} (-1)^{|F|} + \sum_{F \in {\cal B}_e^i} (-1)^{|F|} + \sum_{F \in {\cal C}_e^i} (-1)^{|F|}. \] Clearly, the mapping $\phi$ defined by $\phi(F) = F\cup \{e\}$ is a bijection from ${\cal A}_e^i$ to ${\cal B}^{i-1}_e \cup {\cal C}_e^i$, which implies that \[ \sum_{F \in {\cal A}_e^i} (-1)^{|F|} = - \left(\sum_{F \in {\cal B}_e^{i-1}} (-1)^{|F|} + \sum_{F\in {\cal C}_e^i}(-1)^{|F|}\right). \] Plugging this expression into the previous formula for $a_i$, we get \[ a_i = \sum_{F\in {\cal B}_e^i} (-1)^{|F|} - \sum_{F\in {\cal B}^{i-1}_e} (-1)^{|F|} = b^i_e - b_e^{i-1} \] as desired.
\end{proof} \begin{lem}\label{eq:lemma-subgraph-b} For $i= 1,2,3,\dots$ we have \begin{equation}\label{eq:lemma-subgraph} b^i_e(G) = - \sum_{\substack{V = V_1 \sqcup \cdots \sqcup V_{i+1}\\ e \not\in G[V_j],\,j=1,\dots,i+1}}\prod_{j=1}^{i+1}a_1(G[V_j]) \,, \end{equation} where $V = V_1 \sqcup \cdots \sqcup V_{i+1}$ denotes any decomposition of $V$ into $i+1$ (non-empty) disjoint subsets $V_1,\dots, V_{i+1}$. \end{lem} \begin{proof} Let $F\in {\cal B}^i_e$ and let $G_1=(V_1,F_1),\dots, G_{i+1}=(V_{i+1},F_{i+1})$ be the connected components of ${\bar G}\langle F\setminus\{e\}\rangle$. In this way, $F$ defines a decomposition of $V$ into $i+1$ disjoint sets $V_1, \dots, V_{i+1}$ such that $e\not \in G[V_j]$ for any $j=1,\dots,i+1$. Let $E_1, \dots, E_{i+1}$ be the edge sets of the vertex induced subgraphs $G[V_1], \dots, G[V_{i+1}]$, respectively. Note that $F$ decomposes as $F_1\cup \cdots\cup F_{i+1} \cup\{e\}$, where $F_j\subseteq E_j$ for each $j$. Conversely, given a decomposition of $V$ into $i+1$ subsets as above such that no $G[V_j]$ contains $e$, then $F=F_1\cup\dots\cup F_{i+1}\cup\{e\}$ belongs to ${\cal B}^i_e$ for any collection $F_1, \dots, F_{i+1}$ of edge sets in $G[V_1],\dots, G[V_{i+1}]$, respectively, such that each $(V_j,F_j)$ is connected.
Hence, we can organize the sum over $F\in {\cal B}_e^i$ by aggregating terms with the same decomposition of $V$: denoting by $k(F_j)$ the number of connected components of $(V_j,F_j)$, we have: \begin{multline*} b_e^i(G) = \sum_{F\in {\cal B}_e^i} (-1)^{|F|} = \sum_{\substack{V =V_1\sqcup \cdots \sqcup V_{i+1} \\ e\not \in G[V_j],\,j=1,\dots,i+1}} \sum_{\substack{F_j\subseteq E_j,\,k(F_j)=1\\j=1,\dots,i+1}} (-1)^{1+ \sum_{j=1}^{i+1}|F_j| } \\ = - \sum_{\substack{V=V_1\sqcup \cdots\sqcup V_{i+1}\\e \not \in G[V_j],\,j=1,\dots,i+1 }} \prod_{j=1}^{i+1} \sum_{\substack{F_j\subseteq E_j\\k(F_j)=1}} (-1)^{|F_j|} = - \sum_{\substack{V=V_1\sqcup \cdots\sqcup V_{i+1}\\e \not \in G[V_j],\,j=1,\dots,i+1}} \prod_{j=1}^{i+1}a_1(G[V_j])\,. \end{multline*} \end{proof} Note that only decompositions such that $G[V_j]$ is connected for all $j=1,\dots,i+1$ contribute to the right-hand side of \eqref{eq:lemma-subgraph}, since $a_1$ vanishes for disconnected graphs. Next, we proceed to verify a similar set of recursion relations for the $h_i$. For this purpose, assume a linear ordering of the edges of the graph $G=(V,E)$ is given and let us call a set of edges $F\subseteq E$ an \emph{$i$-forest} if $\bar G\langle F\rangle$ has $i$ components each of which is a tree, i.e.\ $\bar G\langle F\rangle$ is an acyclic graph with $k(F)=i$. Since for each tree the number of edges is one less than the number of vertices, we have that $i=k(F)=n-|F|$ for any $i$-forest $F$. Thus every spanning $i$-forest is a subgraph with $n-i$ edges. Conversely, since every cycle trivially contains a broken cycle as a subset, any subgraph of $G$ which does not contain any broken cycle is an $i$-forest, if it has $n-i$ edges. In conclusion, $h_i(G)$ is the number of spanning $i$-forests of $G$ containing no broken cycle.
\begin{lem}\label{lemma:forest} For any graph $G=(V,E)$ with a linear ordering of $E\neq \emptyset$ we have that \begin{equation}\label{eq:decomposition-h} h_i(G) = c_{i-1}(G) + c_i(G), \end{equation} where the numbers $c_i(G),\, i=1,2,3,\dots,$ are given by \begin{equation}\label{eq:lemma-forest} c_i(G) = \sum_{\substack{V = V_1 \sqcup \cdots \sqcup V_{i+1}\\ e_{\max}\not\in G[V_j],\, j=1,\dots,i+1}} \prod_{j=1}^{i+1} h_1(G[V_j])\, \end{equation} and $e_{\max}$ is the maximal edge of $G$, while $c_0(G)=0$. \end{lem} \begin{proof} Let $F$ be an $i$-forest of $G$ for some $i$, and fix $e\in E$. Then exactly one of the following is true: \begin{enumerate} \item $e\notin F$, and $F\cup\{e\}$ is not a forest (i.e.\ adding $e$ to $F$ creates a cycle), \item $e\notin F$, and $F\cup\{e\}$ is an $(i-1)$-forest, \item $e\in F$, and $F\setminus\{e\}$ is an $(i+1)$-forest. \end{enumerate} If we now choose $e=e_{\max}$ and $F$ is an $i$-forest such that case 1) holds, then $F$ has a broken cycle. If we therefore consider forests which contain no broken cycle, case 1) does not occur and we can therefore decompose the set \[{\cal E}^i = \{ F\subseteq E \,|\, F \text{ is a spanning $i$-forest with no broken cycle} \}\] into two disjoint classes: \begin{align*} \tilde{\cal A}^i_{e_{\max}} &= \{ F\in {\cal E}^i \,|\, e_{\max} \not \in F\}, \\ \tilde{\cal B}^i_{e_{\max}} &= \{ F\in {\cal E}^i \,|\, e_{\max} \in F\} \end{align*} and, clearly, $F\mapsto F\cup\{e_{\max}\}$ is a bijection from $\tilde{\cal A}^i_{e_{\max}}$ onto $\tilde{\cal B}^{i-1}_{e_{\max}}$. If we now define $c_i(G) = |\tilde{\cal B}^i_{e_{\max}}|$ and recall that $h_i(G) = |{\cal E}^i|$, we see that \begin{equation} h_i(G) = c_{i-1}(G) + c_i(G)\,,\quad i=1,2,3,\dots\,. \end{equation} Note that $c_0(G)=0$ since ${\cal E}^0$ is empty. We have to show that the $c_i(G)$ given in \eqref{eq:lemma-forest} coincide with the ones we have just defined. Let $F\in \tilde{\cal B}^{i}_{e_{\max}}$.
Then $F\setminus\{e_{\max}\}$ is a spanning $(i+1)$-forest and it can be written as the disjoint union of its components: \[F\setminus\{e_{\max}\} = T_1\cup \cdots \cup T_{i+1}\,.\] Let $V_j$ be the vertex set of $T_j$ and let $G_j = G[V_j]$ be the corresponding vertex induced subgraph of $G$, for each $j=1,\dots,i+1$. Then $T_j$ is a spanning tree of $G_j$. Since $F$ contains no broken cycle by assumption, neither does any of the $T_j$ and, in particular, $e_{\max} \not\in G_j$ for every $j$. Conversely, consider a decomposition $V=V_1\sqcup \cdots \sqcup V_{i+1}$ such that $e_{\max} \not\in G_j = G[V_j]$ for every $j=1,\dots, i+1$. If $T_j$ is a spanning tree for $G_j$ for each $j$ then $F=T_1\sqcup \cdots \sqcup T_{i+1}\sqcup \{e_{\max}\}$ is a spanning $i$-forest of $G$. If none of the $T_j$ contains a broken cycle, then neither will $F$. This proves the formula. \end{proof} As in formula \eqref{eq:lemma-subgraph} only decompositions such that all $G[V_j]$ are connected contribute to the sum in \eqref{eq:lemma-forest}. \begin{proof}[Proof of Theorem \ref{thm:whitney}] With notation as in Lemmas \ref{lemma:subgraph} and \ref{lemma:forest} we define \[ \tilde a_i(G) = (-1)^{n-i} h_i(G)\qquad\mbox{and}\qquad \tilde b_e^i(G)= (-1)^{n-i}c_i(G) \] for $i=1,2,\dots, n$ and $i= 0,1,\dots, n$, respectively (where $e=e_{\max}$). It follows from \eqref{eq:decomposition-h} and \eqref{eq:lemma-forest} that $\tilde a_i$ and $\tilde b^i_e$ satisfy the same recursion relations \eqref{eq:decomposition-a} and \eqref{eq:lemma-subgraph} as $a_i$ and $b^i_e$. Specialising \eqref{eq:lemma-subgraph} to $i=1$ and noting that $a_1=b_e^1$ we get \begin{equation}\label{eq:proof-induction-1} a_1(G) = - \sum_{\substack{V=V_1\sqcup V_2\\e \not \in G[V_j],\, j=1,2 }} a_1(G[V_1])\cdot a_1(G[V_2])\,.
\end{equation} Noting that for the case of the empty graph $\bar G\langle \emptyset \rangle$ it holds that \[ a_1(\bar G\langle \emptyset \rangle) = \begin{cases} 1,\; \mbox{if $n=1$}\\ 0,\; \mbox{if $n>1$}\end{cases} \] this relation determines $a_1(G)$ uniquely for all graphs $G$ by induction, since the graphs $G[V_j]$ have fewer edges than $G$. In turn, relations \eqref{eq:decomposition-a} and \eqref{eq:lemma-subgraph} determine $a_i(G)$ for $i\geq 2$. Since it is clear that $a_1(\bar G\langle \emptyset \rangle)=\tilde a_1(\bar G\langle \emptyset \rangle)$ and $\tilde a_1(G)=\tilde b_e^1(G)$ it follows that $a_i(G)=\tilde a_i(G)$ for all $i$ and all graphs $G$. \end{proof} It is a well-known fact that the coefficients $a_i(G)$ alternate in sign, and that they are numerically upper bounded by the corresponding coefficients for the complete graph of equal order. We will now briefly show how this follows in a simple manner from the recursion relations of Lemmas~\ref{lemma:subgraph} and \ref{eq:lemma-subgraph-b} without using either Whitney's theorem or the deletion-contraction principle, as a consequence of the following result. \begin{lem}\label{lem:pos-mon} For any graph $G$ of order $n$ and any edge $e$ of $G$ it holds that \begin{equation}\label{pos-mon} 0\le (-1)^{n-i}b_e^i(G) \le (-1)^{n-i}b_e^i(K_n)\,, \quad i=1,\dots,n , \end{equation} where $K_n$ denotes the complete graph on $n$ vertices. Moreover, the first inequality is sharp if and only if $k(G)\leq i\leq n$, while the second inequality is sharp for $1\leq i \leq n-1$ unless $G=K_n$. \end{lem} \begin{proof} We shall prove the statement by induction. Consider first the case $i=1$ and note that the recursion relation \eqref{eq:proof-induction-1} can be rewritten as \begin{equation}\label{eq:proof-induction-11} d(G) = \sum_{\substack{V=V_1\sqcup V_2\\e \not \in G[V_j],\, j=1,2 }} d(G[V_1])\cdot d(G[V_2])\,, \end{equation} where $d(G)= (-1)^{n-1} a_1(G)$.
Since \[ d(V,\emptyset) = \begin{cases} 1\quad \mbox{if $|V|=1$}\\ 0\quad \mbox{if $|V|>1$}\,,\end{cases} \] it follows by induction on the number of edges in $G$ that $d(G)\geq 0$ for all $G$. If $G$ is connected it is easy to see, by successively deleting edges in paths connecting the endpoints of $e$, starting with $e$, that there exist decompositions $V=V_1\sqcup V_2$ such that $G[V_1]$ and $G[V_2]$ are both connected and do not contain $e$. This implies, again by induction, that $d(G)>0$ if $G$ is connected. On the other hand, if $G$ is disconnected, the sum in \eqref{eq:proof-induction-11} is empty and so $d(G)=0$. Using \eqref{eq:lemma-subgraph} in the form \begin{equation}\lambdabel{eq:lemma-subgraph-11} (-1)^{n-i} b^i_e(G) = \sum_{\substack{V = V_1 \sqcup \cdots \sqcup V_{i+1}\\ e \not\in G[V_j],\,j=1,\dots,i+1}}\prod_{j=1}^{i+1}d(G[V_j]) \,, \end{equation} we get that $(-1)^{n-i} b^i_e(G)\geq 0$. Moreover, if $G$ has $k$ connected components, the sum on the right-hand side is empty if $i<k$ whereas positive terms occur for $k\leq i\leq n$ and hence $(-1)^{n-i} b^i_e(G)> 0$ in this case. Moreover, considering $G$ as a subgraph of $K_n$ and comparing the formula \eqref{eq:lemma-subgraph-11} for $G$ and the corresponding one for $K_n$, we see that each summand in the former by the induction hypothesis can be bounded from above by a corresponding term in the latter, since all $K_n[V_j]$ are complete graphs. Hence, the rightmost bound in \eqref{pos-mon} follows. Finally, if $G$ is not the complete graph, we have $n\geq 2$ and there is an edge $f=\{x,y\}$ in $K_n$ that is not an edge of $G$. For $i\leq n-1$ we choose a decomposition $V=V_1\sqcup\dots\sqcup V_{i+1}$ in \eqref{eq:lemma-subgraph-11}, such that $V_1=\{x,y\}$ and $V_2=\{z\}$, where $z$ is an endpoint of $f$ that is not in $V_1$, and $V_3,\dots,V_{i+1}$ are arbitrary. 
Then $G[V_1]$ is disconnected and therefore this term in \eqref{eq:lemma-subgraph-11} vanishes, while the corresponding term for $K_n$ is strictly positive. This proves the last statement of the lemma. \end{proof} \begin{cly} For any graph $G$ with $n$ vertices it holds for $i=1,2,\dots,n$ that \begin{equation}\label{pos-mon1} 0\leq (-1)^{n-i}(a_1(G) + a_2(G)+\dots + a_i(G))\leq (-1)^{n-i}(a_1(K_n) + a_2(K_n)+\dots + a_i(K_n))\,, \end{equation} and \begin{equation}\label{pos-mon2} 0\leq (-1)^{n-i} a_i(G)\leq (-1)^{n-i} a_i(K_n). \end{equation} Moreover, in both cases the first inequality is sharp if and only if $k(G)\leq i\leq n$, while the second inequality is sharp for $1\leq i \leq n-1$ unless $G=K_n$. \end{cly} \begin{proof} Using that \begin{equation}\label{a-b-relation} a_i(G) = b_e^i(G) - b_e^{i-1}(G) \end{equation} by \eqref{eq:decomposition-a} and that $b_e^0(G)=0$ it follows that $$ b_e^i(G) = a_1(G) +\dots + a_i(G)\,. $$ In particular, $b_e^i(G)$ is independent of $e$ and \eqref{pos-mon1} is just a rewriting of \eqref{pos-mon}. Writing \eqref{a-b-relation} as $$ (-1)^{n-i}a_i(G) = (-1)^{n-i}b_e^i(G) + (-1)^{n-i+1}b_e^{i-1}(G) $$ the inequalities \eqref{pos-mon2} follow immediately from \eqref{pos-mon}. Moreover, the first inequality of \eqref{pos-mon2} is an equality if and only if $b_e^i(G)=b_e^{i-1}(G)=0$ and hence if and only if $0\leq i < k(G)$. Similarly, Lemma \ref{lem:pos-mon} gives that if the second inequality of \eqref{pos-mon2} is an equality then $b_e^i(G)=b_e^{i}(K_n)$ and $b_e^{i-1}(G)=b_e^{i-1}(K_n)$ and hence $G=K_n$. This completes the proof of the corollary. \end{proof} It should be noted that the inequality \eqref{pos-mon1} can also easily be deduced from the (highly non-trivial) unimodality of the coefficients of $\chi_G$ \cite{read,huh} and the fact that $a_1+a_2 + \dots + a_n =0$.
\begin{rem} The alternating sign property of the $a_i$ plays a role, for the special case $i=1$, in the Mayer expansion for the hard-core lattice gas in statistical mechanics (also known as the cluster expansion of the polymer partition function)~\cite{ueltschi2004cluster, Scott2005, friedli_velenik_2017}. Briefly, the model is defined by a finite set $\Gamma$ which plays the role of the ``single-particle'' state space, a list of complex weights $w = (w_\gamma)_{\gamma \in \Gamma}$, and an interaction $W:\Gamma\times \Gamma \to \{0,1\}$, which is symmetric and satisfies $W(\gamma,\gamma)=0$ for all $\gamma \in \Gamma$. Given a multiset $X=\{\gamma_1,\dots, \gamma_n\}$ of elements of $\Gamma$ (where each $\gamma_i$ can appear more than once), we define the simple graph $G[X] \subseteq K_n$ as the graph on $n$ vertices such that $i$ is adjacent to $j$ if $i\neq j$ and $W(\gamma_i,\gamma_j) = 0$. A subset $X$ of $\Gamma$ is said to be \emph{independent} if $G[X]$ has no edges. The partition function is then given by \begin{equation} Z_\Gamma(w) = \sum_{X\subseteq \Gamma} \Big(\prod_{\gamma \in X} w_\gamma \Big) \prod_{\{\gamma, \gamma'\} \subseteq X} W(\gamma, \gamma') = \sum_{\substack{X\subseteq \Gamma \\ X \text{ independent}}} \prod_{\gamma\in X} w_\gamma, \end{equation} which is the (generalized) independent-set polynomial of $G[\Gamma]$ (the standard independent-set polynomial is given when $w$ is taken to be constant)~\cite{Scott2005}. The Mayer expansion gives a formal series expansion for $\log Z_\Gamma$ \cite[Proposition 5.3]{friedli_velenik_2017}: \begin{equation}\label{eq:cluster-expansion} \log Z_\Gamma(w) = \sum_{n\ge 1} \frac{1}{n!} \sum_{\gamma_1,\dots,\gamma_n \in \Gamma} a_1(G[\gamma_1,\dots,\gamma_n]) \prod_{i=1}^n w_{\gamma_i} .
\end{equation} The alternating sign property of $a_1$ implies in particular that the coefficient of order $n$ of $\log Z_G(w)$, seen as a polynomial in the variables $( w_\gammamma)_{\gammamma \in \Gammamma}$, has sign $(-1)^{n-1}$. This holds in greater generality \cite[Proposition 2.8]{Scott2005}, and has important implications for proving the convergence of the formal series \eqref{eq:cluster-expansion} \cite{Pfister1991}. \end{rem} \section{The recursion relations for hypergraphs} \lambdabel{sec:hypergraphs} Let $H$ be a \emph{hypergraph}, that is $H=(V,E)$ where $V$ is a finite non-empty set of \emph{vertices} and $E$ is a set of subsets of $V$, called \emph{edges}. We assume all edges have cardinality at least $2$ (i.\,e. $H$ has no loops) and will denote $|V|$ by $n$. A hypergraph $H'=(V',E')$ is a \emph{subgraph} of $H$ if $V'\subseteq V$ and $E'\subseteq E$. If $$ E'= \{ e\in E\mid e\subseteq V'\} $$ we call $H'$ the subgraph spanned by $V'$ and denote it by $H[V']$. If $$ V' = \bigcup_{e\in E'} e $$ we call $H'$ the subgraph spanned by $E'$ and denote it by $H\lambdangle E'\rangle$. Finally, in case $V=V'$ we call $H'$ a \emph{spanning subgraph} of $H$ and denote it by $\bar H\lambdangle E'\rangle$. Two different vertices $x,y\in V$ are called \emph{neighbours} in $H$ if $x,y\in e$ for some $e\in E$. A vertex $x$ is \emph{connected} to a vertex $y$ if either $x=y$ or there exists a finite sequence $x_1,x_2,\dots x_k$ of vertices such that $x_i$ and $x_{i+1}$ are neighbours for $i=1,\dots,k-1$ and $x_1=x$ and $x_k=y$. Clearly, connectedness is an equivalence relation on $V$. Calling the equivalence classes $V_1,\dots, V_N$ and letting $E_i$ be the set of edges containing only vertices of $V_i$, we have that $H_i=(V_i,E_i)$ is a hypergraph and $$ V=\bigcup_{i=1}^N V_i \,,\qquad E=\bigcup_{i=1}^N E_i\,. $$ If $N=1$ we call $H$ \emph{connected}. Evidently, $H_1,\dots,H_N$ are connected. 
They are called the \emph{connected components} of $H$ and their number is denoted by $k(H)$. Again, we shall use the notation $k(F)$ for $k(\bar H\lambdangle F\rangle)$. \begin{defn} Let $\lambdambda\in\mathbb N$. A $\lambdambda$-colouring of a hypergraph $H=(V,E)$ is a map $\pi: V\to \{1,2,\dots,\lambdambda\}$. A $\lambdambda$-colouring is called proper if for each edge $e\in E$ there exist vertices $x,y\in e$ such that $\pi(x)\neq \pi(y)$. We define $\chi_H(\lambdambda)$ to be the number of proper $\lambdambda$-colourings of $H$. \end{defn} Repeating the proof of Theorem \ref{chrom1} we obtain \begin{thm}\lambdabel{chrom2} The function $\chrom{H}$ is a polynomial, called the \emph{chromatic polynomial} of $H$, given by $$ \chi_H(\lambdambda) = \sum_{F\subseteq E} (-1)^{|F|} \lambdambda^{k(F)}\,. $$ \end{thm} Thus, the coefficients $a_i(H),\, i=1,2,3,\dots,n,$ of $\chi_H$ are given by the same formula \eqref{coeff} as for graphs. Now, fix $e\in E$ and let \begin{align*} {\cal A}_e^i &= \{F\subseteq E\mid e\notin F, k(F)=i\}\\ {\cal B}_e^{i,j} &= \{F\subseteq E\mid e\in F, k(F) =i, k(F\setminus\{e\}) = j\}\,. \end{align*} Note that ${\cal B}_e^{i,j}=\emptyset$ if $i>j$ and, if $F\in{\cal A}_e^i $, then $F\cup \{e\} \in{\cal B}_e^{j,i}$ for some $j\leq i$ yielding a bijective correspondence between ${\cal A}_e^{i}$ and $\cup_{j\leq i}{\cal B}_e^{j,i}$. Hence, we have \begin{equation} \sum_{F\in {\cal A}_e^i} (-1)^{|F|} = - \sum_{j=1}^i \sum_{F\in{\cal B}_e^{j,i}} (-1)^{|F|}\,. \end{equation} Using \begin{equation} a_i = \sum_{F\in {\cal A}_e^i} (-1)^{|F|} + \sum_{j=i}^n \sum_{F\in{\cal B}_e^{i,j}} (-1)^{|F|} \end{equation} it follows that \begin{equation}\lambdabel{I} a_i = \sum_{j>i} b_e^{i,j} - \sum_{j<i} b_e^{j,i}\,, \end{equation} where \begin{equation}\lambdabel{defb} b_e^{i,j} = \sum_{F\in {\cal B}_e^{i,j}} (-1)^{|F|}\,. \end{equation} In particular, we have \begin{equation}\lambdabel{I'} a_1 = \sum_{j=2}^n b_e^{1,j}\,. 
\end{equation} \begin{prop}\lambdabel{bformula} For $i<j$ it holds that \begin{equation}\lambdabel{II} b_e^{i,j} = - \sidesum{i}_{V_1\sqcup\dots\sqcup V_j=V}\prod_{k=1}^j a_1(H[V_k])\,, \end{equation} where the sum is over all decompositions of $V$ into j (non-empty) disjoint subsets such that $e$ intersects exactly $j-i+1$ of them. \end{prop} \begin{proof} Let $F\in {\cal B}_e^{i,j}$. Then $\bar H\lambdangle F\rangle$ has $i$ components $C_1,\dots,C_i$, whereas $\bar H\lambdangle F\setminus\{e\}\rangle$ has $j$ components $H_1=(V_1,F_1),\dots, H_j=(V_j,F_j)$ which are connected spanning subgraphs of $H[V_1],\dots, H[V_j]$, respectively. Indeed, we have $e\in C_m \equiv (V',F')$ for some $m=1,\dots,i$, and $(V',F'\setminus\{e\})$ then has $j-i+1$ components which together with $\{C_1,\dots,C_{m-1},C_{m+1},\dots,C_i\}$ make up $\{H_1,\dots,H_j\}$, and $e$ intersects exactly those $V_k$ which originate from $C_m$ by deleting $e$. On the other hand, given a decomposition $V_1\sqcup\dots\sqcup V_j$ of $V$ and connected spanning subgraphs $H_1=(V_1,F_1),\dots,H_j=(V_j,F_j)$ of $H[V_1],\dots, H[V_j]$, respectively, such that $e$ intersects exactly $j-i+1$ of $V_1,\dots, V_j$, we get that $F_1\cup\dots\cup F_j\cup\{e\}\in{\cal B}_e^{i,j}$ and the mapping $\psi$ defined by $$ \psi(\{H_1,\dots, H_j\}) = F_1\cup\dots\cup F_j\cup\{e\} $$ is a bijection onto ${\cal B}_e^{i,j}$. Since $$ (-1)^{|F_1\cup\dots\cup F_j\cup\{e\}|} = - \prod_{k=1}^j (-1)^{|F_k|}\,, $$ the claim follows upon noting that $a_1(H[V'])=0$ if $H[V']$ is not connected. 
\end{proof} Setting $i=1$ and summing over $j$ in \eqref{II} we get \begin{equation}\label{II'} a_1(H) = - \sum_{j=2}^n \sidesum{1}_{V_1\sqcup\dots\sqcup V_j=V}\prod_{k=1}^j a_1(H[V_k]) \end{equation} which determines $a_1(H)$ inductively for any hypergraph $H$, since $H[V_1],\dots, H[V_j]$ all have fewer edges than $H$ and we obviously have \begin{equation}\label{III} a_1(\bar{H}\langle \emptyset \rangle) = \begin{cases} 1\; \mbox{if $|V| = 1$} \\ 0\; \mbox{if $|V|>1$}\,.\end{cases} \end{equation} Once $a_1$ is known we obtain $b_e^{i,j}(H)$ for any $H$ from \eqref{I'} and consequently $a_i(H)$ from \eqref{I}. Hence, equations \eqref{I}, \eqref{II} and \eqref{III} determine all $a_i$ (as well as all $b_e^{i,j}$). We will now present a generalization of Whitney's broken cycle theorem for hypergraphs. \begin{defn}\label{delta} Let $H=(V,E)$ be a hypergraph and fix some linear ordering $\leq$ of $E$. A non-empty set $F\subseteq E$ is called \emph{broken-cyclic} in $H$ with respect to $\leq$ if it fulfils the following property ($\bigstar$)\quad $H\langle F\rangle$ is connected and there exists an edge $e_0\subseteq \bigcup_{f\in F} f$ such that $e_0 > \mbox{max}\, F$. \end{defn} \begin{lem}\label{stardec} Assume $H=(V,E)$ is a hypergraph with connected components $H_1=(V_1,E_1),$ \dots, $H_N=(V_N,E_N)$. Then $F\subseteq E$ is broken-cyclic in $H$ if and only if $F\subseteq E_i$ and $F$ is broken-cyclic in $H_i$ for some $i=1,\dots,N$, with ordering of edges inherited from that of $H$. \end{lem} \begin{proof} If $F$ is broken-cyclic in $H$ then $H\langle F\rangle$ is connected and hence is a subgraph of some $H_i$. Consequently, if $e_0\subseteq \bigcup_{f\in F} f$ it is an edge of $H_i$ and it follows that $F$ is broken-cyclic in $H_i$. The converse, that a set of edges $F$ which is broken-cyclic in $H_i$ is also broken-cyclic in $H$, is obvious.
\end{proof} From now on $H=(V,E)$ is a fixed hypergraph with some linear ordering $\leq$ on $E$ and $\cal D$ is some subset of $2^E$ consisting of broken-cyclic subsets in $H$ with respect to $\leq$. Moreover, if $H'=(V',E')$ is a subgraph of $H$ it will be assumed that $E'$ is ordered with respect to the restriction of $\leq$ to $E'$. We define \begin{align} {\cal E}_{\cal D} &= \{F\subseteq E\mid A\nsubseteq F \;\mbox{for all $A\in \cal D$}\} \\ {\cal E}^i_{\cal D} &= \{F\subseteq E\mid k(F) = i \}\cap {\cal E}_{\cal D} \end{align} and set \begin{equation} a_{i,\cal D} = \sum_{F\in{\cal E}^i_{\cal D}} (-1)^{|F|}\,, \end{equation} for $i=1,2,3,\dots,n$. Note that $a_i = a_{i,\emptyset}$. We may now formulate the following version of the broken-cycle theorem. \begin{thm}\label{thyp} For any set $\cal D$ of broken-cyclic subsets of edges in a hypergraph $H$ it holds that \begin{equation} a_i = a_{i,\cal D} \end{equation} for all $i$. \end{thm} \begin{proof} Let $e=\mbox{max}\, E$. Defining the sets \begin{equation} {\cal A}^i_{e,\cal D} = {\cal A}^i_{e}\cap {\cal E}_{\cal D}\,,\qquad {\cal B}^{i,j}_{e,\cal D} = {\cal B}^{i,j}_{e}\cap{\cal E}_{\cal D}\,, \end{equation} we have the decomposition \begin{equation} {\cal E}^i_{\cal D} = {\cal A}^i_{e,\cal D}\cup\left(\bigcup_{j\geq i}{\cal B}^{i,j}_{e,\cal D}\right) \end{equation} into disjoint subsets. Moreover, since $e$ is maximal in $E$ it does not belong to any broken-cyclic subset in $H$ and therefore the mapping $\varphi$ defined by $\varphi(F) = F\cup\{e\}$ is a bijection from ${\cal A}^i_{e,\cal D}$ onto $\bigcup_{j\leq i} {\cal B}^{j,i}_{e,\cal D}$. Thus, defining \begin{equation} b^{i,j}_{e,\cal D} = \sum_{F\in{\cal B}^{i,j}_{e,\cal D}} (-1)^{|F|}\,, \end{equation} the same arguments as those leading to relation \eqref{I} imply \begin{equation}\label{Ihyp} a_{i,\cal D} = \sum_{j>i} b_{e,\cal D}^{i,j} - \sum_{j<i} b_{e,\cal D}^{j,i}\,.
\end{equation} We next argue that the analogue of \eqref{II} also holds. Let $F\in {\cal B}^{i,j}_{e,\cal D}$ and consider the corresponding connected components $H_1=(V_1,F_1),\dots, H_j=(V_j,F_j)$ of the subgraph $\bar H\langle F\setminus\{e\}\rangle$ (see the proof of Proposition~\ref{bformula}). For $A\in \cal D$ we have by Lemma~\ref{stardec} that $A\subseteq F$ if and only if $A\subseteq F_k$ for some $k=1,\dots, j$. Defining \begin{equation} {\cal D}_k = {\cal D}\cap 2^{E_k}\,, \end{equation} where $E_k$ denotes the edge set of $H[V_k]$, this means that $A\nsubseteq F$ for all $A\in{\cal D}$ if and only if $A\nsubseteq F_k$ for all $A\in{\cal D}_k$ and all $k=1,\dots, j$. Observe that any $A\in{\cal D}_k$ is broken-cyclic in $H[V_k]$ since the vertices of edges in $A$ belong to $V_k$ and hence $H\langle A\rangle = H[V_k]\langle A\rangle$. We conclude that $F\in {\cal B}^{i,j}_{e,\cal D}$ if and only if $F_k\in {\cal A}^{1}_{e,{\cal D}_k}(H[V_k])$ for all $k=1,\dots, j$. As in the proof of Proposition~\ref{bformula} we obtain, conversely, from any decomposition $V_1\sqcup\dots\sqcup V_j=V$ and connected, spanning subgraphs $H_1=(V_1,F_1),\dots, H_j= (V_j,F_j)$ of $H[V_1],\dots, H[V_j]$, respectively, such that $A\nsubseteq F_k$ for all $A\in {\cal D}_k$ and all $k=1,\dots, j$, and such that $e$ intersects exactly $j-i+1$ of the sets $V_1,\dots, V_j$, that $F=F_1\cup\dots\cup F_j\cup\{e\}$ belongs to ${\cal B}^{i,j}_{e,\cal D}$. Hence we obtain the desired relation \begin{equation}\label{IIhyp} b^{i,j}_{e,\cal D}(H) = -\sidesum{i}_{V_1\sqcup\dots\sqcup V_j=V} \prod_{k=1}^j a_{1,{\cal D}_k}(H[V_k])\,, \end{equation} where one should note that ${\cal D}_k$ depends solely on $V_k$ and $\cal D$ for a given $H$.
Having established equations \eqref{Ihyp} and \eqref{IIhyp} the claimed equality of $a_i$ and $a_{i,{\cal D}}$ follows by induction on the number of edges since, if $E=\emptyset$, we must have $\cal D=\emptyset$ and so \begin{equation} a_{i,\cal D}(\bar H\langle \emptyset \rangle) = a_i(\bar H\langle \emptyset \rangle)\,,\quad i=1,2,3,\dots \end{equation} \end{proof} The following Propositions \ref{deltacond} and \ref{cyclecond} show that Theorem~\ref{thyp} contains the broken cycle theorems of \cite{dohmen1, dohmen2, trinks} and those quoted for hypergraphs in \cite{dohmen-trinks}. \begin{prop}\label{deltacond} Assume $H'=(V',F)$ is a $\delta$-cycle in $H=(V,E)$ in the sense of \cite{trinks}, i.e.\ $H'$ is a minimal subgraph of $H$ such that $F\neq \emptyset$ and $k(H')=k(H'-e)$ for all $e\in F$. Then $F\setminus\{\mbox{\rm max}\, F\}$ is broken-cyclic in $H$ according to Definition~\ref{delta}. \end{prop} \begin{proof} Since $H'$ is minimal it follows that $k(H')=k(H'-e)=1$ for all $e\in F$. In particular, $H'- \mbox{max}\, F$ is connected and equals $H\langle F\setminus\{\mbox{max}\, F\}\rangle$ with vertex set $V'$. Hence, $\mbox{max}\, F\subseteq \bigcup_{f\in F\setminus\{\mbox{max}\, F\}} f$ and, of course, $\mbox{max}\, F> \mbox{max}(F\setminus\{\mbox{max}\, F\})$. \end{proof} \begin{prop}\label{cyclecond} Let $C=x_1e_1x_2e_2 \dots x_ne_nx_1$ be a cycle in $H$ in the sense of \cite{berge}, i.e.\ $x_1$, \dots, $x_n$, resp. $e_1,$ \dots, $e_n$, are pairwise distinct vertices, resp. edges, in $H$ such that $x_i\in e_{i-1}\cap e_i$ for $i=1,\dots, n$ (with $e_0\equiv e_n$). Setting $F=\{e_1,\dots, e_n\}$ we have that $F\setminus\{\mbox{\rm max}\, F\}$ is broken-cyclic in $H$ provided \begin{equation}\label{incl} \mbox{\rm max}\, F\subseteq \bigcup_{f\in F\setminus\{\mbox{\small\rm max}\, F\}} f\,, \end{equation} which in particular holds if $\mbox{\rm max}\, F$ has cardinality 2.
\end{prop} \begin{proof} It is clear that $H\lambdangle F\setminus\{\mbox{max}\, F\}\rangle$ is connected and that \eqref{incl} ensures that we may use $e_0 = \mbox{max}\, F$ in Definition~\ref{delta}. If $\mbox{max}\, F =e_k$ has cardinality $2$ then $e_k=\{x_k,x_{k+1}\}\subseteq e_{k-1}\cup e_{k+1}\subseteq\cup_{ f\in F\setminus\{e_k\}} f$. \end{proof} Alternating sign properties of the $a_i$ for hypergraphs such as the ones described in Section \ref{sec:graphs} for graphs have been demonstrated in some specific cases, see e.g. \cite{dohmen1}. To what extent analogues of \eqref{pos-mon2} can be obtained in the general case of hypergraphs is not clear. We should mention on this topic that the deletion-contraction principle has been extended to hypergraphs \cite{Zykov_1974} as well as to mixed hypergraphs \cite{voloshin1993}. \section{An application: the first chromatic coefficient for complete hypergraphs} \lambdabel{sec:complete-hypergraphs} As a last topic we show that the recursion relations of Section~\ref{sec:hypergraphs} can be used to derive the value of $a_1$ for complete hypergraphs. Let $K_n^r$ be the $r$-complete hypergraph of order $n$, i.e.\ the edge set of $K_n^r$ consists of all $r$-subsets of its vertex set $V=\{1,2,\dots,n\}$. Note that if $r=2$, then $K^2_n$ is the complete graph $K_n$ and the result is well known (see e.g. \cite{Dong2005}). We shall calculate $a_1(K_n^r)$ for $r\geq 2$ and $n\geq 1$ making use of \eqref{II'}, which in this case takes the form \begin{align}\lambdabel{recompl} a_1(K_n^r) = - \sum_{j=2}^r \sum_{\substack{1\leq k_1\leq\dots\leq k_j\\ k_1+\dots +k_j=r}} N^r_{k_1,\dots,k_j}\!\sum_{\substack{s_1,\dots, s_j\geq 0\\ s_1+\dots +s_j=n-r}}\! 
\binom{n-r}{s_1 \dots s_j} \cdot a_1(K^r_{k_1+s_1})\cdot\ldots \cdot a_1(K^r_{k_j+s_j})\,, \end{align} where $N^r_{k_1,\dots,k_j}$ denotes the number of partitions of $\{1,\dots,r\}$ into $j$ sets of size $k_1,\dots, k_j$ and $\binom{n-r}{s_1 \dots s_j}$ is the standard multinomial coefficient. Note also that we obviously have \begin{equation} \chi_{\small K_n^r}(\lambda) = \begin{cases} \lambda^n\quad \mbox{if $0\leq n<r$}\\ \lambda^n -\lambda \quad \mbox{if $n=r$}\end{cases}\,, \end{equation} so that, in particular, \begin{equation}\label{inicond} a_1(K_n^r) =\begin{cases} 1\quad \mbox{if $n=1$}\\ 0\quad \mbox{if $n=2,3,\dots, r-1$}\end{cases} \end{equation} (while $a_1(K_n^n) = -1$). \begin{thm}\label{thm3} For $r\geq 2$ and $n\geq 1$ it holds that \begin{equation}\label{eq:a1-formula} a_1(K_n^r) = - (n-1)!\, \mu_{r-1}(n)\,, \end{equation} where \begin{equation} \mu_{r}(n) = \sum_{i=1}^{r} R_i^{-n} \end{equation} and $R_1,\dots, R_{r}$ denote the roots of the $r$'th Taylor polynomial $E_{r}$ of $\exp$. \end{thm} \begin{proof} Fix $r\ge 2$. We introduce the generating function $g(x)$ given by \begin{equation}\label{genfct} g(x) = \sum_{n=0}^\infty \frac{a_1(K^r_{n+1})}{n!} x^n \end{equation} and rewrite equations \eqref{recompl}-\eqref{inicond} as \begin{equation}\label{eq:diffgen} g^{(r-1)}(x) = - \sum_{j=2}^r \sum_{\substack{1\leq k_1\leq\dots\leq k_j\\k_1+\dots +k_j =r}} N^r_{k_1, \dots, k_j} g^{(k_1-1)}(x)\cdot\ldots\cdot g^{(k_j-1)}(x) \end{equation} with initial condition \[ g(0)=1\,,\quad g'(0)=g''(0) =\dots = g^{(r-2)}(0) =0\,. \] Given two $C^\infty$-functions $\psi$ and $\varphi$ of a real variable we recall the formula \begin{equation}\label{compleib} (\psi\circ\varphi)^{(r)}(x) = \sum_{j=1}^r \sum_{\substack{1\leq k_1\leq\dots\leq k_j\\k_1+\dots +k_j =r}} N^r_{k_1,\dots, k_j} \psi^{(j)}(\varphi(x)) \varphi^{(k_1)}(x)\cdot\dots\cdot \varphi^{(k_j)}(x)\,, \end{equation} which is easy to verify by induction.
For $\psi=\exp$ this gives $$ \exp(-\varphi(x))\left(\exp\circ\varphi\right)^{(r)}(x) = \sum_{j=1}^r \sum_{\substack{1\leq k_1\leq\dots\leq k_j\\k_1+\dots k_j =r}} N^r_{k_1,\dots, k_j} \varphi^{(k_1)}(x)\cdot\dots\cdot \varphi^{(k_j)}(x)\,. $$ Setting $g=\varphi'$ in \eqref{eq:diffgen} and using $N^{(r)}_{1,1,\dots 1}=1$ it follows that $\varphi$ satisfies $$ (\exp\circ\varphi)^{(r)} (x) = 0\,, $$ and hence that $\exp\circ\varphi$ equals a polynomial $P$ of degree at most $r-1$. Thus $$ g(x) = \frac{P'(x)}{P(x)}\,. $$ The initial conditions are easily seen to imply that $P=E_{r-1}$ and consequently $$ g(x) = \frac{E'_{r-1}(x)}{E_{r-1}(x)} = \sum_{i=1}^{r-1} \frac{1}{x-R_i}\,, $$ which gives the claimed result. \end{proof} \begin{rem} For $r=2$ we have $R_1=-1$ and we get from Theorem \ref{thm3} the known result \begin{equation} a_1(K_n) = a_1(K^2_n) = (-1)^{n-1} (n-1)!\,. \end{equation} By inserting this value into \eqref{eq:lemma-subgraph}, we obtain an expression for $a_i(K_n)$ for all $i$. It should be noted though that the value of $a_i(K_n)$ is equal to $ s(n,i)$, where $s(n,i)$ denotes the signed Stirling numbers of the first kind. \end{rem} \begin{rem} For $r=3$ the roots of $E_2$ are $R_{\pm} = -1\pm i$ which gives \begin{equation} a_1(K_n^3) = (-1)^{n-1}(n-1)!\, 2^{1-\frac n2}\cos\frac{n\pi}{4} \end{equation} \end{rem} For the calculation of $a_1(K^r_n)$ for larger values of $r$ one may use the results available in the literature for the moment function $\mu_r(n)$. In particular, the value of $\mu_r(n)$ was computed for $n\le 2(r+1)$ \cite[Theorem 7]{Zemyan2005}, which gives the following expression for $a_1(K^r_n)$, expanding the one given in \eqref{inicond} \begin{equation} a_1(K_n^r) = \begin{cases} 1 & \text{if $n=1$}\\ 0 & \text{if $2\le n \le r-1$}\\ (-1)^{n-r+1}\binom{n-1}{r-1} & \text{if $r \le n \le 2r-1$}\\ - [1+(-1)^{r}]\binom{2r-1}{r}& \text{if $n=2r$} \end{cases}. 
\end{equation} In \cite{Zemyan2005} it was also shown that, once $\mu_r(n)$ is known for $r$ consecutive values of $n$, then it is possible to recursively determine the value of $\mu_r(n)$ for every $n$. This recursive formula for $\mu_r(n)$, when expressed in terms of $a_1(K^r_n)$, reads as: \begin{equation} \sum_{j=0}^{r-1} \binom{r-2+m}{r-1-j} a_1(K^{r}_{j+m}) = 0, \quad \forall m \in \mathbb{N}. \end{equation} On a more general note, the properties of the zeros of the Taylor polynomials of $\exp$ have been intensively investigated, starting from the work of Szeg{\"o} \cite{szego1924eigenschaft} and Dieudonn\'e \cite{dieudonne1935zeros}, who showed that the points $\frac{R_i}{r}$ accumulate, as $r$ goes to infinity, on a closed curve contained in the unit circle, now known as the Szeg{\"o} curve. See also \cite{Newman1972,Newman1976,Buckholtz1966,Conrey1988,Pritsker1997,Walker2003,varga2008dynamical,Zemyan2005} for further developments. \noindent{\bf Acknowledgement} The authors acknowledge support from the Villum Foundation via the QMATH Centre of Excellence (Grant no. 19959). A.~L. acknowledges support from the Walter Burke Institute for Theoretical Physics in the form of the Sherman Fairchild Fellowship as well as support from the Institute for Quantum Information and Matter (IQIM), an NSF Physics Frontiers Center (NSF Grant PHY-1733907). \end{document}
\begin{document} \title{Travelling waves for the cane toads equation with bounded traits.} \begin{abstract} In this paper, we study propagation in a nonlocal reaction-diffusion-mutation model describing the invasion of cane toads in Australia \cite{Phillips}. The population of toads is structured by a space variable and a phenotypical trait and the space-diffusivity depends on the trait. We use a Schauder topological degree argument for the construction of some travelling wave solutions of the model. The speed $c^*$ of the wave is obtained after solving a suitable spectral problem in the trait variable. An eigenvector arising from this eigenvalue problem gives the flavor of the profile at the edge of the front. {The major difficulty is to obtain uniform $L^\infty$ bounds despite the combination of non local terms and an heterogeneous diffusivity.} \varepsilonnd{abstract} \noindent{\bf Key-Words:} {Structured populations, Reaction-diffusion equations, Travelling waves, Spectral problem}\\ \noindent{\bf AMS Class. No:} {35Q92, 45K05, 35C07} \section{Introduction.} In this paper, we focus on propagation phenomena in a model for the invasion of cane toads in Australia, proposed in \cite{Benichou}. It is a structured population model with two structural variables, the space $x\in \mathbb{R}^n$ and the motility $\theta \in \Theta$ of the toads. {Here $\Theta:= \left( \theta_{\text{min}}, \theta_{\text{max}} \right)$, with $\theta_{\text{min}} > 0$} denotes the bounded set of traits. {One modeling assumption is that} the space diffusivity depends {only on} $\theta$. The mutations are simply modeled {by} a diffusion process with constant diffusivity $\alpha$ in the variable $\theta$. {Each toad is in local competition} with all other individuals (independently of their trait) for resources. The resulting reaction term is of monostable type. 
Denoting $n(t,x,\theta)$ the density of toads having trait $\theta \in \Theta$ in position $x \in \mathbb{R}^n$ at time $t \in \mathbb{R}^+$, the model reads: \begin{equation}\label{eq:main} \left\{\begin{array}{l} \partial_t n - \theta \Delta_x n - \alpha \partial_{\theta\theta} n = r n (1 - \rho)\, , \qquad (t,x,\theta) \in \mathbb{R}^+ \times \mathbb{R}^n \times \Theta, \\ \partial_\theta n (t,x,\theta_{\min}) = \partial_\theta n (t,x,\theta_{\max}) = 0\, , \qquad (t,x) \in \mathbb{R}^+ \times \mathbb{R}^n.\\ \end{array} \right. \end{equation} with \begin{equation*} \forall (t,x) \in \mathbb{R}^+ \times \mathbb{R}^n, \qquad \rho(t,x) = \int_\Theta n(t,x,\theta)\, d\theta. \end{equation*} The Neumann boundary conditions ensure {the conservation of individuals through the mutation process}. { The invasion of cane toads has interested several field biologists. The data collected \cite{Shine, Phillips} show that the speed of invasion has always been increasing during the first eighty years of propagation and that younger individuals at the edge of the invasion front have shown significant changes in their morphology compared to older populations. This example of ecological problem among others (see the expansion of bush crickets in Britain \cite{Thomas}) illustrates the necessity of having models able to describe space-trait interactions. Several works have addressed the issue of front invasion in ecology, where the trait is related to dispersal ability \cite{Desvillettes,Champagnat}. It has been postulated that selection of more motile individuals can occur, even if they have no advantage regarding their reproductive rate, due to spatial sorting \cite{Kokko,Ronce,Shine,Simmons}.} Recently, some models for populations structured simultaneously by phenotypical traits and a space variable have emerged. A similar model to \eqref{eq:main} in a discrete trait setting has been studied by Dockery \textit{et al.} in \cite{Dockery}.
Interestingly, they prove that in a bounded space domain {and with a rate of growth $r(x)$ heterogeneous in space}, {the only nontrivial Evolutionarily Stable State (ESS) is a population dominated by the slowest diffusing phenotype}. This conclusion is precisely the opposite of what is expected {at the edge of an invading front}. In \cite{Alfaro}, the authors study propagation in a model close to \varepsilonqref{eq:main}, where the {trait affects the growth rate $r$ but not the dispersal ability.} This latter assumption is made to take into account that the most favorable phenotypical trait may depend on space. The model reads \begin{equation*} \partial_t n - \Delta_{x,\theta} n = \left( r \left( \theta - B x \cdot e \right) - \int_{\mathbb{R}} k \left( \theta - B x \cdot e , \theta' - B x \cdot e \right) n(t,x,\theta') d \theta' \right) n(t,x,\theta), \varepsilonnd{equation*} and the authors prove the existence of travelling wave solutions. A version {with local competition in trait} of this equation has also been studied in \cite{Berestycki-Chapuisat}. {As compared to \cite{Alfaro,Berestycki-Chapuisat}, the main difficulty here is to obtain a uniform $L^\infty\left( \mathbb{R} \times \Theta \right)$ bound on the density $n$ solution of \varepsilonqref{eq:main}}. It is worth recalling that this propagation phenomena in reaction diffusion equations, through the theory of travelling waves, has been widely studied since the pioneering work of Aronson and Weinberger \cite{Aronson} on the Fisher-KPP equation \cite{Fisher,Kolmogorov}. We refer to \cite{Nadin,Nolen,Berestycki-Hamel} and the references therein for recent works concerning travelling waves for generalized Fisher-KPP equations in various heterogeneous media, and to \cite{Coville-Davila,Coville-Dupaigne,Shen} for works studying front propagation in models where the non locality appears in the dispersion operator. 
Studying propagation phenomena in nonlocal equations can be pretty involved since some qualitative features like Turing instability may occur at the back of the front, see \cite{Berestycki-Nadin,Hamel}, due to lack of comparison principles. Nevertheless, it is sometimes still possible to {construct} travelling fronts with rather abstract arguments. In this article, we aim to give a complete proof of some formal results that were previously announced in \cite{Bouin}. Namely construct some travelling waves solutions of \varepsilonqref{eq:main} with the expected qualitative features at the edge of the front. Let us now give the definition of spatial travelling waves we seek for \varepsilonqref{eq:main}. \begin{definition}\label{defonde} We say that a function $n(t,x,\theta)$ is a \textit{travelling wave} solution of speed $c \in \mathbb{R}^+$ in direction $e \in \mathbb{S}^n$ if it writes \begin{equation*} \forall (t,x,\theta) \in \mathbb{R}^+ \times \mathbb{R}^n \times \Theta, \qquad n(t,x,\theta):= \mu \left( \xi:= x \cdot e - c t , \theta \right), \varepsilonnd{equation*} where {\textit{the profile}} $\mu \in \mathcal{C}_b^2 \left( \mathbb{R} \times \Theta \right)$ is nonnegative, satisfies \begin{equation*} \liminf_{\xi \to - \infty} \mu \left( \xi , \cdot \right) > 0, \qquad \lim_{\xi \to + \infty} \mu \left( \xi , \cdot \right) = 0, \varepsilonnd{equation*} and solves \begin{equation}\label{eqkinwave} \begin{cases} - c \partial_{\xi} \mu = \theta {\partial_{\xi \xi} \mu} + \alpha \partial_{\theta \theta} \mu + r \mu (1 - \nu), \qquad (\xi , \theta) \in \mathbb{R} \times \Theta, \\ \partial_\theta \mu(\xi,\theta_{\text{min}}) = \partial_\theta \mu(\xi,\theta_{\text{max}}) = 0, \qquad \xi \in \mathbb{R}. \varepsilonnd{cases} \varepsilonnd{equation} where $\nu$ is the macroscopic density associated to $\mu$, that is $\nu \left( \xi \right) = \int_\Theta \mu \left( \xi, \theta \right) d \theta$. 
\varepsilonnd{definition} To state the main existence result we first need to explain which heuristic considerations yield to the derivation of possible speeds for fronts. As for the standard Fisher-KPP equations, we expect that the fronts we build in this work are so-called \textit{pulled fronts}: They are driven by the dynamics of small populations at the edge of the front. In this case, the speed of the front can be obtained through the linearized equation of \varepsilonqref{eqkinwave} around {\bf $\mu <<1$}. The resulting equation (which is now {a local elliptic equation}) writes \begin{equation}\label{eq:linmain} \begin{cases} - c \partial_{\xi} \widetilde \mu = \theta \partial_{\xi \xi} \widetilde\mu + \alpha \partial_{\theta\theta} \widetilde\mu + r \widetilde\mu, \qquad (\xi , \theta) \in \mathbb{R} \times \Theta, \\ \partial_\theta \widetilde\mu(\xi,\theta_{\text{min}}) = \partial_\theta \widetilde\mu(\xi,\theta_{\text{max}}) = 0, \qquad \xi \in \mathbb{R}. \varepsilonnd{cases} \varepsilonnd{equation} {Particular solutions of \varepsilonqref{eq:linmain} are a combination of an exponential decay in space and a monotonic profile in trait:} \begin{equation*} \forall (\xi , \theta) \in \mathbb{R} \times \Theta, \qquad \widetilde\mu(\xi,\theta) = Q_\lambda(\theta) e^{- \lambda \xi}, \varepsilonnd{equation*} where $\lambda > 0$ represents the spatial decreasing rate and $Q_\lambda$ the trait profile. {The pair} $(c(\lambda), Q_\lambda)$ solves the following \textit{spectral problem}: \begin{equation}\label{eq:eigenpb} \begin{cases} \alpha \partial_{\theta \theta} Q_\lambda(\theta) + \left( - \lambda c(\lambda) + \theta \lambda^2 + r \right) Q_\lambda(\theta) = 0\,, \qquad \theta\in \Theta, \\ \partial_\theta Q_\lambda \left( \theta_{\text{min}} \right) = \partial_\theta Q_\lambda \left( \theta_{\text{max}} \right) = 0, \\ Q_\lambda(\theta) > 0, \; \int_\Theta Q_\lambda(\theta)\, d\theta = 1\,. 
\end{cases} \end{equation} We refer to Section \ref{tools}, Proposition \ref{propspec} for detailed arguments showing that \eqref{eq:eigenpb} has a unique solution $\left( c(\lambda), Q_\lambda \right)$ for all $\lambda > 0$. We also prove there that we can define the minimal speed $c^*$ and its associated decreasing rate through the following formula: \begin{equation}\label{minspeed} c^*:= c(\lambda^*) = \min_{\lambda > 0} c(\lambda). \end{equation} \begin{remark} We emphasize that this structure of spectral problem giving information about propagation in models of ``kinetic'' type is quite robust. We refer to \cite{Alfaro,Berestycki-Chapuisat,Bouin-Calvez-Nadin, Bouin-Calvez-Nadin-2} for works where this kind of dispersion relation also gives the speed of propagation of possible travelling wave solutions, and to \cite{Bouin-2,Bouin-Calvez,Bouin-Mirrahimi} for recent works where the same kind of spectral problem appears to find the limiting Hamiltonian in the WKB expansion of hyperbolic limits. \end{remark} We are now ready to state the main Theorem of this paper: \begin{theorem}\label{wave} Let {$\Theta:= \left( \theta_{\text{min}} , \theta_{\text{max}} \right), \theta_{\text{min}} > 0, \theta_{\text{max}} < + \infty$} and $c^*$ be the minimal speed defined in \eqref{minspeed}. Then, there exists a travelling wave solution of \eqref{eq:main} of speed $c^*$ in the sense of Definition~\ref{defonde}. \end{theorem} This Theorem, together with the heuristic argument, has been announced in \cite{Bouin}. \begin{remark} As in \cite{Alfaro,Aronson}, we expect that waves going with faster speeds $c > c^*$ do exist and are constructible by a technique of sub- and supersolutions. Nevertheless, since it {does not} make much difference with \cite{Alfaro}, we do not address this issue here. \end{remark} The paper is organized as follows. 
In Section \ref{tools}, {we study the spectral problem \varepsilonqref{eq:eigenpb} and provide some qualitative properties}. In Section \ref{Slab}, we elaborate a topological degree argument to solve the problem in a bounded slab. Finally in Section \ref{profileminspeed}, we construct the profile going with speed $c^*$ which proves the existence of Theorem \ref{wave}. {\section{The spectral problem.}\label{tools}} We discuss the spectral problem naturally associated to \varepsilonqref{eq:main} that we have stated in \varepsilonqref{eq:eigenpb}. We state and prove some useful properties of $Q_{\lambda}$ and some relations between $c^*$ and $\lambda^*$. \begin{proposition}[{\bf {Qualitative} properties of the spectral problem}]\label{propspec} For all $\lambda > 0$, the spectral problem \varepsilonqref{eq:eigenpb} has a unique solution $(c(\lambda),Q_\lambda)$. Moreover, the function $\lambda \mapsto c(\lambda)$ has a minimum, that we denote by $c^*$ and that we call the \textit{minimal speed}. This minimum is attained, and we denote by $\lambda^* > 0$ an associated decreasing rate and $Q_{\lambda^*}:= Q^*$ the corresponding profile. Then we have the following properties: \begin{enumerate} \item[(i)] {For all $\lambda > 0$,} the profile $Q_\lambda$ is increasing {w.r.t $\theta$}. {There exists $\theta_0$ such that} $Q_\lambda$ is convex on $\left[ \theta_{min} , \theta_{0} \right]$ and concave on $\left[ \theta_0 , \theta_{max} \right]$. Moreover, $\theta_0$ satisfies $- \lambda c(\lambda) + \lambda^2 \theta_0 + r = 0$ \item[(ii)] We define $\left\langle \theta_\lambda \right\rangle:= \int_\Theta \theta Q_\lambda (\theta) d \theta$, the mean trait associated to the {decay} rate $\lambda$. {We also define $\left\langle \theta^* \right\rangle:= \left\langle \theta_{\lambda^*} \right\rangle$}. 
One has \begin{equation}\label{rel1} \forall \lambda > 0, \qquad - \lambda c(\lambda) + \lambda^2 \left\langle \theta_\lambda \right\rangle + r = 0, \qquad \left\langle \theta_\lambda \right\rangle > \frac{\theta_{max} + \theta_{min}}{2}. \varepsilonnd{equation} \item[(iii)] {About the special features of the minimal speed, we have \begin{equation}\label{rel6} c^* > 2 \sqrt{r \langle \theta^* \rangle}, \varepsilonnd{equation} \begin{equation}\label{rel4} c^* \geq \lambda^* \left( \theta_{max} + \theta_{min} \right). \varepsilonnd{equation}} \varepsilonnd{enumerate} \varepsilonnd{proposition} \begin{proof}[\bf Proof of Proposition \ref{propspec}] We first prove the existence and uniqueness of $(c(\lambda),Q_{\lambda})$ for all positive $\lambda$. {Let $\beta > 0$ and $K$ be the positive cone of nonnegative functions in $\mathcal{C}^{1,\beta}\left( \Theta \right)$}. We define $L$ on $\mathcal{C}^{1,\beta}\left( \Theta \right)$ as below \begin{equation*} L (u)= - \alpha \partial_{\theta\theta}u(\theta) - \left( \theta - \theta_{\text{max}} \right)\lambda^2 u (\theta). \varepsilonnd{equation*} The resolvent of $L$ together with the Neumann boundary condition is compact from the regularizing effect of the Laplace term. Moreover, the strong maximum principle and the boundedness of $\Theta$ gives that it is strongly positive. Using the Krein-Rutman theorem we obtain that there exists a nonnegative eigenvalue {$\frac{1}{\gamma(\lambda)} $}, corresponding to a positive eigenfunction $Q_\lambda$. This eigenvalue is simple and none of the other eigenvalues corresponds to a positive eigenfunction. {As a consequence, $\lambda c(\lambda):= r+ \lambda^2 \theta_{\text{max}} - \gamma( \lambda )$ solves the problem}. We come to the proof of $(i)$. {Since $Q_\lambda \in \mathcal{C}^2(\Theta)$ and satisfies Neumann boundary conditions}, there exists $\theta_0$ such that $\partial_{\theta \theta} Q_{\lambda}(\theta_0) = 0$. 
Since $- \lambda c(\lambda) + \lambda^2 \theta + r $ is increasing with $\theta$, the sign of $\partial_{\theta \theta} Q_{\lambda}$ and thus the {monotonicity} of $Q_\lambda$ follows. We deduce: \begin{equation*} \lambda^2 \theta_{min} + r \leq \lambda c(\lambda) \leq \lambda^2 \theta_{max} + r. \varepsilonnd{equation*} This yields \begin{equation*} c(\lambda) \underset{\lambda \to 0}{\sim} \frac{r}{\lambda}, \qquad \lambda c(\lambda) = \mathcal{O}_{\lambda \to + \infty}(\lambda^2). \varepsilonnd{equation*} These latter relations and the continuity of $\lambda \mapsto c(\lambda)$ give the existence of a positive minimal speed $c^*$ and a smallest positive minimizer $\lambda^*$. We now prove $(ii)$. We obtain the first relation of \varepsilonqref{rel1} after integrating \varepsilonqref{eq:eigenpb} over $\Theta$ and recalling the Neumann boundary conditions. To get the second one, we divide the spectral problem by $Q_{\lambda}$ and then integrate over $\Theta$: \begin{equation}\label{rel2} \left\langle \theta_\lambda \right\rangle = \frac{\theta_{max} + \theta_{min}}{2} + \frac{\alpha}{\lambda^2 \vert \Theta \vert } \int_{\Theta} \left\vert \frac{\partial_\theta Q_{\lambda}}{Q_{\lambda}} \right\vert^2 d \theta > \frac{\theta_{max} + \theta_{min}}{2}. \varepsilonnd{equation} We finish with $(iii)$. {For this purpose, we define $W_{\lambda} = \left( Q_{\lambda} \right)^2 $. It satisfies Neumann boundary conditions on $\partial \Theta$ and \begin{equation*} \forall \theta \in \Theta, \qquad \alpha \partial_{\theta \theta} W + 2 \left( - \lambda c(\lambda) + \lambda^2 \theta + r \right) W = \alpha \left( \frac{\partial_\theta W}{2 \sqrt{W}} \right)^2 \geq 0. 
\varepsilonnd{equation*} We thus deduce that \begin{equation*} \lambda^2 \int_\Theta \theta W d \theta + \left( - \lambda c(\lambda) + r \right) \int_\Theta W d \theta > 0, \varepsilonnd{equation*} from which we deduce \begin{equation}\label{rel5} \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta} > \left\langle \theta^* \right\rangle. \varepsilonnd{equation} } Differentiating \varepsilonqref{eq:eigenpb} with respect to $\lambda$, we obtain \begin{equation*} \left( - \lambda c'(\lambda) - c(\lambda) + 2 \theta \lambda \right) Q_\lambda + \left( - \lambda c(\lambda) + \theta \lambda^2 + r \right) \frac{\partial Q_\lambda}{\partial \lambda} + \alpha \partial_{\theta \theta} \left( \frac{\partial Q_\lambda}{\partial \lambda} \right) = 0. \varepsilonnd{equation*} We {do not} have {any} information about $\frac{\partial Q_\lambda}{\partial \lambda}$. Nevertheless, one can overcome this {issue by testing} directly {against} $Q_\lambda$. We obtain, for $\lambda = \lambda^*$: \begin{equation*} - c^* \int_\Theta \left( Q^* \right)^2 d\theta + 2 \lambda^* \int_\Theta \theta \left( Q^* \right)^2 d\theta = 0, \varepsilonnd{equation*} since $c'(\lambda^*)=0$. {As a consequence \begin{equation}\label{rel3} \qquad c^* = 2 \lambda^* \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta}. \varepsilonnd{equation} Combining \varepsilonqref{rel3} with $- \lambda^* c^* + \left(\lambda^*\right)^2 \left\langle \theta^* \right\rangle + r = 0$, one obtains \begin{equation} \frac{(c^*)^2}{4r} = \frac12 \left( \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta}\right)^2 \left( \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta} - \frac{\left\langle \theta^* \right\rangle}{2} \right)^{-1}. 
\varepsilonnd{equation} which gives \varepsilonqref{rel6} since $\frac12 \left( \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta}\right)^2 \left( \frac{\int_{\Theta} \theta \left( Q^* \right)^2 d \theta}{\int_{\Theta} \left( Q^* \right)^2 d \theta} - \frac{\left\langle \theta^* \right\rangle}{2} \right)^{-1} \geq \left\langle \theta^* \right\rangle$ always holds true and \varepsilonqref{rel5} rules out equality.} Finally, using \varepsilonqref{rel1} and \varepsilonqref{rel3}, one has \begin{equation*} c^* > 2 \lambda^* \left\langle \theta^* \right\rangle \geq 2 \lambda^* \frac{\theta_{max} + \theta_{min}}{2} = \lambda^*\left( \theta_{max} + \theta_{min} \right). \varepsilonnd{equation*} \varepsilonnd{proof} \section{Solving the problem in a bounded slab.}\label{Slab} In this Section, we solve an approximated problem in a bounded slab $(-a , a) \times \Theta$. \begin{definition} For all $\tau > 0$, we define \begin{equation*} \forall \theta \in \Theta, \qquad g_\tau(\theta) = \theta_{min} + \tau \left( \theta - \theta_{min} \right). \varepsilonnd{equation*} Now, for all $a > 0$, the slab problem $P_{\tau,a}$ is defined as follows on $[-a , a] \times \Theta$: \begin{equation}\label{eq:slab} [P_{\tau,a}]\left\{\begin{array}{l} -c \mu_{\xi}^a - g_{\tau}(\theta) \mu_{\xi \xi}^a - \alpha \mu_{\theta\theta}^a = r \mu^a (1 - \nu^a)\, , \quad {(\xi,\theta)} \in (-a,a) \times \Theta, \\ \mu_\theta^a(\xi,\theta_{\min}) = \mu_\theta^a(\xi,\theta_{\max}) = 0\, ,\quad \xi \in {(-a , a)}, \\ \mu^a(-a,\theta) = \vert\Theta \vert^{-1}\, , \quad \mu^a(a,\theta) = 0\, , \quad \theta \in \Theta . \varepsilonnd{array} \right. \varepsilonnd{equation} with the supplementary renormalization condition $\nu^a(0) = \varepsilonpsilon$. For legibility, we set $P_{1,a}:= P_a$. \varepsilonnd{definition} The non-local character of the source term does not provide any full comparison principle for $P_{\tau,a}$. 
However, we still have $\mu \geq0$. {We follow \cite{Alfaro,Berestycki-Nadin} and shall use the Leray-Schauder theory. For this purpose, some uniform \textit{a priori} estimates (with respect to $\tau, a$) on the solutions of the slab problem are required. The main difference with \cite{Alfaro,Berestycki-Nadin} is that it is more delicate to obtain these uniform $L^\infty$ estimates since it is not possible to write neither a useful equation nor an inequation on $\nu$ due to the term $\theta \mu_{\xi\xi}$ (as it is the case in kinetic equations)}. Our strategy is the following. We first prove in Lemma \ref{upboundc} that the speed is uniformly bounded from above. Then, Lemmas \ref{lem:nc=0} and \ref{bottom} focus on the case $c=0$ and prove that there cannot exist any solution to the slab problem in this case, provided that the normalization $\varepsilonps$ is well chosen. Finally, when the speed is given and uniformly bounded, we can derive a uniform \textit{a priori} estimate on the solutions of the slab problem \varepsilonqref{eq:slab}. Thanks to these \textit{a priori} estimates, we apply a Leray-Schauder topological degree argument in Proposition \ref{slabsol}. All along Section \ref{Slab}, we omit the superscript $a$ in $\mu^a$ {and $\nu^a$}. \subsection{An upper bound for $c$.} \begin{lemma}\label{upboundc} For any normalization parameter $\varepsilonpsilon > 0$, there exists a sufficiently large $a_0(\varepsilonpsilon)$ such that any {pair} $(c,\mu)$ solution of the slab problem $P_{\tau,a}$ with $a \geq a_0(\varepsilonps)$ satisfies $c \leq c_\tau^* \leq c^*$. \varepsilonnd{lemma} \begin{proof}[{\bf Proof of Lemma \ref{upboundc}}] We just adapt an argument from \cite{Alfaro,Berestycki-Nadin}. It consists in finding a relevant subsolution for a related problem. As $\mu \geq 0$, one has \begin{equation}\label{eq:n} \forall (\xi,\theta) \in (-a,a) \times \Theta, \qquad -c \mu_{\xi} \leq g_\tau(\theta) \mu_{\xi\xi} + \alpha \mu_{\theta\theta} + r \mu. 
\end{equation} As for \eqref{eq:eigenpb}, the following perturbed spectral problem has a unique solution associated with a minimal speed $c_\tau^*$: \begin{equation} \begin{cases} \alpha \partial_{\theta\theta} Q_\tau^*(\theta) + \left( - \lambda_\tau^* c_\tau^* + g_\tau(\theta) \left( \lambda_\tau^* \right)^2 + r \right) Q_\tau^*(\theta) = 0\, , \qquad \theta\in \Theta, \\ \partial_\theta Q_\tau^* \left( \theta_{\text{min}} \right) = \partial_\theta Q_\tau^* \left( \theta_{\text{max}} \right) = 0, \\ Q_\tau^*(\theta) > 0, \; \int_\Theta Q_\tau^*(\theta)\, d\theta = 1\,. \end{cases} \end{equation} Let us assume by contradiction that $ c > c_\tau^* $, then the family of functions $\psi_A ( \xi, \theta ):= A {e^{- \lambda_\tau^*\xi} Q_\tau^* ( \theta )}$ verifies \begin{equation}\label{eq:psi} \forall (\xi,\theta) \in (-a,a) \times \Theta, \qquad g_\tau(\theta) \left( \psi_A \right)_{\xi\xi} + \alpha \left( \psi_A \right)_{\theta\theta} + r \psi_A = \lambda_\tau^* c_\tau^* \psi_A < - c \left( \psi_A \right)_\xi. \end{equation} As the eigenvector $Q_\tau^*$ is positive, and $\mu \in L^{\infty} \left( (-a , a) \times \Theta \right)$, one has $\mu \leq \psi_A$ for $A$ sufficiently large, and $\mu \geq \psi_A$ for $A$ sufficiently small. As a consequence, one can define \begin{equation*} A_0 = \inf \left\lbrace A \; \vert \; \forall (\xi, \theta) \in \left( -a , a \right) \times \Theta, \; \psi_A (\xi, \theta) > \mu(\xi,\theta) \right\rbrace. \end{equation*} Necessarily, $A_0 > 0$ and there exists a point $(\xi_0, \theta_0) \in \left[ -a , a \right] \times \left[ \theta_{\text{min}}, \theta_{\text{max}} \right] $ where $\psi_{A_0}$ touches $\mu$: \begin{equation*} \mu(\xi_0 , \theta_0) = \psi_{A_0}(\xi_0 , \theta_0). \end{equation*} This point minimizes $\psi_{A_0} - \mu $ and {cannot} be in $\left( -a , a \right) \times \Theta$. 
Indeed, combining \eqref{eq:n} and \eqref{eq:psi}, one has in the interior, \begin{equation*} {\forall (\xi,\theta) \in ( -a , a ) \times \Theta} , \qquad c \left( \psi_A - \mu \right)_\xi + g_\tau(\theta) \left( \psi_A - \mu \right)_{\xi\xi} + \alpha \left( \psi_A - \mu \right)_{\theta\theta} + r \left( \psi_A - \mu \right) < 0. \end{equation*} But, if $(\xi_0,\theta_0)$ is in the interior, this latter inequality cannot hold since $ g_\tau(\theta) \left( \psi_A - \mu \right)_{\xi\xi} + \alpha \left( \psi_A - \mu \right)_{\theta\theta} \geq 0 $. {Next we} eliminate the boundaries. First, $(\xi_0,\theta_0)$ cannot lie on the right boundary $ \left\lbrace \xi = a \right \rbrace \times \Theta $ since $\psi_{A_0} > 0$ and $ \mu = 0 $ there. Moreover, thanks to the Neumann boundary conditions satisfied by both $\psi_A$ and $\mu$, $(\xi_0,\theta_0)$ {cannot} be in $\left[ -a , a \right] \times \left\lbrace \theta_{\text{min}}, \theta_{\text{max}} \right \rbrace$. We now exclude the left boundary by adjusting the normalization. If $\xi_0 = -a$, then $\psi_A (\xi_0, \theta_0) = \vert \Theta \vert^{-1}$ and {$A_0 = \frac{e^{- \lambda_\tau^* a}}{\vert \Theta \vert Q_\tau^* ( \theta_0 ) }$. Then $\nu(0) \leq \frac{e^{- \lambda_\tau^* a}}{\vert \Theta \vert Q_\tau^* ( \theta_0 ) }$ which is smaller than $\varepsilon$ for a sufficiently large $a$.} \end{proof} \subsection{The special case $c=0$.} We now focus on the special case $c=0$. We first show (Lemma \ref{lem:nc=0}) that the density $\mu$ is uniformly bounded (with respect to $a >0$). From this estimate, we deduce in Lemma \ref{bottom} that there exists a constant $\varepsilon_0$ depending only on the fixed parameters of the problem such that necessarily $\nu(0) \geq \varepsilon_0$. Thus, provided that $\varepsilon$ is set sufficiently small, our analysis will conclude that the slab problem {does not admit a} solution of the form $(c,\mu) = (0,\mu)$ {for $\varepsilon < \varepsilon_0$. 
We emphasize that the key \textit{a priori} estimate, \textit{i.e.} $\nu \in L^\infty\left( (-a,a) \times \Theta \right)$, is easier to obtain in the case $c=0$ than in the case $c \neq 0$ (compare Lemmas \ref{lem:nc=0} and \ref{lem:nc}}). \subsubsection{A priori estimate for {$\mu$} when $c=0$.} \begin{lemma}{\bf (A priori estimates, $c = 0$).}\label{lem:nc=0} Assume $c = 0$, $b > 0$ and $\tau \in [0,1]$. {There exists a constant $C(b)$ such that every solution $(c=0,\mu)$ of \eqref{eq:slab} satisfies} \begin{equation*} \forall (\xi,\theta) \in [ -b , b ] \times \Theta, \quad \mu(\xi,\theta) \leq \frac{{C(b)}}{\vert \Theta \vert} \frac{\theta_{max}}{\theta_{min}}. \end{equation*} \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{lem:nc=0}}] When $c=0$, the slab problem \eqref{eq:slab} reduces to \begin{equation*} [P_{\tau,b}]\left\{\begin{array}{l} - g_{\tau}(\theta) \mu_{\xi \xi} - \alpha \mu_{\theta\theta} = r \mu (1 - \nu)\, , \quad (\xi,\theta) \in {(-b,b)} \times \Theta, \\ \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0\, ,\quad \xi \in {(-b , b)}, \\ \mu(-b,\theta) = \vert \Theta \vert^{-1}\, , \quad \mu(b,\theta) = 0\, , \quad \theta \in \Theta . \end{array} \right. \end{equation*} Integration with respect to the trait variable $\theta$ yields \begin{equation*} \left\{\begin{array}{l} - \displaystyle \left( \int_{\Theta} g_\tau(\theta) \mu(\xi,\theta) d\theta \right)_{\xi\xi} = r \nu(\xi) (1 - \nu(\xi)), \quad \xi \in (-b,b),\\ \nu(-b) = 1\, , \quad \nu(b) = 0\, . \end{array} \right. \end{equation*} Take a point $\xi_0$ where $\int_{\Theta} g_\tau(\theta) \mu(\xi,\theta) d\theta$ attains a maximum. At this point, one has necessarily $\nu(\xi_0) \leq 1$. 
The following sequence of inequalities {holds true} for all $\xi \in {\left(-b,b\right)}$: \begin{multline*} {\theta_{\text{min}} \nu(\xi)} = g_\tau(\theta_{min}) \nu(\xi) = g_\tau(\theta_{min}) \int_\Theta \mu(\xi,\theta) d\theta \leq \int_{\Theta} g_\tau(\theta) \mu(\xi,\theta) d\theta \\ \leq \int_{\Theta} g_\tau(\theta) \mu(\xi_0,\theta) d\theta \leq g_\tau(\theta_{max}) \nu(\xi_0) \leq g_\tau(\theta_{max}), \end{multline*} and gives \begin{equation*} {\forall \xi \in ( -b , b ), \quad \nu(\xi) \leq \frac{g_\tau(\theta_{max})}{\theta_{min}} \leq \frac{\theta_{max}}{\theta_{min}}.} \end{equation*} Now, the Harnack inequality of Proposition \ref{Harnack} gives \begin{equation*} \forall (\xi,\theta) \in (-b,b) \times \Theta, \qquad \mu(\xi,\theta) \leq \frac{C(b)}{\vert \Theta \vert } \nu(\xi) \leq \frac{C(b)}{\vert \Theta \vert} \frac{\theta_{max}}{\theta_{min}}. \end{equation*} \end{proof} \subsubsection{Non-existence of solutions of the slab problem when $c=0$.} \begin{lemma}{\bf (Lower bound for $\nu(0)$ when $c=0$).}\label{bottom} There exists $\varepsilon_0 > 0$ such that if $a$ is large enough, then for all $\tau \in [0,1]$, any solution $(c=0,\mu)$ of the slab problem satisfies $\nu(0) > \varepsilon_0$. \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{bottom}}] We adapt {an} argument from {\cite{Alfaro}}. It is a bit simpler here since {the} trait space is bounded. 
For $b>0$, consider the following spectral problem in both variables $(\xi,\theta)$: {\begin{equation}\label{eq:evpb} \left\{\begin{array}{l} g_\tau(\theta) \left( \varphi_b \right)_{\xi\xi} + \alpha \left( \varphi_b \right)_{\theta\theta} + r \varphi_b = \psi_b \varphi_b \,, \quad (\xi,\theta) \in \left( - b , b\right) \times \Theta, \\ \left( \varphi_b \right)_\theta(\xi,\theta_{\min}) = \left( \varphi_b \right)_\theta(\xi,\theta_{\max}) = 0\,, \quad \xi \in \left( - b , b\right), \\ \varphi_b(-b,\theta) = 0 \, , \quad \varphi_b(b,\theta) = 0\,, \quad \theta \in \Theta. \end{array} \right. \end{equation} One can rescale the problem in the space direction by setting $\xi = b \zeta$: \begin{equation}\label{eq:evpb-rescaled} \left\{\begin{array}{l} \dfrac{g_\tau(\theta)}{b^2} \left( \varphi_b \right)_{\zeta \zeta} + \alpha \left( \varphi_b \right)_{\theta\theta} + r \varphi_b = \psi_b \varphi_b \,, \quad (\zeta,\theta) \in \left( - 1 , 1\right) \times \Theta \, , \\ \left( \varphi_b \right)_\theta(\zeta,\theta_{\min}) = \left( \varphi_b \right)_\theta(\zeta,\theta_{\max}) = 0\,,\quad \zeta \in \left( - 1 , 1\right) , \\ \varphi_b(-1,\theta) = 0 \, , \quad \varphi_b(1,\theta) = 0\,, \quad \theta \in \Theta. \end{array} \right. \end{equation}} {Using a Hamilton-Jacobi technique (see for instance \cite{Bouin-Mirrahimi} and the references therein), one can prove that $\lim_{b \to +\infty} \psi_b = r$.} As a consequence, we fix $b$ sufficiently large to have $\psi_b > \frac{r}{2}$. Thanks to the \textit{a priori} estimate on $\mu$ obtained in Lemma \ref{lem:nc=0}, {and} by the Harnack inequality {(of Proposition \ref{Harnack})}, there exists a constant $C(b)$ which does not depend on $a > b$ such that \begin{equation*} \forall \theta \in \Theta, \qquad C(b) \mu(0, \theta) \geq C(b) \inf_{\left( -b , b\right) \times \Theta} \mu(\xi , \theta) \geq \|\mu\|_{L^\infty((-b,b)\times \Theta)}. 
\end{equation*} To compare \eqref{eq:slab} to \eqref{eq:evpb}, one has, for all $(\xi,\theta) \in [-b,b] \times \Theta$, \begin{equation*} g_\tau(\theta) \mu_{\xi\xi} + \alpha \mu_{\theta\theta} + r \mu = r \mu \nu \leq r \mu \vert \Theta \vert \|\mu\|_{L^\infty((-b,b)\times \Theta)} \leq r C(b) \nu(0) \mu(\xi,\theta). \end{equation*} We deduce from this computation that as soon as $\nu(0) \leq \frac{1}{2C(b)}$, one has \begin{equation*} \forall (\xi,\theta) \in [-b,b] \times \Theta, \quad r C(b) \nu(0) \mu(\xi,\theta) < {\psi_b} \mu(\xi,\theta), \end{equation*} and this means that $\mu$ is a subsolution of \eqref{eq:evpb}. We can now use the same arguments as for the proof of Lemma \ref{upboundc}. We define \begin{equation*} A_0 = \max \left\lbrace A \; \vert \; \forall (\xi, \theta) \in \left[ -b , b \right] \times \Theta, \; A \varphi_b (\xi, \theta) < \mu(\xi,\theta) \right\rbrace, \end{equation*} so that $u_{A_0}:= \mu - A_0 \varphi_b$ has a zero minimum in $(\xi_0,\theta_0)$ and satisfies \begin{equation*} \left\{\begin{array}{l} - g_\tau(\theta) \left( u_{A_0} \right)_{\xi\xi} - \alpha \left(u_{A_0} \right)_{\theta\theta} - r u_{A_0} > - \psi_b u_{A_0}\,, \quad (\xi,\theta) \in \left( - b , b\right) \times \Theta \, , \\ \left( u_{A_0} \right)_\theta(\xi,\theta_{\min}) = \left( u_{A_0} \right)_\theta(\xi,\theta_{\max}) = 0\,, \quad \xi \in \left( - b , b\right) , \\ u_{A_0}(-b,\theta) > 0 \, , \quad u_{A_0}(b,\theta) > 0\,, \quad \theta \in \Theta. \end{array} \right. \end{equation*} For the same reasons as in Lemma \ref{upboundc} this cannot hold, so that necessarily {$ \nu(0) > \varepsilon_0 := \frac{1}{2C(b)} $}. \end{proof} \subsection{Uniform bound over the steady states, for $c \in \left[ 0 , c^* \right]$.} The previous subsection is central in our analysis. 
Indeed, it gives a bounded set of speeds in which to apply the Leray-Schauder topological degree argument, namely, we can restrict ourselves to speeds $c \in \left[ 0 , c^* \right]$. Based on this observation, we are now able to derive a uniform $L^\infty$ estimate (with respect to $a$ and $\tau$) for solutions $\mu$ of \eqref{eq:slab}. This is done in Lemma \ref{lem:nc} below. \begin{lemma}{\bf (A priori estimates, $c \in \left[ 0 , c^* \right]$).}\label{lem:nc} Assume $c \in \left[ 0 , c^* \right]$, $\tau \in [0,1]$ and $a\geq 1$. Then there exists a constant $C_0$ depending only on $\theta_{\min}$ and $\vert \Theta \vert$ such that any solution $(c,\mu)$ of the slab problem $P_{\tau,a}$ satisfies \begin{equation*} \Vert \mu \Vert_{L^\infty\left( (-a,a) \times \Theta \right) }\leq C_0\,. \end{equation*} \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{lem:nc}}] We divide the proof into two steps. In the first step, we prove successively that $\mu$ and $\mu_\theta$ are bounded uniformly in $H^1\left( (-a,a) \times \Theta \right)$. In the second step, we use a suitable trace inequality to deduce a uniform $L^\infty\left( (-a,a) \times \Theta \right)$ estimate on $\mu$. We define $K_0(a) = \max_{[-a,a] \times \Theta} \mu$. We want to prove that $K_0(a)$ is in fact bounded uniformly in $a$. {The argument is inspired by \cite{Berestycki-Nadin}. The principle of the proof goes as follows: The maximum principle implies that $\nu(\xi_0) \leq 1$ if $\left( \xi_0 , \theta_0 \right)$ is a maximum point for $\mu$. This does not imply that $\max \mu \leq 1$. However, we can control $\mu(\xi_0,\theta_0)$ by the non local term $\nu(\xi_0)$ provided we have some regularity of $\mu$ in the direction $\theta$. 
In order to get this additional regularity we use the particular structure of the equation (the nonlocal term does not depend on $\theta$ and is nonnegative).} \quad {\bf \# Step 0: Preliminary observations.} \quad Denote by $(\xi_0,\theta_0)$ a point where the maximum is reached. If the maximum is attained on the $\xi$-boundary $\xi_0 = \pm a$ then $K_0(a) \leq \vert \Theta \vert^{-1}$ by definition. If it is attained on the $\theta$-boundary $\theta_0 \in \{\theta_{\min},\theta_{\max}\}$, then the first derivative $\partial_\theta \mu$ vanishes by the Neumann boundary condition. Hence $\mu_{\theta\theta}(\xi_0,\theta_0)\leq 0$ and $\mu_{\xi\xi}(\xi_0,\theta_0)\leq 0$. The same holds true if $(\xi_0,\theta_0)$ is an interior point. Evaluating equation \eqref{eq:slab} at $(\xi_0,\theta_0)$ implies \begin{equation*} K_0(a) (1 - \nu(\xi_0))\geq 0\,, \end{equation*} and therefore $\nu(\xi_0) \leq 1$. \quad {\bf \# Step 1: Energy estimates on $\mu$.} \quad We derive local energy estimates. We introduce a smooth cut-off function $\chi: \mathbb{R} \to [0,1]$ such that \begin{equation*} \begin{cases} \chi = 1 \qquad \text{on} \qquad J_1 = \left( \xi_0 - \frac12,\xi_0 + \frac12 \right), \\ \chi = 0 \qquad \text{outside} \qquad J_2 = \left[\xi_0 - 1,\xi_0 + 1\right]. \end{cases} \end{equation*} Notice that the support of the cut-off function does not necessarily avoid the $\xi$-boundary. We also introduce the following linear corrector \begin{equation*} \forall \xi \in [-a,a], \qquad m(\xi) = \frac{1}{\vert \Theta \vert} \frac{a - \xi}{2a}, \end{equation*} which is defined such that $m(-a) = \vert \Theta \vert^{-1}$, $m(a) = 0$, and $0\leq m\leq \vert \Theta \vert^{-1}$ on $(-a,a)$. 
Testing against $(\mu - m)\chi$ over $[-a,a] \times \Theta$, we get \begin{multline*} - c \int_{(-a,a) \times \Theta} (\mu - m) \chi \mu_\xi d\xi d\theta -\int_{(-a,a) \times \Theta} g_\tau(\theta) (\mu - m)_{\xi\xi} (\mu - m)\chi\, d\xi d\theta \\- \int_{(-a,a) \times \Theta} \mu_{\theta\theta} (\mu-m)\chi\, d\xi d\theta =\int_{(-a,a) \times \Theta} \mu(1 - \nu)( \mu-m)\chi\, d\xi d\theta. \varepsilonnd{multline*} We now transform each term of the l.h.s. by integration by parts. We emphasize that the linear correction $m$ ensures that all the boundary terms vanish. We get \begin{multline*} \int_{(-a,a) \times \Theta} g_\tau(\theta) \left|(\mu -m)_{\xi}\right|^2 \chi\, d\xi d\theta + \int_{(-a,a) \times \Theta} \left|\mu_\theta\right|^2 \chi \, d\xi d\theta \\ \leq \frac12 \int_{(-a,a) \times \Theta} g_\tau(\theta) (\mu-m)^2 \chi_{\xi\xi}\, d\xi d\theta + c \frac{\vert \Theta \vert^{-1}}{2a} \int_{(-a,a) \times \Theta} \chi (\mu-m) d\xi d\theta \\- c \int_{(-a,a) \times \Theta} \frac12 (\mu-m)^2 \chi_\xi d\xi d\theta + \int_{(-a,a) \times \Theta} \mu^2 \chi\, d\xi d\theta + \int_{(-a,a) \times \Theta} \mu \nu m\chi\, d\xi d\theta. 
\end{multline*} We use that $\mu \leq K_0(a)$, $\nu(\xi) \leq \vert\Theta\vert K_0(a)$, $g_\tau(\theta) \geq \theta_{\text{min}}$ and $\vert c \vert \leq c^*$ to get \begin{multline*} \theta_{\min}\int_{J_1 \times \Theta} \left|\mu_\xi-m_{\xi}\right|^2 \, d\xi d\theta + \int_{J_1 \times \Theta} \left|\mu_\theta\right|^2 \, d\xi d\theta \\ \leq c^* \frac{\vert \Theta \vert^{-1}}{2a} K_0 \vert J_2 \times \Theta \vert - c \int_{[-a,a] \times \Theta} \frac12 (\mu-m)^2 \chi_\xi d\xi d\theta \\+ \frac12\int_{(-a,a) \times \Theta} g_\tau(\theta) (\mu-m)^2 \chi_{\xi\xi}\, d\xi d\theta + \int_{J_2 \times \Theta} K_0^2 \, d\xi d\theta + \int_{J_2 \times \Theta} \vert \Theta \vert K_0^2 \, d\xi d\theta\,. \end{multline*} Then we use the pointwise inequality $| \mu_\xi - m_\xi |^2 \geq \mu_\xi^2/2 - m_\xi^2$ in the first integral of the l.h.s.: \begin{multline*}\label{testn2} \frac{\theta_{\min}}2\int_{J_1} \left|\mu_{\xi}\right|^2 \, d\xi d\theta + \int_{J_1} \left|\mu_\theta\right|^2 \, d\xi d\theta \leq \frac{K_0 c^*}{a} + \theta_{\min} \int_{J_1} \left|m_{\xi}\right|^2 \, d\xi d\theta \\ + \int g_{\tau}(\theta)\left( \mu^2 + m^2 \right) \chi_{\xi\xi}\, d\xi d\theta + c^* \int \left( \mu^2 + m^2 \right) \chi_\xi d\xi d\theta + 4 \vert \Theta \vert K_0^2. \end{multline*} {Thus, we obtain our first energy estimate: $\mu \in H^1\left( [-a,a] \times \Theta \right)$ with a uniform bound of order $\mathcal{O}\left( K_0(a)^2 \right)$:} \begin{equation}\label{nH1} \min\left( \dfrac{\theta_{\min}}2 , 1\right)\int_{J_1} \left(\left|\mu_{\xi}\right|^2 + \left|\mu_\theta\right|^2\right) \, d\xi d\theta \leq C(\vert \Theta \vert,\theta_{\min},\chi)\left( 1 + K_0(a)^2\right)\,, \end{equation} as soon as $a \geq \frac{1}{2}$. We now come to the proof that $\partial_\theta \mu$ is also in $H^1\left( (-a,a) \times \Theta \right)$. We differentiate \eqref{eq:slab} with respect to $\theta$ for this purpose.
Here, we use crucially that $\nu$ is a function of the variable $\xi$ only. Note that we cannot expect that $\mu \in H^2\left( [-a,a] \times \Theta \right)$ with a bound of order $\mathcal{O}\left( K_0(a)^2 \right)$ at this stage. But we need additional elliptic regularity in the variable $\theta$ only. \begin{equation} \label{eq:n_theta} \forall (\xi,\theta) \in (-a,a) \times \Theta, \qquad - c \mu_{\xi\theta} - \tau \mu_{\xi\xi} - g_\tau(\theta) \mu_{\xi\xi\theta} - \mu_{\theta\theta\theta} = \mu_\theta (1 - \nu)\, . \end{equation} We use the cut-off function $\widetilde \chi(\xi) = \chi(\xi_0 + 2(\xi-\xi_0))$, for which $\mathrm{supp}\, \widetilde \chi\subset J_1$, and $\widetilde \chi(\xi) = 1$ on $J_{1/2} = (\xi_0 - 1/4,\xi_0 + 1/4)$. Multiplying \eqref{eq:n_theta} by $\mu_\theta\widetilde \chi$, we get after integration by parts {\begin{multline*}\label{testntheta2} \int_{J_1} \tau \mu_\xi \mu_{\theta \xi} \widetilde \chi \, d\xi d\theta + \int_{J_1} \tau \mu_\xi \mu_{\theta } \widetilde \chi_\xi \, d\xi d\theta + \int_{J_1} g_\tau(\theta) \mu_{\xi \theta} \mu_\theta \widetilde \chi_\xi \, d\xi d\theta \\ + \int_{J_1} g_\tau(\theta) \left|\mu_{\xi \theta}\right|^2 \widetilde \chi \, d\xi d\theta + \int_{J_1} \left|\mu_{\theta\theta}\right|^2 \widetilde \chi \, d\xi d\theta \leq \int_{J_1} \left|\mu_{ \theta}\right|^2 \widetilde \chi \, d\xi d\theta\,+ c \int_{J_1} \widetilde \chi_\xi \frac{\vert \mu_\theta \vert^2}{2} d\xi d\theta . \end{multline*}} Notice that all the boundary terms vanish since $\mu_\theta = 0$ on all segments of the boundary.
Using the $H^1$ estimate \eqref{nH1} obtained previously for $\mu$, we deduce \begin{multline*} \frac{\theta_{\min}}{2} \int_{J_{1/2}} \left \vert \mu_{\theta \xi}\right\vert^2 \, d\xi d\theta + \int_{J_{1/2}} \left \vert \mu_{\theta\theta}\right \vert^2 \, d\xi d\theta \leq \left( 1 + \frac{c^*}{2} \Vert \widetilde\chi_\xi \Vert_\infty \right)\int_{J_1} \left \vert \mu_{ \theta}\right \vert^2 \, d\xi d\theta + \frac{1}{2\theta_{\min}} \int_{J_{1}} \left\vert \mu_\xi \right\vert^2 d\xi d\theta \\ + \frac12 \int_{J_1}\left( \left|\mu_\xi \right|^2 + \left|\mu_\theta\right|^2 \right) \left|\widetilde \chi_\xi \right| \, d\xi d\theta + \frac12 \int \theta \left|\mu_{\theta}\right|^2 \widetilde \chi_{\xi\xi} \, d\xi d\theta \end{multline*} from which we conclude \begin{equation}\label{nthetaH1} \min\left( \dfrac{\theta_{\min}}2 , 1\right)\int_{J_1} \left(\left|\mu_{\theta \xi}\right|^2 + \left|\mu_{\theta\theta}\right|^2\right) \, d\xi d\theta \leq \overline{C}(\Theta,\theta_{\min},\chi)\left( 1 + K_0(a)^2\right)\, . \end{equation} This crucial computation proves that $\mu_\theta$ also belongs to $H^1\left( (-a,a) \times \Theta \right)$. \quad {\bf \# Step 2: Improved regularity of the trace $\mu(\xi, \cdot )$.} \quad From the fact that $\mu_\theta$ is an $H^1\left( (-a,a) \times \Theta \right)$ function uniformly in $a$, we obtain that the trace function $\theta \mapsto \mu_\theta(\xi_0,\theta)$ belongs to $H^{1/2}\left( \Theta \right)$ uniformly by standard trace theorems. Therefore, $\theta \mapsto \mu(\xi_0,\theta)$ belongs to $H^{3/2}\left( \Theta \right)$. This gives a constant $C_{tr}$ such that \begin{equation*} \Vert \mu(\xi_0,\cdot) \Vert_{H^{3/2}_\theta}^2 \leq C_{tr} \Vert \mu_{\theta} \Vert_{H^{1}_{\xi,\theta}}^2\,. \end{equation*} This enables us to control the variations of the density $\mu$ in the direction $\theta$.
Indeed, by an interpolation inequality, there exists a constant $C_{\text{int}}$ such that in the variable $\theta$, at a given point $\xi_0$: \[ \begin{cases} \|\mu\left( \xi_0, \cdot \right)\|_{L^\infty_\theta}^3 \leq C_{\text{int}} \|\mu\left( \xi_0, \cdot \right)\|_{L^1_\theta} \|\mu\left( \xi_0, \cdot \right)\|_{H^{3/2}_\theta}^2 & \text{if}\quad \dfrac{\|\mu\left( \xi_0, \cdot \right)\|_{L^1_\theta}}{\|\mu\left( \xi_0, \cdot \right)\|_{H^{3/2}_\theta}} \leq \dfrac{1}{C_{\text{int}}} ,\\ \|\mu\left( \xi_0, \cdot \right)\|_{L^\infty_\theta} \leq C_{\text{int}} \|\mu\left( \xi_0, \cdot \right)\|_{L^1_\theta} & \text{otherwise,} \end{cases} \] (we refer to the Appendix for a proof of this inequality). Recall that $\nu(\xi_0) = \Vert \mu(\xi_0,\cdot) \Vert_{L^1_\theta} \leq 1$. It yields, combining with estimates \eqref{nH1} and \eqref{nthetaH1} of {\bf \# Step 1}: \[ \begin{cases} K_0(a)^3 \leq C_{\text{int}} C_{tr} \overline{C} \nu(\xi_0) \left(1 + K_0(a)^2 \right)& \text{if}\quad \dfrac{\nu(\xi_0)}{\| \mu(\xi_0,\cdot)\|_{H^{3/2}_\theta}} \leq \dfrac{1}{C_{\text{int}}}, \\ K_0(a) \leq C_{\text{int}} \nu(\xi_0) & \text{otherwise}. \end{cases} \] In both cases, this bounds $K_0(a)$ uniformly with respect to $a >0$. This concludes the proof of Lemma \ref{lem:nc}. \end{proof} \subsection{Resolution of the problem in the slab.} We now finish the proof of the existence of solutions of \eqref{eq:slab}. As previously explained, it consists in a Leray-Schauder topological degree argument. All uniform estimates derived in the previous Sections are key points to obtain \textit{a priori} estimates on steady states of suitable operators. We then simplify the problem with homotopy invariances.
{We begin with a very classical problem: the construction of KPP travelling waves for the Fisher-KPP equation in a slab.} \begin{lemma}\label{propKPP} Let us consider the following Fisher-KPP problem in the slab $(-a,a)$: \begin{equation*} \left\{\begin{array}{l} -c \nu_{\xi} - \theta_{min} \nu_{\xi\xi} = r \nu (1 - \nu )\,, \qquad \xi \in \left( -a,a \right), \\ \nu(-a) = 1\, , \quad \nu(a) = 0\, . \end{array} \right. \end{equation*} One has the following properties: \begin{enumerate} \item For a given $c$, there exists a unique decreasing solution {$\nu^c \in [0,1]$. Moreover, the function $c \mapsto \nu^c$ is continuous and decreasing.} \item\label{2} There exists $\varepsilon^* > 0$ (independent of $a$) such that any solution with $c = 0$ satisfies $\nu_{c=0}(0) > \varepsilon^*$. \item\label{3} For all $\varepsilon > 0$, there exists $a(\varepsilon)$ such that for all {$c > 2 \sqrt{r \theta_{min}}$, $\nu (0) < \varepsilon$} for $a \geq a(\varepsilon)$. \item {As a corollary of \ref{2} and \ref{3}, for all $\varepsilon < \varepsilon^*$, there exists a unique $c_0 \in \, [0, 2 \sqrt{r \theta_{min}}]$} such that $\nu_{c_0}(0) = \varepsilon$ for $a \geq a(\varepsilon)$. \end{enumerate} \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{propKPP}}] The existence and uniqueness of solutions follows from \cite{Aronson}. {Again by maximum principle arguments, $\nu \in [0,1]$}. The solution is necessarily decreasing since \begin{equation*} \forall \xi \in \, (-a,a), \qquad \left( \nu_\xi e^{\frac{c}{\theta_{min}} \xi} \right)_\xi \leq 0, \end{equation*} and $\nu_\xi(-a) \leq 0$. By classical arguments, the application {$c \mapsto \nu^c$} is continuous.
For the decreasing character, we write, for $c_1 < c_2$ and $v:= \nu_2 - \nu_1$: \begin{equation*} - c_2 v_\xi - \theta_{min} v_{\xi\xi} = {r \left( 1 - \left( \nu_1 + \nu_2 \right) \right) v }+ \left( c_2 - c_1 \right) \left( \nu_1 \right)_\xi , \end{equation*} {so that $v$ satisfies \begin{equation*} \left\{\begin{array}{l} - c_2 v_\xi - \theta_{min} v_{\xi\xi} \leq r \left( 1 - \left( \nu_1 + \nu_2 \right) \right) v , \qquad \xi \in \left( -a,a \right), \\ v(-a) = 0\, , \quad v(a) = 0\, . \end{array} \right. \end{equation*} The maximum principle then yields that $v \leq 0$, that is $\nu_2 \leq \nu_1$}. The proofs of Lemmas \ref{upboundc} and \ref{bottom} can be adapted to prove the remainder of the Lemma. \end{proof} With this $\varepsilon^*$ in hand, we can state the main Proposition: \begin{proposition}{\bf (Solution in the slab).}\label{slabsol} Let $\varepsilon < \min\left( \varepsilon_0 , \varepsilon^*\right)$. There exist $C_0 > 0$ and $a_0(\varepsilon) > 0$ such that for all $a \geq a_0$, the slab problem \eqref{eq:slab} with the normalization condition $\nu(0) = \varepsilon$ has a solution $(c,\mu)$ such that \begin{equation*} \Vert \mu \Vert_{L^\infty \left( [-a,a] \times \Theta \right)} \leq C_0, \qquad c \in \left] \, 0 , c^{*} \, \right]. \end{equation*} \end{proposition} \begin{proof}[{\bf Proof of Proposition \ref{slabsol}}] Given a nonnegative function $\mu(\xi,\theta)$ satisfying the boundary conditions \begin{equation}\label{boundv} \forall (\xi,\theta) \in [-a,a] \times \Theta, \qquad \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0, \qquad \mu(-a,\theta) = \vert \Theta \vert^{-1}\, , \qquad \mu(a,\theta) = 0\, .
\end{equation} we consider the one-parameter family of problems on ${(-a,a)} \times \Theta$: \begin{equation}\label{eq:tauslab} \left\{\begin{array}{l} -c Z_{\xi}^\tau - g_{\tau}(\theta) Z_{\xi\xi}^\tau - {\alpha} Z_{\theta\theta}^\tau = {r \mu (1 - \nu)}\, , \qquad (\xi,\theta) \in (-a,a) \times \Theta, \\ Z_\theta^\tau(\xi,\theta_{\min}) = Z_\theta^\tau(\xi,\theta_{\max}) = 0\, ,\qquad \xi \in (-a,a) , \\ Z^\tau(-a,\theta) = \vert \Theta \vert^{-1}, Z^\tau(a,\theta) = 0, \qquad \theta \in \Theta . \end{array} \right. \end{equation} We introduce the map \begin{equation*} \mathcal{K}_{\tau}: (c,\mu) \mapsto \left( \varepsilon - \nu(0) + c , Z^{\tau} \right), \end{equation*} where $Z^{\tau}$ is the solution of the previous linear system \eqref{eq:tauslab}. The ellipticity of the system \eqref{eq:tauslab} gives that the map $\mathcal{K}_{\tau}$ is a compact map from $\left( X = \mathbb{R} \times \mathcal{C}^{1,{\beta}} \left( (-a,a) \times \Theta \right) , \Vert (c , \mu) \Vert = \max \left( \vert c \vert, \Vert \mu \Vert_{ \mathcal{C}^{1,\beta}} \right) \right)$ onto itself ${(\forall \beta \in (0,1))}$. Moreover, it depends continuously on the parameter $\tau \in \left[ 0 , 1 \right]$. Solving the problem $P_a$ \eqref{eq:slab} is equivalent to proving that the kernel of $\text{Id} - \mathcal{K}_1$ is non-trivial. We can now apply the Leray-Schauder theory. We define the open set for $\delta > 0$, \begin{equation*} \mathcal{B} = \left \lbrace \; (c,\mu) \; \vert \; 0 < c < c^* + \delta, \; \Vert \mu \Vert_{\mathcal{C}^{1,\beta}\left( (-a,a) \times \Theta \right)} < C_0 + \delta \right\rbrace. \end{equation*} The different \textit{a priori} estimates of Lemmas \ref{upboundc}, \ref{lem:nc=0}, \ref{bottom}, \ref{lem:nc} give that for all $\tau \in \left[ 0 , 1\right]$ and sufficiently large $a$, the operator $ \text{Id} - \mathcal{K}_{\tau}$ cannot vanish on the boundary of $\mathcal{B}$.
Indeed, if it vanishes on $\partial \mathcal{B}$, there exists a solution $(c,\mu)$ of \eqref{eq:slab} which also satisfies $c \in \left\lbrace 0, c^{*} + \delta \right\rbrace$ or $\Vert \mu \Vert_{\mathcal{C}^{1,\beta}\left( (-a,a) \times \Theta \right)} = C_0 + \delta$ and {$\nu(0) = \varepsilon$}. But this is ruled out by the condition $\varepsilon < \varepsilon_0$. It yields by the homotopy invariance that \begin{equation*} \forall \tau \in \left[ 0 ,1 \right], \quad \text{deg}\left( \text{Id} - \mathcal{K}_{1} , \mathcal{B} , 0 \right) = \text{deg}\left( \text{Id} - \mathcal{K}_{\tau} , \mathcal{B} , 0 \right) = \text{deg}\left( \text{Id} - \mathcal{K}_{0} , \mathcal{B} , 0 \right). \end{equation*} We now need to compute $\text{deg} \left( \text{Id} - \mathcal{K}_{0} , \mathcal{B} , 0 \right)$. This will be done with two supplementary homotopies. We need these two homotopies to write $\text{Id} - \mathcal{K}_0$ as a product of two maps whose degrees with respect to $\mathcal{B}$ and $0$ are computable. We first define, {with $\nu_{Z^0}(\cdot) = \int_\Theta Z^0(\cdot,\theta) d\theta$}: \begin{equation*} \mathcal{M}_{\tau}: (c,v) \mapsto \left( c - (1-\tau)\nu_v(0) - \tau \nu_{ Z^0}(0) + \varepsilon , Z^0\right). \end{equation*} If there exists $(c,\mu) \in \partial \mathcal{B}$ such that $\mathcal{M}_{\tau}(c,\mu) = (c,\mu)$, then $(c,\mu)$ is such that $Z^0 = \mu$ and $\nu_{Z^0}(0) = \varepsilon$. However, such a fixed point $(c,\mu)$ then satisfies \begin{equation}\label{eq:vKPP} \left\{\begin{array}{l} -c \mu_{\xi} - \theta_{min} \mu_{\xi\xi} - \mu_{\theta\theta} = r \mu (1 - \nu )\, , \qquad (\xi,\theta) \in (-a,a) \times \Theta, \\ \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0\,, \qquad \xi \in (-a,a), \\ \mu(-a,\theta) = \vert \Theta \vert^{-1}, \quad \mu(a,\theta) = 0, \qquad \theta \in \Theta, \end{array} \right.
\end{equation} which is now {closely linked to} the standard Fisher-KPP equation. Indeed, {after integration w.r.t.\ $\theta$}, $\nu$ satisfies \begin{equation}\label{eq:tauslab2} \left\{\begin{array}{l} -c \nu_{\xi} - \theta_{min} \nu_{\xi\xi} = r \nu (1 - \nu )\,, \qquad \xi \in (-a,a), \\ \nu(-a) = 1\, , \quad \nu(a) = 0\,, \end{array} \right. \end{equation} {and $\nu(0)=\varepsilon$}. Given a {(unique)} solution $\nu$ of \eqref{eq:tauslab2} after Lemma \ref{propKPP}, we can solve the equation for $\mu$. {The solution of \eqref{eq:vKPP} is then unique thanks to the maximum principle, and reads $\mu(\xi,\theta) = \frac{\nu(\xi)}{\vert \Theta \vert}$}. As a consequence, such a fixed point cannot belong to $\partial \mathcal{B}$ after all \textit{a priori} estimates of Lemma \ref{propKPP}. Thus, by the homotopy invariance and $ \mathcal{K}_{0} = \mathcal{M}_{0}$, we have \begin{equation*} \text{deg} \left( \text{Id} - \mathcal{K}_{0} , \mathcal{B} , 0 \right) = \text{deg} \left( \text{Id} - \mathcal{M}_{1} , \mathcal{B} , 0 \right). \end{equation*} {The concluding arguments are now the same as in \cite{Berestycki-Nadin}.} Up to the end of the proof, we shall exhibit the dependence of $Z^0$ on $c$: $Z^0 = Z_c$. We now define our last homotopy by the formula \begin{equation*} \mathcal{N}_{\tau}: (c,\mu) \mapsto \left( c + \varepsilon - \nu_{Z_c}(0), \tau Z_c + (1-\tau) Z_{c_0} \right), \end{equation*} where $c_0$ is the unique $c \in \,[0, 2\sqrt{r\theta_{\text{min}}}]$ such that $\nu_{Z_c}(0) = \varepsilon$, for $\varepsilon < \varepsilon^* $ and $a(\varepsilon)$ sufficiently large (see again Lemma \ref{propKPP}). If $\mathcal{N}_{\tau}$ has a fixed point, then necessarily $\varepsilon = \nu_{Z_c}(0)$ and $\mu = \tau Z_c + (1-\tau) Z_{c_0}$. This gives $\mu = Z_{c_0}$ by uniqueness of the speed $c_0$.
Again, such a $\mu$ cannot belong to $\partial \mathcal{B}$ (we recall that {$c_0 < 2 \sqrt{r \theta_{min} } < c^* $ after \eqref{rel6}}). By homotopy invariance and $\mathcal{M}_1 = \mathcal{N}_1$: \begin{equation*} \text{deg} \left( \text{Id} - \mathcal{K}_{1} , \mathcal{B} , 0 \right) = \text{deg} \left( \text{Id} - \mathcal{K}_{0} , \mathcal{B} , 0 \right) = \text{deg} \left( \text{Id} - \mathcal{M}_{1} , \mathcal{B} , 0 \right) = \text{deg} \left( \text{Id} - \mathcal{N}_{0} , \mathcal{B} , 0 \right). \end{equation*} Finally, the operator $\left( \text{Id} - \mathcal{N}_{0} \right) (c,\mu) = \left( \nu_{Z_c}(0) - \varepsilon , \mu - Z_{c_0} \right)$ is such that $\text{deg} \left( \text{Id} - \mathcal{N}_{0} , \mathcal{B} , 0 \right) = - 1$. Indeed, the degree of the first component is $-1$ as it is a decreasing function of $c$, and the degree of the second one is $1$. We conclude that $\text{deg}\left( \text{Id} - \mathcal{K}_{1} , \mathcal{B} , 0 \right) = -1$. Therefore $\text{Id} - \mathcal{K}_{1}$ has a non-trivial kernel whose elements are solutions of the slab problem. This proves the Proposition. \end{proof} \section{Construction of spatial travelling waves with minimal speed $c^*$.}\label{profileminspeed} In this Section, we now use the solution of the slab problem \eqref{eq:slab} given by Proposition \ref{slabsol} to construct a wave solution with minimal speed $c^*$. For this purpose, we first pass to the limit in the slab to obtain a profile in the whole space $\mathbb{R} \times \Theta$. Then we prove that this profile necessarily travels with speed $c^*$. \subsection{Construction of a spatial travelling wave in the full space.} \begin{lemma}\label{convslab} Let {$\varepsilon < \min\left( \varepsilon_0 , \varepsilon^* \right)$}.
There exists $c_0 \in \left[ 0 , c^* \right]$ such that the system \begin{equation}\label{convslab2} \left\{\begin{array}{l} - c_0 \mu_\xi - \theta \mu_{\xi\xi} - \alpha \mu_{\theta\theta} = r \mu (1 - \nu), \qquad (\xi , \theta ) \in \mathbb{R} \times \Theta, \\ \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0, \qquad \xi \in \mathbb{R}, \\ \end{array} \right. \end{equation} has a solution $\mu \in \mathcal{C}_b^2\left( \mathbb{R} \times \Theta \right)$ satisfying $\nu(0) = \varepsilon$. \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{convslab}}] {For sufficiently large $a > a_0(\varepsilon)$, Proposition \ref{slabsol} gives a solution $(c^a,\mu^a)$ of \eqref{eq:slab} which satisfies $c^a \in \left[ 0 , c^* \right]$, $\Vert \mu^a \Vert_{L^\infty((-a,a)\times \Theta)} \leq K_0$ and $\nu^a(0) = \varepsilon$. As a consequence, \begin{equation*} \Vert \nu^a \Vert_{L^\infty((-a,a))} \leq \vert \Theta \vert K_0. \end{equation*} The elliptic regularity \cite{Gilbarg} implies that for all $\beta > 0$, $\Vert \mu^a \Vert_{\mathcal{C}^{1,\beta}((-a,a)\times \Theta)} \leq C$ for some $C>0$ uniform in $a$. Then, the Ascoli theorem gives that possibly after passing to a subsequence $a_n \to + \infty$, $(c^a,\mu^a)$ converges towards $(c_0,\mu) \in \left[ 0 , c^* \right] \times \mathcal{C}^{1,\beta}(\mathbb{R} \times \Theta)$ which satisfies \eqref{convslab2} and $\nu(0) = \varepsilon$.} \end{proof} \begin{remark} We do not obtain from the proof that $\sup \nu \leq 1$, and nothing is known about the behaviors at infinity at this stage. Nevertheless, we have a uniform bound $\Vert \nu \Vert_{L^\infty(\mathbb{R})} \leq \vert \Theta \vert K_0$.
\end{remark} \subsection{The profile is travelling with the minimal speed $c^*$.} \begin{lemma}{\bf (Lower bound on the infimum).}\label{inf} There exists $\delta > 0$ such that any solution $(c,\mu)$ of \begin{equation*} \left\{\begin{array}{l} - \theta \mu_{\xi\xi} - \alpha \mu_{\theta\theta} - c \mu_\xi = r(1 - \nu)\mu, \qquad \left(\xi , \theta \right) \in \mathbb{R} \times \Theta, \qquad \\ \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0, \qquad \xi \in \mathbb{R}, \qquad \\ \end{array} \right. \end{equation*} with $c \in \left[ 0 , c^*\right]$, $\nu$ bounded and $\inf_{\xi \in \mathbb{R}} \nu(\xi) > 0$ satisfies $\inf_{\xi \in \mathbb{R}} \nu(\xi) > \delta$. \end{lemma} \begin{proof}[{\bf Proof of Lemma \ref{inf}}] We again adapt an argument from \cite{Alfaro} to our context. By the Harnack inequality of Proposition \ref{Harnack}, one has \begin{equation}\label{Hn} \forall \left( \xi , \theta , \theta' \right) \in \mathbb{R} \times \Theta^2, \qquad \mu(\xi,\theta) \leq C(\xi) \mu(\xi,\theta'). \end{equation} Since \eqref{eqkinwave} is invariant by translation in space, and the renormalization $\nu(0) = \varepsilon$ is not used in the proof of the Harnack inequality, we can take the constant $C(\xi)$ to be independent of $\xi$ \cite{Gilbarg}. This yields \begin{equation*} \forall \left( \xi , \theta \right) \in \mathbb{R} \times \Theta, \qquad - \theta \mu_{\xi\xi}(\xi,\theta) - \alpha \mu_{\theta\theta}(\xi,\theta) - c \mu_\xi (\xi,\theta) \geq r(1 - C \vert \Theta \vert \mu(\xi,\theta))\mu(\xi,\theta). \end{equation*} {Hence, $\mu$ is a supersolution of some elliptic equation with local terms only. For $\eta > 0$ arbitrarily given, we define the family of functions} \begin{equation*} \psi_m(\xi,\theta) = m \left( 1 - \eta \xi^2 \right) Q^*(\theta).
\end{equation*} From the uniform $L^\infty$ estimate on $\mu$, there exists $M$ large enough such that $\psi_M(0,\theta) > \mu(0,\theta)$. Moreover, by assumption we have $\psi_m \leq \mu$ for $m= \frac{ \inf_\mathbb{R} \nu}{ C \vert \Theta \vert \Vert Q^* \Vert_\infty} > 0$. As a consequence, we can define {\begin{equation*} m_0:= \sup \lbrace m > 0 , \quad \forall (\xi, \theta) \in \mathbb{R} \times \Theta, \quad \psi_m (\xi,\theta) \leq \mu(\xi,\theta) \rbrace. \end{equation*}} Following the same ideas as in the proofs of Lemmas \ref{upboundc} and \ref{bottom}, there exists $(\xi_0 , \theta_0)$ such that $\mu - \psi_{m_0}$ has a zero minimum at this point. We have clearly that $\xi_0 \in \left[ - \frac{1}{\sqrt{\eta}} ; \frac{1}{\sqrt{\eta}} \right]$ since $\psi_m$ is negative elsewhere. We have, at $(\xi_0 , \theta_0)$: \begin{equation*} \begin{array}{lcl} 0 & \geq & - \theta_0 \left( \mu - \psi_{m_0} \right)_{\xi\xi} - \alpha \left( \mu - \psi_{m_0} \right)_{\theta \theta} - c \left( \mu - \psi_{m_0} \right)_\xi, \\ & \geq & r \left(1 - C \vert \Theta \vert \mu \right) \mu + \theta_0 \left( \psi_{m_0} \right)_{\xi\xi} + \alpha \left( \psi_{m_0} \right)_{\theta \theta} + c \left(\psi_{m_0}\right)_\xi, \\ & \geq & r \left(1 - C \vert \Theta \vert \mu \right) \mu - 2 \eta m_0 \theta_0 Q^*(\theta_0) - \left( -\lambda c( \lambda ) + \theta_0 \lambda^2 + r \right) \psi_{m_0}(\xi_0, \theta_0) - 2 c\eta \xi_0 m_0 Q^*(\theta_0), \\ & \geq & \mu(\xi_0,\theta_0) \left( \lambda^* c^* - \theta_0 (\lambda^*)^2 - r C \vert \Theta \vert \mu(\xi_0,\theta_0) \right) - 2 m_0 Q^*(\theta_0) \left( \eta \theta_0 + \eta \xi_0 c \right).
\end{array} \end{equation*} It follows from $\mu(\xi_0,\theta_0) \geq \frac{\nu(\xi_0)}{C \vert \Theta \vert}$ \eqref{Hn}, from the inequalities $\vert \xi_0 \vert \leq \frac{1}{\sqrt{\eta}}$, $c \leq c^*$, $m_0 \leq M$ and the fact that for all $\theta_0 \in \Theta$, the quantity $c^* - \theta_0 \lambda^* - \theta_{\text{min}} \lambda^*$ is positive (see \eqref{rel4}) that \begin{equation*} \begin{array}{lcl} \mu(\xi_0 , \theta_0) &\geq& \displaystyle \frac{\lambda^* \left( c^* - \theta_0 \lambda^* \right)}{r C \vert \Theta \vert } - \frac{2 C \vert \Theta \vert M \Vert Q^* \Vert_{\infty}\left(\eta \theta_{\text{max}} + \sqrt{\eta} c^* \right)}{r C \vert \Theta \vert \nu(\xi_0)}, \\ &\geq& \displaystyle \frac{\theta_{\text{min}} \left( \lambda^* \right)^2 }{r C \vert \Theta \vert} - \frac{2 C \vert \Theta \vert M \Vert Q^* \Vert_{\infty}\left( \sqrt{\eta} c^* + \eta \theta_{\text{max}} \right)}{r C \vert \Theta \vert \,{ (\inf_{\xi \in \mathbb{R}} \nu)} }.\\ \end{array} \end{equation*} {Recalling $\inf_{\xi \in \mathbb{R}} \nu > 0$ and taking arbitrarily small values} of $\eta > 0$, we have necessarily $\mu(\xi_0, \theta_0) \geq \frac{\theta_{min} \left( \lambda^* \right)^2 }{{2} C r \vert \Theta \vert}$. Since $\mu$ and $\psi_{m_0}$ coincide at $(\xi_0,\theta_0)$, we have $m_0 \geq \frac{\theta_{min} \left( \lambda^* \right)^2 }{{2} r C \vert \Theta \vert \Vert Q^* \Vert_{\infty} }$. The definition of $m_0$ now gives \begin{equation*} \forall (\xi,\theta) \in \mathbb{R} \times \Theta, \qquad \mu(\xi,\theta) \geq \frac{\theta_{min} \left( \lambda^* \right)^2 }{{2} C \vert \Theta \vert r \Vert Q^* \Vert_{\infty} } \left( 1 - \eta \xi^2 \right) Q^*(\theta).
\end{equation*} Since $\eta$ is arbitrarily small, we have necessarily $\nu(\xi) \geq \delta:= \frac{\theta_{min} \left( \lambda^* \right)^2 }{{2} C \vert \Theta \vert r \Vert Q^* \Vert_{\infty} }$ for all $\xi \in \mathbb{R}$. \end{proof} We deduce from this Lemma that up to choosing $\varepsilon < \delta$, the solution necessarily satisfies $\inf_{\mathbb{R}} \nu(\xi) = 0$. {Since this infimum cannot be attained, we have necessarily $\liminf_{\xi \to + \infty} \nu(\xi) = 0$ (up to $\xi \to - \xi$ and $c \to -c$)}. We now prove that this enforces $c = c^*$ for our wave. For this purpose, we show that a solution going slower than $c^*$ cannot satisfy the $\liminf$ condition by a sliding argument. \begin{proposition}\label{prop:minspeed} Any solution $(c,\mu)$ of the system \begin{equation}\label{eq:minspeed} \left\{\begin{array}{l} - \theta \mu_{\xi\xi} - \alpha \mu_{\theta\theta} - c \mu_\xi = r \mu (1 - \nu), \qquad (\xi , \theta ) \in \mathbb{R} \times \Theta, \qquad \\ \mu_\theta(\xi,\theta_{\min}) = \mu_\theta(\xi,\theta_{\max}) = 0, \qquad \xi \in \mathbb{R}, \qquad \\ \end{array} \right. \end{equation} with $c\geq0$ and $\liminf_{\xi \to +\infty} \nu(\xi) = 0$ satisfies necessarily $c \geq c^*$. \end{proposition} As a consequence, the solution given after Lemma \ref{convslab} goes with the speed $c^*$. This latter speed appears to be the minimal speed of existence of nonnegative travelling waves, as for the Fisher-KPP equation. \begin{proof}[{\bf Proof of Proposition \ref{prop:minspeed}}] We again play with subsolutions. {By analogy with the Fisher-KPP equation, we shall use oscillating fronts associated with speed $c < c^*$ to ``push'' solutions of \eqref{eq:minspeed} up to the speed $c^*$}. We now proceed as in \cite{Bouin-Calvez-Nadin}.
Let us now consider the spectral problem for complex values of $\lambda$: \begin{equation}\label{eq:eigenpbs} \begin{cases} \alpha \partial^2_{\theta\theta} Q_\lambda(\theta) + \left( - \lambda c + \theta \lambda^2 + r - s \right) Q_\lambda(\theta) = 0\, , \\ \partial_\theta Q_\lambda(\theta_{min}) = \partial_\theta Q_\lambda(\theta_{max}) =0\,. \end{cases} \end{equation} When $s = 0$ we know from Proposition \ref{propspec} that for $c = c^*$ there exists some \textit{real} $\lambda^* > 0$ such that the spectral problem is solvable with a positive eigenvector. Moreover, the minimal speed is increasing with respect to $r$. Indeed, for all $r < s$ and $\lambda > 0$, one has \begin{equation*} \lambda c_r(\lambda) = r + \lambda^2 \theta_{\text{max}} - \gamma( \lambda ) < s + \lambda^2 \theta_{\text{max}} - \gamma( \lambda ) = \lambda c_s(\lambda) \end{equation*} and thus $c_r^* < c_s^*$. Now suppose by contradiction that $c < c^*$. Take $c < \bar c < c^*$. One can choose $s( \bar c ) > 0 $ such that $\bar c$ is the minimal speed of the spectral problem \eqref{eq:eigenpbs}. There exists $\lambda_c:= \lambda_R + i \lambda_I \in \mathbb{C}$ with $\text{Re}(\lambda_c) > 0$ such that there exists $Q_{\lambda_c}: \Theta \to \mathbb{C}$ which solves the spectral problem. A continuity argument ensures that $\text{Re} \left(Q_{\lambda_c}\right) > 0$ since $\text{Re} \left(Q_{\lambda_{\bar c}}\right) > 0$ when $\bar c$ is sufficiently close to $c$. Let us now define the real function \begin{equation*} \psi(\xi,\theta):= \text{Re} \left( e^{- \lambda_c \xi} Q_{\lambda_c} \left( \theta \right)\right) = e^{-\lambda_{R} \xi}\left[ \text{Re} \left(Q_{\lambda_c}(\theta)\right) \cos (\lambda_{I} \xi) + \text{Im} \left(Q_{\lambda_c}(\theta)\right) \sin (\lambda_{I} \xi) \right]. \end{equation*} For all $\theta \in \Theta$, one has $\psi\left( 0 , \theta \right) > 0$ and $\psi\left( \pm \frac{\pi}{\lambda_I} , \theta \right) < 0$.
As a consequence, there exists an open subdomain $\mathcal{D} \subset \Omega:= \left[ - \frac{\pi}{\lambda_I} , \frac{\pi}{\lambda_I} \right] \times \Theta$ such that $\psi > 0$ on $\mathcal{D}$ and $\psi$ vanishes on the boundary $\partial \mathcal{D}$. From the Harnack estimate of Proposition \ref{Harnack}, there exists a constant $C$ which depends on $\vert \mathcal{D} \vert$ such that one has for all $\xi \in \mathbb{R}$, \begin{equation*} \forall (z,\theta,\theta') \in \mathcal{D} \times \Theta, \qquad \mu(z + \xi, \theta) \leq C \mu(\xi,\theta'). \end{equation*} By construction, one has \begin{equation*} - \theta \psi_{\xi\xi} - \alpha \psi_{\theta \theta} - c \psi_\xi - r \psi = - s(\bar c) \psi. \end{equation*} Thus, for all $m \geq 0$, the function $v:= \mu - m\psi$ satisfies \begin{equation*} - \theta v_{\xi\xi} - \alpha v_{\theta \theta} - c v_\xi - r v = m s( \bar c)\psi - r \nu(\xi) \mu. \end{equation*} There now exists $m_0$ such that $v := \mu - m_0 \psi$ attains a zero minimum at $(\xi_0,\theta_0) \in \mathcal{D}$. One deduces $\nu(\xi_0) \geq \frac{s(\bar c)}{r}$. We conclude by the Harnack estimate that $\nu(0) \geq \frac{s(\bar c)}{rC}$. {We now want to translate the argument in space. For this purpose, we define, for $\zeta \in \mathbb{R}$, the function $h(\xi,\theta):= \mu(\xi + \zeta,\theta)$. It also satisfies \eqref{eq:minspeed}. As a consequence, for all $\zeta \in \mathbb{R}$, $\nu\left( \zeta \right) = \int_{\Theta} h(0,\theta) d \theta \geq \frac{s(\bar c)}{rC}$. We emphasize that the renormalization $\nu(0) = \varepsilon$, which is the only reason for which \eqref{eq:slab} is not invariant by translation, is not used here. We then obtain $\inf_{\xi\in\mathbb{R}} \nu(\xi) \geq \frac{s(\bar c)}{rC}$.
This contradicts the property $\liminf_{\xi \to +\infty} \nu(\xi) = 0$.} \end{proof} \subsection{The profile has the required limits at infinity.} \begin{proposition}\label{limits} Any solution $(c,\mu)$ of the system \begin{equation*} \left\{\begin{array}{l} - \theta \mu_{\xi\xi} - \alpha \mu_{\theta\theta} - c \mu_\xi = r \mu (1 - \nu), \qquad (\xi , \theta ) \in \mathbb{R} \times \Theta, \qquad \\ \partial_\theta \mu(\xi,\theta_{\min}) = \partial_\theta \mu(\xi,\theta_{\max}) = 0, \qquad \xi \in \mathbb{R}, \qquad \\ \end{array} \right. \end{equation*} with $c\geq0$ and $\nu(0) = \varepsilon$ satisfies \begin{enumerate} \item There exists $m>0$ such that $\forall \xi \in ]-\infty , 0], \quad \mu\left( \xi ,\cdot \right) > m Q^*(\cdot)$, \item $\lim_{\xi \to +\infty} \mu(\xi,\cdot) =0.$ \end{enumerate} \end{proposition} \begin{proof}[{\bf Proof of Proposition \ref{limits}}] We again adapt to our case an argument from \cite{Alfaro}. By the Harnack inequality of Proposition \ref{Harnack}, there exists $\widetilde C$ such that one has {\begin{equation}\label{HHn} \inf_{(\xi,\theta) \in [-1,0] \times \Theta} \mu(\xi,\theta) \geq \frac{\varepsilon}{\widetilde{C} \vert \Theta \vert }, \end{equation}} recalling $\nu(0) = \varepsilon$. Also recalling \begin{equation*} \forall \left( \xi , \theta , \theta' \right) \in \mathbb{R} \times \Theta^2, \qquad \mu(\xi,\theta) \leq C \mu(\xi,\theta'), \end{equation*} we obtain \begin{equation*} \forall \left( \xi , \theta \right) \in \mathbb{R} \times \Theta, \qquad - \theta \mu_{\xi\xi}(\xi,\theta) - \alpha \mu_{\theta\theta}(\xi,\theta) - c \mu_\xi (\xi,\theta) \geq r(1 - C \vert \Theta \vert \mu(\xi,\theta))\mu(\xi,\theta).
\end{equation*} Let us define, for $m = \frac12 \min \left( \frac{\varepsilon}{ \vert \Theta \vert \widetilde{C} \Vert Q^* \Vert_{\infty} } , \frac{\theta_{min} (\lambda^*)^2}{rC \Vert Q^* \Vert_{\infty} \vert \Theta \vert} \right)$ and $\eta > 0$ arbitrarily given, the function \begin{equation*} \psi_\eta(\xi,\theta) = m \left( 1 + \eta \xi \right) Q^*(\theta) \end{equation*} on $\left] - \infty , 0\right] \times \Theta$. We have, \begin{equation*} \forall (\xi,\theta) \in \left]-\infty,-1\right] \times \Theta, \qquad \psi_1(\xi,\theta) = m \left( 1 + \xi \right) Q^*(\theta) \leq 0 \leq \mu(\xi,\theta). \end{equation*} Moreover, for $(\xi,\theta) \in ]-1 , 0] \times \Theta$, using \eqref{HHn}, we have \begin{equation*} \psi_1(\xi,\theta) = m \left( 1 + \xi \right) Q^*(\theta) \leq m \Vert Q^* \Vert_{\infty} \leq \frac12 \frac{\varepsilon \Vert Q^* \Vert_{\infty}}{ \vert \Theta \vert \widetilde{C} \Vert Q^* \Vert_{\infty} } \leq \inf_{(\xi,\theta) \in [-1,0] \times \Theta} \mu(\xi,\theta) \leq \mu(\xi,\theta). \end{equation*} As a consequence we can define \begin{equation*} \eta_0:= \min \lbrace \eta > 0 , \forall (\xi,\theta) \in \left]- \infty , 0\right] \times \Theta, \psi_\eta (\xi,\theta) \leq \mu(\xi,\theta) \rbrace \in [0,1]. \end{equation*} We will now prove that $\eta_0 = 0$ by contradiction. Suppose that $\eta_0 > 0$. {We apply the same technique as in the proofs of Lemmas \ref{upboundc} and \ref{bottom}: there exists $(\xi_0 , \theta_0)$ such that $\mu - \psi_{\eta_0}$ has a zero minimum at this point. Moreover, we have }$\xi_0 \in \left[ - \frac{1}{\eta_0} ; 0 \right]$ since $\psi_\eta$ is negative elsewhere.
Moreover, $\xi_0$ cannot be $0$ since this would give $\mu(0,\theta_0) = m Q^*(\theta_0) \leq \frac12 \frac{\varepsilon}{ \vert \Theta \vert \widetilde{C} } $ and this would contradict \eqref{HHn}. We have, at $(\xi_0 , \theta_0)$: \begin{align*} 0 & \geq - \theta \left( \mu - \psi_{\eta_0} \right)_{\xi\xi} - \alpha \left( \mu - \psi_{\eta_0} \right)_{\theta \theta} - c \left( \mu - \psi_{\eta_0} \right)_\xi \\ & \geq r \left(1 - C \vert \Theta \vert \mu \right) \mu + \theta \left( \psi_{\eta_0} \right)_{\xi\xi} + \alpha \left( \psi_{\eta_0} \right)_{\theta \theta} + c \left(\psi_{\eta_0}\right)_\xi \\ & \geq r \left(1 - C \vert \Theta \vert \mu \right) \mu - \psi_{\eta_0}(\xi_0, \theta_0) \left( -\lambda^* c^* + \theta_0 \left(\lambda^*\right)^2 + r \right) + c m \eta_0 Q^*(\theta_0) \\ & \geq \mu(\xi_0,\theta_0) \left( \lambda^* c^* - \theta_0 (\lambda^*)^2 - r C \vert \Theta \vert \mu(\xi_0,\theta_0) \right) + c m \eta_0 Q^*(\theta_0) \\ & \geq \mu(\xi_0,\theta_0) \left( \lambda^* c^* - \theta_0 (\lambda^*)^2 - r C \vert \Theta \vert \mu(\xi_0,\theta_0) \right) \end{align*} It yields \begin{equation*} \frac{\theta_{min} (\lambda^*)^2}{rC \vert \Theta \vert} \leq \mu(\xi_0,\theta_0) = \psi_{\eta_0}(\xi_0,\theta_0) \leq m \Vert Q^* \Vert_{\infty}, \end{equation*} and this contradicts the very definition of $m$. As a consequence, $\eta_0 = 0$ and \begin{equation*} \forall (\xi,\theta) \in \mathbb{R}^- \times \Theta, \qquad \mu(\xi,\theta) \geq m Q^*(\theta). \end{equation*} In particular, $\inf_{\mathbb{R}^-} \nu \geq m$ holds. We now prove that $\lim_{\xi \to +\infty} \mu(\xi,\cdot) =0$. It is sufficient to prove that $\lim_{\xi \to \infty}\nu(\xi) = 0$. Suppose that there exist $\delta > 0$ and a subsequence $\xi_n \to + \infty$ such that $\forall n \in \mathbb{N}, \;\nu(\xi_n)\geq \delta$.
Adapting the preceding proof we obtain that for all $n \in \mathbb{N}$, \begin{equation}\label{last} \forall (\xi,\theta) \in \left] -\infty , \xi_n \right] \times \Theta, \qquad \nu(\xi) \geq \frac12 \min \left( \frac{\delta}{ \vert \Theta \vert \widetilde{C} \Vert Q^* \Vert_{\infty} } , \frac{\theta_{min} (\lambda^*)^2}{rC \Vert Q^* \Vert_{\infty} \vert \Theta \vert} \right). \end{equation} Hence \eqref{last} is true for all $\xi \in \mathbb{R}$ and Lemma \ref{inf} gives the contradiction since the normalization $\varepsilon$ is well chosen. \end{proof} \section*{Acknowledgments} The authors are extremely grateful to Sepideh Mirrahimi for very fruitful comments and earlier computations on this problem. The authors also thank Olivier Druet for the proof of Proposition \ref{Harnack} and Léo Girardin for valuable suggestions. \begin{thebibliography}{100} \bibitem{Alfaro} M. Alfaro, J. Coville, and G. Raoul. {\em Travelling waves in a nonlocal equation as a model for a population structured by a space variable and a phenotypical trait}. To appear in Comm. Partial Differential Equations. \bibitem{Arnold} A. Arnold, L. Desvillettes and C. Prevost, {\em Existence of nontrivial steady states for populations structured with respect to space and a continuous trait}, Comm. Pure Appl. Anal. {\bf11} (2012), no. 1, 83–96. \bibitem{Aronson} D. G. Aronson and H. F. Weinberger, {\em Multidimensional nonlinear diffusion arising in population genetics}, Adv. in Math. 30 (1978), no. 1, 33–76. \bibitem{Benichou} O. Benichou, V. Calvez, N. Meunier and R. Voituriez, {\em Front acceleration by dynamic selection in Fisher population waves}, Phys. Rev. E 86, 041908 (2012). \bibitem{Berestycki-Chapuisat} H. Berestycki and G. Chapuisat, {\em Travelling fronts guided by the environment for reaction-diffusion equations}, preprint arXiv:1206.6575. \bibitem{Berestycki-Hamel} H. Berestycki and F.
Hamel, {\em Generalized transition waves and their properties}, Comm. Pure Appl. Math. 65, (2012), no. 5, 592–648. \bibitem{Berestycki-Nadin} H. Berestycki, G. Nadin, B. Perthame and L. Ryzhik, {\em The non-local Fisher-KPP equation: travelling waves and steady states}, Nonlinearity {\bf 22} (2009), no. 12, 2813–2844. \bibitem{Bouin} E. Bouin, V. Calvez, N. Meunier, S. Mirrahimi, B. Perthame, G. Raoul, and R. Voituriez. {\em Invasion fronts with variable motility: phenotype selection, spatial sorting and wave acceleration}. C. R. Math. Acad. Sci. Paris, 350(15-16):761–766, 2012. \bibitem{Bouin-2} E. Bouin, {\em Revisiting the WKB approach for front propagation in kinetic equations}. In preparation. \bibitem{Bouin-Calvez} E. Bouin, V. Calvez, {\em A kinetic eikonal equation}, C. R. Math. Acad. Sci. Paris {\bf 350} (2012), 243--248. \bibitem{Bouin-Calvez-Nadin} E. Bouin, V. Calvez, G. Nadin, {\em Front propagation in a kinetic reaction-transport equation}, preprint arXiv:1307.8325, 2013. \bibitem{Bouin-Calvez-Nadin-2} E. Bouin, V. Calvez, G. Nadin, {\em Hyperbolic travelling waves driven by growth}, To appear in M3AS, 2013. \bibitem{Bouin-Mirrahimi} E. Bouin, S. Mirrahimi, {\em A Hamilton-Jacobi approach for a model of population structured by space and trait}, preprint arXiv:1307.8332, 2013. \bibitem{Champagnat} N. Champagnat and S. Méléard, {\em Invasion and adaptive evolution for individual-based spatially structured populations}, J. Math. Biol. 55 (2007), no. 2, 147–188. \bibitem{Coville-Davila} J. Coville, J. Dávila and S. Martínez, {\em Pulsating fronts for nonlocal dispersion and KPP nonlinearity}, Ann. Inst. H. Poincaré Anal. Non Linéaire 30 (2013), no. 2, 179–223. \bibitem{Coville-Dupaigne} J. Coville and L. Dupaigne, {\em On a non-local reaction diffusion equation arising in population dynamics}, Proc. Roy. Soc. Edinburgh Sect. A 137 (2007), no. 4, 727–755.
\bibitem{Desvillettes} L. Desvillettes, R. Ferrière and C. Prévost, {\em Infinite dimensional reaction-diffusion for population dynamics}, preprint CMLA (2004). \bibitem{Dockery} J. Dockery, V. Hutson, K. Mischaikow, M. Pernarowski, {\em The evolution of slow dispersal rates: a reaction diffusion model}, J. Math. Biol. 37 (1) (1998) 61–83. \bibitem{Ferriere} N. Champagnat, R. Ferrière, S. Méléard, {\em From individual stochastic processes to macroscopic models in adaptive evolution}, Stoch. Models 24 (Suppl. 1) (2008) 2–44. \bibitem{Fisher} R.A. Fisher, {\em The advance of advantageous genes}, Ann. Eugenics {\bf 65} (1937), 335--369. \bibitem{Gilbarg} Gilbarg, David, and Trudinger, Neil S., {\em Elliptic partial differential equations of second order}, Springer Berlin ; New York, 1998. \bibitem{Hamel} F. Hamel, L. Ryzhik, {\em On the nonlocal Fisher-KPP equation: steady states, spreading speed and global bounds}, preprint http://arxiv.org/abs/1307.3001. \bibitem{Kokko} H. Kokko, A. López-Sepulcre, {\em From individual dispersal to species ranges: perspectives for a changing world}, Science 313 (5788) (2006) 789–791. \bibitem{Kolmogorov} A.N. Kolmogorov, I.G. Petrovsky, N.S. Piskunov, {\em Etude de l'\'equation de la diffusion avec croissance de la quantit\'e de mati\`ere et son application \`a un probl\`eme biologique}, Moskow Univ. Math. Bull. {\bf 1} (1937), 1--25. \bibitem{Nadin} G. Nadin, {\em Travelling fronts in space-time periodic media}, J. Math. Pures Appl. (9) 92 (2009), no. 3, 232–262. \bibitem{Nolen} J. Nolen and L. Ryzhik, {\em Travelling waves in a one-dimensional heterogeneous medium}, Ann. Inst. H. Poincaré Anal. Non Linéaire 26 (2009), no. 3, 1021–1047. \bibitem{Perthame} B. Perthame, {\em Transport Equations in Biology}, Frontiers in Mathematics, Birkh{\"a}user Basel, 2007. \bibitem{Phillips} B.L. Phillips, G.P. Brown, J.K. Webb, R.
Shine, {\em Invasion and the evolution of speed in toads}, Nature 439 (7078) (2006) 803. \bibitem{Ronce} O. Ronce, {\em How does it feel to be like a rolling stone? Ten questions about dispersal evolution}, Annu. Rev. Ecol. Syst. 38 (2007) 231–253. \bibitem{Shen} W. Shen and A. Zhang, {\em Travelling wave solutions of spatially periodic nonlocal monostable equations}, ArXiv e-prints, (2012). http://arxiv.org/abs/1202.2452 \bibitem{Shine} R. Shine, G.P. Brown, B.L. Phillips, {\em An evolutionary process that assembles phenotypes through space rather than through time}, Proc. Natl. Acad. Sci. USA 108 (14) (2011) 5708–5711. \bibitem{Simmons} A.D. Simmons, C.D. Thomas, {\em Changes in dispersal during species’ range expansions}, Amer. Nat. 164 (2004) 378–395. \bibitem{Thomas} Thomas, C. D. and Bodsworth, E. J. and Wilson, R. J. and Simmons, A. D. and Davies, Z. G. and Musche, M. and Conradt, L., {\em Ecological and evolutionary processes at expanding range margins}, Nature {\bf 411}, 577-581 (2001). \bibitem{Xin} J. Xin, {\em Front propagation in heterogeneous media}, SIAM Rev. 42, (2000), no. 2, 161–230. \end{thebibliography} \section*{Appendix A: A Harnack inequality up to the boundary.} We emphasize here a useful Harnack inequality for \eqref{eqkinwave} which is true up to the boundary in the direction $\theta$. This is possible thanks to the Neumann boundary conditions in this direction. \begin{proposition}\label{Harnack} Suppose that $\mu$ is a solution of \eqref{eqkinwave} such that the total density $\nu$ is locally bounded. Then for all $0 < b < + \infty$, there exists a constant $C(b) < + \infty$ such that the following Harnack inequality holds: \begin{equation*} \forall (\xi,\theta,\theta') \in (-b,b) \times \Theta \times \Theta, \qquad \mu(\xi,\theta) \leq C(b) \mu(\xi,\theta').
\end{equation*} \end{proposition} \begin{proof}[{\bf Proof of Proposition \ref{Harnack}}] {One has to figure out how to obtain the validity of the Harnack inequality up to the boundary in $\Theta$. Indeed, it holds on compact subsets thanks to the standard elliptic regularity, given that the density $\nu$ is bounded. To obtain the full Harnack estimate, we consider the equation \eqref{eqkinwave} after a reflection with respect to $\theta = \theta_{min}$ and $\theta = \theta_{max}$ and for positive values of $\theta$. One obtains the following equation in the weak sense \begin{equation*} \forall (\xi, \theta) \in \mathbb{R} \times \left( \mathbb{R}^{+*} \cap \left( \mathbb{R} \backslash \lbrace \theta_{min} + \Theta \mathbb{Z} \rbrace \right)\right) , \qquad - c \mu_\xi (\xi,\theta) - g(\theta) \mu_{\xi \xi}(\xi,\theta) - \alpha \mu_{\theta\theta}(\xi,\theta) = r \mu(\xi,\theta) (1 - \nu(\xi))\,. \end{equation*} The crucial point is that this equation is also satisfied on the boundaries $\theta = \mathbb{R}^+ \cap \lbrace \theta_{min} + \Theta \mathbb{Z} \rbrace $ thanks to the Neumann boundary conditions. Indeed, no Dirac mass in $\theta = \mathbb{R}^+ \cap \lbrace \theta_{min} + \Theta \mathbb{Z} \rbrace$ arises while computing the second derivative $\partial_{\theta\theta}$ in the symmetrized equation.} \end{proof} \section*{Appendix B: Proof of the interpolation estimate.} We prove here the interpolation estimate which is needed in {\bf \# Step 2} of the proof of Lemma \ref{lem:nc}. Since $\theta$ is the only variable playing a role here, we denote $g(\theta) = n(t,x,\theta)$. Let $(\theta,\theta') \in \Theta^2$. For technical reasons, we impose $|\theta-\theta'|^{-1}\geq e^4$. We set $K = |\theta-\theta'|^{-1}$. We first prove a H\"older-like estimate, \begin{equation*} \vert g(\theta) - g(\theta') \vert \leq \frac12 \|g\|_{H^{3/2}}|\theta - \theta'|\log\left(|\theta - \theta'|^{-1}\right)\, .
\end{equation*} For this purpose, we use Fourier expansions. We recall the definition of the fractional Sobolev norm \begin{equation*} \Vert f \Vert_{H^{\frac32}} = \left( \sum_{k\in\mathbb{Z}^*} |k|^3 |\hat f(k)|^2 \right)^{1/2}, \end{equation*} where $\hat f$ is the Fourier transform of $f$. We then have \begin{align*} |g(\theta) - g(\theta')| & \leq \sum_{|k|\leq K} |\hat g(k)|\left| e^{ik\theta} - e^{ik\theta'}\right| + \sum_{|k|> K} |\hat g(k)|\left| e^{ik\theta} - e^{ik\theta'}\right| \\ & \leq \sum_{|k|\leq K} |k| |\hat g(k)| |\theta - \theta'| + 2 \sum_{|k|> K}|\hat g(k)|\\ & \leq |\theta - \theta'| \sum_{|k|\leq K} |k|^{3/2} |\hat g(k)| |k|^{-1/2} + 2 \sum_{|k|> K} |k|^{3/2}|\hat g(k)||k|^{-3/2} \\ & \leq |\theta - \theta'| \left( \sum_{k\in\mathbb{Z}^*} |k|^3 |\hat g(k)|^2 \right)^{1/2} \left( \sum_{|k|\leq K} |k|^{-1} \right)^{1/2} + 2 \left( \sum_{k\in\mathbb{Z}^*} |k|^3 |\hat g(k)|^2 \right)^{1/2} \left( \sum_{|k|> K} |k|^{-3} \right)^{1/2}\\ & \leq \|g\|_{H^{3/2}} \left( |\theta - \theta'|\log\left(|\theta - \theta'|^{-1}\right) + 2 |\theta - \theta'| \right) \\ & \leq \|g\|_{H^{3/2}} |\theta - \theta'| \left( 2 + \log\left(|\theta - \theta'|^{-1}\right) \right) \\ & \leq \frac12 \|g\|_{H^{3/2}}|\theta - \theta'|\log\left(|\theta - \theta'|^{-1}\right)\, . \end{align*} Next we estimate \begin{equation*} g(\theta) = g(\theta) - g(\theta') + g(\theta') \leq \frac12 \|g\|_{H^{3/2}}|\theta - \theta'|\log\left(|\theta - \theta'|^{-1}\right) + g(\theta'). \end{equation*} We integrate for $|\theta - \theta'|\leq \delta/2$, and divide by $\delta$ where $\delta \leq e^{-4}$. \begin{equation*} g(\theta) \leq \frac12 \|g\|_{H^{3/2}} \delta \log \delta^{-1} + \dfrac{\|g\|_{L^1}}\delta\, .
\end{equation*} Choosing $\delta = \min\left( e^{-4}, \left( \|g\|_{L^1}/\|g\|_{H^{3/2}}\right)^{1/2}\right)$, we get eventually \[ \begin{cases} \|g\|_{L^\infty} \leq \dfrac1{2} \left(\|g\|_{L^1} \|g\|_{H^{3/2}}\right)^{1/2}\left( \dfrac12 \log\left(\dfrac{\|g\|_{H^{3/2}}}{\|g\|_{L^1}}\right) + 2\right) & \mathrm{if}\quad \dfrac{\|g\|_{L^1}}{\|g\|_{H^{3/2}}} \leq e^{-8} \\ \|g\|_{L^\infty} \leq 3 e^4 \|g\|_{L ^1} & \mathrm{otherwise} \end{cases} \] In order to simplify the forthcoming computations, we use the simple estimate $(\forall \delta<e^{-4})\; \log \delta^{-1} + 2 \leq C \delta^{-1/3}$ for some constant $C$. We obtain finally \[ \begin{cases} \|g\|_{L^\infty}^3 \leq C \|g\|_{L^1} \|g\|_{H^{3/2}}^2 & \mathrm{if}\quad \dfrac{\|g\|_{L^1}}{\|g\|_{H^{3/2}}} \leq \dfrac1C , \\ \|g\|_{L^\infty} \leq C \|g\|_{L ^1} & \mathrm{otherwise}. \end{cases} \] \end{document}
\begin{document} \sloppy \title{Robust optimal well control using an adaptive multi-grid reinforcement learning framework} \author[1]{Atish Dixit} \author[2]{Ahmed H. ElSheikh} \affil[1,2]{School of Energy, Geoscience, Infrastructure and Society \protect\\ Heriot-Watt University} \affil[1]{\textup{Email: [email protected]}} \affil[2]{\textup{Email: [email protected]}} \date{} \setcounter{Maxaffil}{0} \renewcommand\Affilfont{\itshape\small} \maketitle \begin{abstract} Reinforcement learning (RL) is a promising tool to solve robust optimal well control problems where the model parameters are highly uncertain, and the system is partially observable in practice. However, RL of robust control policies often relies on performing a large number of simulations. This could easily become computationally intractable for cases with computationally intensive simulations. To address this bottleneck, an adaptive multi-grid RL framework is introduced which is inspired by principles of geometric multi-grid methods used in iterative numerical algorithms. RL control policies are initially learned using computationally efficient low fidelity simulations using coarse grid discretization of the underlying partial differential equations (PDEs). Subsequently, the simulation fidelity is increased in an adaptive manner towards the highest fidelity simulation that correspond to finest discretization of the model domain. The proposed framework is demonstrated using a state-of-the-art, model-free policy-based RL algorithm, namely the Proximal Policy Optimisation (PPO) algorithm. Results are shown for two case studies of robust optimal well control problems which are inspired from SPE-10 model 2 benchmark case studies. Prominent gains in the computational efficiency is observed using the proposed framework saving around 60-70\% of computational cost of its single fine-grid counterpart. 
\end{abstract} \section{Introduction} An optimal control problem involves finding controls for a dynamical system such that a certain objective function is optimized over a pre-defined simulation time. Recently, reinforcement learning (RL) has been demonstrated as an effective method to solve stochastic optimal control problems in fields like manufacturing \citep{dornheim2020model}, energy \citep{anderlini2016control} and fluid dynamics \citep{rabault2019artificial}. RL, being virtually a stochastic optimisation method, involves a huge number of exploration and exploitation attempts in order to learn the optimal control policy. As a result, learning the optimal policy requires a large number of simulations of the controlled dynamical system, which is often computationally expensive. In this paper, an adaptive multi-grid RL framework is introduced to reduce the overall computational cost of the simulations required to learn the optimal control policy. Various research studies have shown the effectiveness of using multi-grid methods to improve the convergence rate of reinforcement learning. \cite{anderson1994multigrid} extends Q-Learning by casting it as a multi-grid method and shows a reduction in the number of updates required to reach a given error level in the Q-function. \cite{ziv2005multigrid} and \cite{pareigis1996multi} formulated the value function learning process with a Hamilton-Jacobi-Bellman (HJB) equation which is solved using algebraic multi-grid methods. Despite the effectiveness of this strategy, the HJB formulation is only feasible when the model dynamics are well defined. As a result, these methods cannot be applied to problems where the model dynamics are an approximate representation of reality. \cite{li2015multi} used a multi-grid approach to compute tabular Q values for energy conservation and comfort of HVAC in buildings, which is applicable to certain simple RL problems with finite and discrete state-action spaces.
In this paper, the aim is to present a generalized multi-grid RL approach which can be applied to both discrete and continuous state and action spaces where an HJB formulation may not be possible, for instance when the transition in model dynamics is not necessarily differentiable and/or when the model is stochastic. This framework is essentially inspired by the principles of geometric multi-grid methods used in iterative numerical algorithms. The optimal policy learning process is initiated using a low fidelity simulation that corresponds to a coarse grid discretization of the underlying partial differential equations (PDEs). This learned policy is then reused to further train it using high fidelity simulations in an adaptive and incremental manner. Robustness of the policy learned using this framework is finally evaluated against uncertainties in the model dynamics. In the reinforcement learning literature, such a learning process is categorized as transfer learning. The idea behind transfer learning is that instead of learning directly on the target task, the agent can first train on one or more source task(s), and transfer the knowledge acquired to aid in solving the target task \citep{taylor2009transfer}. In the context of the current study, the highest fidelity simulation corresponds to the target task, which is assumed to have the fine-grid discretization that guarantees good approximation of the output quantities of interest with the accuracy required by the problem at hand. Low grid fidelity simulations that compromise on the accuracy of these quantities, on the other hand, correspond to source tasks. These low grid fidelity simulations are generated using a degree of freedom parameter called the grid fidelity factor (much like in the study done by \cite{narvekar2016source}).
Transfer learning is a much broader sub-domain of RL that covers knowledge transfer in the form of data samples \citep{lazaric2008transfer}, policies \citep{fernandez2010probabilistic}, models \citep{fachantidis2013transferring} or value functions \citep{taylor2005behavior}. In this study, the knowledge transfer is done in the form of the policy for a model-free, on-policy algorithm called proximal policy optimisation (PPO). Since the policy is designed for the state and actions corresponding to the highest fidelity simulation we employ a mapping function that maps states and actions from low fidelity simulations to high fidelity simulations and vice versa. This is done by defining restriction (mapping from high to low fidelity simulation) and prolongation (mapping from low to high fidelity simulation) operators which are normally found in classical geometric multi-grid methods. Effectiveness of this multi-grid RL framework is demonstrated for robust optimal well control problem which is a subject of intensive research activities in subsurface reservoir management \citep{van2009robust, roseta2004robust, brouwer2001recovery}. For this problem, the dynamical system under consideration is non-linear and, in practice, is partially observable since the data is only available at a sparse set of points (i.e. well locations). Furthermore, the subsurface model parameters are highly uncertain due to sparsity of the available field data. Optimal well control problem consists of optimizing the control variables like valve openings of wells in order to maximize sweep efficiency of injector fluid throughout the reservoir life. Reservoir permeability field is considered as an uncertain model parameter for which the uncertainty distribution is known. Although the proposed framework is demonstrated for robust optimal well control problem, it is designed to be general enough to be applicable to similar optimal control problems governed by a set of PDEs. 
Two test cases -- both representing a distinct model parameter uncertainty and control dynamics -- are used to demonstrate the computational gains of using the multi-grid idea. The outline of the rest of this paper is as following: Section \ref{sect: methodology} provides the problem description and proposed framework to solve robust optimal well control problem. Section \ref{sect: case studies} details the model parameters for the two case studies designed for demonstration. Results of the proposed framework on these two case studies are demonstrated in section \ref{sect: results}. Finally, section \ref{sect: conclusion} concludes with the research study summary and an outlook of future research directions. \section{Methodology} \label{sect: methodology} Fluid flow control in subsurface reservoirs has many engineering applications ranging from the financial aspects of efficient hydrocarbon production to the environmental problems of contaminated removal from polluted aquifers \citep{whitaker1999single}. In this paper, a canonical single-phase subsurface flow control problem (also referred as robust optimal well control problem) is studied where water is injected in porous media to displace a contaminant. This process is commonly modeled using an advection equation for tracer flow through porous media (also referred as Darcy flow through porous media) over the temporal domain $\mathcal{T} = [t_0, t_M] \subset \mathbb{R}$ and spatial domain $\mathcal{X} \subset \mathbb{R}^2$. In the context of fluid displacement (e.g. groundwater decontamination), the tracer corresponds to clean water injected in the reservoir from the injector wells and the non-traced fluid corresponds to the displaced contaminated water from the reservoir through producer wells. The source and sink locations within the modeled domain correspond to injector and producer wells, respectively. 
The tracer flow models water flooding with the fractional variable $s(x,t) \in [0,1]$ (also referred to as saturation) which represents the fraction of injected clean water to the displaced contaminated water at location $x \in \mathcal{X}$ and time $t \in \mathcal{T}$. The fluid flow in and out of the domain is represented with $a(x,t)$ which is treated as source/sink terms of the governing equation. The set of well locations is denoted as $x' \in \mathcal{X'}$ (where $\mathcal{X'} \subset \mathcal{X}$). In other words, $a(x,t)$ is set to zero everywhere in the domain $\mathcal{X}$ except the set of locations $x'$. The controls $a^+(x,t)$ (formulated as $\max(0,a(x,t))$) and $a^-(x,t)$ (formulated as $\min(0,a(x,t))$) represent the injector and producer flow controls, respectively (note that $a=a^+ + a^-$). The task of the problem under consideration is to find the optimal controls $a^*(x',t)$ which are the solution of the following closed-loop optimisation problem: \begin{subequations} \begin{align} & \max_{s(\cdot), a(\cdot)} \int_{t_0}^{t_M} \left ( \sum_{x'}a^-(x',t)(1-s(x',t)) \right ) dt, & x' \in \mathcal{X'}, \ t \in \mathcal{T} \label{eq: obj_fun} \\ & \frac{ds}{dt} = \frac{1}{\phi} \left (a^+ + sa^- - \nabla \cdot sv \right ), & x \in \mathcal{X}, \ t \in \mathcal{T} \label{eq: gov_eq} \\ & s(\cdot,t_0) = s_0,\ \ v \cdot \textbf{n} = 0, & \label{eq: init_eq}\\ & \sum_{x'} a^+(x',t) = -\sum_{x'} a^-(x',t) = c, & x' \in \mathcal{X'}, \ t \in \mathcal{T} \label{eq: constr} \end{align} \label{eq: prob_def} \end{subequations} The objective function defined in equation \eqref{eq: obj_fun} represents the total displaced fluid flow out of the reservoir (e.g. contaminated water production) and is maximized on the finite time interval $\mathcal{T}$. The integrand in this function is referred to as the Lagrangian term in control theory and is often denoted by $L(s,a)$.
The water flow trajectory $s(x,t)$, is governed by advection equation \eqref{eq: gov_eq} which is solved given the velocity field $v$, which is obtained from the Darcy's law: $v = - (k/\mu) \nabla p$. The pressure $p(x,t) \in \mathbb{R}$, is obtained from the pressure equation, $-\nabla \cdot (k/\mu) \nabla p = a $. Porosity $\phi (x,\cdot)$, permeability $k (x,\cdot)$, and viscosity $\mu (x,\cdot)$, are the model parameters. Permeability $k$, represents the model uncertainty and is treated as a random variable that follows a known probability density function $\mathcal{K}$ with $K$ as its domain. The initial and no flow boundary conditions are defined in equation \eqref{eq: init_eq}, where \textbf{n} denotes outward normal vector from the boundary of $\mathcal{X}$. The constraint defined in equation \eqref{eq: constr} represent the fluid incompressibility assumption along with the fixed total source/sink term $c$ which represents total water injection rate in the reservoir. In a nutshell, the optimisation problem provided in equations \eqref{eq: prob_def} is solved to find the optimal controls $a^*(x',t)$ such that they are robustly optimal over the entire permeability uncertainty domain, $K$. \subsection{RL framework} According to RL convention, the optimal control problem defined in equation \eqref{eq: prob_def} is modeled as a Markov decision process which is defined as a quadruple $\left \langle \mathcal{S},\mathcal{A},\mathcal{P},\mathcal{R} \right \rangle$. Here, $\mathcal{S} \subset \mathbb{R}^{n_s}$ is set of all possible states with the dimension $n_s$, $\mathcal{A} \subset \mathbb{R}^{n_a}$ is a set of all possible actions with the dimension $n_a$. The state $S$, is represented with the saturation $s(x,\cdot)$ and pressure $p(x,\cdot)$ values over the entire domain $\mathcal{X}$. The action $A$, is represented with an array of well control values $a(x',\cdot)$. 
More details of this array like representation of action are presented in section \ref{sect: rl_formulation}. The optimal control problem defined in equation \eqref{eq: prob_def} is discretized into $M$ control steps and as a result, its solution is a set of optimal control values ${a^*(x',t_1), a^*(x',t_2), \ldots, a^*(x',t_M)}$ where $t_0<t_1<t_2<\cdots <t_{M}$. The transition function $\mathcal{P}:\mathcal{S}\times \mathcal{A} \rightarrow \mathcal{S}$, is assumed to follow Markov property. That is, transition to the state $S(t_{m+1})$ is obtained by executing the actions $A(t_{m})$ when in the state $S(t_{m})$. Such transition function is obtained by discretizing equation \eqref{eq: gov_eq}. For a transition from the state $S(t_{m})$, to the state $S(t_{m+1})$, the real valued reward $R(t_{m+1})$, is calculated as $R(t_{m+1})=\mathcal{R}(S(t_{m}), A(t_{m}), S(t_{m+1}))$, where $\mathcal{R}:\mathcal{S} \times \mathcal{A} \times \mathcal{S} \rightarrow \mathbb{R}$ is the reward function. The reward function is obtained by discretizing the objective function (equation \eqref{eq: obj_fun}) into control steps such that, \begin{equation} R(t_{m+1})= \int_{t_m}^{t_{m+1}} L(s,a) dt. \label{eq: reward_func} \end{equation} The optimal controls are obtained by learning a control policy function which is defined as $\pi:\mathcal{S} \rightarrow \mathcal{A}$. This function is denoted as $\pi(A|S)$ and is generally represented with a neural network. Essentially, the control policy $\pi(A|S)$, maps a given state $S(t_m)$, into an action $A(t_m)$. For an optimal control problem, with $M$ control steps, the goal of reinforcement learning is to find an optimal policy $\pi^*(A|S)$ such that the expected reward $G = \sum_{m=1}^M \gamma^{m-1} R(t_{m})$, is maximized. Note that immediate rewards $R$, are exponentially decayed by the discount rate $\gamma \in [0,1]$. 
The discount rate represents how myopic the learned policy is, for instance, learned policy is considered completely myopic when $\gamma=0$. The controller, which is also referred to as an agent, follows the policy and explores various control trajectories by interacting with the environment which consists of a transition function $\mathcal{P}$ and a reward function $\mathcal{R}$. The data gathered by these control trajectories are used to update the policy towards optimality. Each such update of the policy is referred to as the policy iteration. In RL literature, a single complete control trajectory is referred to as an episode. Essentially, RL algorithms attempt to learn the optimal policy $\pi^*(A|S)$ from a randomly initialized policy $\pi(A|S)$, by exploring state-action space by executing a high number of episodes. In order to represent the variability in permeability, a finite number $l$, of well spread uncertainty distribution samples is chosen. This is achieved with a clustering analysis (please refer appendix \ref{app: cluster} for cluster analysis formulation used in this paper) of the domain $K$. The sample vector $\textbf{k}= \{k_1, k_2, \cdots k_l\}$, is constructed with samples of the distribution $\mathcal{K}$, which are located nearest to the cluster centers. The policy $\pi^*(A|S)$, is learned by randomly selecting the parameter $k$ from the training vector $\textbf{k}$ at the beginning of every episode. The policy return $R^{\pi(A|S)}$, is computed by averaging the returns of policy $\pi(A|S; k_i)$ (policy applied on the simulation where permeability is set to $k_i$) on $l$ simulations, which is formulated as, \begin{equation} R^{\pi(A|S)} = \frac{1}{l} \sum_{i=1}^l \sum_{m=0}^{M-1} \int_{t_m}^{t_{m+1}} L(s, \pi(A|S; k_i)) dt. \end{equation} In optimal well control problems, the system is partially observable, that is, reservoir information is only available at well locations throughout the reservoir life cycle. 
In order to accommodate this fact, the agent is provided with the available observation as its state. For this study, observation is represented with a set of saturation and pressure values at the well locations $x'$. Note that, with such representation of states, the underlying assumption of Markov property of the transition function is approximated. \subsection{Learning convergence criteria} The optimal policy convergence is detected by monitoring the policy return $R^{\pi(A|S)}$, after every policy iteration. Conventionally, when this value converges to a maximum value, the optimal policy is assumed to be learned. The convergence criteria for $i$th policy iteration is defined as, \begin{equation} \delta_i = \left | \frac{R^{\pi(A|S)}_i - R^{\pi(A|S)}_{i-1} }{\max (R^{\pi(A|S)}_{i-1}, \epsilon) } \right | < \delta, \label{eq: conv_cri} \end{equation} where $\delta_i$ is the return tolerance at $i$th policy iteration, $\delta$ is stopping tolerance and $\epsilon$ is a small non-zero number used to avoid division by zero. The convergence of policy learning is often flat near the optimal result. For this reason, the convergence criteria defined in equation \eqref{eq: conv_cri} is checked for the latest $n$ consecutive policy iterations. For instance, if $\textbf{r}$ is the array of monitored values of $R^{\pi(A|S)}$ at all policy iterations, the policy $\pi(A|S)$ is considered converged when the convergence criteria (equation \eqref{eq: conv_cri}) for last $n$ policy iterations is met. Algorithm \ref{alg:isconverged} delineates the pseudocode for this convergence criteria. 
\begin{algorithm*} \caption{learning convergence criteria} \begin{algorithmic}[1] \Procedure{IsConverged}{$\textbf{r}$, $n$, $\delta$} \If {$length(\textbf{r}) < n$} \Return {False} \EndIf \State compute $\delta_i$ (equation \eqref{eq: conv_cri}) for last $n$ values of \textbf{r} and get its maximum $\delta_{max}$ \If{$\delta_{max} < \delta$} \State \Return{True} \Else \State \Return{False} \EndIf \EndProcedure \end{algorithmic} \label{alg:isconverged} \end{algorithm*} Figure \ref{fig:conv_criteria} illustrates the effect of $n$ and $\delta$ on the convergence criteria for an example of a reinforcement learning process. The policy return plot is shown in blue color where each value at a policy iteration is shown with a dot. The corresponding return tolerance is plotted in gray color which is represented in percentage format ($\delta_i\times100$, where $\delta_i$ is computed from equation \eqref{eq: conv_cri}). It can be seen that the convergence criteria (denoted with markers on these plots) is more stringent when the stopping tolerance $\delta$, is smaller and consecutive policy iteration steps $n$, are higher. \begin{figure} \caption{Plot of policy returns versus number of training episodes} \label{fig:conv_criteria} \end{figure} \subsection{Adaptive multi-grid RL framework} An adaptive multi-grid RL framework is proposed where, essentially, the policies learned using lower grid fidelity environments are transferred and trained with higher fidelity environments. The grid fidelity for an environment is described with the factor $\beta \in (0,1]$. The environment with $\beta=1$ is assumed to have the fine-grid discretization which guarantees a good approximation of the fluid flow production out of the domain as defined in equation \eqref{eq: obj_fun}. For any environment where $\beta<1$, the environment grid-size is coarsened with the factor of $\beta$. 
For instance, if a high fidelity environment where $\beta=1$ corresponds to simulation with grid size $64\times64$, the simulation grid size is reduced to $32\times32$ when $\beta$ is set to 0.5. Restriction operator $\Phi_{\beta}()$, is used to coarsen the high fidelity simulation parameters with the factor of $\beta$. This is done by partitioning a finer grid of size $m \times n$ (corresponding to $\beta=1$) into the coarser dimensions $\lfloor \beta m \rfloor \times \lfloor \beta n \rfloor$ (corresponding to $\beta < 1$ where $\lfloor \cdot \rfloor$ is the floor operator) and computing these coarse grid cell values as a function $\textbf{f}$, of values in the corresponding partition. Figure \ref{fig: operators}a illustrates this restriction operator for a variable $x \in \mathbb{R}^{m\times n}$. The function $\textbf{f}$, for different parameters of the reservoir simulation is listed in table \ref{tab:coarse_f}. On the other hand, prolongation operator $\Phi^{-1}_{\beta}()$, maps coarse grid environment parameters to the fine grid as shown in figure \ref{fig: operators}b. \begin{figure} \caption{illustration of the restriction operator $\Phi_{\beta}$ and the prolongation operator $\Phi^{-1}_{\beta}$} \label{fig: operators} \end{figure} \begin{table} \caption{ restriction operator function for simulation parameters} \centering \begin{tabular}{l l } \hline simulation parameter & function, \textbf{f} \\ \hline saturation, $s$ & mean \\ porosity, $\phi$ & mean \\ pressure, $p$ & mean \\ permeability, $k$ & harmonic mean \\ flow control, $a$ & sum \\ \hline \end{tabular} \label{tab:coarse_f} \end{table} A typical agent-environment interaction using this framework is illustrated in figure \ref{fig: rl_framework}. Note that the transition function $\mathcal{P}$, and reward function $\mathcal{R}$, are sub-scripted with $\beta$ to indicate the grid fidelity of the environment. State $S(t_m)$, action $A(t_m)$ and reward $R(t_m)$ are denoted with shorthand notations, $S_m$, $A_m$ and $R_m$, respectively. 
Throughout the learning process the policy is represented with states and actions corresponding to high fidelity grid environment. As a result, actions and states, to and from the environment, undergo the restriction $\Phi_{\beta}$ and prolongation $\Phi^{-1}_{\beta}$ operations at each time-step as shown in the environment box of the figure \ref{fig: rl_framework}. \begin{figure} \caption{A typical agent-environment interaction in the proposed multi-grid RL framework} \label{fig: rl_framework} \end{figure} \begin{algorithm*} \caption{Proximal policy optimisation with adaptive multi-grid framework} \begin{algorithmic}[1] \State Define $\delta$, $n$ and an empty array $\textbf{r}$ for convergence criteria \State Define a grid fidelity factor array $\boldsymbol{\beta}=[\beta_1, \beta_2,\ldots,\beta_m]$, where $\beta_m=1$ and $\beta_1<\beta_2<\ldots<\beta_m$. \State Define an episode limit array $\textbf{E}=[E_1, E_2,\ldots, E_m]$, where $E_1<E_2<\ldots<E_m$. \State Define total episode count, $e=0$ \For{$i=1,2,\ldots, m$} \State Generate the environment $\mathcal{E}_{\beta_i}$, with the grid fidelity factor $\beta_i$ \For {$iteration=1,2,\ldots$} \For {$actor=1,2,\ldots,N$} \State Run policy $\pi_{\theta_{old}}$ in environment $\mathcal{E}_{\beta_i}$ , for $T$ time steps (in total, $E$ episodes) \State Compute value function estimates $\hat{V}_1,\ldots,\hat{V}_T$ using critic network \State Compute advantage function estimates $\hat{A}_1,\ldots,\hat{A}_T$ \EndFor \State Optimize $J_{ppo}(\theta)$ with $K$ epochs and minibatch size $M\leq NT$ \State $\theta_{old}\leftarrow\theta$ \State Compute the policy return $R^{\pi_{\theta}(A|S)}$ and append it in \textbf{r} \State $e:=e+E$ \If {IsConverged($\textbf{r}$, $n$, $\delta$) or $e \geq E_i$} \State \textbf{break} \EndIf \EndFor \EndFor \end{algorithmic} \label{alg:ppo_adamultigrid} \end{algorithm*} The proposed framework is demonstrated for PPO algorithm. 
PPO \citep{schulman2017proximal} is a policy gradient algorithm that models the stochastic policy $\pi_\theta(A|S)$, with a neural network (also referred to as the actor network). Essentially, the network parameters $\theta$, are obtained by optimizing for the objective function, \begin{equation} \label{eq: ppo_loss} \begin{split} J_{ppo}(\theta) =& \hat{\mathbb{E}}_t \Bigg[ \Bigg. \min \Big( \Big. r_t(\theta) \hat{Adv} (S_t,A_t),\\ &\textup{clip}(r_t(\theta),1-\epsilon,1+\epsilon)\hat{Adv} (S_t,A_t) \Big. \Big) \Bigg. \Bigg] , \end{split} \end{equation} where $r_t(\theta) = \pi_{\theta}(A_t|S_t)/\pi_{\theta_{old}}(A_t|S_t)$ and $\theta_{old}$ correspond to the policy parameters before the policy update. The advantage function estimator $\hat{Adv}$, is computed using generalized advantage estimator \citep{schulman2015high} which is derived from the value function $V_t$. The value function estimator $\hat{V}_t$ is learned through a separate neural network termed as the critic network. Definitions of advantage and value functions are provided in appendix \ref{app: value_func}. In practice, a single neural network is used to represent both, actor and critic networks. The objective function for this integrated actor-critic network is the summation of actor loss term (equation \eqref{eq: ppo_loss}), value loss term and entropy loss term. For the purpose of maintaining brevity in our description these latter loss terms are omitted and the policy network's objective function is treated as $J_{ppo}(\theta)$ in further discussion. However, please note that they are considered while executing the framework. Readers are referred to \cite{schulman2017proximal} for the detailed definition of policy network loss term. Algorithm \ref{alg:ppo_adamultigrid} presents the pseudocode for the proposed multi-grid RL framework. 
The framework consists of, in total, $m$ values of grid fidelity factor which are represented with an array $\boldsymbol{\beta}=[\beta_1, \beta_2,\ldots,\beta_m]$, where $\beta_m=1$ and $\beta_1<\beta_2<\ldots<\beta_m$. The environment is denoted as $\mathcal{E}_{\beta_i}$, which represents the environment with the grid fidelity factor $\beta_i$. The policy $\pi_{\theta}(A|S)$ is learned initially with environment, $\mathcal{E}_{\beta_1}$, until the convergence criteria is met. The convergence criteria is checked using the algorithm \ref{alg:isconverged} with predefined parameters $\delta$ and $n$. Upon convergence, further policy iterations are learned using the environment $\mathcal{E}_{\beta_2}$, and so on until the convergence criteria is met for the highest grid fidelity environment $\mathcal{E}_{\beta_m}$. A limit for number of episodes to be executed at each grid level is also set. This is done by defining an episode limit array $\textbf{E}=[E_1, E_2,\ldots, E_m]$, where $E_m$ is total number of episodes to be executed and $E_1<E_2<\ldots<E_m$. That is, for every environment with grid fidelity factor $\beta_j$ the maximum number of episodes to be trained is limited to $E_j$. \section{Case studies} \label{sect: case studies} Two test cases are designed representing two distinct permeability uncertainty distributions and control dynamics. For both cases, the values for model parameters emulate those in the benchmark reservoir simulation cases, SPE-10 model 2 \citep{christie2001tenth}. Table \ref{tab:res_model} delineates these values for test case 1 and 2. As per the convention in geostatistics, the distribution of $\log{(k)}$ is assumed to be known and is denoted by $\mathcal{G}$. As a result, $g=\log(k)$ is treated as a random variable in the problem description defined in equation \eqref{eq: prob_def}. Uncertainty distributions for test case 1 and 2 are denoted with $\mathcal{G}_1$ and $\mathcal{G}_2$, respectively. 
\begin{table}[ht] \caption{Reservoir model parameters} \centering \begin{tabular}{l l l l} \hline & case 1 & case 2 & units\\ \hline spatial domain $\mathcal{X}$ & (1200$\times$1200) & (620$\times$1820) & ft$^2$\\ temporal domain $\mathcal{T}$ & [0,125] & [0,25] & days \\ initial saturation $s_0$ & 0.0 & 0.0 & -- \\ viscosity $\mu$ & 0.3 & 0.3 & cP \\ porosity $\phi$ & 0.2 & 0.2 & -- \\ number of producers $n_p$ & 31 & 14 & -- \\ number of injectors $n_i$ & 31 & 7 & -- \\ total injector flow $\sum a^+$ & 2304 & 9072 & ft$^2$/day\\ \hline \end{tabular} \label{tab:res_model} \end{table} \subsection{Uncertainty distribution for test case 1} The log-permeability uncertainty distribution for test case 1 is inspired from the case study done by \cite{brouwer2001recovery}. Figure \ref{fig:domain_schema}a shows schematics of the spatial domain for this case. In total, 31 injector wells (illustrated with blue circles) and 31 producer wells (illustrated with red circles) are placed at the left and right edge of the domain, respectively. As illustrated in Figure \ref{fig:domain_schema}a, a linear high permeability channel (shown in gray color) passes from the left to right side of the domain. $l_1$ and $l_2$ represent the distance from the top edge of the domain on the left and right side while the channel width is denoted with $w$. These parameters follow uniform distributions defined as, $w \sim U(120, 360)$, $l_1 \sim U(0,L-w)$ and $l_2 \sim U(0,L-w)$, where $L$ is domain length. In other words, the random variable $g$ follows the probability distribution $\mathcal{G}_1$ which is parameterized with $w$, $l_1$ and $l_2$: \begin{equation*} g \sim \mathcal{G}_1(w, l_1, l_2). \end{equation*} To be specific, log permeability $g$ at a location $(x,y)$ is formulated as: \begin{equation*} g(x,y) = \left\{\begin{matrix} \log{(245)} & \textup{if} & \frac{l_2-l_1}{L}x+l_1 \leq y \leq \frac{l_2-l_1}{L}x+l_1+w, \\ & & \\ \log{(0.14)} & \ \ \textup{otherwise}, & \end{matrix}\right. 
\end{equation*} where $x$ and $y$ are horizontal and vertical distances from the upper left corner of the domain illustrated in figure \ref{fig:domain_schema}a. The values for permeability at the channel (245 mD) and the rest of the domain (0.14 mD) are inspired by the Upper Ness log-permeability distribution peak values specified in the SPE-10 model 2 case. \begin{figure} \caption{schematic of the spatial domain for test case 1 and 2} \label{fig:domain_schema} \end{figure} \subsection{Uncertainty distribution for test case 2} Test case 2 represents uncertainty distribution of a smoother permeability field. Figure \ref{fig:domain_schema}b illustrates the reservoir domain for this case. It comprises 14 producers (illustrated with red circles) located symmetrically on left and right edges (7 on each edge) of the domain and 7 injectors (illustrated with blue circles) located at the central vertical axis of the domain. A prior distribution $F$ is assumed over all the locations $x \in \mathcal{X}$ as, \begin{align} & F(x) = \mu + Z(x), \textup{where}, \label{eq: prior_case2} \\ & \mathbb{E}(Z(x))=0, \nonumber \\ & \textup{Cov}(Z(x), Z(\tilde{x})) = \sigma^2 k(x,\tilde{x}), \nonumber \end{align} where the process variance, $\sigma$, is assigned as 5 and the exponential covariance function (kernel), $k(x,\tilde{x})$, is defined as, \begin{equation*} k(x,\tilde{x}) = \exp\left [ - \left ( \frac{(x_1 - \tilde{x}_1)^2}{l_1^2} + \frac{(x_2 - \tilde{x}_2)^2}{l_2^2} \right )^{1/2} \right ], \end{equation*} where the parameters $l_1$ and $l_2$ are assigned to be 620ft (width of the domain) and 62ft (10\% of domain width), respectively. The posterior distribution is conditioned on the observed log-permeability vector, $\textbf{g}(x') = [ g(x'_1), g(x'_2), \cdots, g(x'_n)]$, where each observation corresponds to a log-permeability value of 2.41 at a well location (i.e., $n=21$ since there are, in total, 21 wells in this case). 
From the principle of ordinary kriging, the posterior distribution, $\mathcal{G}_2$, for log-permeability at a location $x \in \mathcal{X}$ is a normal distribution which is defined as, \begin{align*} g(x) \sim &\ \mathcal{G}_2( \hat{g}(x), \hat{s}^2(x)), \textup{ where,} \\ \hat{g}(x) =&\ \hat{\mu} + \textbf{k}(x', x)^\intercal \textbf{k}(x',x')^{-1} (\textbf{g}(x')-\textbf{1}\hat{\mu}), \\ \hat{s}^2(x) =&\ \sigma^2 \Bigg[ \Bigg. 1 - \textbf{k}(x',x)^\intercal \textbf{k}(x',x')^{-1} \textbf{k}(x',x)\\ &+ \frac{(1-\textbf{1}^\intercal \textbf{k}(x',x')^{-1} \textbf{k}(x',x))^2}{ \textbf{1}^\intercal \textbf{k}(x',x')^{-1} \textbf{1}} \Bigg. \Bigg], \end{align*} where $\textbf{k}(x',x)$ is $n$ dimensional vector whose $i$th value is $k(x'_i, x)$, $\textbf{k}(x',x')$ is $n\times n$ dimensional matrix whose value at $(i,j)$ is $k(x'_i, x'_j)$, $\textbf{1}$ is a $n$ dimensional vector with all elements of one ($\textbf{1} = [1,1,\cdots, 1]^\intercal$) and $\hat{\mu}$ is an estimate of the global mean $\mu$, which is obtained from the kriging model based on the maximum likelihood estimation of the distribution $F(x)$ (from equation \eqref{eq: prior_case2}) for the observations $\textbf{g}(x')$, and is formulated as, \begin{equation*} \hat{\mu} = \frac{\textbf{1}^\intercal \textbf{k}(x',x')^{-1} \textbf{g}(x')}{\textbf{1}^\intercal \textbf{k}(x',x')^{-1} \textbf{1}}. \end{equation*} The log-permeability distribution $\mathcal{G}_2$, is created with an ordinary kriging model using the geostatistics library gstools \citep{sebastian_muller_2019_2541735}. In the simulation, samples of the permeability fields are obtained with a clockwise rotation angle of $\pi/8$. \subsection{State, action and reward formulation} \label{sect: rl_formulation} PPO algorithm attempts to learn the parameters $\theta$ of the policy neural network $\pi_{\theta}(A|S)$. The episodes (i.e. the entire simulation temporal domain $\mathcal{T}$) are divided in five control steps. 
Each episode timestep corresponding to a control step is denoted with $t_m$, where $m \in \{1,2,\cdots, 5 \}$. The state $S$, is represented by an observation vector which consists of saturation and pressure values at well locations, $x'$. Since the saturation values at injector wells are always one, irrespective of the time $t_m$, they are omitted from the observation vector. Consequently, the observation vector is of the size $2n_p + n_i$ (i.e., $n_s = 93$ for test case 1 and $n_s = 35$ for test case 2). Note that this observation vector forms the input to the policy network $\pi_{\theta}(A|S)$. A vector of flow control values of all the injector and producer wells, denoted by $A$, is represented as the action. The action vector $A$, consists of in total $n_p + n_i$ values (i.e., $n_a=62$ for test case 1 and $n_a=21$ for test case 2). In order to maintain the constraint defined in equation \eqref{eq: constr}, the action vector is represented with a vector of weights $w \in \mathbb{R}^{n_a}$, such that $0.001\leq w_j \leq 1$. Each weight value $w_j$, corresponds to the proportion of flow through the $j$th well. As a result, the values in the action vector are written as, $( w_1,\cdots,w_{n_i},w_{n_i+1},\cdots,w_{n_i+n_p} )$. Flow through the $j$th injector $A_j$, is computed such that the constraint defined in \eqref{eq: constr} is satisfied: \begin{equation*} A_j = -\frac{w_j}{\sum_{i=1}^{n_i}w_i}c. \end{equation*} Similarly, flow through the $j$th producer, $A_{j+n_i}$, is written as, \begin{equation*} A_{j+n_i} = \frac{w_{j+n_i}}{\sum_{i=1}^{n_p}w_{i+n_i}}c. \end{equation*} The reward function, as defined in equation \eqref{eq: reward_func}, is divided by total pore volume ($\phi \times l_x \times l_y $) as a form of normalization to obtain a reward function in the range [0,1]. The normalized reward represents recovery factor or sweep efficiency of the contaminated fluid. Recovery factor represents the total amount of contaminants swept out of the domain. 
For instance, the recovery factor of 0.65 means that in total of 65\% of contaminants are swept out of the domain using waterflooding. To put it in the context of ground water decontamination problem, the optimal controls correspond to the well controls that maximize the percentage of contaminants swept out of the reservoir. \subsection{Multi-grid framework formulations} The proposed framework is demonstrated using three levels of grid fidelity corresponding to $\beta=0.25$, $\beta=0.5$ and $\beta=1.0$. Table \ref{tab: beta_table} lists the discretization grid size corresponding to these grid fidelity factors for both test cases. In order to show the effectiveness of the proposed framework, the obtained results are compared with single grid and multi-grid frameworks. The results for single grid framework are same as if they were obtained using classical PPO algorithm where the environment has a fixed fidelity factor throughout the policy learning process. This is done by setting the grid fidelity factor array $\boldsymbol{\beta}$, and episode limit array \textbf{E}, with a single value in algorithm \ref{alg:ppo_adamultigrid}. The factor $n$ in convergence criteria procedure (delineated in algorithm \ref{alg:isconverged}) is set to infinity. In other words, convergence criteria is unchecked and the policy learning take place for a predefined number of episodes. In total three such single-grid experiments are done corresponding to $\beta=0.25$, $\beta=0.5$ and $\beta=1.0$. Further, two multi-grid experiments are performed to demonstrate the effectiveness of the proposed framework. The first multi-grid experiment is referred as ``fixed'' where convergence criteria is kept unchecked just like single-grid frameworks. The multiple levels of grids are defined by setting the grid fidelity factor array $\boldsymbol{\beta}$, and episode limit array \textbf{E}, as an array of multiple values corresponding to each fidelity factor value and its corresponding episode count. 
In the fixed multi-grid framework, policy learning takes place by updating the environment fidelity factor according to $\boldsymbol{\beta}$ without checking the convergence criteria (i.e. by setting $n=\infty$). Secondly, the ``adaptive'' multi-grid framework parameters are set similar to those used in fixed multi-grid framework except for the convergence criteria parameters $n$ and $\delta$. Table \ref{tab: exp_type} delineates number of experiments and their corresponding parameters for test case 1 and 2. Figure \ref{fig: case_1_coarse} provide visualization of effect of fidelity factor $\beta$, on the simulation of test case 1. Figure \ref{fig: case_1_coarse}a and \ref{fig: case_1_coarse}b show log-permeability and saturation plots corresponding to $\beta=0.25$, $\beta=0.5$ and $\beta=1.0$. Further, figure \ref{fig: case_1_coarse}c illustrate the effect of grid fidelity on simulation run time for single episode (shown on left with a box plot with 100 simulation run trials) and ``equivalent $\beta=1$ simulation run time'' for each grid fidelity factor (shown on right). Equivalent $\beta=1$ simulation run time is defined as the ratio of average simulation run time for a grid fidelity factor $\beta$, to that corresponding to $\beta=1$. This quantity is used as a scaling factor to convert the number of simulations for any value of $\beta$ to its equivalent number of simulations as if they were performed with $\beta=1$. Similar plots for test case 2 are demonstrated in figure \ref{fig: case_2_coarse}. 
\begin{table} \caption{grid fidelity factor and corresponding grid size} \centering \begin{tabular}{l l l} \hline & test case 1 & test case 2 \\ \hline $\beta=1$ & $61\times61$ & $31\times91$ \\ $\beta=0.5$ & $30\times30$ & $15\times45$ \\ $\beta=0.25$ & $15\times15$ & $7\times22$ \\ \hline \end{tabular} \label{tab: beta_table} \end{table} Results obtained using the proposed framework are evaluated against the benchmark optimisation results obtained using differential evolution (DE) algorithm \citep{storn1997differential}. For both optimisation methods (PPO and DE) multiprocessing is employed to reduce total computational time. However, parallelism behaviour is quite varied between PPO and DE algorithms. In PPO algorithms, neural networks are back propagated synchronously at the end of each policy iteration which causes extra computational time in waiting and data distribution. As a results, in order to compare computational efforts irrespective of computational resources and parallelism behaviours, it is fair to compare number of simulation runs which is a major source of computational cost in these algorithms. The PPO algorithm for the proposed framework is executed using the stable baselines library \citep{stable-baselines3}, while python's SciPy \citep{2020SciPy-NMeth} library is used for DE algorithm. Appendix \ref{app: rl_params} delineate all the algorithm parameters used in this study. 
\begin{table*}[t] \caption{multi-grid framework experiments} \centering \begin{tabular}{l l l} \hline & test case 1 & test case 2 \\ \hline & $\boldsymbol{\beta}=[0.25]$ & $\boldsymbol{\beta}=[0.25]$ \\ single grid ($\beta=0.25$) & $\textbf{E}=[75000]$ & $\textbf{E}=[150000]$ \\ & $n=\infty$; $\delta=0$ & $n=\infty$; $\delta=0$ \\ \hline & $\boldsymbol{\beta}=[0.5]$ & $\boldsymbol{\beta}=[0.5]$ \\ single grid ($\beta=0.5$) & $\textbf{E}=[75000]$ & $\textbf{E}=[150000]$ \\ & $n=\infty$; $\delta=0$ & $n=\infty$; $\delta=0$ \\ \hline & $\boldsymbol{\beta}=[1.0]$ & $\boldsymbol{\beta}=[1.0]$ \\ single grid ($\beta=1.0$) & $\textbf{E}=[75000]$ & $\textbf{E}=[150000]$ \\ & $n=\infty$; $\delta=0$ & $n=\infty$; $\delta=0$ \\ \hline & $\boldsymbol{\beta}=[0.25, 0.5, 1.0]$ & $\boldsymbol{\beta}=[0.25, 0.5, 1.0]$ \\ fixed multi-grid & $\textbf{E}=[25000, 50000, 75000]$ & $\textbf{E}=[50000, 100000, 150000]$ \\ & $n=\infty$; $\delta=0$ & $n=\infty$; $\delta=0$ \\ \hline & $\boldsymbol{\beta}=[0.25, 0.5, 1.0]$ & $\boldsymbol{\beta}=[0.25, 0.5, 1.0]$ \\ adaptive multi-grid & $\textbf{E}=[25000, 50000, 75000]$ & $\textbf{E}=[50000, 100000, 150000]$ \\ & $n=25$; $\delta=0.2$ & $n=25$; $\delta=0.2$ \\ \hline \end{tabular} \label{tab: exp_type} \end{table*} \begin{figure} \caption{effect of grid fidelity factor $\beta$ on the environment for test case 1} \label{fig: case_1_coarse} \end{figure} \begin{figure} \caption{effect of grid fidelity factor $\beta$ on the environment for test case 2} \label{fig: case_2_coarse} \end{figure} \section{Results} \label{sect: results} The control policy where injector and producer wells are equally open throughout the entire episode is referred to as the base policy. Under such policy, the water flooding prominently takes place in the high permeability region leaving the low permeability region swept inefficiently. 
The optimal policy for these test cases would be to control the producer and injector flow to mitigate this imbalance in water flooding. The optimal policy, learned using reinforcement learning for test case 1, shows, on average, around 12\% improvement with respect to the recovery factor achieved using the base policy. While for test case 2, the average improvement in the order of 25\% is observed. Figure \ref{fig: case_1_rl_plots} illustrates the plots for policy return $R^{\pi(A|S)}$, corresponding to all the frameworks listed in table \ref{tab: exp_type} for test case 1. At the beginning of the learning process, the policy return value for the single-grid framework keeps improving and eventually converges to a maximum value when the policy converges to an optimal policy. Note that for a lower value of grid fidelity factor $\beta$, the optimal policy return is also low. In other words, the coarsening of simulation grid discretization also reflects in overall reduction in recovery factor. This is due to the low accuracy of states and actions representation for environments with $\beta<1$. On the other hand, the overall computational gain is observed due to coarser grid sizes. Simulation run time corresponding to $\beta=0.25$ and $\beta=0.5$ show around 66\% and 54\% reduction as compared to that with $\beta=1$. The results of multi-grid frameworks are compared with the single grid framework corresponding to $\beta=1$ which refers to the classical PPO algorithm using the environment with a fixed high fidelity grid factor. As shown in the plots at the center and right of figure \ref{fig: case_1_rl_plots}, both multi-grid frameworks show convergence to the optimal policy which is achieved using the high fidelity single grid framework. In the fixed multi-grid framework the fidelity factor is incremented at a fixed interval of 25000 episodes. The adaptive framework is also provided with the same interval but with additional convergence check within each interval. 
For multi-grid learning plots shown in figure \ref{fig: case_1_rl_plots} (center and right plots), equivalent number of episodes corresponding to the environment with $\beta=1$ is illustrated as a secondary horizontal axis. This way, the computational effect of multi-grid frameworks is directly compared to single-grid (with $\beta=1$) framework. The equivalent number of $\beta=1$ episodes corresponding to episodes with certain $\beta$ value are computed by multiplying it with the equivalent $\beta=1$ simulation run time. For instance, number of episodes with $\beta=0.25$ are multiplied with 0.37. For fixed multi-grid framework, it takes 46264 number of equivalent $\beta=1$ episodes to achieve an equally optimal policy that is obtained with 75000 number of episodes using single grid ($\beta=1$) framework. Similarly, the same is achieved with just 28907 number of equivalent $\beta=1$ episodes using adaptive multi-grid framework. In other words, around 38\% and 61\% reduction is observed in simulation run time using fixed and adaptive multi-grid frameworks, respectively. Further, the robustness of the policy learned using these frameworks is compared by applying it on a highest fidelity environment with random permeability samples from the distribution $\mathcal{G}_1$, which were never seen during the policy learning process. Figure \ref{fig: case_1_results}a shows the plots of these unseen permeability fields, while the corresponding results obtained using these frameworks are plotted in figure \ref{fig: case_1_results}b. Optimal results obtained using differential evolutionary (DE) algorithms are provided as benchmark (marked as DE in figure \ref{fig: case_1_results}b). Note that DE algorithm, in itself, is not a suitable method to solve the robust optimal control problem since it can provide optimal controls only for certain permeability samples as opposed to PPO algorithm where the learned policy is applicable to all samples of permeability distribution. 
However, DE results are used as the reference optimal results which are achieved by direct optimization on sample by sample basis. Equivalence in the optimality of learned policies obtained using these three experiments can be observed from the closeness in their corresponding optimal recovery factors. \begin{figure*} \caption{plots of policy return versus number of episodes for test case 1} \label{fig: case_1_rl_plots} \end{figure*} \begin{figure*} \caption{evaluation of learned policies for test case 1} \label{fig: case_1_results} \end{figure*} Figure \ref{fig:case_1_rl_ex} demonstrate the policy visualization for an example of permeability sample in case 1. In this figure, the results are shown for permeability sample index 4 from the figure \ref{fig: case_1_results}a where a high permeability channel passes through lower region of the domain. The optimal policy, in this case, would be to restrict the flow through injector wells and producer wells which are in the vicinity of the channel. The super-positioned comparison of optimal results for base case, differential evolution, single-grid framework (where $\beta=1$), fixed multi-grid framework and adaptive multi-grid framework shows that the optimal policy is learned successfully using the proposed framework. \begin{figure*} \caption{illustration of learned optimal control policies for test case 1} \label{fig:case_1_rl_ex} \end{figure*} \begin{figure*} \caption{plots of policy return versus number of episodes for test case 2} \label{fig: case_2_rl_plots} \end{figure*} \begin{figure*} \caption{evaluation of learned policies for test case 2} \label{fig: case_2_results} \end{figure*} \begin{figure*} \caption{illustration of learned optimal control policies for test case 2} \label{fig:case_2_rl_ex} \end{figure*} For test case 2, similar results are observed as shown in figure \ref{fig: case_2_rl_plots}. The single-grid algorithms converge to an optimal policy in total 150000 number of episodes. 
The fixed multi-grid algorithm is trained with 50000 episode interval for each grid fidelity factor as shown in the central plot in figure \ref{fig: case_2_rl_plots}. The optimal policy is learned in 92657 equivalent $\beta=1$ episodes thus saving around 38\% of simulation run time. The adaptive multi-grid framework further reduces computational cost by achieving the optimal policy in 36618 number of equivalent $\beta=1$ episodes (simulation time reduction of about 76\% with respect to $\beta=1$ single-grid framework). Figure \ref{fig: case_2_results} illustrate the results of policy evaluation on an unseen permeability samples from the distribution $\mathcal{G}_2$. The permeability samples are shown in figure \ref{fig: case_2_results}a and the optimal recovery factor corresponding to learned policies are plotted in figure \ref{fig: case_2_results}b. Figure \ref{fig:case_2_rl_ex} demonstrate the optimal controls for an example of permeability sample index 5 from figure \ref{fig: case_2_results}a. The optimal policy learned using differential evolution algorithm refers to increasing the flow through injector wells which are in low permeability region while restricting the flow through producer wells for which the water cutoff is reached. The policies learned using RL framework takes advantage of the default location and orientation of high permeability regions. In this case, the optimal policy is achieved by controlling the well flow control such that the flow traverses through the permeability channels (that is, the flow is more or less perpendicular to the permeability orientation). \section{Conclusion} \label{sect: conclusion} An adaptive multi-grid RL framework is introduced to solve robust optimal well control problem. The proposed framework results in significant reduction in computational cost of policy learning process as compared to classical PPO algorithm results. 
In the presented case studies, computational savings in simulation runtime of 61\% for test case 1 and 76\% for test case 2 are observed. The results are highly dependent on the right choice of the algorithm hyper-parameters (e.g.\ $\delta$, $n$, $\boldsymbol{\beta}$ and $\textbf{E}$), which were tuned heuristically. As a future direction to this research study, the aim is to find the optimal values for $\boldsymbol{\beta}$ that maximize the overall computational savings. Furthermore, policy transfer was performed sequentially in the current framework, which seemed to have worked optimally. However, to improve the generality of the proposed framework, it would be important to study the effect of the sequence of policy transfer on the overall performance. \section*{Acknowledgment} The first author would like to acknowledge the Ali Danesh scholarship to fund his PhD studies at Heriot-Watt University. The authors would also like to acknowledge the EPSRC funding through the grant EP/V048899/1. \appendix \section{Cluster analysis of permeability uncertainty distribution} \label{app: cluster} The training vector \textbf{k} is chosen to represent the variability in the permeability distribution $\mathcal{K}$. For the optimal control problem, our main interest is in the uncertainty in the dynamical response of the permeability rather than the uncertainty in the permeability itself. As a result, the connectivity distance \citep{park2011modeling} is used as a measure of distance between permeability field samples.
The connectivity distance matrix $\textbf{D} \in \mathbb{R}^{N \times N}$ among $N$ samples of $\mathcal{K}$ is formulated as \begin{equation*} \textbf{D}(k_i, k_j) = \sum_{x''} \int_{t_0}^{T} \left [ s(x'',t; k_i) - s(x'',t; k_j) \right ]^2 dt, \label{eq: conn_dist} \end{equation*} where $N$ corresponds to a large number of samples of the uncertainty distribution, and $s(x'',t;k_i)$ is the saturation at location $x''$ and time $t$ when the permeability is set to $k_i$ and all wells are open equally. Multi-dimensional scaling of the distance matrix \textbf{D} is used to produce $N$ two-dimensional coordinates $d_1, d_2, \cdots, d_N$, each representing a permeability sample. The coordinates $d_1, d_2, \cdots, d_N$ are obtained such that the distance between $d_i$ and $d_j$ is equivalent to $\textbf{D}(k_i, k_j)$. In the k-means clustering process, these coordinates are divided into $l$ sets $S_1, S_2, \cdots , S_l$, obtained by solving the optimisation problem: \begin{equation*} \arg \min_{S} \sum_{i}^{l} \sum_{d_j \in S_i } \left \| d_j - \mu_{S_i} \right \|, \end{equation*} where $\mu_{S_i}$ is the average of all coordinates in the set $S_i$. The training vector \textbf{k} is a set of $l$ samples of $\mathcal{K}$, where each of its values $k_i$ corresponds to the sample nearest to $\mu_{S_i}$. \begin{figure*} \caption{log-permeability plots for training data of test case 1 and 2} \label{fig: cluster} \end{figure*} The total number of samples $N$ and the number of clusters $l$ are chosen to be 1000 and 16 for both uncertainty distributions, $\mathcal{G}_1$ and $\mathcal{G}_2$. The training vector $\textbf{k}$ is obtained with samples $k_1, \cdots, k_{16}$, each corresponding to a cluster center. Figures \ref{fig: cluster}a and \ref{fig: cluster}b show cluster plots for the $\mathcal{G}_1$ and $\mathcal{G}_2$ permeability distribution samples, respectively. Further, 16 permeability samples, each randomly chosen from a cluster, are chosen to evaluate the learned policies.
Figures \ref{fig: case_1_results}a and \ref{fig: case_2_results}a illustrate these samples for test cases 1 and 2, respectively. \section{Definitions of value and advantage function} \label{app: value_func} In RL, the policy $\pi(A|S)$ is said to be optimal if it maps the state $S_t$ to an action $A_t$ that corresponds to the maximum expected return value. These return values are learned through the data obtained in agent-environment interactions. Following are some definitions of return values typically used in RL: The value function is the expected future return for a particular state $S_t$ and is defined as \begin{equation*} V(S) = \mathbb{E}_{\pi} \left [ \sum_{m} \gamma^m R_{m+t+1} | S_t = S \right], \end{equation*} where $\mathbb{E}_{\pi}[\cdots]$ denotes the expected value given that the agent follows the policy $\pi$. As a shorthand notation, $V(S)$ at state $S_t$ is denoted as $V_t$. The Q function is similar to the value function except that it represents the expected return when the agent takes action $A_t$ in the state $S_t$. It is defined as \begin{equation*} Q(S, A) = \mathbb{E}_{\pi} \left [ \sum_{m} \gamma^m R_{m+t+1} | S_t = S, A_t = A \right]. \end{equation*} The advantage function is defined as the difference between the Q function and the value function and is denoted by $Adv(S, A)$ at state $S$ and action $A$. \section{Algorithm parameters} \label{app: rl_params} Parameters used for PPO, which were tuned by trial and error, are tabulated in table \ref{tab: ppo_param}. For the PPO algorithm, the parameters were tuned in order to find the least variability in the learning plots. Figures \ref{fig: case_1_seeds} and \ref{fig: case_2_seeds} show learning plots corresponding to three distinct seeds to show the stochasticity of the obtained results. The DE algorithm's parameters are delineated in table \ref{tab: de_param}. The code repository for both the test cases presented in this paper can be found at: \url{https://github.com/atishdixit16/ada_multigrid_ppo}.
\begin{table}[ht] \caption{PPO algorithm parameters} \centering \begin{tabular}{l l l l} \hline & case 1 & case 2\\ \hline number of CPUs, $N$ & 64 & 64 \\ number of steps, $T$ & 40 & 40 \\ mini-batch size, $M$ & 16 & 16 \\ epochs, $K$ & 20 & 20 \\ discount rate, $\gamma$ & 0.99 & 0.99 \\ clip range, $\epsilon$ & 0.1 & 0.15 \\ policy network MLP layers & [93,150,100,80,62] & [35,70,70,50,21] \\ policy network activation functions & tanh & tanh\\ policy network optimizers & Adam & Adam \\ learning rate & 3e-6 & 1e-4\\ \hline \end{tabular} \label{tab: ppo_param} \end{table} \begin{table}[ht] \caption{DE algorithm parameters} \centering \begin{tabular}{l l l l} \hline & case 1 & case 2\\ \hline number of CPUs & 64 & 64 \\ number of iterations & 1024 & 1024 \\ population size & 310 & 105 \\ recombination factor & 0.9 & 0.9 \\ mutation factor & (0.5,1) & (0.5,1) \\ \hline \end{tabular} \label{tab: de_param} \end{table} \begin{figure*} \caption{learning plots for three distinct seed values for test case 1} \label{fig: case_1_seeds} \end{figure*} \begin{figure*} \caption{learning plots for three distinct seed values for test case 2} \label{fig: case_2_seeds} \end{figure*} {\footnotesize } \end{document}
\begin{document} \title[Stacky Bogomolov-Miyaoka-Yau inequality]{On the Bogomolov-Miyaoka-Yau inequality\\ for stacky surfaces} \author{Jiun-Cheng Chen} \address{Department of Mathematics\\ Third General Building, National Tsing Hua University\\ No. 101 Sec 2 Kuang Fu Road \\Hsinchu, Taiwan 30043, Taiwan} \email{[email protected], [email protected]} \author{Hsian-Hua Tseng} \address{Department of Mathematics\\ Ohio State University\\ 100 Math Tower, 231 West 18th Ave.\\ Columbus\\ OH 43210\\ USA} \email{[email protected]} \date{\today} \begin{abstract} We discuss a generalization of the Bogomolov-Miyaoka-Yau inequality to Deligne-Mumford surfaces of general type. \end{abstract} \maketitle \section{Introduction} We work over $\mathbb{C}$. For a smooth complex projective surface $S$ of general type, the Bogomolov-Miyaoka-Yau inequality for $S$ reads (see \cite{mi77}) \begin{equation}\label{BMY_sm_surface} 3c_2(T_S) \geq c_1(T_S)^2. \end{equation} Together with Noether's inequality, this puts constraints on the topology of surfaces of general type. Generalizations of (\ref{BMY_sm_surface}) to singular surfaces and surface pairs have been found, see for example \cite{m}, \cite{l1, l2}. In this paper we discuss a generalization of (\ref{BMY_sm_surface}) to Deligne-Mumford stacks. Let $\mathcal{X}$ be a smooth proper Deligne-Mumford $\mathbb{C}$-stack of dimension $2$. Let $\pi:\mathcal{X} \to X$ be the natural map to the coarse moduli space. We assume that $X$ is a projective variety. Since $\mathcal{X}$ is assumed to be smooth, it has a tangent bundle $T_{\mathcal{X}}$. A good theory of Chern classes is available for Deligne-Mumford stacks, see for example \cite{v}, \cite{k}. \begin{Main} Let $\mathcal{X}$ be as above.
Assume that the canonical bundle $K_{\mathcal{X}}:=\wedge^2 T_{\mathcal{X}}^\vee$ is numerically effective. Then \begin{equation}\label{BMY_stack} 3 c_2(T_{\mathcal{X}}) \geq c_1(T_{\mathcal{X}})^2. \end{equation} \end{Main} Certainly (\ref{BMY_stack}) takes the same shape as (\ref{BMY_sm_surface}). A proof of (\ref{BMY_stack}), along the lines of Miyaoka's original proof of (\ref{BMY_sm_surface}) in \cite{mi77}, is given in Section \ref{pf_BMY}. Section \ref{sec:examples} contains examples of (\ref{BMY_stack}). In Section \ref{codim1} we consider (\ref{BMY_stack}) for a class of stacks $\mathcal{X}$ with stack structures in codimension $1$, recovering \cite[Corollary 0.2]{l1}. In Section \ref{codim2} we consider (\ref{BMY_stack}) for Gorenstein stacks $\mathcal{X}$ with isolated stack points, recovering \cite[Corollary 1.3]{m}. Generalizations of the Bogomolov-Miyaoka-Yau inequality to varieties with quotient singularities (i.e.\ orbifolds) have certainly been studied before by many authors using various approaches. References to these can be found in e.g.\ \cite{l1, l2}. In this paper we work in the context of Deligne-Mumford stacks. This viewpoint has the advantage that (\ref{BMY_stack}) can be proven by following Miyaoka's original arguments in \cite{mi77}. Also, as discussed in Section \ref{sec:examples}, (\ref{BMY_stack}) specializes to some generalizations of the original (\ref{BMY_sm_surface}) by straightforward and elementary means. \subsection*{Acknowledgment} J.-C. C. is a Golden-Jade Fellow of the Kenda Foundation, Taiwan. He is supported in part by the National Science Council and the National Center for Theoretical Sciences, Taiwan. H.-H. T. is supported in part by NSF grant DMS-1047777 and a Simons Foundation Collaboration Grant. \section{Proof of (\ref{BMY_stack})}\label{pf_BMY} In this Section we give a proof of (\ref{BMY_stack}). Our proof is adapted from Miyaoka's original proof in \cite{mi77}.
Let ${\mathfrak{m}}X$ be a smooth proper Deligne-Mumford stack of dimension $2$. If $\mathcal{X}$ has non-trivial stack structures at generic points, then $\mathcal{X}$ is an \'etale gerbe over a stack with trivial generic stack structure, see for example \cite[Proposition 4.6]{bn}. More precisely, there is a finite group $G$, a stack $\mathcal{X}'$ with trivial generic stabilizers, and a morphism $f: \mathcal{X}\to \mathcal{X}'$ realizing $\mathcal{X}$ as a $G$-gerbe over $\mathcal{X}'$. Since $T_\mathcal{X}=f^*T_{\mathcal{X}'}$, we see that (\ref{BMY_stack}) for $\mathcal{X}$ is equivalent to (\ref{BMY_stack}) for $\mathcal{X}'$. Therefore it suffices to consider only those $\mathcal{X}$ with stack structures in codimension ${{\mathfrak{m}}athfrak{g}}eq 1$. For the rest of this section we assume this. Let ${\mathfrak{m}}F$ be a locally free sheaf of rank $2$ on $\mathcal{X}$. Let ${\mathfrak{m}}V:={\mathfrak{m}}athbb{P}({\mathfrak{m}}F)$ be the projectivization, with natural projection $p: {\mathfrak{m}}V\to \mathcal{X}$. Let ${\mathfrak{m}}H$ be the divisor associated to the tautological sheaf on ${\mathfrak{m}}V$. \begin{lemma}\label{fundamentallemma} Assume that ${\mathfrak{m}}W \subset {\mathfrak{m}}V$ is linearly equivalent to ${\mathfrak{m}}H- p^* {\mathfrak{m}}D$, where ${\mathfrak{m}}D \subset {\mathfrak{m}}X$ is a divisor on ${\mathfrak{m}}X$. Then we have \begin{equation} {\mathfrak{m}}D \cdot det {\mathfrak{m}}F \leq c_2({\mathfrak{m}}F) + {\mathfrak{m}}D ^2. \end{equation} \end{lemma} \begin{proof} We closely follow Miyaoka's original proof \cite{mi77}. Let $i: {\mathfrak{m}}W \subset {\mathfrak{m}}V$ be the inclusion morphism. Note that the composition $p \circ i: {\mathfrak{m}}W \to {\mathfrak{m}}X$ is birational by our assumption on the linear equivalence class of ${\mathfrak{m}}W$. 
Since resolutions can be chosen such that they are compatible with \'etale base change, there is a sequence of blow-ups \begin{equation} {\mathfrak{m}}u: {\mathfrak{m}}V_s \xrightarrow{{\mathfrak{m}}u_s} {\mathfrak{m}}V_{s-1} {\operatorname{rig}}htarrow \cdots {\operatorname{rig}}htarrow {\mathfrak{m}}V_1 \xrightarrow{{\mathfrak{m}}u_1} {\mathfrak{m}}V_0={\mathfrak{m}}V \end{equation} such that the proper transform $ {\mathfrak{m}}W'$ of ${\mathfrak{m}}W$ is a smooth Deligne-Mumford stack in ${\mathfrak{m}}V_s$. Let $i': {\mathfrak{m}}W'\subset {\mathfrak{m}}V_s$ and $\rho: {\mathfrak{m}}W'\to \mathcal{X}$ be the natural maps. Let ${\mathfrak{m}}E_1$, ${\mathfrak{m}}E_2$, $\cdots$, ${\mathfrak{m}}E_s$ be the exceptional divisors on ${\mathfrak{m}}V_s$. The divisor ${\mathfrak{m}}W'$ is linearly equivalent to ${\mathfrak{m}}u ^* ({\mathfrak{m}}H- p^*{\mathfrak{m}}D) -\sum a_i {\mathfrak{m}}E_i$. It can be seen\footnote{The argument is similar to that of \cite[Lemma 7]{mi77} and is omitted.} that the canonical bundle $K_{{\mathfrak{m}}W'}$ satisfies $K_{{\mathfrak{m}}W'}= \rho^*K_{{\mathfrak{m}}X}+ \sum {\mathfrak{m}}C_i$ where $C_i$ is a curve and $\rho( {\mathfrak{m}}C_i)=point$. By the Hodge index theorem (for a stacky version see \cite[Theorem 3.1.3]{lie}), it follows that $(K_{{\mathfrak{m}}W'}- \rho ^*K_{{\mathfrak{m}}X} + \sum c_i i'^*{\mathfrak{m}}E_i)^2 \leq 0$ for any $c_i \in {{\mathfrak{m}}athbb{R}}$. Write $K_{{\mathfrak{m}}V_{s}}= {\mathfrak{m}}u^*(-2{\mathfrak{m}}H+ p^* K_{{\mathfrak{m}}X}+ p^*(det {\mathfrak{m}}F))+ \sum b_i {\mathfrak{m}}E_i$. The adjunction formula implies that $$K_{{\mathfrak{m}}W'}= i'^*[{\mathfrak{m}}u^*(-{\mathfrak{m}}H)+ (p \circ {\mathfrak{m}}u)^*(K_{{\mathfrak{m}}X}+det {\mathfrak{m}}F -{\mathfrak{m}}D) + \sum (b_i-a_i) {\mathfrak{m}}E_i].$$ Thus $i'^*{\mathfrak{m}}u^*(-{\mathfrak{m}}H+ p^*(det {\mathfrak{m}}F -{\mathfrak{m}}D))^2 \leq 0$. 
Set $k:= i'^* {\mathfrak{m}}u^*(-{\mathfrak{m}}H+ p^*(det {\mathfrak{m}}F -{\mathfrak{m}}D))^2$. We can also compute this self-intersection number $k$ in another way: \begin{equation*} \begin{split} k&= {\mathfrak{m}}u^*(- {\mathfrak{m}}H+ p^*(det {\mathfrak{m}}F)-p^* {\mathfrak{m}}D)^2 ( {\mathfrak{m}}u^* {\mathfrak{m}}H- ( p \circ {\mathfrak{m}}u )^* {\mathfrak{m}}D -\sum a_i {\mathfrak{m}}E_i)\\ &= {\mathfrak{m}}u^*(- {\mathfrak{m}}H+ p^*(det {\mathfrak{m}}F)-p^* {\mathfrak{m}}D))^2 ( {\mathfrak{m}}u^* {\mathfrak{m}}H- ( p \circ {\mathfrak{m}}u )^* {\mathfrak{m}}D) \quad \quad \quad (\text{since ${\mathfrak{m}}E_i$ is exceptional})\\ &={\mathfrak{m}}H^3-{\mathfrak{m}}H^2 \cdot p^*({\mathfrak{m}}D -2 det {\mathfrak{m}}F)+ {\mathfrak{m}}H \cdot (p^*(det {\mathfrak{m}}F)^2-(p^* {\mathfrak{m}}D)^2). \end{split} \end{equation*} Using the standard relations for the intersection numbers on the projectivization of a rank $2$ vector bundle, we calculate that \begin{equation*} k= c_1^2(det({\mathfrak{m}}F))-c_2({\mathfrak{m}}F) -(det {\mathfrak{m}}F)^2+ det {\mathfrak{m}}F \cdot {\mathfrak{m}}D -{\mathfrak{m}}D^2= -c_2({\mathfrak{m}}F) +det {\mathfrak{m}}F \cdot {\mathfrak{m}}D -{\mathfrak{m}}D^2. \end{equation*} The result follows. \end{proof} Let ${\mathfrak{m}}O_{{\mathfrak{m}}X}( {\mathfrak{m}}D)$ be a subsheaf of $\Omega^1_{{\mathfrak{m}}X}$. One key observation used in Miyaoka's original proof is that the Iitaka dimension of ${\mathfrak{m}}O_{{\mathfrak{m}}X}( {\mathfrak{m}}D)$ is at most $1$. \begin{Thm}\label{iitakadim} If ${\mathfrak{m}}O_{{\mathfrak{m}}X}( {\mathfrak{m}}D)$ is a subsheaf of $\Omega^1_{{\mathfrak{m}}X}$ of a projective Deligne-Mumford stack ${\mathfrak{m}}X$, then $h^0({\mathfrak{m}}X, {\mathfrak{m}}O_{{\mathfrak{m}}X}(n {\mathfrak{m}}D)) \leq c n$ for some positive constant $c$ and $n>>0$. \end{Thm} The proof of Theorem \ref{iitakadim} is very similar to that of \cite[Theorem 2'']{mi77}. 
Two main ingredients are needed in the proof of \cite[Theorem 2'']{mi77}: (1) the Riemann-Roch formula, and (2) a lemma due to de Franchis. The de Franchis lemma states that any global holomorphic differential form on a K\"{a}hler manifold or a surface is $d$-closed \cite[Lemma 9]{mi77}. This lemma follows essentially from Stokes' theorem. The argument still works for smooth K\"{a}hler Deligne-Mumford stacks (K\"{a}hler orbifolds) or smooth surface Deligne-Mumford stacks. The Riemann-Roch theorem for stacks is proved in \cite{toen_rr}. One can prove the following result using Theorem \ref{iitakadim}. \begin{proposition}\label{keyprop} Let $\mathcal{F} \subset \Omega^1_{\mathcal{X}}$ be a locally free sheaf of rank 2 and assume that $\det(\mathcal{F})^{\otimes n}$ is generated by global sections for some $n>0$. If $\mathcal{F} \otimes \mathcal{O}_{\mathcal{X}}(-\mathcal{D})$ has a non-trivial section, then \begin{equation*} \mathcal{D} \cdot \det(\mathcal{F}) \leq \max \{ c_2(\mathcal{F}),0\}. \end{equation*} \end{proposition} \begin{proof} Consider $p: \mathcal{V}=\mathbb{P}(\mathcal{F}) \to \mathcal{X}$. The canonical isomorphism gives us \[ H^0(\mathcal{X}, \mathcal{F} \otimes \mathcal{O}_{\mathcal{X}}(-\mathcal{D}))=H^0(\mathcal{V}, \mathcal{O}_{\mathcal{V}}(\mathcal{H} -p^* \mathcal{D})). \] If $\mathcal{F} \otimes \mathcal{O}_{\mathcal{X}}(-\mathcal{D})$ has a non-trivial section, then $|\mathcal{H} -p^* \mathcal{D}|$ is non-empty. Pick $\mathcal{W} \in |\mathcal{H} -p^* \mathcal{D}|$. Decompose $\mathcal{W}$ as $\mathcal{W}= \mathcal{W}_0+ p^* \mathcal{D}'$, where $\mathcal{W}_0$ is effective and irreducible and linearly equivalent to $\mathcal{H}- p^*(\mathcal{D}+\mathcal{D}')$, and $\mathcal{D}'$ is effective.
Note that $(det {\mathfrak{m}}F)^{\otimes n}$ is generated by global sections, so the intersection number ${\mathfrak{m}}D' \cdot det ({\mathfrak{m}}F) {{\mathfrak{m}}athfrak{g}}eq 0$. It follows that ${\mathfrak{m}}D \cdot det ({\mathfrak{m}}F) \leq ({\mathfrak{m}}D + {\mathfrak{m}}D') \cdot det ({\mathfrak{m}}F)$ and it suffices to prove $({\mathfrak{m}}D+{\mathfrak{m}}D') \cdot det ({\mathfrak{m}}F) \leq max \{c_2({\mathfrak{m}}F),0\}$. Set ${\mathfrak{m}}D''= {\mathfrak{m}}D+{\mathfrak{m}}D'$ to simplify notation. By Lemma \ref{fundamentallemma}, ${\mathfrak{m}}D'' \cdot det({\mathfrak{m}}F) \leq c_2({\mathfrak{m}}F) + {\mathfrak{m}}D'' \cdot {\mathfrak{m}}D''$. Observe that ${\mathfrak{m}}O_{{\mathfrak{m}}X}({\mathfrak{m}}D'')$ is a subsheaf of $\Omega^1_{{\mathfrak{m}}X}$. Indeed, the effectiveness of ${\mathfrak{m}}W_{0}$ ensures the existence of a non-trivial section of ${\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D'')$, i.e. an injection $ {\mathfrak{m}}O_{{\mathfrak{m}}X} \hookrightarrow {\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D'')$. Twisting by ${\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D'')$, embeds ${\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D'')$ into ${\mathfrak{m}}F \subset \Omega^{1}_{{\mathfrak{m}}X}$. By Theorem \ref{iitakadim}, ${\mathfrak{m}}D''$ has Iitaka dimension at most $1$. It follows that ${\mathfrak{m}}D'' \cdot det({\mathfrak{m}}F) \leq 0$ or ${\mathfrak{m}}D'' \cdot {\mathfrak{m}}D'' \leq 0$. \footnote{Arguing as in \cite[Lemma 10]{mi77}.} This completes the proof. \end{proof} Assuming $c_2({\mathfrak{m}}F)$ is positive for the time being, we can obtain an upper bound on $c_2$ provided the sheaf ${\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D)$ has no sections. This can then be used to derive a contradiction. 
To be more precise, one needs a modified version of Proposition \ref{keyprop}, in which the condition on the sheaf ${\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D)$ having a section is replaced by the condition that some symmetric power $S^m {\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D)$ having a section. \begin{Thm}\label{boundonc2} Let $ {\mathfrak{m}}F \subset \Omega ^1_{{\mathfrak{m}}X}$ be a locally free sheaf of rank 2 and assume that $det({\mathfrak{m}}F) ^{\otimes n}$ is generated by global sections for some $n>0$. If $S^m {\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D)$ has a non-trivial section, then \begin{equation*} {\mathfrak{m}}D \cdot det({\mathfrak{m}}F) \leq max \{ mc_2({\mathfrak{m}}F),0\}. \end{equation*} \end{Thm} The proof of Theorem \ref{boundonc2} follows from Proposition \ref{keyprop} and the following easy lemma (which is analogous to \cite[Lemma 11]{mi77}). \begin{lemma}\label{basechange} Let $p: {\mathfrak{m}}V= {{\mathfrak{m}}athbb{P}}({\mathfrak{m}}F) \to {\mathfrak{m}}X$ be the projective bundle of a locally free sheaf of rank $2$. Let ${\mathfrak{m}}W \in | m{\mathfrak{m}}H- p^* {\mathfrak{m}}D|$. Then there is a surjective morphism $\beta: {\mathfrak{m}}X' \to {\mathfrak{m}}X$ such that $\beta ^*{\mathfrak{m}}W$ is decomposed to ${\mathfrak{m}}W_1+ \cdots {\mathfrak{m}}W_m$ where ${\mathfrak{m}}W_i$ is an effective divisor linear equivalent to ${\mathfrak{m}}H' -p^* {\mathfrak{m}}D_i$. \end{lemma} \begin{proof}[Proof of Theorem \ref{boundonc2}] The following argument is taken from \cite[Theorem 3]{mi77}. Let $f$ be a global section of $S^m {\mathfrak{m}}F \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(- {\mathfrak{m}}D)$. 
Lemma \ref{basechange} implies that, after passing to a suitable cover $\beta:\mathcal{Y} \to \mathcal{X}$, the pullback $\beta^* f$ can be written as $f_1 f_2 \cdots f_m \in H^0(\mathcal{Y}, S^m \beta^{*} \mathcal{F} \otimes \mathcal{O}_{\mathcal{Y}}(-\beta^* \mathcal{D}))$, where $f_i \in H^0(\mathcal{Y}, \beta^{*} \mathcal{F} \otimes \mathcal{O}_{\mathcal{Y}}(-\beta^*\mathcal{D}_{i}))$ and $(\det \beta^* \mathcal{F})^{\otimes m} \cong (\beta^* \det(\mathcal{F})^{\otimes m})$ is generated by global sections. From Proposition \ref{keyprop}, it follows that $\beta^* \mathcal{D}_i \cdot \det(\beta^*\mathcal{F}) \leq \max \{ c_2(\beta^* \mathcal{F}),0\}$. Summing over all $i$'s, we have $\beta^*\mathcal{D} \cdot \det(\beta^* \mathcal{F}) \leq \max \{m c_2(\beta^*\mathcal{F}),0\}.$ Let $d$ be the mapping degree of $\beta$. Clearly, $\beta^* \mathcal{D} \cdot \det(\beta^*\mathcal{F})=d \mathcal{D} \cdot \det(\mathcal{F})$ and $c_2(\beta^* \mathcal{F})=d \beta^* c_2(\mathcal{F})$. \end{proof} We now come to (\ref{BMY_stack}). \begin{Thm}\label{mainthm} Let $\mathcal{X}$ be a non-singular Deligne-Mumford stack with projective coarse space $X$ of general type and $c_1(\mathcal{X})$ nef. Then $c_1^2(\mathcal{X}) \leq 3 c_2(\mathcal{X})$ holds. \end{Thm} \begin{proof} As in \cite{mi77}, we consider two cases: (1) $c_1^2(\mathcal{X}) \leq 2 c_2(\mathcal{X})$ and (2) $c_1^2(\mathcal{X}) >2 c_2(\mathcal{X})$. The first case is obvious. For the second case, set $\alpha := \frac{c_2(\mathcal{X})}{c_1^2(\mathcal{X})}$. Note that $\alpha <1/2$. Pick $\delta >0$ sufficiently small and rational.
By Theorem \ref{boundonc2} applied to ${\mathfrak{m}}D=m(\alpha+\delta)K_\mathcal{X}$, ${\mathfrak{m}}F=\Omega_\mathcal{X}^1$, we can find a positive integer $m$ such that $m (\alpha + \delta) \in {\mathfrak{m}}athbb{Z}$, and $$ h^0( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X}^1 \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(-m(\alpha + \delta) K_{{\mathfrak{m}}X}))=0.$$ By Serre duality for smooth projective Deligne-Mumford stacks \cite[Theorem 2.22]{ni08}, we have $$h^2( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X}^1 \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(-m(\alpha + \delta) K_{{\mathfrak{m}}X}))=h^0( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X}^1 \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(-m(1-\alpha - \delta) K_{{\mathfrak{m}}X})\otimes K_{{\mathfrak{m}}X}).$$ As $\alpha < 1/2$ and $\delta$ is small, we have $1- \alpha - \delta > \alpha$. We apply Theorem \ref{boundonc2} to ${\mathfrak{m}}D=m(2-\alpha-\delta)K_\mathcal{X}$, ${\mathfrak{m}}F=\Omega_\mathcal{X}^1$, to get $$h^2( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X}^1 \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(-m(\alpha + \delta) K_{{\mathfrak{m}}X}))=0.$$ Hence $$\chi ( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X} \otimes {\mathfrak{m}}O (-m (\alpha+ \delta) K_{{\mathfrak{m}}X}))= -h^1( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X}^1 \otimes {\mathfrak{m}}O_{{\mathfrak{m}}X}(-m(\alpha + \delta) K_{{\mathfrak{m}}X})) \leq 0.$$ Note that to compute the cohomology groups of a (subsheaf of) symmetric power of a vector bundle, one can work on the the projectivized vector bundle and computing the cohomology groups of relevant line bundles. 
Thus $$0{{\mathfrak{m}}athfrak{g}}eq \chi ( {\mathfrak{m}}X, S^m \Omega_{{\mathfrak{m}}X} \otimes {\mathfrak{m}}O (-m (\alpha+ \delta) K_{{\mathfrak{m}}X}))= \chi ( {\mathfrak{m}}V, {\mathfrak{m}}O_{{\mathfrak{m}}V} (-m ({\mathfrak{m}}H-(\alpha+ \delta) {\mathfrak{p}}i^* K_{{\mathfrak{m}}X})).$$ By Riemann-Roch for stacks \cite{toen_rr}, we have $\chi ( {\mathfrak{m}}V, {\mathfrak{m}}O_{{\mathfrak{m}}V} (-m ({\mathfrak{m}}H-(\alpha+ \delta) {\mathfrak{p}}i^* K_{{\mathfrak{m}}X}))$ grows like $\frac{1}{6} ({\mathfrak{m}}H-(\alpha+ \delta) {\mathfrak{p}}i^* K_{{\mathfrak{m}}X})^3 m^3$ as $m\to \infty$. It implies that $({\mathfrak{m}}H-(\alpha+ \delta) {\mathfrak{p}}i^* K_{{\mathfrak{m}}X})^3 \leq0$. Taking $\delta $ to $0$, we obtain \begin{equation*} \begin{split} 0 {{\mathfrak{m}}athfrak{g}}eq ({\mathfrak{m}}H-\alpha {\mathfrak{p}}i^* K_{{\mathfrak{m}}X})^3 =&c_1^2({\mathfrak{m}}X)-c_2({\mathfrak{m}}X) -3 \alpha c_1^2({\mathfrak{m}}X)+ 3 \alpha ^2 c_1^2({\mathfrak{m}}X) \\ =&(1-\alpha -3 \alpha + 3 \alpha ^2) c_1 ^2({\mathfrak{m}}X)\\ =&(1-\alpha)(1-3\alpha)c_1^2({\mathfrak{m}}X). \end{split} \end{equation*} Since $\alpha <1/2$ and $c_1 ^2({\mathfrak{m}}X)$ is non-negative, we get $1- 3 \alpha \leq 0$ as desired. \end{proof} \section{Examples of (\ref{BMY_stack})}\label{sec:examples} \subsection{General discussion} According to the main result of \cite{gs}, the map $\mathcal{X}\to X$ from the stack $\mathcal{X}$ to its coarse moduli space $X$ (which we assume to be a variety with quotient singularities) can be factored as $$\mathcal{X}\to \mathcal{X}_1\to \mathcal{X}_2\to X,$$ where \begin{enumerate} \item $\mathcal{X}_1$ has trivial generic stabilizers; \item $\mathcal{X}_2$ is the canonical stack associated to the variety $X$ (see e.g. 
\cite[Definition 4.4]{fmn}) and has stack structures in codimension at least $2$; \item $\mathcal{X}\to \mathcal{X}_1$ is a {\em gerbe}; \item $\mathcal{X}_1\to \mathcal{X}_2$ is a composition of {\em root constructions} along divisors, thus introducing codimension-$1$ stack structures to $\mathcal{X}_2$. \end{enumerate} Since $\mathcal{X}\to \mathcal{X}_1$ is a gerbe, the tangent bundle of $\mathcal{X}_1$ pulls back to the tangent bundle of $\mathcal{X}$. So the inequality (\ref{BMY_stack}) for $\mathcal{X}$ is equivalent to (\ref{BMY_stack}) for $\mathcal{X}_1$. Therefore when considering examples, we may restrict our attention to $\mathcal{X}$ whose stack structures are in codimension at least $1$. In the rest of this section we present two examples of (\ref{BMY_stack}): the example in Section \ref{codim1} is obtained by root constructions, and the examples in Section \ref{codim2} are canonical stacks associated to quotient varieties. In these examples we show that (\ref{BMY_stack}) coincides with previous results. \subsection{Codimension $1$ stack structure}\label{codim1} We consider (\ref{BMY_stack}) for an example of stack $\mathcal{X}$ with stack structures in codimension $1$. Let $X$ be a smooth complex projective surface and $D$ a simple normal crossing ${\mathfrak{m}}athbb{Q}$-divisor of the form $D=\sum_i (1-1/r_i)D_i$ with $r_i{{\mathfrak{m}}athfrak{g}}eq 2$ integers. Let $\mathcal{X}$ be the natural stack cover of the pair $(X, D)$. By construction the coarse moduli space of $\mathcal{X}$ is $X$. The natural map ${\mathfrak{p}}i: \mathcal{X}\to X$ is an isomorphism outside ${\mathfrak{p}}i^{-1}(\text{Supp}\,D)$, which is where $\mathcal{X}$ has non-trivial stack structures. The stack $\mathcal{X}$ can be constructed from $X$ by applying root constructions along components of $D$. Furthermore we have the following formula for the canonical bundle: \begin{equation}\label{K-formula} K_\mathcal{X}={\mathfrak{p}}i^*(K_X+D). 
\end{equation} We now examine (\ref{BMY_stack}) for this $\mathcal{X}$. By (\ref{K-formula}), $$c_1(T_\mathcal{X})^2=c_1(K_\mathcal{X})^2=(K_X+D)^2.$$ By Gauss-Bonnet theorem for Deligne-Mumford stacks \cite[Corollaire 3.44]{toen} we have $$c_2(T_\mathcal{X})=\chi(\mathcal{X}),$$ the Euler characteristic of $\mathcal{X}$ as defined in \cite[Definition 3.43]{toen} (note that the notation $\chi^{orb}$ is used in \cite{toen}). Put $$\mathcal{D}_i:={\mathfrak{p}}i^{-1}(D_i), \quad \mathcal{D}_i^\circ:=\mathcal{D}_i\smallsetminusus (\cup_{j\neq i}(\mathcal{D}_i\cap \mathcal{D}_j)).$$ Then we have \begin{equation*} \chi(\mathcal{X}\smallsetminusus {\mathfrak{p}}i^{-1}(\text{Supp}\,D))=\chi(\mathcal{X})-\sum_i \chi(\mathcal{D}_i^\circ)-\sum_{p\in \mathcal{D}_i\cap \mathcal{D}_j}\chi(p). \end{equation*} Similarly, put $D_i^\circ=D_i\smallsetminusus (\cup_{j\neq i} (D_i\cap D_j))$, we have \begin{equation*} \chi(X\smallsetminusus \text{Supp}\,D)=\chi(X)-\sum_i\chi(D_i^\circ)-\sum_{\bar{p}\in D_i\cap D_j} \chi(\bar{p}). \end{equation*} Since $\mathcal{X}\smallsetminusus{\mathfrak{p}}i^{-1}(\text{Supp}\,D)\simeq X\smallsetminusus \text{Supp}\,D$, we have $\chi(\mathcal{X}\smallsetminusus {\mathfrak{p}}i^{-1}(\text{Supp}\,D))=\chi(X\smallsetminusus \text{Supp}\,D)$. Equivalently, \begin{equation*} \chi(\mathcal{X})=\chi(X)-\sum_i\chi(D_i^\circ)-\sum_{\bar{p}\in D_i\cap D_j} \chi(\bar{p})+ \sum_i \chi(\mathcal{D}_i^\circ)+\sum_{p\in \mathcal{D}_i\cap \mathcal{D}_j}\chi(p). \end{equation*} Since the map $\mathcal{D}_i^\circ\to D_i^\circ$ is of degree $1/r_i$ and the map $\mathcal{D}_i\cap\mathcal{D}_j\to D_i\cap D_j$ is of degree $1/r_ir_j$, we have $$\chi(\mathcal{D}_i)=\frac{1}{r_i}\chi(D_i), \quad \chi(\mathcal{D}_i\cap \mathcal{D}_j)=\frac{1}{r_ir_j}\chi(D_i\cap D_j).$$ This implies that \begin{equation}\label{eulerstackX} \chi(\mathcal{X})=\chi(X)-\sum_i(1-1/r_i)\chi(D_i^\circ)+\sum_{\bar{p}\in D_i\cap D_j}(1/r_ir_j-1). 
\end{equation} By \cite[Theorem 8.7]{l2}, for $\bar{p}\in D_i\cap D_j$ the local orbifold Euler number of the pair $(X,D)$ at $\bar{p}$ is given by $e_{orb}(\bar{p}; X, D)=1/r_ir_j$. Together with (\ref{eulerstackX}) this implies that $\chi(\mathcal{X})$ coincides with the orbifold Euler number $e_{orb}(X,D)$ of the pair $(X,D)$, as defined in \cite{l2}. Thus if $K_\mathcal{X}$ is numerically effective, then (\ref{BMY_stack}) is equivalent to \cite[Theorem 0.1]{l2} applied to the pair $(X,D)$. \subsection{Codimension $2$ stack structure}\label{codim2} Let $\mathcal{X}$ be a smooth proper Deligne-Mumford ${\mathfrak{m}}athbb{C}$-stack of dimension $2$ with isolated stack structures. Let ${\mathfrak{p}}i: \mathcal{X}\to X$ be the natural map to the coarse moduli space $X$. Let $p_1, p_2,...,p_k\in \mathcal{X}$ be the stacky points. Suppose that $\mathcal{X}$ is Gorenstein, i. e. each stacky point $p_i$ has a neighborhood $p_i\in U_i\subset \mathcal{X}$ of the form $U_i\simeq [{\mathfrak{m}}athbb{C}^2/G_i]$ with $G_i\subset SU(2)$ a finite subgroup, identifying $p_i$ with $[0/G_i]\in [{\mathfrak{m}}athbb{C}^2/G_i]$. It is a standard fact that the coarse moduli space $X$ is a projective surface with canonical singularities. Suppose further that $K_\mathcal{X}$ is numerically effective. We consider (\ref{BMY_stack}) for such $\mathcal{X}$. By assumption we have $K_\mathcal{X}={\mathfrak{p}}i^*K_X$. Thus $$c_1(T_\mathcal{X})^2=c_1(K_\mathcal{X})^2=c_1(K_X)^2.$$ We now consider the term $c_2(T_\mathcal{X})$. The first step is to consider $\chi(\mathcal{O}_\mathcal{X})$ by using Riemann-Roch theorem for stacks \cite{toen, toen_rr}. We follow \cite[Appendix A]{t} for the presentation of the Riemann-Roch theorem. We have $$\chi(\mathcal{O}_\mathcal{X})=\int_{I\mathcal{X}}\widetilde{ch}(\mathcal{O}_\mathcal{X})\widetilde{Td}(T_\mathcal{X}).$$ Here $I\mathcal{X}$ is the inertia stack of $\mathcal{X}$. 
By our assumption on $\mathcal{X}$, we have the following description of $I\mathcal{X}$: $$I\mathcal{X}=\mathcal{X}\cup \bigcup_{i=1}^k (Ip_i \smallsetminus p_i).$$ Here the term $Ip_i\smallsetminus p_i$ is the inertia stack of $p_i\simeq BG_i$ with the main component removed, namely $$Ip_i\smallsetminus p_i\simeq \bigcup_{(g)\neq (1): \text{conjugacy class of } G_i} BC_{G_i}(g).$$ By the definition of the Chern character $\widetilde{ch}$, we have $\widetilde{ch}(\mathcal{O}_\mathcal{X})=1$ on every component of $I\mathcal{X}$. Hence \begin{equation}\label{RR-sum} \chi(\mathcal{O}_\mathcal{X})=\int_{I\mathcal{X}}\widetilde{Td}(T_\mathcal{X})=\int_\mathcal{X}\widetilde{Td}(T_\mathcal{X})|_\mathcal{X}+\sum_{i=1}^k\int_{Ip_i\smallsetminus p_i} \widetilde{Td}(T_\mathcal{X})|_{Ip_i\smallsetminus p_i}. \end{equation} Note that $\widetilde{Td}(T_\mathcal{X})|_\mathcal{X}=Td(T_\mathcal{X})$, and we only need its degree $2$ component. Hence \begin{equation}\label{RR-main-term} \int_\mathcal{X}\widetilde{Td}(T_\mathcal{X})|_\mathcal{X}=\frac{1}{12}\int_\mathcal{X} (c_2(T_\mathcal{X})+c_1(T_\mathcal{X})^2). \end{equation} The contribution coming from $Ip_i\smallsetminus p_i$ can also be evaluated. \begin{lemma}\label{RR-computation} Let $E_i$ be the exceptional divisor of the minimal resolution of $\mathbb{C}^2/G_i$. Then $$\int_{Ip_i\smallsetminus p_i} \widetilde{Td}(T_\mathcal{X})|_{Ip_i\smallsetminus p_i}=\frac{1}{12}(\chi(E_i)-\frac{1}{|G_i|}).$$ \end{lemma} An elementary proof of this Lemma is given in the Appendix. Next, we reinterpret the term $\chi(\mathcal{O}_\mathcal{X})$. By definition, $\chi(\mathcal{O}_\mathcal{X}):=\sum_{l\geq 0}(-1)^l \dim H^l(\mathcal{X}, \mathcal{O}_\mathcal{X})$. Since $\pi_*\mathcal{O}_\mathcal{X}=\mathcal{O}_X$ (see e.g.
\cite[Theorem 2.2.1]{av}), we have $H^l(\mathcal{X}, \mathcal{O}_\mathcal{X})=H^l(X, \mathcal{O}_X)$ and \begin{equation}\label{euler-equality} \chi(\mathcal{O}_\mathcal{X})=\chi(\mathcal{O}_X). \end{equation} Combining (\ref{RR-sum}), (\ref{RR-main-term}), (\ref{euler-equality}), and Lemma \ref{RR-computation}, we obtain the following expression of $c_2(T_\mathcal{X})$: \begin{equation}\label{2nd_chern} \int_\mathcal{X} c_2(T_\mathcal{X})=12\chi(\mathcal{O}_X)-\int_\mathcal{X} c_1(T_\mathcal{X})^2-\sum_{i=1}^k(\chi(E_i)-1/|G_i|). \end{equation} Using this, we see that in the present situation, (\ref{BMY_stack}) is equivalent to \begin{equation}\label{BMY_stack_codim2} 12\chi(\mathcal{O}_X)\geq \frac{4}{3}c_1(K_X)^2+\sum_{i=1}^k(\chi(E_i)-\frac{1}{|G_i|}). \end{equation} On the other hand, it is clear that (\ref{BMY_stack_codim2}) is a special case of \cite[Corollary 1.3]{m}. \appendix \section{Proof of Lemma \ref{RR-computation}} In this Appendix we prove Lemma \ref{RR-computation}. By our assumption on $\mathcal{X}$, for $g\in G_i$, the $g$-action on the tangent space $T_{p_i}\mathcal{X}$ has two eigenvalues $\xi_g$ and $\xi_g^{-1}$, where $\xi_g$ is a certain root of unity. By the definition of $\widetilde{Td}(T_\mathcal{X})$ we have \begin{equation}\label{twisted_sector_contribution} \int_{Ip_i\smallsetminus p_i} \widetilde{Td}(T_\mathcal{X})|_{Ip_i\smallsetminus p_i}=\sum_{(g)\neq (1): \text{conjugacy class of } G_i}\frac{1}{|C_{G_i}(g)|}\frac{1}{2-\xi_g-\xi_g^{-1}}. \end{equation} We now evaluate (\ref{twisted_sector_contribution}) using the $ADE$ classification of $\mathbb{C}^2/G_i$. \subsection{Type A} If $\mathbb{C}^2/G_i$ is of type $A_{n-1}$, then $G_i\simeq \mathbb{Z}_n$ and the action on $\mathbb{C}^2$ is given as follows.
If we identify $\mathbb{Z}_n$ with the group of $n$-th roots of $1$, then an element $\xi\in \mathbb{Z}_n$ acts on $\mathbb{C}^2$ via the matrix $$\left(\begin{array}{cc} \xi& 0 \\ 0& \xi^{-1} \end{array}\right).$$ It follows that (\ref{twisted_sector_contribution}) is given by \begin{equation}\label{cont_A} \frac{1}{n}\sum_{l=1}^{n-1}\frac{1}{2-\exp(2\pi\sqrt{-1}l/n)-\exp(2\pi\sqrt{-1}l/n)^{-1}}. \end{equation} By \cite[Lemma 3.3.2.1]{lie}, (\ref{cont_A}) is equal to $$\frac{n^2-1}{12n}=\frac{1}{12}(n-1/n).$$ Since the exceptional divisor of the minimal resolution of $\mathbb{C}^2/\mathbb{Z}_n$ is a chain of $(n-1)$ copies of $\mathbb{CP}^1$, its Euler characteristic is $n$. This proves the Lemma in the type A case. \subsection{Type D} If $\mathbb{C}^2/G_i$ is of type $D_{n+2}$ (here $n\geq 2$), then $G_i$ is isomorphic to the binary dihedral group $Dic_n$. The group $Dic_n$ is of order $4n$ and may be presented as follows: $$Dic_n=\left\langle a,x \mid a^{2n}=1,\ x^2=a^n,\ x^{-1}ax=a^{-1}\right\rangle.$$ The action of $Dic_n$ on $\mathbb{C}^2$ is given as follows: \begin{equation}\label{action_typeD} a\mapsto \left(\begin{array}{cc} \exp(\pi\sqrt{-1}/n)& 0\\ 0& \exp(-\pi\sqrt{-1}/n)\end{array}\right), \quad x\mapsto \left(\begin{array}{cc} 0& 1\\ -1& 0\end{array}\right). \end{equation} An elementary calculation shows that the conjugacy classes of $Dic_n$ and the orders of their centralizer subgroups are given as follows: \begin{equation}\label{conj_class} \begin{split} &\{1\}, \quad \{a^n\}\quad (\text{order of centralizer group}=4n)\\ &\{a^l, a^{-l}\}, 1\leq l\leq n-1,\quad (\text{order of centralizer group}=2n)\\ &\{xa,xa^3,xa^{5},...,xa^{2n-1}\}, \quad \{x, xa^2, xa^4,...,xa^{2n-2}\} \quad (\text{order of centralizer group}=4).
\end{split} \end{equation} Using (\ref{action_typeD}) and (\ref{conj_class}) it is easy to identify the contribution from each conjugacy class. It follows that (\ref{twisted_sector_contribution}) is given by \begin{equation} \frac{1}{2n}\sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}+\frac{1}{16n}+\frac{1}{8}+\frac{1}{8}. \end{equation} We need to evaluate the sum $\sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}$. Again by \cite[Lemma 3.3.2.1]{lie}, we have \begin{equation*} \begin{split} \frac{(2n)^2-1}{12} =&\sum_{k=1}^{2n-1}\frac{1}{2-\exp(2\pi\sqrt{-1}k/(2n))-\exp(2\pi\sqrt{-1}k/(2n))^{-1}}\\ =&\sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}+\frac{1}{4}\\ &+ \sum_{k=1}^{n-1}\frac{1}{2-\exp(2\pi\sqrt{-1}(n+k)/(2n))-\exp(2\pi\sqrt{-1}(n+k)/(2n))^{-1}}. \end{split} \end{equation*} Note that \begin{equation*} \begin{split} &2-\exp(2\pi\sqrt{-1}(n+k)/(2n))-\exp(2\pi\sqrt{-1}(n+k)/(2n))^{-1}\\ =&2+\exp(\pi\sqrt{-1}k/n)+\exp(\pi\sqrt{-1}k/n)^{-1}\\ =&2+2\cos(\pi k/n)=4\cos^2(\pi k/(2n))=4\sin^2(\pi(k+n)/(2n));\\ &2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}\\ =&2-2\cos(\pi k/n)=4\sin^2(\pi k/(2n)).
\end{split} \end{equation*} Since $\sin(\pi (k+n)/(2n))=-\sin(\pi(k-n)/(2n))$, we see that \begin{equation*} \begin{split} &\sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}\\ &= \sum_{k=1}^{n-1}\frac{1}{2-\exp(2\pi\sqrt{-1}(n+k)/(2n))-\exp(2\pi\sqrt{-1}(n+k)/(2n))^{-1}}, \end{split} \end{equation*} from which it follows that $$2 \sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}+\frac{1}{4}=\frac{(2n)^2-1}{12}.$$ This shows that $$\sum_{k=1}^{n-1}\frac{1}{2-\exp(\pi\sqrt{-1}k/n)-\exp(\pi\sqrt{-1}k/n)^{-1}}=\frac{n^2-1}{6}$$ and (\ref{twisted_sector_contribution}) is given by $$\frac{n^2-1}{12n}+\frac{1}{16n}+\frac{1}{8}+\frac{1}{8}=\frac{1}{12}(n+3-\frac{1}{4n}).$$ Since the exceptional divisor of the minimal resolution of $\mathbb{C}^2/Dic_n$ is a tree of $\mathbb{CP}^1$ whose dual graph is the Dynkin diagram $D_{n+2}$, its Euler characteristic is $n+3$ and the Lemma is proved in this case. \subsection{Type E} If $\mathbb{C}^2/G_i$ is of type $E$, then there are three possibilities: $E_6, E_7, E_8$. The group $G_i$ is isomorphic to the binary tetrahedral group (for $E_6$), the binary octahedral group (for $E_7$), or the binary icosahedral group (for $E_8$). In each case the group and its action on $\mathbb{C}^2$ can be explicitly described, and the Lemma can be proved by computing (\ref{twisted_sector_contribution}) using this information. We work out the details for $E_6$ and leave the other two cases to the reader. In the $E_6$ case, the group $G_i$ is isomorphic to the binary tetrahedral group $2T$.
This group is of order $24$ and its elements can be identified with the following quaternion numbers: $$\frac{1}{2}(\pm 1\pm i\pm j\pm k), \quad \pm i, \quad \pm j, \quad \pm k, \quad \pm 1.$$ The group $2T$ has $7$ conjugacy classes:\\\\ \begin{tabular}[b]{|c|c|c|c|c|} \hline Conjugacy Class & $(1)$ & $(-1)$ & $(i)$ & $(\frac{1}{2}(1+i+j+k))$\\ \hline Size & $1$ & $1$ & $6$ & $4$\\ \hline Conjugacy Class & $(\frac{1}{2}(1+i+j-k))$ & $(\frac{1}{2}(-1+i+j+k))$ & $(\frac{1}{2}(-1+i+j-k))$ & \\ \hline Size & $4$ & $4$ & $4$ & \\ \hline \end{tabular} The action of $2T$ on $\mathbb{C}^2$ can be described using the following identification $$x+yi+zj+wk\mapsto \left(\begin{array}{cc} x+yi& z+wi\\ -z+wi& x-yi\end{array}\right).$$ Now it is straightforward to see that (\ref{twisted_sector_contribution}) is given by $$\frac{1}{24}\frac{1}{2-(-2)}+\frac{1}{4}\frac{1}{2-0}+\frac{1}{6}\frac{1}{2-1}+\frac{1}{6}\frac{1}{2-1}+\frac{1}{6}\frac{1}{2-(-1)}+\frac{1}{6}\frac{1}{2-(-1)}=\frac{167}{288}=\frac{1}{12}(7-\frac{1}{24}).$$ Since $7$ is the Euler characteristic of the exceptional divisor of the minimal resolution of $\mathbb{C}^2/2T$, the result follows. \end{document}
\begin{document} \title[On the Forelli--Rudin projection theorem]{On the Forelli--Rudin projection theorem} \author{Marijan Markovi\'{c}} \begin{abstract} Motivated by the Forelli--Rudin projection theorem we give in this paper a criterion for boundedness of an integral operator on weighted Lebesgue spaces in the interval $(0,1)$. We also calculate the precise norm of this integral operator. This is the content of the first part of the paper. In the second part, as applications, we give some results concerning the Bergman projection and the Berezin transform. We derive a generalization of the Dostani\'{c} result on the norm of the Berezin transform acting on Lebesgue spaces over the unit ball in $\mathbf{C}^n$. \end{abstract} \subjclass[2010]{Primary 45P05, Secondary 47B38} \keywords{the Bergman projection, the Berezin transform, Bergman spaces} \address{ Faculty of Natural Sciences and Mathematics\endgraf University of Montenegro\endgraf Cetinjski put b.b.\endgraf 81000 Podgorica\endgraf Montenegro} \email{[email protected]} \maketitle \section{The main result and its proof} \subsection{Gauss hypergeometric functions} We firstly recall some basic facts concerning the Gauss hypergeometric functions. For $a,\, b,\, c\in\mathbf{C}$ the Gauss hypergeometric function is given by the series \begin{equation*} {_2}{F}_1(a,b;c;z) = \sum_{k=0}^\infty\frac{(a)_k(b)_k}{(c)_k}\frac {z^k}{k!} \end{equation*} for all $c$ different from zero and negative integers. Here, for any complex number $q$ the shifted factorial (the Pochhammer symbol) is \begin{equation*} (q)_\beta= \left\{ \begin{array}{ll} q(q + 1)\cdots (q + \beta - 1), & \hbox{if $\beta\ge1$,} \\ 1, & \hbox{if $\beta=0$}, \end{array} \right. \end{equation*} where $\beta$ is a non--negative integer. The above series converges at least for $|z|<1$, and for $|z|=1$ if $\Re ({c-a-b})>0$. 
We will need the following three identities: i) For $\Re c>\Re d >0$ there holds \begin{equation}\label{EQ.INT.TRANS.1} {_2}F_1 (a, b; c; z) = \frac {\Gamma(c)}{\Gamma(d) \Gamma(c-d)} \int_0^1 t^{d-1} (1-t)^{c-d-1}\, {_2}F_1 (a, b; d; tz)\, dt, \end{equation} where $z$ is different from $1$ and $\left|\arg(1-z)\right|<\pi$. This is known as the Euler formula. ii) The Euler transform says that \begin{equation}\label{EQ.SIMPLE.TRANSFORM} {_2}F_1 (a, b; c; z) = (1-z)^{c-a-b}\, {_2}F_1 (c-a, c- b; c; z). \end{equation} iii) Gauss proved that, if $\Re (c-a-b)>0$, then \begin{equation}\label{EQ.GAUSS} {_2}{F}_1(a,b;c;1) = \frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a)\Gamma(c-b)}. \end{equation} For all these facts we refer to the second chapter in~\cite{AAR.BOOK.SPECIAL}, where the reader may also find all properties concerning the Gauss hypergeometric functions we need in the paper. \subsection{The main result} For $\mu>0$ we denote by $L^p_\mu(0,1)$ the space of all measurable functions $\varphi(t)$ in $(0,1)$ which satisfy the condition \begin{equation*} \|\varphi \|_{p,\mu}^p = \mu \int_0^1 |\varphi (t)|^p t^{\mu-1} dt<\infty. \end{equation*} We denote the normalized weighted measure $\mu t^{\mu - 1} dt$ by $d\mu (t)$. For a parameter $\sigma>-1$ we will consider the operator $F_\sigma$ given in the following way \begin{equation*} {F}_\sigma \varphi (s)= \mu \int_0^1 (1-t)^\sigma \, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, \varphi (t)\, t^{\mu-1} dt, \end{equation*} where we have denoted \begin{equation*} \lambda = (\mu + \sigma + 1)/2. \end{equation*} The operator $F_\sigma$ may be viewed as an integral operator on $L^p_\mu(0,1)$ with the kernel \begin{equation*} K_\sigma (s,t) =(1-t)^\sigma\, {_2}F_1 (\lambda, \lambda;\mu;s\, t). \end{equation*} It happens that the operator $F_\sigma$ is bounded on $L^p_\mu (0,1)$ if and only if $\sigma > 1/p -1$.
This is the content of the following \begin{theorem} For $1\le p < \infty$ the operator ${F}_\sigma$ maps continuously the space $L^p_\mu(0,1)$ into itself if and only if $\sigma> 1/p -1$. Moreover, we have \begin{equation*} \|{F}_\sigma\|_{L^p_{\mu}(0,1)\rightarrow L^p_{\mu}(0,1)} = \frac {\Gamma(\mu+1)}{\Gamma^2(\lambda)}\Gamma(1/p) \Gamma(\sigma+1- 1/p) \end{equation*} for all $\sigma> 1/p -1$. \end{theorem} If $T:X\rightarrow Y$ is a linear operator from a linear space with a norm $(X,\|\cdot\|_X)$ into $(Y,\|\cdot\|_Y)$, we denote by $\|T\|_{X\rightarrow Y}$ the norm of $T$, i.e., \begin{equation*} \|T\|_{X\rightarrow Y} = \sup_{\|x\|_{X}\le 1 } {\|Tx\|_Y}. \end{equation*} \subsection{Auxiliary results} The case $p=1$ of our theorem is not difficult to consider. It will be derived from \begin{lemma}\label{LE.NORM.L1} Let $\nu$ be a finite measure on $X$. Let $T$ be an integral operator which acts on $L^1 = L^1(X,\nu)$ with the non-negative kernel $K(x,y)$, i.e., let \begin{equation*} T f (x) =\int_X K(x,y)\, f(y)\, d\nu(y). \end{equation*} Then $T$ maps $L^1$ into itself if and only if \begin{equation*} \sup_{y\in X} \int_X K(x,y) \, d\nu(x)<\infty. \end{equation*} In this case we have \begin{equation*} \|T\|_{ L^1 \rightarrow L^1} = \sup_{y\in X} \int_X K(x,y) \, d\nu(x). \end{equation*} \end{lemma} \begin{proof} For all $f\in L^1$ there holds \begin{equation*}\begin{split} \| {T} f\|_1& = \int_X \left|\int_X K(x,y) \, f(y)\, d\nu(y)\right| d\nu(x) \\& \le \int_X \left\{ \int_X K (x,y)\, d\nu(x)\right\} |f(y)| \, d\nu(y) \\& \le \left\{\sup_{y\in X} \int_X K(x,y)\, d\nu(x)\right\} \|f\|_1. \end{split}\end{equation*} If we take $f\equiv 1$, then we have the equality sign at each place above. This proves the necessary condition, and gives the norm of $T$. \end{proof} In the case $1<p<\infty$ we will use the following well known result.
\begin{lemma}[The Schur test] Suppose that $(X,\nu)$ is a $\sigma$-finite measure space and $K(x,y)$ is a nonnegative measurable function on $X\times X$, and $T$ the associated integral operator \begin{equation*} Tf(x) = \int_X K(x,y)\, f(y)\, d\nu(y). \end{equation*} Let $1<p<\infty$ and $1/p + 1/q = 1$. If there exist a positive constant $C$ and a positive measurable function $f$ on $X$ such that \begin{equation*} \int_X K(x,y)\, f(y)^q\, d\nu(y)\le C\, f(x)^q. \end{equation*} for almost every $x\in X$ and \begin{equation*} \int_X K(x,y)\, f(x)^p\, d\nu(x)\le C\, f(y)^p. \end{equation*} for almost every $y\in X$, then $T$ is bounded on $L^p = L^p(X,\nu)$ and the following estimate of the norm holds \begin{equation*} \|T\|_{ L^p\rightarrow L^p} \le C. \end{equation*} \end{lemma} \subsection{The proof of the main result} We start now with the proof of our main result. Let us first discus the simple case $p=1$. By the Euler formula and the Euler transform we have \begin{equation*}\begin{split} \sup_{t\in (0,1)} \int_0^1 K_\sigma (s,t)\, d\mu (s) & \ = \sup_{t\in (0,1)} \mu (1-t)^{\sigma} \int_0^1 s^{\mu-1}\, {_2}F_1 (\lambda, \lambda;\mu; s\, t)\, ds \\&=\sup_{t\in (0,1)} \mu (1-t)^{\sigma} \mu^{-1}\, {_2}F_1(\lambda,\lambda;\mu+1;t) \\&=\sup_{t\in (0,1)} {_2}F_1 (\mu+1 - \lambda,\mu+1 - \lambda ;\mu+1;t) \\& = \frac {\Gamma (\mu+1 ) \Gamma (2\lambda -\mu-1 )}{\Gamma^2(\lambda)} = \frac {\Gamma (\mu+1 )}{\Gamma^2 (\lambda )}\Gamma (\sigma )<\infty \end{split}\end{equation*} if and only if $\sigma>0$. The last conclusion follows from \begin{lemma}\label{LE.MAX} For reals $y>0$ and $x$ the function $\, {_2}F_1(x,x;y;r)\, $ is bounded in $(0,1)$ if and only if $y>2x$ in which case we have \begin{equation*} \sup_{r\in (0,1) } {_2}F_1(x,x;y;r)\, = \, {_2}F_1(x,x;y;1)= \frac { \Gamma(y)\Gamma(y-2x) }{\Gamma^2(y-x)}. \end{equation*} \end{lemma} \begin{proof} If $y>2x$, then $\, {_2}F_1(x,x;y;r)\, $ is continuous in $[0,1]$ and increasing in $r\in(0,1)$. 
Therefore, we may apply the Gauss relation to obtain the maximum. In other cases we have \begin{equation*} {_2}F_1(x,x;y;r) \sim \frac {\Gamma\left(2x\right)} {\Gamma^2\left(x\right)}\log \frac 1{1-r} \quad \text{if}\quad y=2x, \end{equation*} and \begin{equation*} {_2}F_1(x,x;y;r) \sim \frac {\Gamma (y)\Gamma(2x - y)} {\Gamma^2(x)} \frac 1{(1-r)^{2x-y}} \quad \text{if}\quad y<2x. \end{equation*} For these asymptotic relations we refer to the second chapter in~\cite{AAR.BOOK.SPECIAL}. \end{proof} Now, according to Lemma~\ref{LE.NORM.L1} we conclude \begin{equation*} \|{F_\sigma}\|_{ L^1_\mu (0,1)\rightarrow L^1_\mu (0,1)}= \frac {\Gamma (\mu+1 )}{\Gamma^2 ( \lambda)}\Gamma (\sigma ) \end{equation*} for all $\sigma>0$. The rest of the proof of our main result is devoted to the case $1<p<\infty$. It has two parts. In the first one, using the Schur test, we prove that $F_\sigma$ is bounded for $\sigma> 1/p - 1$, and we obtain the estimate of the norm of ${F}_\sigma$ from above. In the second part we deliver the proof that the same number is the norm estimate from below. In this part we also obtain that the condition $\sigma> 1/p-1$ is necessary for the boundedness of $F_\sigma$. \subsubsection*{Part I} To start with this, denote $\varphi(t)=(1-t)^{- 1/{p q}}$. Assume that $\sigma> 1/p-1$. 
We have \begin{equation*}\begin{split} &\int_0^1 K_\sigma(s,t)\, \varphi(t)^q\, d\mu(t) \\&= \mu \int_0^1 t^{\mu-1}\, (1-t)^\sigma {_2}F_1(\lambda,\lambda;\mu;s\, t)\, \varphi(t)^q\, dt \\& = \mu\, \frac {\Gamma(\mu) \Gamma(2\lambda-\mu- 1/p)} {\Gamma(2\lambda-1/p)} \, {_2}F_1(\lambda,\lambda;2\lambda- 1/p;s) \\& = \frac {\Gamma(\mu+1) \Gamma(2\lambda-\mu- 1/p)} {\Gamma(2\lambda- 1/p)}\, (1-s)^{1/p}\, {_2}F_1(\lambda,\lambda;2\lambda- 1/p;s)\, \varphi(s)^q \\& = \frac {\Gamma(\mu+1) \Gamma(2\lambda-\mu-1/p)} {\Gamma(2\lambda-1/p)}\, {_2}F_1(\lambda- 1/p, \lambda- 1 /p;2\lambda- 1/p;s)\, \varphi(s)^q \\&\le \frac {\Gamma(\mu+1) \Gamma(2\lambda-\mu-1/p)} {\Gamma(2\lambda- 1/p)} \, \frac {\Gamma(2\lambda- 1/p) \Gamma( 1/p)}{\Gamma^2(\lambda)}\, \varphi(s)^{q} \\&= \frac {\Gamma(\mu+1)} {\Gamma^2(\lambda)}\Gamma(\sigma+1-1/p)\Gamma(1/p)\, \varphi(s)^{q}. \end{split}\end{equation*} Since $(1-t)^\sigma \varphi(t)^q = (1-t)^{(2\lambda -1/p)-\mu-1}$, at the second place we used the Euler formula~\eqref{EQ.INT.TRANS.1} for $c= 2\lambda- 1/ p$ and $d=\mu$ (observe that $c-d = \sigma+1/q> 1/ p- 1+ 1/ q =0$). At the third place we used the Euler transform~\eqref{EQ.SIMPLE.TRANSFORM}. The inequality follows by Lemma~\ref{LE.MAX}, since the function ${_2}F_1(\lambda- 1/p,\lambda-1/p;2\lambda-1/p;s)$ is increasing in $s\in (0,1)$. 
Similarly, one derives \begin{equation*}\begin{split} &\int_0^1 K_\sigma(s,t)\, \varphi(s)^p\, d\mu(s) \\&=\mu (1-t)^\sigma \int_0^1 s^{\mu-1}\, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, \varphi(s)^p \, ds \\&=\mu (1-t)^{\sigma} \frac{\Gamma(\mu) \Gamma(1/p)} {\Gamma(\mu+1/p)} \, {_2}F_1(\lambda,\lambda;\mu+ 1/p;t) \\&= \frac{\Gamma(\mu+1) \Gamma(1/p)} {\Gamma(\mu+ 1/p)} (1-t)^{2\lambda-\mu- 1/p} \, {_2}F_1(\lambda,\lambda;\mu+ 1/p;t)\, \varphi(t)^{p} \\& = \frac{\Gamma(\mu+1) \Gamma(1/p)} {\Gamma(\mu+ 1/p)} \, {_2}F_1(\mu- \lambda+ 1/p,\mu -\lambda+ 1/p;\mu+ 1/p;t)\, \varphi(t)^{p} \\& = \frac{\Gamma(\mu+1) \Gamma(1/p)} {\Gamma(\mu+ 1/p)}\, {_2}F_1(\mu - \lambda+ 1/p,\mu - \lambda+ 1/p;\mu+ 1/p;t)\, \varphi(t)^{p} \\&\le \frac{\Gamma(\mu+1) \Gamma(1/p)} {\Gamma(\mu+1/p)} \frac {\Gamma(\mu+ 1/p) \Gamma(2\lambda - \mu -1/p)} {\Gamma^2(\lambda)}\, \varphi(t)^p \\&=\frac {\Gamma(\mu+1)}{\Gamma^2(\lambda)}\Gamma(1/p) \Gamma(\sigma+1-1/p)\, \varphi(t)^p. \end{split}\end{equation*} By the Schur test we finally obtain \begin{equation*} \|{F}_\sigma\|_ {L^p_\mu(0,1)\rightarrow L^p_\mu (0,1)}\le \frac {\Gamma(\mu +1)}{\Gamma^2(\lambda)}\Gamma(1/p) \Gamma(\sigma+1- 1/p) \end{equation*} for every $\sigma> 1/p -1$. \subsubsection*{Part II} As we have said, the aim of the second part is to establish the norm estimate of $F_\sigma\, (1< p<\infty,\, \sigma>1/p-1)$ from below. The following four observations will be useful in that approach. i) If $H(t)=C\, t^{ \theta/ p} (1-t)^{{\tilde \theta}/p}$, where $C$ is a positive constant, then $H \in L^p_\mu(0,1),\, 1<p<\infty$ and $\|H \|_{p,\mu} =1$ if and only if $\theta>-\mu,\, \tilde{\theta}> -1$, and \begin{equation*} C = \mu^{- 1/p} \mathrm{B}(\theta+\mu,\tilde{\theta}+1)^{-1/p}. 
\end{equation*} Indeed, this follows from the simple computation \begin{equation*}\begin{split} 1 & = \|H\|_{p,\mu}^p =\mu \int_0^1 H(t)^p t^{\mu -1} dt = \mu C^p \int_0^1 t^{\theta+\mu-1} (1-t)^{\tilde{\theta}} dt \\&= \mu C^p \mathrm{B}(\theta+\mu ,\tilde{\theta}+1). \end{split}\end{equation*} ii) There holds the identity \begin{equation}\label{LE.INT.TRANS.2} \int_0^1 t^{c-1} (1-t)^{d-1}\, {_2}F_1 (a, b; c; t)\, dt = \frac{\Gamma (c) \Gamma(d) \Gamma(c+d-a-b)}{\Gamma(c+d-a) \Gamma(c+d-b)} \end{equation} for $\Re c>0,\, \Re d>0$ and $\Re(c+d-a-b)>0$. We refer to~\cite{LIU.ZHOU.IEOT} for a proof. iii) Let $l>0$ and let $G$ be any function defined in an interval $(0,l)$ with positive values. For every $1<p<\infty$ we have \begin{equation}\label{LE.LIMSUP} \limsup_{(\zeta,\eta)\rightarrow (0,0)} \frac{G(\eta)^{-1/p} G(( {\zeta+\eta})/p)} {G({\zeta}/({p-1}))^{1-1/p}}\ge 1. \end{equation} It is enough to note that if we set $\eta=\zeta/({p-1})$, then we have $(\zeta+\eta)/p =\zeta/ (p-1) =\eta$, and therefore \begin{equation*} \frac{G(\eta)^{- 1/p} G(( {\zeta+\eta})/ p )} {G({\zeta}/({p-1}))^{1- 1/p}} = \frac{G(\eta)^{- 1/p} G(\eta)} {G(\eta)^{1- 1/p}}= 1, \end{equation*} which immediately implies the statement of this lemma. iv) If $L^p=L^p(X,\nu)$ is a Lebesgue space, recall that for an operator $T:L^p \rightarrow L^p\, (1< p<\infty)$ we have \begin{equation*}\begin{split} &\|T\|_{ L^p\rightarrow L^p} \\&= \sup \left\{ \left|\int _X T\Phi(x)\, \overline{\Psi(x)}\, d\nu(x)\right|: \Phi \in L^p,\, \Psi\in L^q,\, \|\Phi\|_p=\|\Psi\|_q=1\right\}, \end{split}\end{equation*} where $q$ is conjugate to $p$, i.e., $q= { p}/({p-1})$. Therefore, for our operator $F_\sigma$ we will calculate \begin{equation*} \int _0^1 F_\sigma \Phi(s)\, \overline{\Psi(s)}\, d\mu (s) \end{equation*} for appropriate $\Phi (s)\in L^p_\mu(0,1)$ and $\Psi (t)\in L^q_\mu (0,1)$.
First of all, using the Fubini theorem we obtain \begin{equation*}\begin{split} &\int _0^1 F_\sigma \Phi(s)\, \overline {\Psi(s)}\, d\mu(s) \\& = \mu^2 \int_0^1 s^{\mu-1} \left\{\int_0^1 t^{\mu-1} (1-t)^{\sigma}\, {_2}F_1(\lambda,\lambda ;\mu;s\, t)\, \Phi (t)\, dt\right\} \overline{\Psi (s)}\, ds \\&=\mu^2 \int_0^1 t^{\mu-1} (1-t)^{\sigma} \left\{\int_0^1 s^{\mu-1}\, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, \overline{\Psi (s)}\, ds\right\} \Phi (t)\, dt. \end{split}\end{equation*} In the preceding relation we will take for $\Phi(t)$ and $\Psi(s)$ the functions of the following form \begin{eqnarray*} \Phi(t) = C \, t^{ \theta/ p} (1-t)^{{\tilde{\theta}}/p},\quad \Psi (s) = \tilde{C}\, s^{ \vartheta/ q} (1-s)^{{\tilde{\vartheta}}/q}. \end{eqnarray*} We must have $\theta,\, \vartheta>-\mu$, and $\tilde{\theta},\, \tilde{\vartheta}>-1$, as well as \begin{equation*} {C}^p =\mu^{-1} \mathrm{B}(\theta+\mu,\tilde{\theta}+1)^{-1}, \quad \tilde{C}^q = \mu^{-1} \mathrm{B}(\vartheta+\mu,\tilde{\vartheta}+1)^{-1}. \end{equation*} In the sequel we will chose $\theta,\, \tilde{\theta}$ and $\vartheta,\, \tilde{\vartheta}$ in the way that it makes simpler the calculation of integrals in the expression for $\int _0^1 F_\sigma \Phi(s) \overline{\Psi(s)}\, d\mu(s)$. 
Introducing the preceding type of functions with $\vartheta=0$ we obtain \begin{equation*}\begin{split} &\int_0^1 s^{\mu-1}\, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, \overline{\Psi (s)}\, ds \\&=\tilde{C}\, \int_0^1 s^{\mu-1} (1-s)^{{\tilde{\vartheta}}/q}\, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, ds \\&=\tilde{C}\, \int_0^1 s^{\mu-1} (1-s)^{({\tilde{\vartheta}}/q+\mu+1)-\mu-1} \, {_2}F_1(\lambda,\lambda;\mu;s\, t)\, ds \\& = \tilde{C}\, \frac{\Gamma(\mu) \Gamma( {\tilde{\vartheta}}/q+1)} {\Gamma({\tilde{\vartheta}}/q+\mu+1)}\, {_2}F_1(\lambda,\lambda;{\tilde{\vartheta}}/q+\mu+1;t), \end{split}\end{equation*} where we have used~\eqref{EQ.INT.TRANS.1} for $ {c}={\tilde{\vartheta}}/q+\mu+1$ and ${d}=\mu$; note ${c}-{d} = {\tilde{\vartheta}}/q+1>-{1}/ q+1 = 1/p> 0$. For the sake of simplicity in the following calculation we will take $\tilde{\vartheta} = ({\theta-p})/({p-1})$. Then we have \begin{equation*} {\tilde{\vartheta}} /q+\mu +1 = \mu + \theta/p. \end{equation*} Since we must have $\tilde{\vartheta}>-1$, it follows that $\theta>1$. Now, it remains to transform \begin{equation*}\begin{split} &\int_0^1 t^{\mu-1} (1-t)^{\sigma}\, {_2}F_1(\lambda,\lambda;{\tilde{\vartheta}}/q+\mu+1;t)\, \Phi(t)\, dt \\&=C\, \int_0^1 t^{(\mu + \theta/ p)-1} (1-t)^{({\tilde{\theta}}/p+\sigma +1)-1}\, {_2}F_1(\lambda,\lambda;\mu +\theta/p;t)\, dt \\&= C \, \frac {\Gamma(\mu+ \theta/ p ) \Gamma({\tilde{\theta}}/ p+\sigma +1) \Gamma((\theta + {\tilde{\theta}})/p)} {\Gamma^2((\theta + {\tilde{\theta}})/p + \lambda)}. \end{split}\end{equation*} We have used~\eqref{LE.INT.TRANS.2}; note that $(\mu+\theta/p) + ({\tilde{\theta}}/p+\sigma +1) - \lambda = (\theta + {\tilde{\theta}})/p>0$. All together we have \begin{equation*}\begin{split} &\int_0^1 F_\sigma \Phi(s)\, \overline{\Psi(s)}\, d\mu (s) \\&= \mu \, C \, \tilde{C}\, \frac {\Gamma(\mu+1) \Gamma(\mu+ {{\theta}}/p) \Gamma( {\tilde{\theta}}/p+ \sigma +1) \Gamma((\theta + {\tilde{\theta}})/p)}{\Gamma^2((\theta + {\tilde{\theta}})/p +\lambda)}. 
\end{split}\end{equation*} In the sequel we assume that $\sigma>1/p-1$ (note that in this case we have ${\tilde{\theta}}/p+\sigma +1>0$). If $\sigma\le 1/p-1$, then $F_\sigma:L^p_\mu(0,1) \rightarrow L^p_\mu(0,1)$ is not well defined. Since $\vartheta = 0 $ and $\tilde{\vartheta} =({\theta-p})/({p-1})$ we obtain \begin{equation*}\begin{split} C\, \tilde{C} &= \mu ^{- 1/p} {\mathrm B}(\theta+\mu,\tilde{\theta}+1)^{- 1/p} \mu^{- 1/q} {\mathrm B}(\vartheta+\mu,\tilde{\vartheta}+1)^{1/p-1} \\&\sim \mu^{-1} \frac {\Gamma(\tilde{\theta}+1)^{- 1/p}} {\Gamma (({\theta-1})/ ({p-1}) )^{1- 1/p}}, \end{split}\end{equation*} as ${(\theta,\tilde{\theta})\rightarrow (1,-1)}$. Now, regarding~\eqref{LE.LIMSUP} it follows \begin{equation*}\begin{split} &\limsup_{(\theta,\tilde{\theta})\rightarrow (1,-1)} \int_0^1 F_\sigma \Phi(s)\, \overline{\Psi(s)}\, d\mu (s) \\& = \frac {\Gamma (\mu+1)}{\Gamma^2(\lambda)}\Gamma( 1/p) \Gamma(-1/p+\sigma +1) \limsup_{(\theta,\tilde{\theta})\rightarrow (1,-1)} \frac {\Gamma(\tilde{\theta}+1)^{-1/p} \Gamma((\theta +{\tilde{\theta}})/p)}{\Gamma(({\theta-1})/({p-1}))^{1-1/p}} \\&= \frac { \Gamma(\mu+1)} {\Gamma^2 (\lambda )}\Gamma(1/p) \Gamma(-1/p+\sigma +1) \limsup_{(\zeta,\eta)\rightarrow (0,0)} \frac {\Gamma(\eta)^{-1/p} \Gamma( (\zeta +\eta)/ p)}{\Gamma({\zeta}/({p-1}) )^{1-1/p}} \\&\ge\frac { \Gamma(\mu+1)}{\Gamma^2 (\lambda)}\Gamma(1/p)\Gamma(-1/p+\sigma +1). \end{split}\end{equation*} Thus, we have proved \begin{equation*}\begin{split} \|F_\sigma\|_{L^p_\mu(0,1)\rightarrow L^p_\mu(0,1)}&\ge \limsup_{(\theta,\tilde{\theta})\rightarrow (1,-1)} \int _0^1 F_\sigma \Phi(s)\, \overline{\Psi(s)} \, d\mu (s) \\&\ge\frac {\Gamma(\mu+1)}{\Gamma^2 ( \lambda)}\Gamma( 1/p)\Gamma(- 1/p+\sigma +1) \end{split}\end{equation*} for $\sigma>1/p-1$. \section{The Bergman projection and the Berezin transform} \subsection{The Bergman projection} In the sequel $n$ will be a positive integer. 
Let \begin{equation*} \left<z,w\right>= z_1\overline{w}_1+\dots+z_n\overline{w}_n, \end{equation*} stand for the inner product in the complex $n$-dimensional space $\mathbf{C}^n$, where $z=(z_1,\dots,z_n)$ and $w=(w_1,\dots,w_n)$. The standard norm in $\mathbf{C}^n$, induced by the inner product, is denoted by $|z|=\sqrt{\left<z,z\right>}$. We denote by $B$ the unit ball $\{z\in\mathbf C^n:|z|<1\}$ in $\mathbf{C}^n$. Let $S= \partial B$ be the unit sphere. The normalized Lebesgue measure on the unit ball (sphere) is denoted by $dv\, (d\tau)$. Following the Rudin monograph~\cite{RUDIN.BOOK.BALL} as well as the Forelli and Rudin work~\cite{FORELLI.RUDIN.INDIANA}, associate with each complex number $s=\sigma+it,\, \sigma>-1$ the integral kernel \begin{equation*} K_s(z,w)=\frac{\left(1-|w|^2\right)^s}{\left(1-\left<z,w\right>\right)^{n+1+s}}, \end{equation*} and let \begin{equation*} T_s f(z) = c_s \int_B K_s(z,w)\, f(w)\, dv(w),\quad z\in B. \end{equation*} We assume that $f(w)$ is a function in $B$ for which the previous integral is well defined, and the complex power is understood to be the principal branch. The operator $T_s$ is the Bergman projection. The coefficient $c_s$ is chosen so that for the weighted measure in the unit ball \begin{equation*} dv_s(w)=c_s (1-|w|^2 )^s dv(w) \end{equation*} there holds $v_s(B)=1$, i.e., $T_s 1=1$. Using the polar coordinates \begin{equation*} \int_B h(z)\, dv(z) = 2n \int_0^1 r^{2n-1} dr \int_S h(r\zeta)\, d\tau(\zeta), \end{equation*} one can show that \begin{equation*} c_s^{-1} = n \mathrm {B} (s+1,n) = \frac{\Gamma(s+1) \Gamma(n+1)}{\Gamma(n+s+1)}, \end{equation*} where $\Gamma$ and $\mathrm{B}$ are Euler functions. Let $L^p(B)\, (1\le p<\infty)$ stand for the Lebesgue space of all measurable functions in the unit ball of $\mathbf{C}^n$ whose modulus raised to the power $p$ is integrable. For $p=\infty$ let it be the space of all essentially bounded measurable functions.
Denote by $\|\cdot\|_p$ the usual norm on $L^p(B)\, (1\le p\le \infty)$. Recall that \begin{equation*} \|f\|_p^p \ = \int_{B} |f(z)|^p\, dv(z) \end{equation*} for $f\in L^p(B)\, (1\le p<\infty)$. Forelli and Rudin~\cite{FORELLI.RUDIN.INDIANA} proved that $T_s:L^p(B) \rightarrow L^p_a = L^p\cap H(B)$, where $H(B)$ is the space of all analytic functions in the unit ball, is a bounded (and surjective) operator if and only if $\sigma > 1/p -1$, where $1\le p<\infty$. Moreover, they find $\|T_s\|_{L^1(B)\rightarrow L_a^1(B)}$ for $\sigma>0$ and $\|T_s\|_{ L^2(B)\rightarrow L^2_a(B)}$ for $\sigma>- 1/2$. It seems that the calculation of $\|T_s\|_{ L^p(B)\rightarrow L^p_a(B)}$ in other cases is not an easy problem. On the other hand, if $p=\infty$, it is known~\cite{CHOE.PAMS} that the operator $T_\sigma\, (\sigma>-1)$ projects $L^\infty(B)$ continuously onto the Bloch space $\mathcal{B}$ of the unit ball in $\mathbf{C}^n$. Recall that the Bloch space $\mathcal{B}$ contains all functions $f$ analytic in $B$ for which the semi--norm $\|f\|_{\beta} = \sup_{z\in B} \left(1-|z|^2\right) \left|\nabla f(z)\right|$ is finite. One can obtain a true norm by adding $|f(0)|$, more precisely in the following way \begin{equation*} \|f\|_\mathcal{B}=|f(0)|+\|f\|_\beta,\quad f\in\mathcal{B}. \end{equation*} The $\beta$-(semi-)norm of $T_\sigma:L^{\infty}(B)\rightarrow \mathcal{B}$ is defined by \begin{equation*} \|T_\sigma\|_\beta \ = \sup_{\|f\|_\infty\le 1}\|T_\sigma f\|_\beta. \end{equation*} In~\cite{KALAJ.MARKOVIC.MATH.SCAND} we find the (semi-)norm of $T_\sigma$ w.r.t. the $\beta$-(semi-)norm. We obtained \begin{equation*} \|T_\sigma\|_\beta = \frac{\Gamma(2\lambda+1)}{\Gamma^2(\lambda +1/2)}, \end{equation*} where we have introduced \begin{equation*} \lambda =(n+\sigma+1)/2. 
\end{equation*} Following the approach of~\cite{PERALA.AASF.2013}, one can derive \begin{equation*} \|T_\sigma\|_{{L^\infty(B)\rightarrow \mathcal{B}} }= 1+\|T_\sigma\|_\beta = 1+\frac{\Gamma(2\lambda+1)} {\Gamma^2(\lambda+1 /2)}. \end{equation*} In particular, for $\sigma=0$ and $n=1$ we put $P = T_0$ and $B=\mathbf{U}$ (then we have the original Bergman projection). Per\"{a}l\"{a} proved that \begin{equation*} \|P\|_\beta =\frac 8\pi\quad\text{and}\quad\|P\|_{L^\infty(\mathbf{U})\rightarrow\mathcal{B}} = 1 + \frac 8\pi, \end{equation*} which are the main results from~\cite{PERALA.AASF.2012} and~\cite{PERALA.AASF.2013}, respectively. For a related result we refer to~\cite{KALAJ.VUJADINOVIC.JOT}. \subsection{The maximal Bergman projection} Besides the operator $T_\sigma$, we now consider the integral operator $\tilde{T}_\sigma\, (\sigma>-1)$ given by the kernel \begin{equation*} |K_\sigma(z,w)| = \frac{\left(1-|w|^2\right)^\sigma}{\left|1-\left<z,w\right>\right|^{2\lambda}}, \end{equation*} i.e., \begin{equation*} \tilde{T}_\sigma f(z)= c_\sigma \int_B \frac{\left(1-|w|^2\right)^\sigma}{\left|1-\left<z,w\right>\right|^{2\lambda}}\, f(w)\, dv(w),\quad z\in B. \end{equation*} It is known that $\tilde{T}_\sigma$ maps $L^p(B)\, (1 \le p<\infty)$ into itself continuously if and only if $\sigma> 1/p-1$; see~\cite{FORELLI.RUDIN.INDIANA} where Forelli and Rudin used this operator in order to establish the continuity of $T_\sigma$. In order to connect our main result with the Forelli--Rudin result, we need to transform the integral $I_c(z)$ which appears in the first chapter of the Rudin monograph~\cite{RUDIN.BOOK.BALL}. For the proof of the next lemma see Proposition 1.4.10 in~\cite{RUDIN.BOOK.BALL}. \begin{lemma}\label{LE.I.C} For any real number $c$ introduce \begin{equation*} I_c (z) \, = \int_S \frac{d\tau(\zeta)}{\left|1-\left<z,\zeta\right>\right|^{n+c}},\quad z\in B. 
\end{equation*} Then \begin{equation*} I_c(z)\, =\, {_2}F_1(\tilde{\lambda},\tilde{\lambda};n;|z|^2), \end{equation*} where $\tilde{\lambda} = ( n + c)/2$. \end{lemma} The following simple observations will be useful. Let $h(w) =H(|w|^2)$ be a radially symmetric function in the unit ball, where $H(t)$ is defined in the interval $(0,1)$ and non--negative. i) Norm of $h\in L^p (B)$ is given by \begin{equation*} \|h\|_p^p\ = n \int_0^1\, s^{n-1}\, H(s)^p\, ds = \|H\|_{p,n}^p \end{equation*} for $1\le p<\infty$. Indeed, using polar coordinates we obtain \begin{equation*}\begin{split} \int_B h(z)\, dv(z)& = 2n \int_0^1 r^{2n-1}\, dr \int_S h(r\zeta)\, d\tau(\zeta) \\&= 2n \int_0^1\, r^{2n-1}\, H (r^2)\, dr =n \int_0^1\, s^{n-1}\, H (s)\, ds. \end{split}\end{equation*} ii) The function $\tilde{T}_\sigma h (z)$ is also radially symmetric, if it is defined. Moreover, \begin{equation*}\begin{split} \tilde{T}_\sigma h(z)& = c_\sigma\, n \, \int_0^1 t^{n-1} (1-t )^\sigma\, {_2}F_1(\lambda,\lambda; n;t\, |z|^2)\, H(t)\, dt \\& = c_\sigma F_\sigma H (|z|^2). \end{split}\end{equation*} To see that this relation holds, use polar coordinates and Lemma~\ref{LE.I.C} to obtain \begin{equation*}\begin{split} c_\sigma^{-1} \tilde {T}_\sigma h(z)& =2n \int_0^1 r^{2n-1}\, (1-r^2)^\sigma\, I_{2\lambda-n}(rz)\, H (r^2)\, dr \\&= n \int_0^1 s^{n-1}\, (1-s)^\sigma\, {_2}F_1(\lambda,\lambda;n;s|z|^2)\, H (s)\, ds \end{split}\end{equation*} (we introduced $s=r^2$ to obtain the last integral). Thus, the operator $\tilde{T}_\sigma : L^p(B)\rightarrow L^p(B)$ is bounded if and only if $F_\sigma: L^p_n (0,1) \rightarrow L^p_n (0,1)$ is bounded. Moreover, \begin{equation*} \|\tilde{T}_\sigma\|_{L^p(B)\rightarrow L^p(B)} = c_\sigma \|F_\sigma\|_{ L^p_n (0,1) \rightarrow L^p_n (0,1)} \end{equation*} for all $1\le p<\infty$ and $\sigma> 1/p-1$. Therefore, we have \begin{theorem}\label{TH.MAIN} The operator $\tilde{T}_\sigma$ is bounded if and only if $\sigma> 1/p-1$. 
Norm of $\tilde {T}_\sigma: L^p(B)\rightarrow L^p(B)\, (1\le p<\infty)$ is \begin{equation*}\begin{split} \|\tilde {T}_\sigma\|_{ L^p(B)\rightarrow L^p(B)} & =\frac{\Gamma(2\lambda)}{\Gamma^2(\lambda)\Gamma(\sigma+1)}\Gamma(1/p)\Gamma(\sigma+1-1/p) \\&= \frac{\Gamma(n+\sigma+1)} {{\Gamma^2({(n+\sigma+1)/2})}\Gamma(\sigma+1)}\Gamma(1/p)\Gamma(\sigma + 1- 1/p) \end{split}\end{equation*} for all $\sigma>1/p-1$. \end{theorem} \begin{remark} For $n=1$ Theorem~\ref{TH.MAIN} reduces to the main result in~\cite{DOSTANIC.J.ANAL.MATH}. See Theorem 1 there. See also~\cite{LIU.ZHOU.IJM}. \end{remark} The conjugate operator $\tilde{T}_\sigma^*: L^p(B)\rightarrow L^p(B)\, (1<p\le\infty)$ of $\tilde{T}_\sigma: L^q(B)\rightarrow L^q(B)\, (1\le q<\infty)$ is \begin{equation*} \tilde{T}_\sigma^*g(z) = c_\sigma \int_B \frac{\left(1-|z|^2\right)^\sigma}{\left|1-\left<z,w\right>\right|^{2\lambda}}\, g(w)\, dv(w),\quad z\in B. \end{equation*} Since \begin{equation*} \|\tilde {T}^*_\sigma\|_{ {L^p(B)\rightarrow L^p(B)} } = \|\tilde {T}_\sigma \|_{{L^q(B)\rightarrow L^q(B)}}, \end{equation*} we immediately deduce \begin{corollary}\label{EQ.NORM.CONJUGATE.TTILDA} Norm of $\tilde {T}_\sigma^*:L^p(B)\rightarrow L^p(B)\, (1<p\le \infty)$ is given by \begin{equation*}\begin{split} \|\tilde {T}^*_\sigma\|_{ L^p(B)\rightarrow L^p(B)} & =\frac { \Gamma(n+\sigma+1)} { \Gamma^2((n +\sigma + 1)/2) \Gamma(\sigma+1)} \Gamma( 1/q) \Gamma(\sigma + 1- 1/q) \\&= c_\sigma \frac{\Gamma(n+1)}{\Gamma^2( (n+\sigma +1)/2)}\Gamma(1/q)\Gamma(\sigma + 1- 1/q) \end{split}\end{equation*} for $\sigma> 1/q-1$. \end{corollary} \subsection{An estimate of the norm of $T_\sigma$} Forelli and Rudin~\cite{FORELLI.RUDIN.INDIANA} proved that \begin{equation*} \|T_\sigma\|_{ L^1(B)\rightarrow L^1_a(B)} = \frac{\Gamma(\sigma)}{\Gamma(\sigma+1)}\frac{\Gamma(2\lambda)}{\Gamma^2(\lambda)}, \quad \sigma>0. \end{equation*} Note that $\|\tilde{T}_\sigma\|_{ L^1(B)\rightarrow L^1(B)} = \|T_\sigma\|_{L^1(B)\rightarrow L^1_a (B)}$. 
They also proved \begin{equation*} \|T_\sigma\|_{ L^2(B)\rightarrow L^2_a(B)} = \frac{\sqrt{\Gamma(2\sigma + 1)} }{\Gamma(\sigma+1)}, \quad \sigma>-1/2. \end{equation*} Combining these results with the Riesz--Thorin interpolation theorem, we obtain \begin{equation*}\begin{split} \|T_\sigma\|_{L^p(B)\rightarrow L^p_a (B)}& \le\left\{\frac{\Gamma(2\lambda)}{\Gamma^2(\lambda)}\frac{\Gamma(\sigma)}{\Gamma(\sigma+1)}\right\}^{2/p-1} \left\{\frac{ \sqrt{\Gamma(2\sigma + 1)} }{\Gamma(\sigma+1)}\right\}^{2-2/p} \\&= \frac {2^{1-1/p}} {\sigma^{1/p}} \left\{\frac{ \Gamma(2 \sigma)}{\Gamma^2(\sigma)}\right\}^{1-1/p} \left\{\frac{\Gamma(2\lambda)}{\Gamma^2(\lambda)} \right\}^{2/p-1} \end{split}\end{equation*} for $1\le p\le 2$ and $\sigma>0$. The estimate of $\|T_\sigma\|_{ L^p(B)\rightarrow L^p_a(B)}$ given in the following corollary, which follows from $\|T_\sigma\|_{ L^p(B)\rightarrow L^p_a (B)} \le \|\tilde{T}_\sigma\|_{ L^p(B)\rightarrow L^p(B)}$, is better in some cases. \begin{corollary}\label{CORO.NORM.ESTIMATE} \begin{equation*} \|T_\sigma\|_{ L^p(B)\rightarrow L^p_a (B)} \le \frac{\Gamma(2\lambda)} {\Gamma^2(\lambda) \Gamma(\sigma+1)}\Gamma(1/p)\Gamma(\sigma + 1-1/p) \end{equation*} for $1\le p<\infty$ and $\sigma> 1/p -1$. \end{corollary} Particularly, for $\sigma = 0$ we have $P=T_0$ and the norm estimate \begin{equation*} \|P\|_{ L^p(B)\rightarrow L^p_a(B)}\le \frac{\Gamma(n+1)} {\Gamma^2((n+1)/2)} \frac{\pi}{\sin(\pi/p)}, \end{equation*} where $1<p<\infty$. This estimate for $n=1$ is also obtained in~\cite{DOSTANIC.CZMJ}. \subsection{$L^p$-norm of the Berezin transform} In the case of the unit ball the Berezin transform takes the form \begin{equation*} \mathfrak{B}f(z) =\int_B \frac { (1-|z|^2)^{n+1}} {\left|1-\left<z,w\right>\right|^{2n+2}}\, f(w)\, dv(w),\quad z\in B. \end{equation*} Berezin~\cite{BEREZIN.IZVESTIYA} introduced the notion of covariant and contravariant symbols of an operator. 
The Berezin transform finds applications in the study of Hankel and Toeplitz operators. An interesting result~\cite{ENGLIS.JFA.1994} says that if $u\in L^1(\mathbf{U})$, where $\mathbf{U}= \left\{z\in\mathbf{C}: |z|<1\right\}$ is the unit disc in the complex plane, then $u$ is a harmonic function in $\mathbf{U}$ if and only if $\mathfrak{B} u = u$. Observe that \begin{equation}\label{EQ.BEREZIN.TTILDA} \mathfrak{B} = c^{-1}_{n+1} \tilde{T}^*_{n+1}. \end{equation} \begin{corollary} The norm of the Berezin transform $\mathfrak{B}:L^p(B)\rightarrow L^p(B)$ is given by \begin{equation*} \|\mathfrak {B}\|_{ L^p(B)\rightarrow L^p(B)} =\left\{ \prod_{k=1}^n\left( 1+\frac 1 {kp}\right)\right\}\frac {\pi/p} {\sin(\pi/p)} \end{equation*} for $1<p<\infty$, and \begin{equation*} \|\mathfrak{B}\| _{ L^\infty(B)\rightarrow L^\infty(B)} =1. \end{equation*} \end{corollary} The result of this corollary is obtained in~\cite{LIU.ZHOU.IJM}, but we give a proof for the sake of completeness. \begin{proof} Let $1<p<\infty$. Since by Corollary~\ref{EQ.NORM.CONJUGATE.TTILDA} we have \begin{equation*} \|\tilde {T}^*_{n+1}\| _{L^p(B)\rightarrow L^p(B)} = c_{n+1} \frac{\Gamma(1- 1/p) \Gamma (n+1 + 1/p)}{\Gamma (n+1)}, \end{equation*} it follows by~\eqref{EQ.BEREZIN.TTILDA} that \begin{equation*}\begin{split} \|\mathfrak{B}\|_{L^p(B)\rightarrow L^p(B)} &= \frac{\Gamma(1- 1/p)\Gamma(n+1 + 1/p)}{\Gamma(n+1)} \\&=\frac{\Gamma(1- 1/p)\left\{ \prod_{k=1}^n (k + 1/p)\right\} \Gamma(1/p)}{{n!}\, p} \\&=\left\{ \prod_{k=1}^n\left( 1+\frac 1 {kp}\right)\right\}\frac {\pi/p}{\sin(\pi/p)}. \end{split}\end{equation*} We have used the Euler reflection identity $\Gamma(x) \Gamma(1-x) = \frac {\pi}{\sin(\pi x)}$ for $x\in (0,1)$ (here with $x=1/p$) in order to obtain the last expression. The case $p=\infty$ also follows from Corollary~\ref{EQ.NORM.CONJUGATE.TTILDA}. 
Introducing $q=1$ we obtain \begin{equation*} \|\tilde {T}^*_{n+1} \|_{ L^\infty(B)\rightarrow L^\infty(B) } = c_{n+1}, \end{equation*} which implies the result concerning the $L^{\infty}$-norm of the Berezin transform. \end{proof} \begin{corollary} \begin{equation*} \|\mathfrak{B}\|_{ L^2(B)\rightarrow L^2(B)} = \frac{(2n+ 1)!!}{( 2n)!!} {\frac\pi 2}. \end{equation*} \end{corollary} \begin{corollary} \begin{equation*} \|\mathfrak{B}\|_{ L^p(B)\rightarrow L^p(B)}\sim \frac {(n+1) \pi }{\sin(\pi/p)}\sim\frac {(n+1) \pi }{\pi - \pi/ p} \sim\frac {n+1 }{p - 1}, \quad p\rightarrow 1. \end{equation*} \end{corollary} \end{document}
\begin{document} \title{Generation of Atomic Cluster States through the Cavity Input-Output Process} \author{Jaeyoon Cho} \affiliation{Division of Optical Metrology, Korea Research Institute of Standards and Science, Daejeon 305-340, Korea} \author{Hai-Woong Lee} \affiliation{Department of Physics, Korea Advanced Institute of Science and Technology, Daejeon 305-701, Korea} \date{\today} \begin{abstract} We propose a scheme to implement a two-qubit controlled-phase gate for single atomic qubits, which works in principle with nearly ideal success probability and fidelity. Our scheme is based on the cavity input-output process and the single photon polarization measurement. We show that, even with the practical imperfections such as atomic spontaneous emission, weak atom-cavity coupling, violation of the Lamb-Dicke condition, cavity photon loss, and detection inefficiency, the proposed gate is feasible for generation of a cluster state in that it meets the scalability criterion and it operates in a conclusive manner. We demonstrate a simple and efficient process to generate a cluster state with our high probabilistic entangling gate. \end{abstract} \pacs{pacs} \maketitle \newcommand{\bra}[1]{\left<#1\right|} \newcommand{\ket}[1]{\left|#1\right>} \newcommand{\abs}[1]{\left|#1\right|} The one-way quantum computation \cite{br01,rb01,rbb03,n04,nd04} has opened up a new paradigm for constructing reliable quantum computers. In their pioneering works \cite{br01,rb01}, Raussendorf and Briegel showed that preparation of a particular entangled state, called a cluster state, accompanied with local single-qubit measurements is sufficient for simulating any arbitrary quantum logic operations. Therefore, experimental or intrinsic difficulties in performing two-qubit operations can be substituted with (possibly probabilistic) generation of an entangled state. 
Especially, Nielsen showed that the resource overhead of a conventional linear optics quantum computer \cite{klm01} is drastically decreased by combining it with the idea of the one-way quantum computation \cite{n04}. A cluster state can be visualized as a collection of qubits and lines connecting them. In order to generate a cluster state systematically, one first initializes each qubit in state $\ket{+}=\frac{1}{\sqrt2}(\ket{0}+\ket{1})$, where $\ket{0}$ and $\ket{1}$ are the computational basis states, and then performs controlled-phase operations between every neighboring qubits connected by the lines. In some previous works \cite{bk04,dr05,br05}, it was shown that in principle there is no threshold value of $p$ required for efficient generation of a cluster state, where $p$ is the success probability of each controlled-phase operation. For a reasonable computational overhead, however, a high success probability $p$ should be attained. In the present work, we propose a scheme to implement a two-qubit controlled-phase gate for single atomic qubits, which works in principle with nearly ideal success probability and fidelity. The proposed entangling gate is suitable for the systematic generation of a cluster state described above for two reasons. The first is that it works between two individually trapped atoms, thus it meets the scalability criterion. Since a large number of qubits should be entangled in a cluster state to perform a nontrivial quantum computation, entangling gates which work only inside a single trapping structure \cite{cz95,pgcz95,pw02,ysy03} can not be used directly for our goal. The second is that, in contrast to other scalable two-qubit gate schemes \cite{cz20,kmw02,xlgy04}, it operates in a conclusive manner even in the practical situation. 
Even if the success probability decreases due to experimental imperfections, one can still detect whether the operation has succeeded or not, and in case it has succeeded, the fidelity is very high \cite{bk04,lbk05}. We demonstrate how a cluster state of an arbitrary configuration can be generated with our high-probability entangling gate. \begin{figure} \caption{The setup for the basic building block. A qubit is encoded in two ground levels $\ket0$ and $\ket1$ of a 3-level atom trapped in a one-sided optical cavity. The transition between states $\ket1$ and $\ket{e} \label{fig:setup} \end{figure} Fig.~\ref{fig:setup} shows the setup for the basic building block of our scheme. A qubit is encoded in two ground levels $\ket0$ and $\ket1$ of a 3-level atom, which is trapped in a one-sided optical cavity. The transition between states $\ket{1}$ and $\ket{e}$ is coupled resonantly to the right-circularly polarized mode of the cavity with coupling rate $g$, and state $\ket{0}$ is decoupled from the cavity field. We consider two kinds of transition channels for the cavity photon. The first one is the cavity decay due to transmission through the cavity mirror, whose rate is $\kappa_c$. All other unwanted photon losses, such as cavity absorption and scattering, are characterized by the overall loss rate $\kappa_l$. For the gate operation, we will inject a photon into the cavity and observe the output photon along the cavity decay channel, and postselect those cases in which a photon is detected. The evolution of the system, then, can be described by the non-Hermitian conditional Hamiltonian in the framework of the quantum trajectory method \cite{c93}. 
In the rotating frame, the conditional Hamiltonian of the system, without the cavity decay, can be written as \begin{equation} H_s=-i\frac{\gamma}{2}\ket{e}\bra{e}+g(a\ket{e}\bra{1}+a^\dagger\ket{1}\bra{e})-i\frac{\kappa_l}{2} a^\dagger a, \end{equation} where $\gamma$ and $a$ denote the atomic spontaneous emission rate and the annihilation operator for the right-circularly polarized mode of the cavity, respectively. Taking into account the coupling through the cavity decay channel, the system is fully specified by the boundary condition \begin{equation} b_{out}(t)=b_{in}(t)+\sqrt{\kappa_c}a(t), \label{eq:03141} \end{equation} and the quantum Langevin equation \begin{equation} \begin{split} \dot{s}=-i(sH_s-H_s^\dagger s)&-[s,a^\dagger]\left(\frac{\kappa_c}{2}a+\sqrt{\kappa_c}b_{in}(t)\right)\\ &+[s,a]\left(\frac{\kappa_c}{2}a^\dagger+\sqrt{\kappa_c}b_{in}^\dagger(t)\right), \end{split} \label{eq:03142} \end{equation} where $s$ is an arbitrary system operator, and $b_{in}(t)$($b_{out}(t)$) is the input(output) field operator \cite{gc85}. Suppose the atom is initially prepared in its ground state. When a photon is reflected from the cavity, its pulse shape would be changed due to the interaction with the atom-cavity system. In particular, when both the adiabatic condition ($\abs{\frac{\dot{s}}{s}}\ll\kappa_c,g$) and the strong atom-cavity coupling condition ($g\gg\kappa_c,\gamma$) are satisfied, the system only acquires a conditional phase shift with a good approximation \cite{dk04}. If the atom is in state $\ket{1}$ and a right-circularly polarized photon is incident, the system acquires no phase shift. Otherwise, i.e., if the photon does not see the atom, the system acquires a phase shift of $\pi$. Accordingly, in this regime, the simple setup of Fig.~\ref{fig:setup} serves as a controlled-phase gate between a photonic qubit and an atomic qubit. 
Before introducing the complete scheme, let us investigate this building block in more detail taking into account various aspects of practical imperfections. We assume the atom is trapped in a harmonic potential. Since the cavity field varies spatially along the cavity axis, the harmonic motion of the atom leads to time variation of the atom-cavity coupling rate. Under the assumption that the gate operates outside the Lamb-Dicke condition, we model the time dependence of the atom-cavity coupling rate as $g(t)=g_0\cos\left(\frac{\pi}{3}\sin\left(\frac{2\pi t}{T_g}+\phi\right)\right)$, where $T_g$ denotes the period of the atomic motion and $\phi$ is an arbitrary phase. Here, we have allowed the coupling rate to vary between $g_0/2$ and $g_0$ in accordance with a typical cavity QED experiment \cite{emky01}. The pulse shape of the input photon is assumed to be $f_{in}(t)=\left[\sqrt{T_f} \cosh\left(\frac{2t}{T_f}\right)\right]^{-1}$, which is normalized as $\int\abs{f_{in}(t)}^2\,dt=1$. Here, $T_f$ denotes the pulse width. We define $P$ as the success probability that a photon is detected, which is identical to the probability that no photon is lost by the atomic spontaneous emission (with rate $\gamma$) or the unwanted cavity photon loss (with rate $\kappa_l$). Since we postselect those cases in which a photon is detected, the pulse shape $f_{out}(t)$ of the output photon can be regarded as normalized, $\int\abs{f_{out}(t)}^2\,dt=1$, and the fidelity $F$ between the two pulses is given by $F=\abs{\int f_{in}^*(t)f_{out}(t)dt}$. All of the values above can be obtained on the basis of the cavity input-output formulae (\ref{eq:03141}) and (\ref{eq:03142}). 
\begin{figure} \caption{When the photon does not interact with the atom, (a) the success probability $P_0$ and (b) the fidelity $F_0$ with respect to $\kappa_l$ for $\kappa_c T_f=\{10,20,30,50,70\} \label{fig:graph1} \end{figure} \begin{figure} \caption{When the photon interacts with the atom, (a) the success probability $P_1$ and (b) the fidelity $F_1$ with respect to the average atom-cavity coupling rate $\left<g(t)\right>$ for all combinations of the parameter sets: $\kappa_c T_f=\{10,50\} \label{fig:graph2} \end{figure} Let us first consider a case in which a photon reflects from a bare cavity. Let $P_0$ and $F_0$ be the success probability and the fidelity in this case, respectively. In Fig.~\ref{fig:graph1}, we plot (a) $P_0$s and (b) $F_0$s with respect to $\kappa_l$ varying the pulse width: $\kappa_c T_f=\left\{10,20,30,50,70\right\}$. Fig.~\ref{fig:graph1}(a) shows the success probability is determined dominantly by $\kappa_l$: $P_0$ decreases as $\kappa_l$ increases. In Fig.~\ref{fig:graph1}(b), the curves from top to bottom correspond to $T_f$ in decreasing order. This behavior originates from the fact that the adiabatic condition is satisfied more strongly with the longer pulse width. When $\kappa_c T_f\gtrsim50$, the attained fidelity is found to be very close to the ideal value ($F_0>0.995$) regardless of the cavity photon loss. Our numerical calculations indicate that, in all cases, the acquired phase shift is exactly $\pi$. Secondly, we consider another case in which a right-circularly polarized photon reflects from the cavity while the atom is prepared in state $\ket{1}$. In this case, due to the interaction between the photon and the atom, the reflection occurs in a different manner. Let $P_1$ and $F_1$ be the success probability and the fidelity in this case, respectively. 
In Fig.~\ref{fig:graph2}, we plot (a) $P_1$s and (b) $F_1$s, which have been averaged over $\phi$, with respect to the average atom-cavity coupling rate $\left<g(t)\right>$ for all combinations of the parameter sets: $\kappa_c T_f=\left\{10,50\right\}$, $\kappa_c T_g=\left\{50,125\right\}$, and $\kappa_l/\kappa_c=\left\{0,0.2\right\}$. Here, we have assumed $\gamma=\kappa_c$. In this case, our numerical simulations indicate that the cavity photon is hardly created. The photon loss is thus dominated by the atomic spontaneous emission. Accordingly, both $P_1$ and $F_1$ are determined dominantly by the atom-cavity coupling rate, which is why the curves in Fig.~\ref{fig:graph2} are hardly distinguishable from one another. Fig.~\ref{fig:graph2}(b) shows that the fidelity is very close to the ideal value even in the weak atom-cavity coupling regime. The acquired phase is found to be exactly zero. A remarkable point of the above numerical results is that, though the success probability could decrease due to the unavoidable photon loss, the fidelity remains very high in most parametric regimes we have considered. From now on, let us assume $F_0=F_1=1$ for simplicity. In order to demonstrate that the setup of Fig.~\ref{fig:setup} serves as a controlled-phase gate, suppose a photon in state $\frac{1}{\sqrt2}(\ket{L}+\ket{R})$, where $\ket{L}$($\ket{R}$) denotes a left-(right-)circularly polarized photon, is reflected from the cavity while the atom is in state $\frac{1}{\sqrt2}(\ket{0}+\ket{1})$. A straightforward calculation yields the success probability $P=\frac{P_0}{4}(3+r)$ and the fidelity $F=\frac{3+\sqrt{r}}{2\sqrt{3+r}}$, where we have defined $r\equiv P_1/P_0$. The resulting entangled state can be written as $\frac{1}{\sqrt{3+r}}(\ket0\ket{L}+\ket0\ket{R}+\ket1\ket{L}-\sqrt{r}\ket1\ket{R})$ up to a global phase. \begin{figure} \caption{Controlled-phase gate between atom A and atom B. Each W represents a $\lambda/4$-plate and D represents a polarization detector. 
For the gate operation, a left-circularly polarized single photon is injected from left and the polarization of the output photon is measured at the detector.} \label{fig:gate} \end{figure} \begin{figure} \caption{(a) The success probability $P$ and (b) the fidelity $F$ of the controlled-phase gate with respect to $r=P_1/P_0$.} \label{fig:graph3} \end{figure} Now, the building block in Fig.~\ref{fig:setup} can be exploited for our goal. Fig.~\ref{fig:gate} shows the controlled-phase gate between two atoms A and B. Each W in the figure represents a $\lambda/4$-plate that converts the basis of a single-photon qubit between $\left\{\ket{L},\ket{R}\right\}$ and $\left\{\frac{1}{\sqrt2}(\ket{L}\pm\ket{R})\right\}$. Initially, each atom is prepared in state $\frac{1}{\sqrt2}(\ket0+\ket1)$. For the gate operation, a single photon in state $\ket{L}$ is injected from left and the polarization of the output photon is measured at the detector. From a straightforward algebra, one can find that a photon in state $\ket{L}$ is detected with probability $P_L=\frac{P_0^2}{32}\left[r^2+2r+4(r-1)\sqrt{r}+13\right]$, while a photon in state $\ket{R}$ with probability $P_R=\frac{P_0^2}{32}(r+3)^2$. In the former case, the final state becomes \begin{equation} \begin{split} \ket{\Psi_L}=\frac{P_0}{\sqrt{8P_L}}&\left[\ket0_A\ket0_B+\ket0_A\ket1_B+\ket1_A\ket0_B\right.\\ &\left.-\frac{r+2\sqrt{r}-1}{2}\ket1_A\ket1_B\right], \end{split} \end{equation} and in the latter case, \begin{equation} \begin{split} \ket{\Psi_R}=\frac{P_0}{\sqrt{8P_R}}&\left[\ket0_A\ket0_B+\ket0_A\ket1_B-\sqrt{r}\ket1_A\ket0_B\right.\\ &\left.+\frac{r+1}{2}\ket1_A\ket1_B\right], \end{split} \end{equation} which can be converted to the desired entangled state by applying a Pauli operator $\sigma_x$ on atom B. In Fig.~\ref{fig:graph3}, we plot (a) the success probability $P=P_L+P_R$ and (b) the average fidelity $F$ with respect to $r\equiv P_1/P_0$. 
Since a photon passes through two cavities in order, the success probability is basically second order in $P_0$ and $P_1$. The fidelity is found to be very high regardless of the success probability. In particular, when $P_0\simeq P_1$, the attained fidelity is as high as 1. An interesting property of the gate is that the fidelity would be decreased as the atom-cavity coupling rate is increased. In order to get an optimal fidelity, one first increase $F_0$ by increasing the pulse width as shown in Fig.~\ref{fig:graph1}(b), and then adjust $\kappa_l$ and $\left<g(t)\right>$ to have $P_0=P_1$. For a typical cavity decay rate $\kappa_c/2\pi=4~\mathrm{MHz}$ \cite{emky01}, one gets $F_0>0.995$ with $T_f=50/\kappa_c\simeq2~\mathrm{\mu s}$. We note that the success probability could decrease further due to photon losses at other parts of the setup in Fig.~\ref{fig:gate}, such as optical components, optical paths, and the detector. Even in those cases, the fidelity is not affected as long as the losses are polarization independent and dark counts are neglected. \begin{figure} \caption{A modified version of the controlled-phase gate to take advantage of two-sided cavities. Each BS represents a 50:50 beam splitter.} \label{fig:final} \end{figure} There is still room for improvement by which one can replace each one-sided cavity in Fig.~\ref{fig:gate} with a two-sided cavity. Let us assume both cavity mirrors have the same decay rate $\kappa_c'$. 
One can easily show that the cavity input-output formulae (\ref{eq:03141}) and (\ref{eq:03142}) as well as the commutation relations $\left[b_{in}^\dagger(t),b_{in}(t')\right]=\left[b_{out}^\dagger(t),b_{out}(t')\right]=\delta(t-t')$ are preserved by substituting as \begin{equation} \begin{split} b_{in}(t)&\rightarrow\frac{1}{\sqrt2}\left[b_{in}^{(1)}(t)+b_{in}^{(2)}(t)\right],\\ b_{out}(t)&\rightarrow\frac{1}{\sqrt2}\left[b_{out}^{(1)}(t)+b_{out}^{(2)}(t)\right],\\ \kappa_c&\rightarrow2\kappa_c', \end{split} \end{equation} where two cavity decay channels are represented by superscripts $(1)$ and $(2)$, respectively. The setup of Fig.~\ref{fig:final} thus works in the same fashion as that of Fig.~\ref{fig:gate} with an effective cavity decay rate $2\kappa_c'$, where each beam splitter is of 50:50 type. Finally, we demonstrate how our controlled-phase gate is directly used to generate a cluster state. Here, we assume the gate works with success probability $P>2/3$. In this case, one can take a simple add-on strategy to generate a cluster state of an arbitrary configuration. In order to show this, let us denote by $\ket{\Psi_n}$ the 1D cluster state of $n$ qubits, and express $\ket{\Psi_{n-2}}$ as \begin{equation} \ket{\Psi_{n-2}}=\ket{\phi_0}_{n-3}\ket{0}_{n-2}+\ket{\phi_1}_{n-3}\ket{1}_{n-2}, \end{equation} where $\ket{i}_{n-2}$ denotes the state of the $(n-2)$th qubit and $\ket{\phi_i}_{n-3}$ denotes the relevant terms for the other $(n-3)$ qubits. It is easily verified that $\ket{\Psi_n}$ can be written as \begin{equation} \begin{split} \ket{\Psi_n}&=\frac{1}{\sqrt2}\ket{\phi_0}_{n-3}\ket{0}_{n-2}\left(\ket{0}_{n-1}\ket{+}_n+\ket{1}_{n-1}\ket{-}_n\right)\\ &+\frac{1}{\sqrt2}\ket{\phi_1}_{n-3}\ket{1}_{n-2}\left(\ket{0}_{n-1}\ket{+}_n-\ket{1}_{n-1}\ket{-}_n\right), \end{split} \label{eq:04041} \end{equation} where $\ket{\pm}=\frac{1}{\sqrt2}(\ket0\pm\ket1)$. 
In order to generate $\ket{\Psi_{n+1}}$, one simply attach a qubit in state $\ket+$ to $\ket{\Psi_n}$ by performing a controlled-phase operation. If the operation succeeds, one gets $\ket{\Psi_{n+1}}$. If it fails, however, since $n$th qubit is measured in an arbitrary basis, the state~(\ref{eq:04041}) becomes a mixed state \begin{equation} \begin{split} \rho_{n-1}^f&=\frac12\left(\ket{\phi_0}_{n-3}\ket{0}_{n-2}+\ket{\phi_1}_{n-3}\ket{1}_{n-2}\right)\ket{0}_{n-1}\bra{\cdots}\\ &+\frac12\left(\ket{\phi_0}_{n-3}\ket{0}_{n-2}-\ket{\phi_1}_{n-3}\ket{1}_{n-2}\right)\ket{1}_{n-1}\bra{\cdots}. \end{split} \end{equation} From this expression, it is apparent that $\ket{\Psi_{n-2}}$ can be recovered from $\rho_{n-1}^f$ by measuring the $(n-1)$th qubit in the computational basis and performing an appropriate unitary operation on the $(n-2)$th qubit according to the measurement result. In other words, when an add-on process fails, only two qubits are lost. The average number of qubits attached by $m$ entangling operations is thus $(3P-2)m$, which grows on average if $P>2/3$. In the same fashion, it is also shown that if the $i$th qubit of $\ket{\Psi_n}$ $(i<n)$ is measured in an arbitrary basis, one can recover two 1D cluster states $\ket{\Psi_{i-2}}$ and $\ket{\Psi_{n-i-1}}$ up to appropriate local unitary operations by measuring both the $(i-1)$th and the $(i+1)$th qubits. We can thus connect two 1D cluster states by performing controlled-phase operations to form a cross-shaped 2D cluster state. Though a failure of the entangling operation would break them into four 1D cluster states, they can be connected into two 1D cluster states as shown above, and then be used to form a 2D cluster state again. By repeating these procedures, one can generate a cluster state of an arbitrary configuration. 
In summary, we have proposed a controlled-phase gate which operates between two distant atoms each trapped in an optical cavity, and have shown that the proposed gate is feasible for generation of a cluster state. In particular, the gate has no theoretical bound on the attainable success probability while it achieves a very high fidelity even in the presence of considerable imperfections. This research was supported by a Grant from the Ministry of Science and Technology (MOST) of Korea and from Korea Research Institute of Standards and Science (KRISS). \begin{references} \bibitem{br01} H. J. Briegel and R. Raussendorf, Phys. Rev. Lett. \textbf{86}, 910 (2001). \bibitem{rb01} R. Raussendorf and H. J. Briegel, Phys. Rev. Lett. \textbf{86}, 5188 (2001). \bibitem{rbb03} R. Raussendorf, D. E. Browne, and H. J. Briegel, Phys. Rev. A \textbf{68}, 022312 (2003). \bibitem{n04} M. A. Nielsen, Phys. Rev. Lett. \textbf{93}, 040503 (2004). \bibitem{nd04} M. A. Nielsen and C. M. Dawson, Phys. Rev. A \textbf{71}, 042323 (2005). \bibitem{klm01} E. Knill, R. Laflamme, and G. J. Milburn, Nature \textbf{409}, 46 (2001). \bibitem{bk04} S. D. Barrett and P. Kok, Phys. Rev. A \textbf{71}, 060310 (2005). \bibitem{dr05} L.-M. Duan and R. Raussendorf, Phys. Rev. Lett. \textbf{95}, 080503 (2005). \bibitem{br05} D. E. Browne and T. Rudolph, Phys. Rev. Lett. \textbf{95}, 010501 (2005). \bibitem{cz95} J. I. Cirac and P. Zoller, Phys. Rev. Lett. \textbf{74}, 4091 (1995). \bibitem{pgcz95} T. Pellizzari, S. A. Gardiner, J. I. Cirac, and P. Zoller, Phys. Rev. Lett. \textbf{75}, 3788 (1995). \bibitem{pw02} J. Pachos and H. Walther, Phys. Rev. Lett. \textbf{89}, 187903 (2002). \bibitem{ysy03} X. X. Yi, X. H. Su, and L. You, Phys. Rev. Lett. \textbf{90}, 097902 (2003). \bibitem{cz20} J. I. Cirac and P. Zoller, Nature \textbf{404}, 579 (2000). \bibitem{kmw02} D. Kielpinski, C. Monroe, and D. J. Wineland, Nature \textbf{417}, 709 (2002). \bibitem{xlgy04} Y.-F. Xiao \textit{et al.}, Phys. Rev. 
A \textbf{70}, 042314 (2004). \bibitem{lbk05} Y. L. Lim, A. Beige, and L. C. Kwek, Phys. Rev. Lett. \textbf{95}, 030505 (2005). \bibitem{c93} H. J. Carmichael, \textit{An Open Systems Approach to Quantum Optics}, Lecture Notes in Physics (Springer-Verlag, Berlin, 1993). \bibitem{gc85} C. W. Gardiner and M. J. Collett, Phys. Rev. A \textbf{31}, 3761 (1985). \bibitem{dk04} L.-M. Duan and H. J. Kimble, Phys. Rev. Lett. \textbf{92}, 127902 (2004). \bibitem{emky01} S. J. van Enk, J. McKeever, H. J. Kimble, and J. Ye, Phys. Rev. A \textbf{64}, 013407 (2001). \end{references} \end{document}
\begin{document} \begin{abstract} We study Farber's topological complexity (TC) of Davis' projective product spaces (PPS's). We show that, in many non-trivial instances, the TC of PPS's coming from at least two sphere factors is (much) lower than the dimension of the manifold. This is in high contrast with the known situation for (usual) real projective spaces for which, in fact, the Euclidean immersion dimension and TC are two facets of the same problem. Low TC-values have been observed for infinite families of non-simply connected spaces only for H-spaces, for finite complexes whose fundamental group has cohomological dimension not exceeding $2$, and now in this work for infinite families of PPS's. We discuss general bounds for the TC (and the Lusternik-Schnirelmann category) of PPS's, and compute these invariants for specific families of such manifolds. Some of our methods involve the use of an equivariant version of TC. We also give a characterization of the Euclidean immersion dimension of PPS's through generalized concepts of axial maps and, alternatively, non-singular maps. This gives an explicit explanation of the known relationship between the generalized vector field problem and the Euclidean immersion problem for PPS's. \end{abstract} \maketitle \tableofcontents \section{Introduction and notation}\label{intro} As shown in~\cite{FTY}, the topological complexity (TC) and the Euclidean immersion dimension (Imm) of the $n$-dimensional real projective space $\mathrm{P}^n$ are related by \begin{equation}\label{classic} \mathrm{TC}(\mathrm{P}^n)=\mathrm{Imm}(\mathrm{P}^n)-\epsilon(n)=2n-\delta(n) \end{equation} where $$\epsilon(n)=\begin{cases}1,&n=1,3,7;\\0,&\mbox{otherwise},\end{cases} $$ $\delta(n)=\mathrm{O}(\alpha(n))$, and $\alpha(n)$ denotes the number of ones in the binary expansion of~$n$. 
It is natural to ask whether the nice phenomenon in the first equality in~(\ref{classic}) is part of a general property of manifolds. Not only does this question have a negative answer, but even close relatives of real projective spaces fail to satisfy the first equality in~(\ref{classic}). For instance, in view of~\cite{AsDaGo} and~\cite{G-robotics}, the failure holds for lens spaces whose fundamental group has torsion of the form $2^e$ for $e>1$. The same answer is observed in a forthcoming paper by two of the authors in which they study flag manifolds whose fundamental group is an elementary 2-group of rank greater than $1$. This paper now shows that the list of counterexamples extends to Davis' projective product spaces, a family of manifolds giving a rather natural generalization of real projective spaces, and which, in particular, have $\mathbb{Z}_2$ as their fundamental groups (in the `generic' case). Indeed, Theorem~\ref{cota1} in this paper shows that, in contrast to the second equality in~(\ref{classic}), the topological complexity of a projective product space coming from at least two sphere factors can be much lower than the dimension of the manifold. Thus, in those cases, more than half the homotopy obstructions in the motion planning problem for $\mathrm{P}_{\overline{n}}$ are trivial (cf.~Remark~\ref{lasobs} and the considerations after Theorem~\ref{cota1}). To the authors' knowledge, this gives the first infinite family of non-simply connected closed manifolds which are not H-spaces and whose TC is lower than their dimension (cf.~\cite{CV,LS}; the upper bound in~\cite[Theorem~3]{CF} should be noted, too). \medskip In the rest of this introductory section we set up notation and recall needed preliminary results. We use the reduced form of the Schwarz genus (also called sectional category, and denoted by $\mathrm{secat}$) of a fibration, i.e.\ a trivial fibration has zero genus. 
In particular, we consider the reduced form of the Lusternik-Schnirelmann category (cat) and that of Farber's topological complexity (TC) of a space $X$---the latter being the reduced Schwarz genus of the double evaluation map $X^{[0,1]}\to X\times X$ sending a path $\gamma\colon\thinspace[0,1]\to X$ to the pair $(\gamma(0),\gamma(1))$. Thus, $\mathrm{cat}(X)=\mathrm{TC}(X)=0$ for a contractible space~$X$. We will also assume the reader is familiar with~\cite{davispps}, and we next briefly recall the required results from that paper. \medskip We let $\overline{n}$ stand for an $r$-tuple $(n_1,\ldots,n_r)$ of positive integers with $n_1\leq\cdots\leq n_r$. We consider the diagonal action of $\mathbb{Z}_2$ on $S_{\overline{n}}:=S^{n_1}\times\cdots\times S^{n_r}$, and let $\mathrm{P}_{\overline{n}}$ denote the resulting orbit space (so $\mathrm{P}_{(n_1)}$ is the usual real projective space $\mathrm{P}^{n_1}$). We set $|\overline{n}|:=\dim(\mathrm{P}_{\overline{n}})=\dim(S_{\overline{n}})=\sum n_i$ and $\ell(\overline{n})=r$. The real line bundle associated to the obvious covering $S_{\overline{n}}\to\mathrm{P}_{\overline{n}}$, denoted by $\xi_{\overline{n}}$ and called the canonical line bundle over $\mathrm{P}_{\overline{n}}$, can be used to identify the stable class of the tangent bundle $\tau_{\mathrm{P}_{\overline{n}}}$ since \begin{equation}\label{tangentstableclass} \tau_{\mathrm{P}_{\overline{n}}}\oplus r\varepsilon\approx(|\overline{n}|+r)\xi_{\overline{n}}. \end{equation} Here $\varepsilon$ stands for a trivial line bundle. The total space of the $k$-fold iterated Whitney sum of $\xi_{\overline{n}}$ is given by the Borel construction $k\xi_{\overline{n}}=S_{\overline{n}}\times_{\mathbb{Z}_2}\mathbb{R}^k$. In particular, the projectivization of $k\xi_{\overline{n}}$ is given by \begin{equation}\label{ksuma} P(k\xi_{\overline{n}})=\mathrm{P}_{\overline{n}}\times\mathrm{P}^{k-1}. 
\end{equation} The diagonal inclusion $S^{n_1}\hookrightarrow S_{\overline{n}}$ and the projection onto the first factor $S_{\overline{n}}\to S^{n_1}$ induce corresponding maps $j\colon\thinspace\mathrm{P}^{n_1}\hookrightarrow\mathrm{P}_{\overline{n}}$ and $p\colon\thinspace\mathrm{P}_{\overline{n}}\to\mathrm{P}^{n_1}$ satisfying \begin{equation}\label{hopfpreservados} j^*(\xi_{\overline{n}})\approx\xi_{n_1}\mbox{, \ \ }p^*(\xi_{n_1})\approx\xi_{\overline{n}}\mbox{, \ \ and \ \ }p\circ j=\mathrm{Id}. \end{equation} For $2\leq i\leq r$ there are mod 2 cohomology classes $x_i$ in $\mathrm{P}_{\overline{n}}$ with $\dim(x_i)=n_i$ such that the mod 2 cohomology ring of $\mathrm{P}_{\overline{n}}$ is given by \begin{equation}\label{mod2cohring} H^*(\mathrm{P}_{\overline{n}};\mathbb{Z}_2)=H^*(\mathrm{P}^{n_1};\mathbb{Z}_2)\otimes\Lambda[x_2,\ldots,x_r] \end{equation} (where $\Lambda$ denotes an exterior algebra) with the only exception that, if $n_1$ is even, then $x_i^2=x^{n_1}x_i$ whenever $n_i=n_1$. Here $x\in H^1(\mathrm{P}^{n_1};\mathbb{Z}_2)$ satisfies\begin{equation}\label{xsgrandesrestringentrivial} \mbox{$x=w_1(\xi_{\overline{n}}),\;$ but all classes $x_i$ restrict trivially under the inclusion $j$.} \end{equation} We also need the concept of \hspace{.4mm}``generalized axial map'' as defined in~\cite{AsDaGo}: For a real vector bundle $\alpha$ over a space $X$, we let $S(\alpha)$ and $P(\alpha)$ stand, respectively, for the sphere and projectivized bundles associated to $\alpha$. Let $h_\alpha$ denote the Hopf line bundle over $P(\alpha)$ splitting off $\pi^*(\alpha)$, where $\pi\colon\thinspace P(\alpha)\rightarrow X$ is the projection. 
A Hopf-type map\footnote{This is called an `axial map' in~\cite{AsDaGo}, but we have to modify the name in view of Definition~\ref{axialchico} in the next section.} for $\alpha$ is any continuous map $P(\alpha)\rightarrow \mathrm{P}^N$ for which the composite $P(\alpha)\to \mathrm{P}^N\hookrightarrow \mathrm{P}^\infty$ classifies $h_\alpha$. In particular,~(\ref{ksuma}) allows us to talk about Hopf-type maps defined on products of the form $\mathrm{P}_{\overline{n}}\times\mathrm{P}^s$. \section{Immersion dimension}\label{immaxi} \subsection{Axial maps} Consider a pair of sequences $\overline{n}=(n_1,\ldots,n_r)$ and $\overline{m}=(m_1,\ldots,m_s)$. \begin{definicion}\label{axialchico} A continuous map $\alpha\colon\thinspace\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^\infty$ is said to be axial if its restriction to each of the axes classifies the corresponding canonical bundle. By~$(\ref{xsgrandesrestringentrivial})$ this means that $\alpha$ corresponds to the class $x\otimes1+1\otimes x$. A continuous map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^L$ is called axial if the composite $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^L\hookrightarrow\mathrm{P}^\infty$ is axial. \end{definicion} \begin{nota}\label{tcchico} By~$(\ref{hopfpreservados})$, the existence of an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^L$ depends only on $n_1$ and $m_1$. In particular, according to~\cite{FTY}, if $n_1=m_1$, $\mathrm{TC}(\mathrm{P}^{n_1})$ is the minimal integer $L$ for which there is an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^L$. In any case, an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^L$ can exist only if $L\geq\max\{n_1,m_1\}$. 
\end{nota} A slightly weaker concept of axiality arises by requiring that the restriction of $\alpha\colon\thinspace\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^\infty\hspace{.2mm}$ to $\hspace{.5mm}j\times j\colon\thinspace\mathrm{P}^{n_1}\times\mathrm{P}^{m_1}\hookrightarrow\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\hspace{.4mm}$ is axial in the usual sense. Yet, nothing is lost with respect to the more restrictive Definition~$\ref{axialchico}$ if we only care (as we will in this subsection) about the {\it existence} of such maps. Indeed, in view of~$(\ref{mod2cohring})$, the only potential problem arises when $n_2=1$ or $m_2=1$. To fix ideas, assume $n_2=\cdots=n_\ell=1<n_{\ell+1}$ $(\ell\leq r)$. Then, the restriction of $\alpha$ to its first axis might conceivably correspond to a class of the form $x+\sum_{i=2}^\ell\epsilon_ix_i$. Although such a situation is perfectly attainable, it can be easily fixed. Indeed,~\cite[Theorem~2.20]{davispps} asserts that, under the present conditions, $\mathrm{P}_{\overline{n}}$ is homeomorphic to $(S^1)^{\ell-1}\times\mathrm{P}_{\overline{q}}$ where $\overline{q}=(1,n_{\ell+1},\ldots,n_r)$. Thus, unless $m_2=1$ (in which case the following adjustment would have to be made on the second axis, too), the required axial map is given by the composite $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\overset{\alpha\;}\rightarrow\mathrm{P}^\infty$ where the first map is $\gamma\times1$, and $\gamma$ is the projection $\mathrm{P}_{\overline{n}}\to\mathrm{P}_{\overline{q}}\hspace{.3mm}$ followed by the inclusion $\mathrm{P}_{\overline{q}}\hookrightarrow\mathrm{P}_{\overline{n}}$. \medskip As a consequence of Remark~\ref{tcchico}, the nice relationship between $\mathrm{TC}$ and the existence of suitable axial maps between (usual) real projective spaces cannot hold for a $\mathrm{P}_{\overline{n}}$ with $\ell(\overline{n})>1$. 
Yet, the axial map approach can be used to characterize the immersion dimension of $\mathrm{P}_{\overline{n}}$ in a suitable range of dimensions. Indeed, the following are standard consequences of~\cite{davispps} and \cite{sanderson}: \begin{enumerate}[(I)] \item\label{laaxialcritic}The existence of a smooth immersion $\mathrm{P}_{\overline{n}}\looparrowright\mathbb{R}^M$ implies the existence of an axial map $\mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$. \item\label{ms}The converse of~(\ref{laaxialcritic}) holds provided $\mathrm{P}_{\overline{n}}$ is not stably parallelizable and $n_1<2(M-|\overline{n}|)$. \end{enumerate} \medskip We will now elaborate on the previous facts from a purely `projective-product' viewpoint---without relying on the connection through the generalized vector field problem. \begin{proposicion}\label{immgivesaxi} The existence of an immersion $\mathrm{P}_{\overline{n}}\looparrowright\mathbb{R}^M$ implies the existence of a Hopf-type map $\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$. \end{proposicion} \begin{proof} Let $\varepsilon$ be the trivial line bundle over $\mathrm{P}_{\overline{n}}$ and $\nu$ the normal bundle of the given immersion. From~(\ref{tangentstableclass}) we have the composite $$(|\overline{n}|+r)\xi_{\overline{n}}\hookrightarrow(|\overline{n}|+r)\xi_{\overline{n}}\oplus\nu=(\tau_{\mathrm{P}_{\overline{n}}}\oplus r\varepsilon)\oplus\nu=(M+r)\varepsilon. $$ The required Hopf-type map is given by the composite $$\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}{\hspace{.3mm}=\hspace{.3mm}}P((|\overline{n}|+r)\xi_{\overline{n}}){\hspace{.3mm}\hookrightarrow\hspace{.3mm}} P\left((M+r)\varepsilon\right){\hspace{.3mm}=\hspace{.3mm}}\mathrm{P}_{\overline{n}}\times\mathrm{P}^{M+r-1}\hspace{-.3mm}\stackrel{\mathrm{proj}}\longrightarrow\mathrm{P}^{M+r-1}$$ (cf.~\cite[Section~2]{AsDaGo}). 
\end{proof} \begin{nota}\label{radial} The converse of Proposition~$\ref{immgivesaxi}$ can be proved (under an additional hypothesis) in terms of a standard application of Haefliger-Hirsch homotopy approximation of monomorphisms by skew-maps in the meta\-stable range~$($\cite{HH}, compare with~\cite{AdGiJa} or \cite[Corollary~2.8]{AsDaGo}$)$. Indeed, the axial map in the conclusion of Proposition~$\ref{immgivesaxi}$ is double covered by a $\mathbb{Z}_2$-equivariant map $$ S((|\overline{n}|+r)\xi_{\overline{n}})=S_{\overline{n}}\times_{\mathbb{Z}_2}S^{|\overline{n}|+r-1}\to S^{M+r-1}. $$ This and the projection $(|\overline{n}|+r)\xi_{\overline{n}}\to\mathrm{P}_{\overline{n}}$ determine a map $S((|\overline{n}|+r)\xi_{\overline{n}})\to\mathrm{P}_{\overline{n}}\times S^{M+r-1}$ which, after radial extension, yields a skew map $(|\overline{n}|+r)\xi_{\overline{n}}\to(M+r)\varepsilon\hspace{.5mm}$ over $\mathrm{P}_{\overline{n}}$. Theorem~$1.2$ in~\cite{HH} claims that the latter map can be skew-deformed to a bundle monomorphism $\Phi\colon\thinspace(|\overline{n}|+r)\xi_{\overline{n}}\hookrightarrow(M+r)\varepsilon$ provided \begin{equation}\label{rmetastable} 3|\overline{n}|<2M. \end{equation} Coker$(\Phi)$ is then an $(M-|\overline{n}|)$-dimensional bundle which, after taking into account~$(\ref{tangentstableclass})$ and cancelling $r$ trivial sections, yields an isomorphism $\tau_{\mathrm{P}_{\overline{n}}}\oplus\mbox{Coker}(\Phi)=M\varepsilon$. Thus \cite{hirsch} asserts that Coker$(\Phi)$ is the normal bundle of an immersion, as required. \end{nota} Of course, the hypothesis~(\ref{rmetastable}) is much stronger than the arithmetical condition in~(\ref{ms}), a hypothesis where $n_1$ plays a more relevant role (and which is in accordance to Remark~\ref{tcchico}). 
\begin{proposicion}\label{hopfaxial} There is a Hopf-type map $\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$ if and only if there is an axial map $\mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$. \end{proposicion} \begin{proof} In view of~(\ref{xsgrandesrestringentrivial}), it suffices to check that the map $\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^\infty$ that classifies the Hopf line bundle $h_{(|\overline{n}|+r)\xi_{\overline{n}}}$ corresponds to $x\otimes1+1\otimes x$. For this purpose, we may assume without loss of generality that the given Hopf-type map arises from an immersion as in Proposition~\ref{immgivesaxi} (say for a large enough $M$---this is irrelevant for the intended goal). Then, with the notation of that result, we see from~(\ref{hopfpreservados}) that, by restricting the isomorphism $(|\overline{n}|+r)\xi_{\overline{n}}\oplus\nu=(M+r)\varepsilon$ under the inclusion $j\colon\thinspace \mathrm{P}^{n_1}\hookrightarrow\mathrm{P}_{\overline{n}}$, we get a Hopf-type map $$ \mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1}=P((|\overline{n}|+r)\xi_{n_1})\hookrightarrow P((|\overline{n}|+r)\xi_{\overline{n}})=\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1} $$ which, as proved in~\cite{AdGiJa}, must also be an axial map. Thus,~(\ref{xsgrandesrestringentrivial}) implies that $h_{(|\overline{n}|+r)\xi_{\overline{n}}}$ corresponds, under the identification $P((|\overline{n}|+r)\xi_{\overline{n}})=\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}$ in~(\ref{ksuma}), to a class of the form $$1\otimes x+(x+\sum\mu_ix_i)\otimes1$$ where the summation runs over indexes $i$ with $n_i=1$, and each $\mu_i$ is either 0 or 1. But the first isomorphism in~(\ref{hopfpreservados}) and the naturality of the construction of Hopf line bundles imply $\mu_i=0$ for all relevant $i$. 
\end{proof} \begin{ejemplo}\label{kee} The arithmetical hypothesis in~\emph{(\ref{ms})} is superfluous when $\ell(\overline{n})=1$, but it is needed if $\ell(\overline{n})>1$. From our perspective, such a phenomenon is due to the fact that, although the immersion dimension of any standard real projective space holds within Haefliger's metastable range~$($\cite{AdGiJa}$)$, as noted in~\cite{davispps}, a projective product space $\mathrm{P}_{\overline{n}}$ with $\ell(\overline{n})>1$ usually admits (very) low-codimension Euclidean immersions---compare to Remark~$\ref{aclara}$ below. For instance\footnote{We thank Kee Lam for kindly pointing out this example.}, the non-parallelizable $\mathrm{P}_{(12,14)}$ does not immerse in $\mathbb{R}^{30}$ in view of~\cite[Theorem~3.4]{davispps}, \cite[Lemma~2.2]{sanderson}, and~\cite{KD} (in that order), but the existence of the corresponding axial map in~\emph{(\ref{laaxialcritic})} is obtained in~\cite{KD} through a Postnikov tower argument. \end{ejemplo} Despite Example~\ref{kee}, the method of proof of the main result in~\cite{AdGiJa} yields: \begin{proposicion}\label{agj} If $\mathrm{gd}\left(-(|\overline{n}|+r)\xi_{n_1}\right)>\lceil{(n_1+1)/2}\rceil$, then the arithmetical hypothesis in~\emph{(\ref{ms})} is superfluous. \end{proposicion} \begin{proof} Assume for a contradiction that, for some $M$, there is an axial map $\mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$ but that the non-stably parallelizable $\mathrm{P}_{\overline{n}}$ does not immerse in $\mathbb{R}^M$. Without loss of generality we can assume $M=\mathrm{imm}(\mathrm{P}_{\overline{n}})-1>|\overline{n}|$. 
Then,~\cite[Theorem~3.4]{davispps} gives $$ M-|\overline{n}|=\mathrm{imm}(\mathrm{P}_{\overline{n}})-|\overline{n}|-1=\mathrm{gd}(-(|\overline{n}|+r)\xi_{n_1})-1\geq\left\lceil{\frac{n_1+1}2}\right\rceil, $$ which amounts to having the arithmetical hypothesis in~(\ref{ms}).\end{proof} \begin{nota}\label{aclara} In the same line of reasoning as in Example~$\ref{kee}$, it follows from~\cite{adams} that, for any large $n_1$, there are instances of spaces $\mathrm{P}_{(n_1,\ldots,n_r)}$ for which the hypothesis in Proposition~$\ref{agj}$ fails. \end{nota} It is worth mentioning that, for $n_1\leq 9$, the arithmetical hypothesis in~(\ref{ms}) above is superfluous\footnote{Kee Lam has brought to the authors' attention that the smallest case where the arithmetical hypothesis in~(\ref{ms}) is actually needed takes place when $n_1=10$. }. As in the proof of Proposition~\ref{agj}, such an assertion can be verified by checking that, in the indicated range, there is no axial map $\mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{M+r-1}$ with $M=\mathrm{imm}(\mathrm{P}_{\overline{n}})-1>|\overline{n}|$. Indeed, under the current hypothesis, such an axial map is prevented by the relation \begin{equation}\label{hopf} (x+y)^{M+r}\neq0 \end{equation} where $x$ and $y$ denote respectively the generators of the mod~$2$ cohomology groups $H^1(\mathrm{P}^{n_1};\mathbb{Z}_2)$ and $H^1(\mathrm{P}^{|\overline{n}|+r-1};\mathbb{Z}_2)$. Explicitly, the basis element $x^gy^{|\overline{n}|+r-1}\in H^*(\mathrm{P}^{n_1}\times\mathrm{P}^{|\overline{n}|+r-1};\mathbb{Z}_2)$ appears in the expansion of~$(\ref{hopf})$ with coefficient \begin{equation}\label{bino} \binom{|\overline{n}|+r+g-1}{g} \end{equation} where $g=\mathrm{gd}(-(|\overline{n}|+r)\xi_{n_1})$. But, under the current hypothesis, a direct verification using~\cite{keesectioning} (or, alternatively, Table~$4.4$ and Proposition~$4.5$ in~\cite{davispps}) shows that~$(\ref{bino})$ is odd. 
For instance, consider the case $n_1=6$, where the assumption that $\mathrm{P}_{\overline{n}}$ is not stably parallelizable means $|\overline{n}|+r\not\equiv0\bmod8$. Then,~\cite[Table~4.4]{davispps} gives $g=(6,6,5,4,3,2,1)$ for $|\overline{n}|+r\equiv(1,2,3,4,5,6,7)\bmod8$. So $$ \binom{|\overline{n}|+r+g-1}{g}\equiv\left(\binom{6}{6},\binom{7}{6},\binom{7}{5},\binom{7}{4},\binom{7}{3},\binom{7}{2},\binom{7}{1}\right)\equiv1\;\bmod2. $$ \medskip We close this subsection by remarking that, just as the situation in Example~\ref{kee} for the condition $n_1<2(M-|\overline{n}|)$, the hypothesis that $\mathrm{P}_{\overline{n}}$ is not stably parallelizable is also needed in~(\ref{ms}). Yet, the full TC-axial picture is well understood in the stably parallelizable case. In fact, the situation is entirely similar to that in the classical case with $\ell(\overline{n})=1$, where there are well-known axial maps $\mathrm{P}^n\times\mathrm{P}^n\to\mathrm{P}^n$ for $n=1,3,7$, but of course no immersion $\mathrm{P}^n\looparrowright\mathbb{R}^n$. Namely, since the immersion dimension of a stably parallelizable $\mathrm{P}_{\overline{n}}$ is $|\overline{n}|+1$, there is an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{|\overline{n}|+r}$. But there is a finer (and optimal) axial map \begin{equation}\label{finer} \mathrm{P}_{\overline{n}}\times\mathrm{P}^{|\overline{n}|+r-1}\to\mathrm{P}^{|\overline{n}|+r-1} \end{equation} (which cannot come from an immersion). Indeed, as shown in~\cite{davispps}, the stable parallelizability of $\mathrm{P}_{\overline{n}}$ means that the exponent in the highest $2$-power dividing $|\overline{n}|+r$ is no less than $\phi(n_1)$---the number of positive integers less than or equal to $n_1$ and which are congruent to $0$, $1$, $2$, or $4$ mod $8$. 
Therefore, classical work of Hurwitz, Radon, and Eckmann on the so-called Hurwitz-Radon matrix equations gives in fact a non-singular bilinear map $\mathbb{R}^{n_1+1}\times\mathbb{R}^{|\overline{n}|+r}\to\mathbb{R}^{|\overline{n}|+r}$ and, in view of Remark~\ref{tcchico}, an axial map of the form~(\ref{finer}). An intriguing possibility is that explicit `linear' formul\ae\ leading to an axial map~(\ref{finer}) could be deduced from a refinement of the Clifford-algebra input in the Hurwitz-Radon number---without relying on Remark~\ref{tcchico}. \subsection{Non-singular maps} The existence of axial maps can be translated into the existence of certain non-singular maps. Not only is such a fact a straightforward generalization of the corresponding well-known property for usual projective spaces, but the language of non-singular maps turns out to be irrelevant for the purposes of the paper, since they fail to provide local motion planners as in the classical case. Consequently, these ideas are loosely treated in this subsection, mentioned only for completeness purposes. There are two closely related notions of non-singular maps associated to an axial map between projective product spaces. In the first one, for an $\ell$-tuple $\overline{q}=(q_1,\ldots,q_\ell)$, we consider the cone $Q_{\overline{q}}$ in $\mathbb{R}^{q_1+1} \times \dots \times \mathbb{R}^{q_\ell+1} $ consisting of tuples $\overline{x}=(x_1, \ldots, x_\ell)$ with $|x_1| = \cdots = |x_\ell|$. Thus, $\mathrm{P}_{\overline{q}}$ is the projectivization of $Q_{\overline{q}}$, i.e.~$\mathrm{P}_{\overline{q}}$ is the subspace of $\mathrm{P}^{|\overline{q}|+\ell-1}$ consisting of the lines contained in $Q_{\overline{q}}$. 
Then, a continuous map $f:Q_{\overline{n}}\times Q_{\overline{m}} \to \mathbb{R}^{k+1}$ is said to be non-singular if $f(\lambda \overline{x}, \mu \overline{y})= \lambda\mu f(\overline{x},\overline{y})$ for $\lambda,\mu\in\mathbb{R}$, and if the equality $f(\overline{x},\overline{y})=0$ holds only with $\overline{x}=0$ or $\,\overline{y}=0$. With this definition, {\it there is a one-to-one correspondence between the set of non-singular maps $f:Q_{\overline{n}}\times Q_{\overline{m}}\to\mathbb{R}^{k+1}$ (taken up to multiplication by a non-zero scalar) and the set of axial maps $g:\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{m}}\to\mathrm{P}^k$.} Such a corresponding pair $(f,g)$ fits in a commutative diagram $$\xymatrix{ Q_{\overline{n}} \times Q_{\overline{m}} \ar[d]_f & S_{\overline{n}} \times S_{\overline{m}} \ar[d]_{f'} \ar@{_(->}[l] \ar[r] & S_{\overline{n}} \times_{\mathbb{Z}_2} S_{\overline{m}} \ar[d]_h \ar[r] & \mathrm{P}_{\overline{n}} \times \mathrm{P}_{\overline{m}} \ar[d]_g \\ \mathbb{R}^{k+1} & \mathbb{R}^{k+1} - \{0\} \ar@{_(->}[l] \ar[r]^{\ \ \ \ \ \rho}& S^k \ar[r]& \mathrm{P}^k. }$$ Here the unlabelled horizontal maps facing east are the obvious two fold coverings, $\rho$ is the normalization map $\rho(u)=u/|u|$, $f'$ is the restriction $f|_{S_{\overline{n}}\times S_{\overline{m}}}$, and the right hand square is a pullback (hence $h$ is $\mathbb{Z}_2$-equivariant). Explicitly, given $f$, $g([\overline{x}],[\overline{y}])$ is the line in $\mathbb{R}^{k+1}$ that goes through the origin and $f(\overline{x},\overline{y})$. 
Conversely, given $g$, pick $h$ as in the diagram above and precompose it with the double covering $S_{\overline{n}}\times S_{\overline{m}}\to S_{\overline{n}}\times_{\mathbb{Z}_2} S_{\overline{m}}$ to get a $\mathbb{Z}_2$-biequivariant map $\tilde{g}:S_{\overline{n}}\times S_{\overline{m}} \to S^{k}$. Then $f$ is the ``bi-radial'' extension of $\tilde{g}$ given by \begin{equation}\label{alaXico} f(\overline{x}, \overline{y}) = \begin{cases} \frac{|\overline{x}|}{\sqrt{r}} \frac{|\overline{y}|}{\sqrt{s}} \; \tilde g\left(\frac{\sqrt{r}}{|\overline{x}|} \, \overline{x}, \frac{\sqrt{s}}{|\overline{y}|} \, \overline{y} \right), & \text{if $\overline{x} \neq 0$ and $\overline{y} \neq 0;$} \\ 0,& \text{if $\overline{x} =0$ or $\overline{y}=0$}. \end{cases} \end{equation} Note that if $f:\mathbb{R}^{n_1+1} \times \mathbb{R}^{m_1+1} \to \mathbb{R}^{k+1}$ is a non-singular map (in the usual sense), then for any $\overline{n}=(n_1,n_2,\ldots,n_r)$ and $\overline{m}=(m_1,m_2,\ldots,m_s)$ a non-singular map $Q_{\overline{n}}\times Q_{\overline{m}}\to\mathbb{R}^{k+1}$ can be defined by $(\overline{x},\overline{y})\mapsto f(x_1,y_1)$. Of course, this fact is compatible with Remark~$\ref{tcchico}$. A slight variation of the notion of non-singular maps goes as follows: Set $V_{\overline{t}}=\mathbb{R}^{t_1 +1}\times \cdots \times \mathbb{R}^{t_\ell+1}$. A map $f:V_{\overline{n}}\times V_{\overline{m}} \to \mathbb{R}^{k+1}$ is said to be non-singular if $f(\lambda \overline{x}, \mu \overline{y})= \lambda\mu f(\overline{x},\overline{y})$ for $\lambda,\mu\in\mathbb{R}$, and if the equality $f(\overline{x},\overline{y})=0$ holds only when a coordinate $x_i$ of $\overline{x}$ or a coordinate $y_j$ of $\overline{y}$ vanishes. 
Then the above considerations apply basically without change, except that~(\ref{alaXico}) takes the slightly more elaborated form $$ f(\overline{x}, \overline{y}) = \begin{cases} N(\overline{x},\overline{y})\;\tilde{g}\left( \frac{x_1}{|x_1|},\ldots,\frac{x_r}{|x_r|},\frac{y_1}{|y_1|},\ldots,\frac{y_s}{|y_s|} \right), & \text{if no } x_i \text{ nor } y_j\text{ is zero}; \\ 0, & \text{otherwise}, \end{cases} $$ where $N(\overline{x},\overline{y})=\left(|x_1|\cdots|x_r|\right)^{\frac{1}{r}}\left(|y_1|\cdots|y_s|\right)^{\frac{1}{s}}$. \section{Topological complexity}\label{SecTC} In this section we give several general estimates for $\mathrm{TC}(\mathrm{P}_{\overline{n}})$. We find that $\mathrm{TC}(\mathrm{P}_{\overline{n}}) < \dim(\mathrm{P}_{\overline{n}})$ in certain cases, indicating that a simple relation to immersion dimension such as~(\ref{classic}) does not hold for these manifolds. We also compute the exact value of $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ in many cases (Proposition~\ref{analogo}), and give evidence toward the appealing possibility that $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ would depend mostly on $\mathrm{TC}(\mathrm{P}^{n_1})$ and $\ell(\overline{n})$. Let $\overline{\infty}$ stand for the $r$-tuple $(\infty,\ldots,\infty)$, and let $\mathrm{P}_{\overline{\infty}}$ denote the quotient of $\prod_{r}S^\infty$ by the diagonal action of $\mathbb{Z}_2$ (with the antipodal action on each factor). Note that $\mathrm{P}_{\overline{\infty}}$ is an Eilenberg-MacLane space $K(\mathbb{Z}_2,1)$ containing $\mathrm{P}_{\overline{n}}$. \begin{lema}\label{cells} There is a CW decomposition for $\mathrm{P}_{\overline{\infty}}$ whose $n_1$-skeleton is contained in $\mathrm{P}_{\overline{n}}$. 
\end{lema} \begin{proof} Let $e^0_+\cup e^0_-\cup\cdots\cup e^m_+\cup e^m_-$ be the usual $\mathbb{Z}_2$-equivariant cell structure on a sphere $S^m$, and consider the resulting product structure \begin{equation}\label{celstructure} S_{\overline{n}}=\bigcup e^{i_1}_\pm\times\cdots\times e^{i_r}_\pm. \end{equation} If $\tau$ stands for the generator of $\mathbb{Z}_2$, then a cell structure on $\mathrm{P}_{\overline{n}}$ can be formed by identifying a cell $e^{i_1}_\pm\times\cdots\times e^{i_r}_\pm$ in~(\ref{celstructure}) with the corresponding cell $\tau\cdot(e^{i_1}_\pm\times\cdots\times e^{i_r}_\pm)$. If $\ell(\overline{m})=\ell(\overline{n})$ and $n_i\le m_i$, the inclusion $\mathrm{P}_{\overline{n}}\hookrightarrow\mathrm{P}_{\overline{m}}$ contains the $n_1$-skeleton of $\mathrm{P}_{\overline{m}}$. Thus the required cell structure in $\mathrm{P}_{\overline{\infty}}$ is the inductive one under the above inclusions.\end{proof} We are indebted to Sergey Melikhov for pointing out (in~\cite{Mel}) the proof of the following fact: \begin{proposicion}\label{polyhedral_fibers} Let $M^m$ and $N^n$ be closed smooth manifolds, and let $C^\infty(M,N)$ denote the space of smooth maps in the Whitney $C^\infty$-topology. Then for $f\colon\thinspace M\to N$ in a dense subset of $C^\infty(M,N)$, the fibers $f^{-1}(y)$ with $y\in N$ are all polyhedra of dimension $\le \max(m-n,0)$. \end{proposicion} \begin{proof} First we note that the set of triangulable maps is dense in $C^\infty(M,N)$. Recall that a smooth map $f\colon\thinspace M\to N$ is {\em triangulable} if there exists a PL map $g\colon\thinspace K\to L$ between PL manifolds, and homeomorphisms $h\colon\thinspace M\to K$ and $h'\colon\thinspace N\to L$ such that $g\circ h = h'\circ f$. 
By Verona's proof of Thom's triangulation conjecture \cite{Ver}, we know that all proper, topologically stable maps $f\colon\thinspace M\to N$ are triangulable. By the Thom-Mather theorem (a full proof of which appears in \cite{GWPL}), such maps form an open dense subset of $C^\infty(M,N)$. Next, we note that the fibers $f^{-1}(y)$ of a triangulable map $f\colon\thinspace M\to N$ are all polyhedra (they are homeomorphic to simplicial complexes). For given $y\in N$, we may choose a triangulation $h'\colon\thinspace N\to L$ as above with $h'(y)$ a vertex of $L$. Then $f^{-1}(y)$ is homeomorphic with $g^{-1}(h'(y))$, a subcomplex of $K$. Finally, we claim that for $f\colon\thinspace M\to N$ in an open dense subset of the space $C^\infty(M,N)$, the fibers $f^{-1}(y)$ all have covering dimension $\le \max(m-n,0)$. Intersecting this set with the set of proper, topologically stable maps, we find an open dense set of maps whose fibers are all polyhedra of covering dimension $\le \max(m-n,0)$. Since covering dimension is a topological property, this proves the Proposition. The proof of the final claim follows from the multi-jet transversality theorem \cite{GG}, which implies that for an open dense set of mappings $f\colon\thinspace M\to N$, the fibers $f^{-1}(y)$ all have the structure of a smooth submanifold of $M$ of dimension $\max(m-n,0)$ away from at most finitely many isolated singular points. \end{proof} \begin{teorema}\label{main2} $\mathrm{TC}(\mathrm{P}_{\overline{n}})\leq2|\overline{n}|-n_1+1$ for $\hspace{.3mm}\ell(\overline{n})>1$. On the other hand, the following numbers are equal, giving a lower bound for $\mathrm{TC}(\mathrm{P}_{\overline{n}})\colon\thinspace$ \begin{itemize} \item The Schwarz genus of the obvious double cover $S_{\overline{n}}\times_{\mathbb{Z}_2}S_{\overline{n}}\to\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{n}}$. 
\item The smallest integer $L$ for which $(L+1)\xi_{\overline{n}}\otimes\xi_{\overline{n}}$ admits a nowhere zero section. \item The smallest integer $L$ for which there is an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{n}}\to\mathrm{P}^{L}$. \item $\mathrm{TC}(\mathrm{P}^{n_1})$ \end{itemize} \end{teorema} \begin{proof} It follows from Remark~\ref{tcchico} and the first two conditions in~(\ref{hopfpreservados}) that the number described in each of the first three items does not change if $\overline{n}$ is replaced by $n_1$ (for the first item we use the fact that the indicated double cover is the sphere bundle associated to $\xi_{\overline{n}}\otimes\xi_{\overline{n}}$). Therefore, the equality of the four listed numbers follows from~\cite[Theorem~6.1]{FTY}. The fact that they give a lower bound for $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ follows from the third condition in~(\ref{hopfpreservados}) and the behavior of $\mathrm{TC}$ under retracts. We use the argument in~\cite[Corollary~4.5]{grant} (which is inspired in turn by~\cite{ow}) to prove the upper bound in this theorem. Set $L=2|\overline{n}|-n_1+1$. By~(\ref{classic}) and Remark~\ref{tcchico}, we can choose an axial map $q\colon\thinspace\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{n}}\to\mathrm{P}^L$. Since the axial condition is homotopical, we can assume first that $q$ is smooth and then, by Proposition~\ref{polyhedral_fibers}, that for each $z\in\mathrm{P}^L$ the inverse image $q^{-1}(z)$ is homeomorphic to a CW complex of dimension at most $n_1-1$.
Then, the axiality of $q$ implies that the image of the class $x$ in~(\ref{xsgrandesrestringentrivial}) under the composite \begin{equation}\label{hoty} q^{-1}(z)\hookrightarrow \mathrm{P}_{\overline{n}} \times \mathrm{P}_{\overline{n}} \stackrel{\pi_i\;\,}\to \mathrm{P}_{\overline{n}} \end{equation} is independent of the projection $\pi_i\colon\thinspace\mathrm{P}_{\overline{n}} \times \mathrm{P}_{\overline{n}} \to \mathrm{P}_{\overline{n}}$ ($i=1,2$) used. In fact, Lemma~\ref{cells} and the dimensionality assumption on $q^{-1}(z)$ imply that the actual homotopy type of~(\ref{hoty}) is independent of $i$. The result then follows from Lemma~2.5 and Theorem~4.3 in~\cite{grant}. \end{proof} Of course, part of the argument for the lower bound in Theorem~\ref{main2} actually yields $\mathrm{TC}(\mathrm{P}^{n_1})\leq\mathrm{TC}(\mathrm{P}_{(n_1,n_2)})\leq\cdots\leq\mathrm{TC}(\mathrm{P}_{(n_1,\ldots,n_{r-1})})\leq\mathrm{TC}(\mathrm{P}_{\overline{n}})$. On the other hand, the argument proving the upper bound uses and corrects the proof of~\cite[Corollary~4.5]{grant} which, instead of using Proposition~\ref{polyhedral_fibers}, is based on an assertion about approximating axial maps by submersions. But such a claim is false in general, as illustrated next. \begin{ejemplo}\label{mark_example} Since $\mathrm{P}^2\looparrowright \mathbb{R}^3$, there exists an axial map $q\colon\thinspace\mathrm{P}^2\times\mathrm{P}^2\to\mathrm{P}^3$. Note that $2<3<2\cdot 2 =4$. However, $q$ is not homotopic to a submersion. In fact, there does not exist {\em any} submersion $\mathrm{P}^2\times\mathrm{P}^2\to\mathrm{P}^3$, by the following easy argument involving Stiefel-Whitney classes: Suppose $g\colon\thinspace\mathrm{P}^2\times\mathrm{P}^2\to\mathrm{P}^3$ is a submersion.
Then we obtain the short exact sequence of vector bundles over $\mathrm{P}^2\times \mathrm{P}^2$ $$ 0 \to E \to T(\mathrm{P}^2\times\mathrm{P}^2) \stackrel{dg\,}{\longrightarrow} g^*T(\mathrm{P}^3) \to 0 $$ where the kernel $E$ is a real line bundle. It then follows that $$w(\mathrm{P}^2\times\mathrm{P}^2) = w(E)\hspace{.6mm}g^*w(\mathrm{P}^3) = w(E)$$ $($the latter equality since $\mathrm{P}^3$ is parallelizable$)$. But this is impossible since, for example, $w_2(\mathrm{P}^2\times\mathrm{P}^2)\neq 0$. \end{ejemplo} \begin{nota}\label{CV} It is possible to prove the upper bound in Theorem~$\ref{main2}$ by applying~\cite[Theorem~3]{CV} to an axial map $\mathrm{P}_{\overline{n}}\times\mathrm{P}_{\overline{n}}\to\mathrm{P}^{2|\overline{n}|-n_1+1}$, and noticing that the canonical inclusion $\mathrm{P}_{\overline{n}}\hookrightarrow\mathrm{P}^{2|\overline{n}|-n_1+1}$ is an $n_1$-equivalence. We have chosen the approach in~\cite{grant} due to the intrinsic interest of Proposition~$\ref{polyhedral_fibers}$. \end{nota} \begin{nota}\label{lasobs} The standard upper bound $\mathrm{TC}\leq2\dim$ means that, in general, there are up to twice $\dim(X)$ classical homotopy obstructions to consider when bounding $\mathrm{TC}(X)$ from above. For instance, the first top two are central in~\cite{CF}, with the very top one being critical for Costa-Farber's applications---the next-to-the-top one comes for free from~\cite{berstein}. Thus, the upper bound in Theorem~$\ref{main2}$ is already taking care of the first $n_1-1$ of these obstructions for $X=\mathrm{P}_{\overline{n}}$. \end{nota} The lower bound in Theorem~\ref{main2} is rather crude, as it ignores information coming from $S^{n_2}\times\cdots\times S^{n_r}$.
For instance,~\cite[Theorem~4.5]{FTY},~(\ref{mod2cohring}), and `zero-divisors' cup-length ($\mathrm{zcl}$) considerations (as defined in~\cite{farberTCplanning}) easily yield \begin{equation}\label{enriquesbound} \mathrm{TC}(\mathrm{P}_{\overline{n}})\geq2^{e+1}+\ell({\overline{n}})-2 \mbox{ provided }n_1\geq 2^e, \end{equation} which improves by an arbitrarily large amount the lower bound in Theorem~\ref{main2} when $\ell(\overline{n})\gg0$. On the other hand, the general philosophy behind~(\ref{classic}) implies that the lower bound in Theorem~\ref{main2} can be much stronger than that in~(\ref{enriquesbound}) if $\ell(\overline{n})=2$. For instance~\cite{james} gives \begin{equation}\label{jassint} \mathrm{TC}(\mathrm{P}_{(2^e-1,2^e-1)})\geq\mathrm{TC}(\mathrm{P}^{2^e-1})\geq2^{e+1}-2e-(2,1,1,3) \end{equation} provided $e\equiv(0,1,2,3)\bmod4$, a bound which is almost twice that in~(\ref{enriquesbound}). Of course, further results of this sort can be deduced from our current knowledge of the immersion dimension of (usual) real projective spaces. In view of~\cite[Theorems~2.1 and 2.4]{GTV}, it should be possible to use zcl-considerations based on {\it generalized} cohomology theories in order to insert the nice $\ell(\overline{n})$-feature of~(\ref{enriquesbound}) into the lower bound in Theorem~\ref{main2}, thus merging the corresponding strengths of~(\ref{enriquesbound}) and~(\ref{jassint}) into a single lower bound (we hope to explore such a possibility elsewhere). \medskip More interesting is the fact that $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ can be arbitrarily smaller than the dimension of $\mathrm{P}_{\overline{n}}$.
The simplest of such situations originates from the subadditivity of TC~(\cite{farberTCplanning}), as $\mathrm{TC}(\mathrm{P}_{\overline{n}})-\mathrm{TC}(\mathrm{P}_{\overline{m}})\leq2$ whenever $\mathrm{P}_{\overline{n}}\approx\mathrm{P}_{\overline{m}}\times S^{n_i}$ (the latter decomposition is characterized arithmetically in~\cite[Theorem~2.20]{davispps}). As an extreme situation consider the following partial analogue of~\cite[(2.21)]{davispps}: \begin{proposicion}\label{analogo} Let $\Phi(n_1)$ be the number of positive integers equal to or less than $n_1$ which are congruent to $0$, $1$, $2$, or $4$ (mod $8$). If $\nu(n_i + 1)\geq\Phi(n_1)$ for all $i>1$, then $$\mathrm{zcl}_{\mathbb{Z}_2}(\mathrm{P}^{n_1})+\ell(\overline{n})-1\leq\mathrm{TC}(\mathrm{P}_{\overline{n}})\leq\mathrm{TC}(\mathrm{P}^{n_1})+\ell(\overline{n})-1.$$ Further, both inequalities above become equalities precisely for $n_1$ a $2$-power. \end{proposicion} \begin{proof} The first inequality is~(\ref{enriquesbound}); the second inequality follows from~\cite[Theorem~2.20]{davispps}. The final assertion follows from the standard fact that $\mathrm{TC}(\mathrm{P}^{n_1})=\mathrm{zcl}_{\mathbb{Z}_2}(\mathrm{P}^{n_1})$ precisely for $n_1$ a 2-power. \end{proof} Proposition~\ref{analogo} suggests the possibility that $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ can be estimated for any $\overline{n}$ in terms of $\mathrm{TC}(\mathrm{P}^{n_1})$ and $\ell(\overline{n})$ alone. Theorem~\ref{cota1} below (whose proof is postponed to the next section) fits into such a general philosophy, and shows that the low TC-phenomenon in Proposition~\ref{analogo} holds even if there are no spheres factoring out $\mathrm{P}_{\overline{n}}$. \begin{teorema}\label{cota1} If $k$ denotes the number of spheres $S^{n_i}$ with $n_i$ even and $i>1$, then $\mathrm{TC}(\mathrm{P}_{\overline{n}})<(\mathrm{TC}(\mathrm{P}^{n_1})+1)(\ell(\overline{n})+k)$.
\end{teorema} The upper bound in Theorem~$\ref{cota1}$ will be much lower than the dimension of $\mathrm{P}_{\overline{n}}$ provided the sum $n_2+\cdots+n_r$ is large enough---which can hold even if there are no spheres $S^{n_i}$ factoring out $\mathrm{P}_{\overline{n}}$. Thus, in such cases, most of the homotopy obstructions in the motion planning problem for $\mathrm{P}_{\overline{n}}$ already vanish. It is worth noticing that $\mathrm{TC}(\mathrm{P}_{\overline{n}})$ is not always less than $\dim(\mathrm{P}_{\overline{n}}){:}$ if $1^r$ stands for the $r$-tuple $(1,\ldots,1)$, then $\mathrm{TC}(\mathrm{P}_{1^r})=\dim(\mathrm{P}_{1^r})$, in view of Proposition~$\ref{analogo}$. On the other hand, the upper bound in Theorem~\ref{cota1} does not always improve that in Theorem~\ref{main2}. For instance, in the case of $\mathrm{P}_{(2^e,2^e)}$, the former bound is $6\cdot2^e$ while the latter one is only $3\cdot2^e+1$. \section{Equivariant topological complexity} In a recent paper \cite{CG} Hellen Colman and the second author explore an equivariant generalization of topological complexity, in the setting of compact group actions. Here we give additional examples and results which will be useful in applying their results to the estimation of topological complexity of projective product spaces. Let $G$ be a compact Hausdorff topological group (in our present applications $G$ will be the cyclic group $\mathbb{Z}_2$). If $p\colon\thinspace E\to B$ is a $G$-map, the {\em equivariant sectional category} of $p$, denoted $\mathrm{secat}_G(p)$, is defined in \cite[Section 5]{CG} to be the least integer $k$ such that $B$ may be covered by $k$ invariant open sets $U_1,\ldots , U_k$ on each of which there exists a $G$-homotopy section, that is a $G$-map $s\colon\thinspace U_i \to E$ such that $p\circ s$ is $G$-homotopic to the inclusion $i_{U_i}\colon\thinspace U_i\hookrightarrow B$.
If $p$ is a $G$-fibration, then this is equivalent to requiring the existence of a $G$-section $s\colon\thinspace U_i\to E$ such that $p\circ s = i_{U_i}$. In particular, for any $G$-space $X$ the {\em equivariant topological complexity} of $X$ is defined in \cite[Section 6]{CG} to be the equivariant sectional category of the double evaluation map $X^{[0,1]}\to X\times X$. Here $G$ acts diagonally on the product and by composition on the path space of $X$. In keeping with the conventions in place in this paper, we will define the equivariant topological complexity to be one less than the number of sets in the open cover; thus \[ \mathrm{TC}_G(X)=\mathrm{secat}_G(X^{[0,1]}\to X\times X)-1. \] \begin{lema}\label{tcequivariante} Let $G=\mathbb{Z}_2$ act antipodally on the sphere $S^n$, where $n\ge 1$. Then \[ \mathrm{TC}_G(S^n) = \left\{\begin{array}{ll} 1 & \mbox{if $n$ is odd,} \\ 2 & \mbox{if $n$ is even.} \end{array}\right. \] \end{lema} \begin{proof} We argue that the usual motion planning rules on the spheres can be made equivariant with respect to the antipodal action, by choosing vector fields which are equivariant. Suppose $n$ is odd. Then the projective space $\mathrm{P}^n$ has zero Euler characteristic and so admits a nowhere-vanishing vector field. Using the double cover immersion $S^n\to P^n$, this pulls back to a nowhere-vanishing vector field $v$ on $S^n$ which is equivariant in the sense that $dg(v(A)) = v(gA)$ for $g\in G$ and $A\in S^n$. We consider the open sets $U_0=\{(A,B)\in S^n\times S^n\mid A\neq -B\}$ and $U_1 = \{(A,B)\in S^n\times S^n\mid A\neq B\}$. We define $s_0$ on $U_0$ by choosing the shortest geodesic path from $A$ to $B$ (traveled at constant velocity). We define $s_1$ on $U_1$ in two stages: first travel from $A$ to $-A$ along the great circle in the direction determined by $v(A)$; second travel from $-A$ to $B$ along the shortest geodesic path.
It is easy to check that these sets and motion planning rules are $G$-invariant. When $n$ is even, removing a point $[C]$ from $P^n$ gives an open manifold homotopy equivalent to $P^{n-1}$, which therefore admits a nowhere-vanishing vector field. Again we pull this back to obtain a nowhere-vanishing equivariant vector field $v'$ on $S^n-\{-C,C\}$. We let $U_0$ and $s_0$ be as before. We let $U_1'=\{(A,B)\in S^n\times S^n\mid A\neq B, C,-C\}$ and define $s_1'$ using $v'$ similarly to $s_1$. Finally we let $U_2'=\{(A,-A)\mid A\in W\cup -W\}$, where $W$ is a small open disk neighbourhood centred on $C$. The path $s_2'(A,-A)$ for $A\in W$ travels first along the geodesic segment to the centre $C$ of $W$; then along some fixed path $\gamma$ from $C$ to $-C$; then along the geodesic segment in $-W$ to $-A$. For $A\in -W$ the path $s_2'(A,-A)$ travels first along the geodesic segment in $-W$ to $-C$; then along $-\gamma$ from $-C$ to $C$; then along the geodesic segment in $W$ to $-A$. The lower bounds are given by the obvious inequality $\mathrm{TC}(X)\le\mathrm{TC}_G(X)$, which holds for any $G$-space $X$. \end{proof} \begin{teorema}\label{product} Let $G$ be a compact Lie group, and let $X$ and $Y$ be smooth $G$-manifolds. Then \[ \mathrm{TC}_G(X\times Y)\le \mathrm{TC}_G(X) + \mathrm{TC}_G(Y) \] where $X\times Y$ is given the diagonal $G$-action. \end{teorema} \begin{proof} Let $\mathrm{TC}_G(X)=n$ and $\mathrm{TC}_G(Y)=m$. Suppose that $X\times X = U_0\cup \cdots \cup U_n$ where the $U_i$ are open invariant sets with $G$-sections $s_i\colon\thinspace U_i\to X^{[0,1]}$. Suppose further that $Y\times Y = V_0\cup \cdots \cup V_m$ where the $V_j$ are open invariant sets with $G$-sections $\sigma_j\colon\thinspace V_j\to Y^{[0,1]}$. We can find a $G$-invariant partition of unity $\{ f_i\}$ on $X\times X$ subordinate to $\{U_i\}$ (see \cite[Corollary B.33]{GGK}).
Likewise let $\{g_j\}$ be a $G$-invariant partition of unity on $Y\times Y$ subordinate to $\{ V_j\}$. The rest of the proof proceeds by direct analogy with the proof in the non-equivariant case given in \cite[Theorem 11]{farberTCplanning}, hence is omitted. \end{proof} \begin{nota} Theorem~$\ref{product}$ is certainly not the most general setting in which the product inequality holds. For instance, we believe it holds whenever $X$ and $Y$ are $G$-ENRs. \end{nota} \begin{corolario} Consider the diagonal action of $\hspace{.5mm}\mathbb{Z}_2\hspace{-.3mm}$ on $S_{\overline{n}}=S^{n_1}\times \cdots \times S^{n_r}$. If $k$ denotes the number of spheres with $n_i$ {\em even}, then \[ \mathrm{TC}_G(S_{\overline{n}}) = \ell(\overline{n}) + k. \] \end{corolario} The main result we will apply from \cite{CG} gives an upper bound for the (non-equivariant) topological complexity of a Borel fibration in terms of the topological complexity of the base and the equivariant topological complexity of the fibre. \begin{teorema}[{\cite[Theorem 6.21]{CG}}]\label{CGTCG} Let $X$ be a $G$-space, and let $E\to B=E/G$ be a numerable principal $G$-bundle. Then \[ \mathrm{TC}(X_G) < (\mathrm{TC}_G(X)+1)(\mathrm{TC}(B)+1), \] where $X_G=E\times_G X$ is the corresponding Borel space of $X$. \end{teorema} \begin{proof}[Proof of Theorem~$\ref{cota1}$] Let $\overline{m}=(n_2,\ldots,n_r)$. Note that $\mathrm{P}_{\overline{n}}$ can be thought of as the Borel space $S^{n_1}\times_{\mathbb{Z}_2}S_{\overline{m}}$. The result then follows from Theorem~\ref{CGTCG}. \end{proof} The argument in the proof of Theorem~\ref{cota1} can be used to give low upper bounds for the LS-category of projective product spaces (extending the phenomenon noted in~\cite[(2.21)]{davispps} when $\mathrm{P}_{\overline{n}}$ has a full set of factoring spheres). Namely \begin{equation}\label{cates} \mathrm{cat}(\mathrm{P}_{\overline{n}})<(n_1+1)\ell(\overline{n}).
\end{equation} Since $\mathrm{TC}\leq2\hspace{.3mm}\mathrm{cat}$, we get in particular \begin{equation}\label{teces} \mathrm{TC}(\mathrm{P}_{\overline{n}})<2(n_1+1)\ell(\overline{n})-1 \end{equation} which improves on Theorem~\ref{cota1} only when $\mathrm{P}_{\overline{n}}$ comes from a product having `enough' even dimensional spheres, i.e.~$k\geq C_{n_1}\ell(\overline{n})$ where $$ C_{n_1}=\frac{2n_1+1-\mathrm{TC}(\mathrm{P}^{n_1})}{\mathrm{TC}(\mathrm{P}^{n_1})+1}. $$ Note that, although $k\leq\ell(\overline{n})$, $C_{n_1}\!\ll1$ for any `generic' $n_1$. \medskip Just as~$(\ref{teces})$ and Theorem~$\ref{cota1}$ may fail to improve the upper bound in Theorem~$\ref{main2}$, the bound in~$(\ref{cates})$ is not always useful (for instance if all the $n_i$ are equal). For such cases it is worth keeping in mind that, in view of~\cite[Theorem~3.5]{berstein}, the standard estimate $\mathrm{cat}\leq\dim$ is improved by the inequality $\mathrm{cat}(\mathrm{P}_{\overline{n}})<\dim(\mathrm{P}_{\overline{n}})$ provided $n_1>1$ and $\ell(\overline{n})>1$. {\sc Departamento de Matem\'aticas Centro de Investigaci\'on y de Estudios Avanzados del IPN M\'exico City 07000, M\'exico} {\it E-mail address:} {\bf [email protected]} \medskip {\sc School of Mathematical Sciences The University of Nottingham University Park, Nottingham, NG7 2RD, UK} {\it E-mail address:} {\bf [email protected]} \medskip {\sc Departamento de Matem\'aticas Universidad de Guanajuato Guanajuato 36000, Gto, M\'exico} {\it E-mail address:} {\bf [email protected]} \medskip {\sc Departamento de Matem\'aticas Centro de Investigaci\'on y de Estudios Avanzados del IPN M\'exico City 07000, M\'exico} {\it E-mail address}: {\bf [email protected]} \end{document}
\begin{document} \title[ Existence of periodic orbits for $\mathsf{PSVF}$ via Conley theory. ] {Existence of periodic orbits for piecewise-smooth vector fields with sliding region via Conley theory.\\ } \author[A.~Romero]{Angie T. S. Romero} \address[A.~Romero] {Instituto de Matem\'atica e Estat\'istica\\ Universidade Federal de Goi\'as\\ Goi\^ania-GO\\ Brazil. } \email{[email protected]} \author[E.~Vieira]{Ewerton R. Vieira} \address[E.~Vieira] {The Center for Discrete Mathematics and Theoretical Computer Science\\ Rutgers University\\ Piscataway, New Jersey\\ USA and Instituto de Matem\'atica e Estat\'istica\\ Universidade Federal de Goi\'as\\ Goi\^ania-GO\\ Brazil, } \email{[email protected], [email protected]} \keywords{Conley Index, Periodic orbits, Filippov systems. } \begin{abstract}{ The Conley theory has a tool to guarantee the existence of periodic trajectories in isolating neighborhoods of semi-dynamical systems. We prove that the positive trajectories generated by a piecewise-smooth vector field $Z=(X, Y)$ defined in a three-dimensional closed manifold without escape region produce a semi-dynamical system. Thus, we have built a semiflow that allows us to apply the classical Conley theory. Furthermore, we use it to guarantee the existence of periodic orbits in this class of piecewise-smooth vector fields. } \end{abstract} \maketitle \setlength{\baselineskip}{14pt} \section{Introduction} A dynamical system describes the evolution of a phenomenon over time, and these may be considered in discrete or continuous time. Traditionally, the solution of a differential equation system model is a continuous dynamical system. Usually, it is not easy to obtain solutions of a dynamical system explicitly; of particular interest is the existence of periodic orbits and limit cycles (isolated periodic orbits).
At the end of the 19th century, Henri Poincaré, while investigating the movement of planets, established the modern theory of dynamical systems; his work combined topology and geometry, thus conducting a qualitative study of dynamical systems. At the end of the 19th century, David Hilbert, at the Second International Congress of Mathematicians held in Paris, proposed a list of 23 relevant problems. One famous unsolved problem is the 16th on this list, which refers to finding the maximum number of limit cycles of a polynomial vector field of degree greater than or equal to $2$. \newline In dynamical systems, a modern research theme refers to the piecewise smooth vector fields ($\mathsf{PSVF}$ for short). The $\mathsf{PSVF}$ are systems that are not completely differentiable but are differentiable by parts, where a vector field is suddenly interrupted and changed by another distinct vector field. These systems are widely used for modeling some problems associated with control theory, economics, and biology, see \cite{bernardo}. We are interested in guaranteeing the existence of periodic orbits in $\mathsf{PSVF}$. So, the main objective of this paper is to use the Conley theory to obtain periodic orbits in $\mathsf{PSVF}$. \newline The Conley theory has a significant quantity of applications for the study of dynamical and semi-dynamical systems. Charles Conley introduced a new topological index, called the Conley Index, as a generalization of the Morse Index. The Conley Index guarantees the existence of invariant sets within a particular compact set; one of these invariant sets may be a periodic orbit, which is one of our main focuses. In \cite{Conley}, Charles Conley began developing this theory for two-sided flows on compact or locally compact spaces and this was continued by Dietmar Salamon, see \cite{Salamon}, and was extended to semiflows by Rybakowski, see \cite{Rybakowski}.
In the paper \cite{Mccord}, the authors present a result towards finding periodic orbits in semi-dynamical systems; it is the main tool for this paper. A modern application of finding periodic orbits in neuroscience is in the thesis \cite{Abel}, where the author uses numerical techniques to obtain periodic orbits in dynamics given by Competitive Threshold-Linear Networks and Wilson-Cowan networks. \newline Since periodic orbits do not correspond to local objects, our study of these systems is global. In this paper, we consider $M$ as a closed manifold. A $\mathsf{PSVF}$ defined on $M$ is a vector field tangent to $M$ which fails to be differentiable only at the points that belong to a submanifold $N$. The submanifold $N$ has dimension $n-1$ and, usually, is called the discontinuity submanifold. Two approaches exist in the literature for formulating the equations for $\mathsf{PSVF}$; these are the equivalent control method of Utkin and the convex method of Filippov. In the book by Filippov, see \cite{Filippov}, the author established the conventions used in this paper to define solution orbits for a $\mathsf{PSVF}$. The problem of guaranteeing the existence of periodic orbits in piecewise smooth vector fields is of colossal importance. In the literature, one of the tools studied for this purpose is the first recurrence map or Poincaré map; a fixed point of this map corresponds to a periodic orbit. Some of the works in this context are: \cite{carmona}, \cite{du}, and \cite{llibre3}. In the papers \cite{llibre}, and \cite{llibre1}, the authors use the first integral to study the existence of crossing periodic orbits. Finally, the well-known theory of averaging was also used, for example, in the work \cite{llibre2}. Moreover, in \cite{euzebio2014estudo}, \cite{junior2016orbitas} and \cite{tonon2010sistemas}, the authors also study the existence of periodic orbits in piecewise smooth systems.
\newline The Conley theory has been developed for continuous and discrete flows, multiflows, and semiflows. Regarding flows, one of the first works related to the Conley theory and discontinuous systems corresponds to \cite{Casagrande}. The authors use a regularization of the discontinuous vector field, see \cite{SotomayorTeixeira1}, to adapt the Conley index for continuous flows. They define the D-Conley index and show it is invariant by homotopy. More recently, Cameron Thieme has been extending the Conley theory for multiflows. His main objective is to generalize Conley index theory to differential inclusions, having the Filippov systems as motivation. Firstly, in \cite{Cameron1}, he introduces differential inclusions and Filippov systems, and he shows the existence of a multiflow for this class of systems. Subsequently, in the preprints \cite{Cameron2} and \cite{Cameron3}, the author exposes a definition of perturbation (see Definition 3.1 in \cite{Cameron2}) and shows that both the isolating neighborhoods and the attractor-repeller decomposition are stable, which are essential objects in the Conley theory. \newline Our approach is to construct a semiflow for Filippov systems in order to apply the well-established results of the Conley theory. Recently, a related work has been done by Mrozek and Wanner in \cite{mrozek2020creating}; their engaging paper shows the construction of a continuous semiflow on a finite topological space $\mathsf{X}$ for a combinatorial vector field (a discretization of a piecewise smooth vector field). \newline This paper is structured as follows. The preliminaries, concerning the Conley index and the notation of $\mathsf{PSVF}$, are in Section II. Section III demonstrates the construction of a semiflow using the positive trajectories of a $\mathsf{PSVF}$ that, according to the Filippov convention, only present crossing and sliding; there exists a unique solution for positive motion along the trajectories of the $\mathsf{PSVF}$.
At the head of Section III is the main result of this article. Finally, in Section IV, we apply the Conley theory to the semi-dynamical system constructed in Section III to find periodic trajectories in $\mathsf{PSVF}$. \section{Preliminaries} Throughout this paper, we identify the intervals $(-\infty,0]$ and $[0,\infty)$ by $\mathbb{R^-}$ and $\mathbb{R^+}$, respectively. Let $\mathsf{X}$ be a topological space and $f$ a real-valued function on $\mathsf{X}$. We say that $f$ is upper semi-continuous at $x^*$ if and only if $\limsup_{{x}\to x^*} {f(x)}\leq f(x^*).$ \begin{definition}\label{semiflowdef} The pair $(\mathsf{X},\phi)$ is called a (continuous) semi-dynamical system and $\phi$ a semiflow if $\mathsf{X}$ is a Hausdorff topological space and $\phi$ is a mapping, $\phi:\mathsf{X}\times \mathbb{R}^+\longrightarrow \mathsf{X}$ which satisfies \begin{itemize} \item [(1)] $\phi(x,0)=x$ for each $x\in \mathsf{X}$ (initial value property), \item [(2)] $\phi(\phi(x,t),s)=\phi(x,t+s)$ for each $x\in \mathsf{X}$ and $t,s\in{\mathbb{R}^+}$ (semigroup property), and \item [(3)] $\phi$ is continuous on the product space $\mathsf{X}\times\mathbb{R}^+$ (continuity property). \end{itemize} \end{definition} When we are working with a fixed semiflow, we often use the simpler notation $x\cdot t$ in place of $\phi(x,t)$. A function $\sigma: I \longrightarrow \mathsf{X}$ where $I$ is a nonempty interval in $\mathbb{R}$ is called a solution of $(\mathsf{X},\phi)$ if whenever $t\in I$, $s\in{\mathbb{R}^+} $ and $t+s \in I$, then $\sigma(t)\cdot s=\phi(\sigma(t),s) = \sigma(t+s)$. The interval $I$ is the domain of $\sigma$ and according to our notation is represented by $D(\sigma)$. If $x\in \mathsf{X}$, a solution $\sigma$ with $0\in{D(\sigma)}$ and $\sigma(0)=x$ is called a solution through $x$.
The function $\phi_x:\mathbb{R}^+\longrightarrow \mathsf{X}$ given by $\phi_x(t)=x\cdot t$ is a solution through $x$ and, indeed, is the unique solution through $x$ with domain $\mathbb{R}^+$. We call this solution the positive motion through $x$. \subsection{Conley index} In this section, we review the Conley index theory, following the definitions given by K. Mischaikow and M. Mrozek \cite{Mischaikow}. For this section assume that $\mathsf{X}$ is a metric space and $\phi: \mathsf{X} \times \mathbb{R^+}\longrightarrow \mathsf{X}$ a semiflow. A subset $S \subset \mathsf{X}$ is said to be an \textit{invariant set} with respect to the semiflow $\phi$ if, for all $p\in S$, one has $p\cdot t \in S$ for all $t \in\mathbb{R^+}$. In other words, $\phi(S,\mathbb{R^+})=S$. Let $N\subset \mathsf{X}$ be a subset of $\mathsf{X}$. The \textit{maximal invariant set of $N$} is defined by: $\mathrm{inv}(N)=\{x\in{\mathsf{X}}| \ x\cdot t \in N, \text{ for all } t\in{\mathbb{R^+}}\}$. A subset $S\subset \mathsf{X}$ is called an \textit{isolated invariant set} if there exists a compact neighborhood $N$ of $S$ in $\mathsf{X}$ such that $S\subset \mathrm{int}(N)$ and $S=\mathrm{inv}(N)$. In this case, $N$ is said to be an \textit{isolating neighborhood} for $S$ in $\mathsf{X}$. The set $N$ is called an \textit{isolating neighborhood} for $\phi$ if it is closed, contained in the domain of $\phi$, and $\mathrm{inv}(N)\subset \mathrm{int}(N)$. The most relevant property of the Conley index is its invariance by continuation, even under small perturbations. \begin{definition} Let $S\subset \mathsf{X}$ be an isolated invariant set.
A pair $(N,L)$ of compact sets in $\mathsf{X}$ is said to be an \textit{index pair} for $S$ in $\mathsf{X}$ if $L\subset N$ and \begin{enumerate} \item $\overline{N\setminus L}$ is an isolating neighborhood for $S$ in $\mathsf{X}$; \item $L$ is positively invariant in $N$, that is, if $x\in{L}$ and $x\cdot [0,T]\subset N$ then $x\cdot[0,T]\subset L$; \item $L$ is the exit set of the semiflow, that is, if $x\in N$ and $x\cdot\mathbb{R^+}\varsubsetneq N $ then there exists $T>0$ such that $x\cdot[0,T]\subset N$ and $x\cdot T\in L$. \end{enumerate} \end{definition} Given a pair $(N,L)$ of topological spaces with $L\subset N$ and $L\neq \emptyset $, define: \begin{equation}\label{relation} x\sim y \Leftrightarrow x=y \text{ or } x,y\in L. \end{equation} Denote by $N/L$ the pointed space $(N/\sim, [L])$. Figure~\ref{figure1} shows an index pair $(N,L)$ for a hyperbolic invariant set that is diffeomorphic to a circle in three dimensions. \begin{figure}[H] \includegraphics[scale=0.8]{figure1} \caption{} \label{figure1} \end{figure} Next, we present the definitions of the Conley index that we use throughout this paper. \begin{definition} The \textit{Homotopy Conley Index} of $S$ is defined as the homotopy type of the pointed space $N/L$, where $(N,L)$ is an index pair for $S$. \end{definition} Note that, by definition, the homotopy Conley index is the homotopy type of a topological space. Unfortunately, operating with homotopy classes of spaces is extremely difficult. To evade this problem, it is useful to consider the cohomology Conley index. \begin{definition} Let $S$ be an isolated invariant set with respect to the semiflow $\phi$ and let $(N,L)$ be an index pair for $S$.
The \textit{cohomology Conley index} is defined as \begin{equation} CH^*(S)=CH^*(S,\phi):=H^*(N/L)\approx H^*(N,L) \end{equation} where $H^*$ denotes the Alexander-Spanier cohomology with integer coefficients. \end{definition} The most important property of an isolating neighborhood is that it is robust with respect to perturbation. Still, before enunciating this property, the following definition is necessary. \begin{definition} Let $N\subset \mathsf{X}$ be a compact set. Let $S_\lambda=\mathrm{inv}(N,\phi_\lambda)$. Two isolated invariant sets $S_{\lambda_0}$ and $S_{\lambda_1}$ are related by continuation or $S_{\lambda_0}$ continues to $S_{\lambda_1}$ if $N$ is an isolating neighborhood for all $S_{\lambda}, \lambda\in{[\lambda_0,\lambda_1]}$. \end{definition} Now, we state the continuation theorem for the Conley index. \begin{thm}[Continuation Property] Let $S_{\lambda_0}$ and $S_{\lambda_1}$ be isolated invariant sets that are related by continuation. Then, $$CH^*(S_{\lambda_0})\approx CH^*(S_{\lambda_1}).$$ \end{thm} The proof of the Continuation Property can be found in \cite{Salamon}. Let us now consider, as an example, the Conley index of a stable periodic orbit. \begin{exam}\label{exam2.7} The homotopy type of the pointed space $N/L$ of a stable periodic orbit in two dimensions is equal to the homotopy type of $S^1 \vee S^0$ (see Figure \ref{figure2}), and so \begin{equation*} CH^k(S) \approx \left \{ \begin{matrix} \mathbb{Z} & \mbox{ }k=0, 1, \\ 0 & \mbox{ otherwise}. \end{matrix}\right. \end{equation*} \begin{figure}[H] \includegraphics[scale=1.1]{figure2} \caption{The homotopy type of the pointed space $N/L$ of a stable periodic orbit in dimension two.} \label{figure2} \end{figure} \end{exam} The following result generalizes Example \ref{exam2.7}.
\begin{prop}[Mischaikow, \cite{Mischaikow}] Let $S$ be a hyperbolic periodic orbit with an oriented unstable manifold of dimension $n+1$. Then \begin{equation*} CH^k(S) \approx \left \{ \begin{matrix} \mathbb{Z} & \mbox{ }k=n, n+1, \\ 0 & \mbox{ otherwise}. \end{matrix}\right. \end{equation*} \end{prop} Figure~\ref{figure3} shows that the homotopy type of the pointed space $N/L$ of an unstable periodic orbit in two dimensions is equal to the homotopy type of $S^2 \vee S^1$. \begin{figure}[H] \includegraphics[scale=0.9]{figure3} \caption{The homotopy type of an unstable periodic orbit in dimension two.} \label{figure3} \end{figure} Now, we introduce the notion of a Poincaré section, which is the essential hypothesis for establishing the existence of periodic orbits in a semi-dynamical system when using the Conley theory. Suppose $N$ is an isolating neighborhood for a semiflow $\phi$. Then $\varXi \subset \mathsf{X}$ is a \textit{local section} for $\phi$ and $N$ if there exists $\xi> 0$ such that \[C_{\xi}^N(\varXi):=\{x\in N \mid x\cdot(0,\xi)\cap\varXi\neq \emptyset \}\] is open in $N$ and for every $x\in{C_{\xi}^N(cl(\varXi))}$ there exists a unique element of $x\cdot(0,\xi)\cap \varXi$. \begin{definition} $\varXi \subset X$ is a \textit{Poincar\'{e} section} for $\phi$ in $N$ if: \begin{enumerate} \item $\varXi$ is a local section, \item $\varXi_N:=\{\varXi \cap N\}$ is closed and \item for every $x\in N$, $x\cdot(0,\infty)\cap \varXi \neq \emptyset.$ \end{enumerate} \end{definition} Observe that it is not necessary to know $S$ in order to find a Poincaré section; moreover, $\varXi$ is not required to be a subset of $N$. Indeed, if $N$ has an exit set, then for any subset of $N$ there will be points in $N$ whose orbits exit $N$ before they cross that subset again, so no subset of $N$ can be a Poincaré section. 
Suppose $(\Lambda,\rho)$ is a metric space that parameterizes a continuous family of semiflows $\phi^{\lambda}:\mathsf{X} \times \mathbb{R^+} \longrightarrow \mathsf{X}$. Furthermore, suppose that for some $\lambda_0\in{\Lambda}$, $N$ is an isolating neighborhood for $\phi^{\lambda_0}$ and $\varXi$ is a local section for $N$ and $\lambda_0$. \begin{prop}\label{prop2.11} Let $W$ be an admissible set for $\phi^{\lambda}$ for all $\lambda\in\Lambda$. Assume that $N\subset W$ is an admissible isolating neighborhood for $\phi^{\lambda}$ for all $\lambda\in\Delta\subset{\Lambda}$, a neighborhood of $\lambda_0$. Furthermore, assume that $N$ has a Poincaré section for $\phi^{\lambda_0}$ and that $\mathrm{inv}(N,\phi^{\lambda_0})=\emptyset$. Then, for $\Delta$ sufficiently small, given $\lambda\in{\Delta}$ there exists an isolating neighborhood $N_{\lambda}\subset W$ for $\phi^{\lambda}$ such that \[\mathrm{inv}(N_{\lambda},\phi^{\lambda})=\mathrm{inv}(N,\phi^{\lambda})\] and $N_{\lambda}$ admits a Poincaré section. \end{prop} The previous proposition is the main result used to guarantee the existence of periodic orbits that persist under perturbations of the system. In other words, for semiflows near $\phi^{\lambda_0}$, there is an isolating neighborhood $N_{\lambda}$ that admits a Poincaré section and has the same maximal invariant set for $\lambda\in{\Delta}$. \newline Given a Poincar\'{e} section $\varXi$ for $N$, there exists a subset $\varXi_0$ of $\varXi$, open in $\varXi$, such that $\varXi\cap S =\varXi_S\subset \varXi_0$ and such that, for every $x\in \varXi_0$, there exists a unique minimal strictly positive time $\pi_{\varXi}(x)$ with $x\cdot[0,\pi_{\varXi}(x)]\subset N$ and $x\cdot\pi_{\varXi}(x)\in\varXi$. 
The \noindentextit{Poincar\'{e} map} $\varPi_{\varXi}$ associated with the Poincar\'{e} section $\varXi$ is defined by \mbox{\boldmath $e$}gin{equation} \mbox{\boldmath $e$}gin{aligned} \varPi_{\varXi}: & \varXi_{0}\longrightarrow \varXi\\ & x \longmapsto x\cdot\pi_{\varXi}(x). \end{aligned} \end{equation} A map $f:\mathsf{X} \longrightarrow \mathsf{Y}$ is \noindentextit{locally compact} if every point $x\in{\mathsf{X}}$ admits a neighborhood $U$ such that the closure of $f(U)$ is compact. If, in addition, there exists a compact set $A$ such that for every $x\in{\mathsf{X}}$, $cl\{f^n(x)|n\in{\mathbb{N}}\}\cap A\neq \emptyset$, then $f$ is called a map of \noindentextit{compact attraction}. A semiflow $\phi$ on $\mathsf{X}$ is locally compact if there exists $t>0$ such that $\phi_t$ is locally compact and is of compact attraction if $\phi_t$ is a map of compact attraction for some $t>0$. \mbox{\boldmath $e$}gin{thm}[Mrozek, \cite{Mrozek}] Assume $f:\mathsf{X} \longrightarrow \mathsf{X}$ is a map of compact attraction and $K$ is an isolated invariant set for $f$. Then, the Conley index of $K$ under $f$ is of finite type. \end{thm} At the end of this section, we have the theorem that provides conditions for the Conley index of an isolated invariant set to contain a periodic trajectory. \mbox{\boldmath $e$}gin{thm}[Mccord, Mrozek and Mischaikow,\cite{Mccord}]\label{teo1} Assume $\mathsf{X}$ is an absolute neighborhood retract and $\phi:\mathsf{X}\noindentimes [0,\infty)\rightarrow \mathsf{X}$ is a semiflow with compact attraction. 
If $N$ is an isolating neighborhood for $\phi$ which admits a Poincar\'{e} section $\varXi$ and either \begin{equation}\label{eq1} \dim CH^{2n}(N,\phi)=\dim CH^{2n+1}(N,\phi) \quad \text{for } n\in{\mathbb{Z^+}} \end{equation} or \begin{equation} \dim CH^{2n}(N,\phi)=\dim CH^{2n-1}(N,\phi) \quad \text{for } n\in{\mathbb{Z^+}}, \end{equation} where not all the above dimensions are zero, then $\phi$ has a periodic trajectory in $N$. \end{thm} \begin{cor} Under the hypotheses of Theorem \ref{teo1}, if $N$ has the Conley index of a hyperbolic periodic orbit, then $\mathrm{inv}(N)$ contains a periodic orbit. \end{cor} \subsection{Piecewise-smooth vector fields} In this section, we introduce the Filippov convention for piecewise-smooth vector fields, following the reference \cite{Filippov}. Let $M$ be a closed $n$-dimensional $C^r$ manifold, and denote by $\mathfrak{X}(M)$ the space of $C^r$ vector fields tangent to $M$, endowed with the $C^r$-topology, where $r$ is finite and sufficiently large. Let $\Sigma$ be a codimension one compact submanifold of $M$ that divides $M$ into two pieces, i.e. $M=\Sigma^+\cup \Sigma^-$, where $\Sigma^+$ and $\Sigma^{-}$ are manifolds with common boundary $\partial \Sigma^+ =\partial \Sigma^-=\Sigma$. If $h: M \rightarrow \mathbb{R}$ is such that $h^{-1}(0)=\Sigma$, $h^{-1}([0,\infty))=\Sigma^+$, $h^{-1}((-\infty,0])=\Sigma^-$ and $0$ is a regular value of $h$, then we define a $\mathsf{PSVF}$ as follows: \begin{equation}\label{spvf} Z(p) = \left\{ \begin{array}{rcl} X(p)&& \text{if } p\in{\Sigma^{+}}, \\ Y(p) && \text{if } p\in{\Sigma^{-}}. \end{array}\right. \end{equation} We make the identification $Z=(X,Y)\in{\mathfrak{X}(M,h)}=\mathfrak{X}(M)\times \mathfrak{X}(M)$, equipped with the product topology. We write $\textrm{int}(\Sigma^\pm)$ for $\Sigma^\pm \setminus \Sigma$. 
\newline For a vector field, $X\in{\mathfrak{X}(M)}$ the Lie derivative is defined as an operator $\mathcal{L}_{X}:\mathfrak{X}(M)\rightarrow \mathfrak{X}(M)$, such that, the Lie derivative of a vector field $Y$ in the direction of the vector field $X$ is the vector field $\mathcal{L}_{X}(Y)$ defined by $$\mathcal{L}_{X}(Y):=\frac{d}{dt}\bigg| _{t=0}(D\varphi(x,t))^{-1}Y(\varphi{(x,t)}),$$ where $(t,x)\mapsto\varphi{(x,t)}$ is the local flow of $X$. Generally $\mathcal{L}_{X}(Y)$ is defined by $[X,Y]$, where $[\cdot,\cdot]$ corresponds to the Lie bracket. The derivative for a differentiable function $h$ in $M$ is $\mathcal{L}_{X}(h)=(Xh)(p)=\sum_{i} m_i(p)\dfrac{\partial h}{\partial x_i}(p)$, where $ \bigg\{ \dfrac{\partial}{\partial{x_i}}\bigg\} $ is one basis associate to of a parametrization $\mathbf{x}:\mathcal{U}\subset{\mathbb{R}^n}\rightarrow M $ and each $m_i:\mathcal{U}\rightarrow \mathbb{R}$ is a function in $\mathcal{U}$. Then, we have $\mathcal{L}_{X}(h)=\langle X(p),\nabla h(p)\rangle$, where $h$ indicates the expression of $h$ in the parametrization $\mathbf{x}$. Indeed, we can consider Lie derivatives \mbox{\boldmath $e$}gin{equation*} Xh(p)=\langle X(p),\nabla h(p)\rangle \noindentext{ and } X^{k+1}h(p)=XX^kh(p)=\langle X(p),\nabla X^k h(p)\rangle, \end{equation*} where $\langle \cdot,\cdot \rangle$ denote the Euclidean inner product in $\mathbb{R}^{n}$ and $h$ indicates the expression of $h$ in a parametrization $\mathbf{x}$ of $M$. 
\newline Following the Filippov convention in \cite{Filippov}, we distinguish the following regions in the discontinuity manifold $\Sigma$: \mbox{\boldmath $e$}gin{itemize} \item[$\diamond$] the crossing region: $\Sigma^{c}=\{p\in{\Sigma};Xh(p)Yh(p)>0\}$, $\Sigma^{c+}=\{p\in{\Sigma};Xh(p)>0 \noindentext{ and } Yh(p)>0\}$ and $\Sigma^{c-}=\{p\in{\Sigma}; Xh(p)<0 \noindentext{ and } Yh(p)<0\}$, \item[$\diamond$] the escaping region: $\Sigma^{e}=\{p\in{\Sigma};Xh(p)> 0 \noindentext{ and }Yh(p)<0\}$, \item[$\diamond$] the sliding region: $\Sigma^{s}=\{p\in{\Sigma};Xh(p)<0 \noindentext{ and }Yh(p)>0\}$. \end{itemize} \mbox{\boldmath $e$}gin{definition}\label{slide} The sliding vector field associated to $Z$ is the vector field tangent to $\Sigma^{s}$ and defined at $$Z^{s}(p)= \frac{1}{Yh(p)-Xh(p)}(Yh(p)X(p)-Xh(p)Y(p)).$$ If $p\in{\Sigma^s}$ then $p\in{\Sigma^e}$ for $-Z$ and then we can define the escaping vector field on $\Sigma^e$ associated to $Z$ by $Z^e=-(-Z)^s$. \end{definition} Let $X\in{\mathfrak{X}(\Sigma^+)}$. Since $\Sigma^+$ is a manifold with boundary then if $p\in{\partial \Sigma^+}$ we have that: $p$ is a \noindentextit{regular point} of $X$ when $Xh(p)\neq 0$ or $p$ is a \noindentextit{singular point} of $X$ when $Xh(p)=0$. 
We say that $p$ is a \textit{singularity tangency} of $X$ if the orbit passing through $p$ is tangent to the boundary of $\Sigma^+$ at $p$, i.e., $Xh(p)=0$ and $X(p)\neq0$; moreover, a singularity tangency $p$ of $X$ is classified as follows: \begin{itemize} \item[$\diamond$] $Xh(p)= 0$ and $X^2h(p)\neq 0$ (fold or singularity of order $2$), \item[$\diamond$] $Xh(p)= X^2h(p)=0$, $X^3h(p)\neq 0$ and the set $\{Dh(p),DXh(p), DX^2h(p)\}$ is linearly independent (cusp or singularity tangency of order $3$), \item[$\diamond$] $\cdots$ \item[$\diamond$] $Xh(p)=X^2h(p)=\cdots= X^{m-1}h(p)=0$, $X^{m}h(p)\neq 0$ and the set $\{Dh(p),DXh(p), \\ DX^2h(p), \cdots,DX^{m-1}h(p)\}$ is linearly independent (singularity tangency of order $m$). \end{itemize} Although the Conley theory can be applied to semi-dynamical systems defined in any dimension, in this paper we consider $M$ of dimension $3$, since it is the lowest dimension in which we find diversity in the regions and tangencies formed in the discontinuity manifold. \begin{definition} Denote by $S_X$ and $S_Y$ the tangency sets of $X$ and $Y$, respectively. If $Z=(X,Y)\in{\mathfrak{X}(M,h)}$, then the tangency set of $Z$ is given by $S_{Z}= S_{X} \cup S_{Y} $. \end{definition} Denote $\Sigma_X=\{p\in{\Sigma}; \ Xh(p)\neq 0 \}$, analogously for $Y$, and $\Sigma_{Z}= \Sigma_{X} \cap \Sigma_{Y} $. \begin{rem} \label{rem2.19} Following \cite{Sotomayor}, $\Sigma$ is the disjoint union of submanifolds of decreasing dimension: if $S_X^j$ denotes the set of tangential singularities of order $j$ of $X$, then $\Sigma=\Sigma_X\cup S_X^2 \cup S_X^3$. This implies that every orbit meets the boundary of $\Sigma^+$ in a discrete set of points. Generically, a fold point of $X$ belongs to a local curve of fold points of $X$ with the same visibility, and cusp points occur as isolated points located at the extremes of curves of fold points. 
\end{rem} The critical points of $Z^s$ in $\Sigma^s$ are considered \textit{pseudo-equilibria of $Z$}, and the sliding vector field can be extended beyond the boundary of $\Sigma^{s}$; in fact, if $p\in{\partial \Sigma^s}$ then: \begin{enumerate} \item if $Xh(p)=0$ but $Yh(p)\neq 0$ then $Z^s(p)=X(p)$, \item if $Yh(p)=0$ but $Xh(p)\neq 0$ then $Z^s(p)=Y(p)$ and \item if $Xh(p)=0$ and $Yh(p)=0$ then $p$ is a pseudo-equilibrium for $Z^s$, that is, $Z^s(p)=p$. \end{enumerate} Denote by $\Sigma^S= \Sigma^s \cup \partial\Sigma^s$. \newline If $p\in{\Sigma^c}$, then the orbit of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ at $p$ is defined as the concatenation of the orbits of $X$ and $Y$ at $p$. Nevertheless, if $p\in{\Sigma\setminus\Sigma^c}$, there may be a lack of uniqueness of solutions. In this case, the flow of $Z$ is multivalued and any trajectory passing through $p$ originated by the orbits of $X$, $Y$, $Z^{s}$ and $Z^{e}$ is considered as a solution of $Z$. More details can be found in \cite{Filippov}. \newline For $ \mathrm{dim}(M)=n$, on the discontinuity manifold, we assume only isolated crossing regions. For $ \mathrm{dim}(M) = 3 $, the discontinuity manifold may have regions of crossing, sliding, and escaping, as well as regions formed by regular, singular, and tangency points of order $ 2 $ and $ 3 $. Given that we are interested in constructing the semiflow generated by trajectories of a $\mathsf{PSVF}$, in this paper we do not consider systems with sliding and escaping regions simultaneously. For systems that have escaping and crossing regions, we consider the backward flow in order to construct a semiflow. \begin{definition} Let $Z=(X,Y)\in{\mathfrak{X}(M,h)}$. A point $p\in{\Sigma}$ is said to be a \textit{$\Sigma$-singularity} of $Z$ if $p$ is a tangential singularity or a pseudo-equilibrium of $Z$. Otherwise, it is said to be a \textit{regular-regular} point of $Z$. 
\end{definition} We said that a point $p\in{\Sigma}$ is said to be a \noindentextit{fold point} of $X\in{\mathfrak{X}(\Sigma^+)}$ if $Xh(p)=0$ and $X^2h(p)\neq 0$. If $X^2h(p)>0$ (resp. $X^2h(p)< 0$), then $p$ is a \noindentextit{visible fold or with visible contact (resp. invisible fold or with invisible contact)}. Furthermore, a point $p\in{\Sigma}$ is said to be a \noindentextit{cusp point} of $X\in{\mathfrak{X}(\Sigma^+)}$ if $Xh(p)=0$, $X^2h(p)=0$, $X^3h(p)\neq 0$ and $\{Dh(p),DXh(p), DX^2h(p)\}$ is linearly independent. If $X^3h(p)>0$ (resp. $X^3h(p)< 0$), then $p$ is a \noindentextit{visible cusp or with visible contact (resp. invisible cusp or with invisible contact)}. If $Y\in{\mathfrak{X}(\Sigma^-)}$, the visibility condition is switched. \newline Resuming the previous results, we have the following definition for the local trajectory of one point $p\in M$ in a $\mathsf{PSVF}$. \mbox{\boldmath $e$}gin{definition}\label{traj} The local trajectory $\varphi_{Z}(p,t)$ of a $\mathsf{PSVF}$ of the form (\ref{spvf}) through a point $p\in M$ is defined as follows where every interval $I$ contains zero. \mbox{\boldmath $e$}gin{enumerate} \item For $p\in{\noindentextrm{int}(\Sigma^+)}$ and $p\in{\noindentextrm{int}(\Sigma^-)}$ the trajectory is given by $\varphi_{Z}(p,t)= \varphi_{X}(p,t)$ and $\varphi_{Z}(p,t)= \varphi_{Y}(p,t)$ respectively, where $t\in{I\subset\mathbb{R}}$. \item For $p\in{\Sigma^{c+}}$ and taking the origin of time at $p$, the trajectory is defined as $\varphi_{Z}(p,t)= \varphi_{Y}(p,t)$ for $t\in{I\cap \mathbb{R}^-}$ and $\varphi_{Z}(p,t)= \varphi_{X}(p,t)$ for $t\in{I\cap \mathbb{R}^+}$. For the case $p\in{\Sigma^{c-}}$ the definition is the same reversing. 
\item For $p\in{\Sigma^{s}}$ and taking the origin of time at $p$, the trajectory is defined as $\varphi_{Z}(p,t)=\varphi_{Z^s}(t, p)$ for $t\in{I\cap \mathbb{R}^+}$ and $\varphi_{Z}(p,t)= \varphi_{1}(t, p)$ and $\varphi_{Z_1}(p,t)$ is either $\varphi_{X}(p,t)$, $\varphi_{Y}(p,t)$ or $\varphi_{Z^s}(p,t)$ for $t\in{I\cap \mathbb{R}^-}$. \item For $p$ a generic singularity tangency and taking the origin of time at $p$, this is visible or invisible tangency for at least one of the fields $X$ ou $Y$, the trajectory is defined as $\varphi_{Z}(p,t)= \varphi_{1}(p,t)$ for $t\in{I\cap\mathbb{R}^-}$ and $\varphi_{Z}(p,t)= \varphi_{2}(p,t)$ for $t\in{I\cap \mathbb{R}^+}$ where each $\varphi_{1}$, $\varphi_{2}$ is either $\varphi_{X}$ or $\varphi_{Y}$ or $\varphi_{Z^{\Sigma}}$. \item For $p$ a equilibrium point, i.e., the equilibrium points of $X$, $Y$ and $Z^s$, the trajectory is defined as $\varphi_{Z}(p,t)=p$ for all $t\in \mathbb{R}$. \end{enumerate} \end{definition} \section{Main Results} The main objective of this section is to generate a semi-dynamical system with the forward trajectories of a piecewise-smooth vector field using the convex method of Filippov. Using this semiflow be used together with Theorem \ref{teo1}, we guarantee the existence of periodic orbits in piecewise-smooth vector fields without sliding region. \newline Let topological space $\mathsf{X}$ be a finite union of closed subsets $\mathsf{X}_i$, i.e. 
$\mathsf{X}=\bigcup_{i=1}^n{\mathsf{X}_i}$; if for some topological space $\mathsf{Y}$, there are continuous maps $f_i:\mathsf{X}_i\longrightarrow \mathsf{Y}$ that agree on overlaps (i.e., ${f_i}{\mkern 1mu \vrule height 2ex\mkern2mu_{\mathsf{X}_i \cap \mathsf{X}_j}}={f_j}{\mkern 1mu \vrule height 2ex\mkern2mu_{\mathsf{X}_i \cap \mathsf{X}_j}}$), then the generalized pasting or gluing lemma says that there exists a unique continuous function $f:\mathsf{X} \longrightarrow \mathsf{Y}$ with $f\mkern 1mu \vrule height 2ex\mkern2mu_{\mathsf{X}_i}=f_i$, for all $i$. We used the gluing lemma with the functions of semiflows generated by positives trajectories of the vector fields $X$, $Y$, and $Z^s$. \newline Throughout this paper, the dimension of the manifold $M$ is restricted to $n$-dimensional, $n \leq 3$. This assumption is needed to simplify the construction of a semiflow for the cases where the switching manifold $\Sigma$ has tangencies. For $n \geq 4$, the ideas will be similar; however, the challenge will be the analyze of more tangency cases and defining Filippov convention for submanifolds with only tangencies. It is important to note that, for a Filippov system with only sliding and crossing regions, the assumption on the 3-dimensional manifold can be dropped, see Theorem \ref{teo3.23}. \newline Assume that $M$ is a closed $3$-dimensional $C^1$ manifold, and the discontinuity manifold has crossing regions and sliding regions. $M=\Sigma^+\cup \Sigma^-$, where $\Sigma^+$ and $\Sigma^{-}$ are manifold with common boundary $\Sigma$. We begin by introducing up some notations for subsets of $\Sigma$ that we use frequently. \mbox{\boldmath $e$}gin{definition} For $X\in{\mathfrak{X}(\Sigma^+)}$, denote $\Sigma_X^+=\{p\in\Sigma; \ Xh(p)>0\}$ and $\Sigma_X^-=\{p\in\Sigma; \ Xh(p)<0\}$ subsets of $\Sigma_X$. 
Furthermore, \mbox{\boldmath $e$}gin{enumerate} \item $S_X^{v}=\{p\in{S_X}; \ p\noindentext{ is a visible fold of } X\}$, \item $S_X^{i}=\{p\in{S_X}; \ p\noindentext{ is a invisible fold of } X\}$, \item $S_X^{ic}=\{p\in{S_X}; \ p\noindentext{ is a cusp of } X \noindentext{ and } X^3h(p)<0 \}$ and \item $S_X^{vc}=\{p\in{S_X}; \ p\noindentext{ is a cusp of } X \noindentext{ and } X^3h(p)>0\}$. \end{enumerate} Analogously to $Y\in{\mathfrak{X}(\Sigma^-)}$ and $Z^s\in{\mathfrak{X}(\Sigma^S)}$. \end{definition} In the following definition, $t_X^+(p)$ is the time necessary for the positive trajectory of $p$ to leave definitely $\Sigma^+$. It is the primary tool when defining the domain in the semiflow. \mbox{\boldmath $e$}gin{definition}\label{tx} Let $M$ be a closed manifold and $\Sigma$ ($\Sigma=h^{-1}(0)$ with $h: M \rightarrow \mathbb{R}$) a compact codimension $1$ submanifold of $M$, such that divides $M$ in two connected manifold $\Sigma^+$ and $\Sigma^{-}$ with common boundary $\Sigma$. For $X \in{\mathfrak{X}(M)}$, let $\Lambda_X^{+}=\{p\in{\Sigma^+}; \ \varphi_X(p,[0,\infty)) \nsubseteq \Sigma^+\}$ and $t_X^+:\Sigma^+\longrightarrow \mathbb{R}^+\cup\{\infty\}$ such that \mbox{\boldmath $e$}gin{equation*} t_X^+(p) = \left\{ \mbox{\boldmath $e$}gin{array}{lll} \ \infty && \noindentext{if } p\notin{\Lambda_X^+},\\ \mathrm{inf} \{t>0; \ \varphi_X(p,t)\in{S_X^{ic} \cup \Sigma_X^-} \} && \noindentext{if } \varphi_X(p,[0,t])\subset{\Sigma^+} \noindentext{ for } t\in{I\cap\mathbb{R}^+} \noindentext{ with } t\neq 0,\\ \ 0 && \noindentext{otherwise } . \end{array}\right. \end{equation*} Analogously for $Y\in{\mathfrak{X}(M)}$. \end{definition} To better understand the belong definition, we present the following example with different cases to $t_X^+$. 
\mbox{\boldmath $e$}gin{exam} For $X\in{\mathfrak{X}(\Sigma^+)}$ we can have some points as in the Figure \ref{figure4}, note that ${p_i}\in{\Lambda_X^+}$ for $i=1,...,6$ but only for $p_1,p_2$ and $p_3$, $\varphi_X(p,[0,t])\subset{\Sigma^+}$ for $t\in{I\cap\mathbb{R}^+}$ with $t\neq 0$ then $t_X^+(p_i)>0$ for $i=1,2,3$ with $t_X^+(p_2)>t_X^+(p_3)$ and $t_X^+(p_i)=0$ for $i=4,5,6$. Now, for $p_i$ for $i=7,...,9$ we have that $\varphi_X(p,[0,\infty)) \subset \Sigma^+$ then, these points are not in $\Lambda_X^+$ and so $t_X^+(p_i)=\infty$ for $i=7,...,9$. \end{exam} \mbox{\boldmath $e$}gin{figure}[H] \includegraphics[scale=1.3]{figure4} \caption{${p_i}\in{\Lambda_X^+}$ for $i=1,...,6$ but ${p_j}\notin{\Lambda_X^+}$ for $j=7,...,10$.} \label{figure4} \end{figure} In the case, for the vector field $Z^s$, locally $\Sigma^s$ is a sub-manifold with boundary $\partial \Sigma^s=Xh^{-1}(0)$ or $\partial \Sigma^s=Yh^{-1}(0)$. Without loss of generality, we take $p\in{S_X}$ such that $Yh(p)>0$ then locally the set $S_X$ that is a sub-manifold of co-dimension $1$ separates the discontinuity manifold into two pieces $\Sigma^S$ and $\Sigma^{c^+}$ such that $\Sigma^S=Xh^{-1}((-\infty, 0])$ and $\Sigma^{c+}=Xh^{-1}((0,\infty))$. See Figure \ref{figure5}. \mbox{\boldmath $e$}gin{figure}[H] \includegraphics[scale=1]{figure5} \caption{Local behavior of a cusp-regular singularities in a $\mathsf{PSVF}$.} \label{figure5} \end{figure} In order to extend the Definition \ref{tx} to the vector field $Z^s$, in \cite{Teixeira1}, the following result is proved. \mbox{\boldmath $e$}gin{lem}\label{lem3.4} \mbox{\boldmath $e$}gin{itemize} \item [(i)] The vector field $Z^s$ can be smoothly extended beyond the boundary of $\Sigma^s$. \item [(ii)] If a point p in $\partial \Sigma^s$ is a fold point (resp. cusp point) of $X$ and a regular point of $Y$ then $Z^s$ is transverse to $\partial\Sigma^s$ at $p$ (resp. $Z^s$ has a quadratic contact with $\partial\Sigma^s$ at $p$). 
\end{itemize} \end{lem} So, by Lemma \ref{lem3.4} and the following corollary, Definition \ref{tx} is valid for $Z^s \in{\mathfrak{X}(\Sigma^S)}$. \begin{cor}\label{cor3.5} Let $Z=(X,Y)$, $p\in{S_X}$ and $Yh(p)>0$. If $p\in{S_X^i}$ then $(Z^s)Xh(p)<0$, but if $p\in{S_X^v}$ then $(Z^s)Xh(p)>0$. If $p\in{S_X^{ic}}$ then $p\in{S_{Z^s}^v}$, and if $p\in{S_X^{vc}}$ then $p\in{S_{Z^s}^i}$. \end{cor} In the following proposition, we show that the function $t_X^+$ is upper semi-continuous. We adopt the following notation: if $U$ is an open subset of $M$, then $\gamma_X(p)=\{\varphi_X(p,t); \ t\in{I}\}$ denotes the local orbit of a point $p\in{U}$, and $\gamma_X^+(p)$ and $\gamma_X^-(p)$ denote the positive and negative local orbits of $p$, respectively. \begin{prop}\label{propo3.5} The map $t_X^{+}:\Sigma^+\rightarrow \mathbb{R}^+\cup{\{\infty\}}$ such that $p \mapsto t_X^{+}(p)$ is upper semi-continuous. Analogously for $t_{Z^s}^+$ and $t_Y^+$. \end{prop} \begin{proof} Note that, if $p\notin{\Lambda_X^+}$ then $t_X^+(p)=\infty$ and $\limsup_{q\to p} t_X^+(q)\leq t_X^+(p)$, and so $t_X^{+}$ is upper semi-continuous at $p$. Now, assume that $p\in{\Lambda_X^+}$ with $p\in{\Sigma}$, let $q=\varphi_X(p,t_X^+(p))$ and $\gamma=\varphi_X(p,[0,t_X^+(p)])$. Choose coordinates $(x_1,x_2,x_3)$ around $p\in{\Sigma}$ such that $X=(1,0,0)$, and let $x_3=g(x_1,x_2)$ be a $C^{\infty}$ solution of $h(x_1,x_2,x_3)=0$ with $g(0,0)=0$. Fix $N=\{x_1=0\}$ as the section transverse to $X$ at $p$. Define the projection $\sigma_{X_p}:(\Sigma,p)\longrightarrow (N,p)$ of $\Sigma$, along the orbits of $X$, onto $N$ by \[\sigma_{X_p}(x_1,x_2,g(x_1,x_2))=(0,x_2,g(x_1,x_2)).\] Since $\Sigma$ is the disjoint union of submanifolds of decreasing dimension, $\Sigma_X\cup S_X^2 \cup S_X^3$, the map $\sigma_{X_p}$ is an immersion at $p$. This implies that every orbit meets the boundary of $\Sigma^+$ in a discrete set of points. 
Let $\mathcal{U}$ open of $p$ in $M$ and $\mathcal{U}_{\Sigma^+}=\mathcal{U}\cap \Sigma^+$. \mbox{\boldmath $e$}gin{enumerate} \item Assume that $p\in{\Sigma_X}$. \mbox{\boldmath $e$}gin{enumerate} \item[(1.1)] If $p\in{\Sigma_X^-}$ then $t_X^+(p)=0$ and so for all $\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}$ (Case (a) of Figure \ref{figure6}), we have that $t_X^+(\noindentilde{p})\longrightarrow t_X^+(p)$ whenever $\noindentilde{p}\longrightarrow p$. \item[(1.2)] If $Xh(p)>0$ and assume that not exists internal tangencies in the arc trajectory $\gamma$ and since $\Sigma=\Sigma_X\cup S_X^2\cup S_X^3$ assume that $q\in{\Sigma_X^-}$, by long tubular flow theorem there exists a tubular flow $(F,f)$ of $X$ such that $F\supset \gamma$, taking $F$ so small that the vector field in the box induced by $f$ and $X$ is the constant field $f_*X=(1,0,0)$ and choose the same coordinates $X=(x_1,x_2,x_3)$ around $p\in{\Sigma}$ (Figure \ref{figure6}). Consequently, again, for all $\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}$ we have that $t_X^+(\noindentilde{p})\longrightarrow t_X^+(p)$ whenever $\noindentilde{p}\longrightarrow p$. \mbox{\boldmath $e$}gin{figure}[H] \includegraphics{figure6} \caption{The tubular flow $(F,f)$ of $X$ such that $\gamma\subset F$.} \label{figure6} \end{figure} \end{enumerate} \item Assume that $p$ is a singularity tangency of order two, then $p$ is a fold of $\sigma_X$ ($X^2h(p)=\frac{\partial^2 g }{\partial {x_ 1}^2}\neq 0$) and in this case there exits a $C^r$-diffeomorphism $\noindentilde{\sigma}_{X_p}:(\Sigma,p)\longrightarrow (\Sigma,p)$, called the symmetric associated with $\sigma_{X_p}$ such that $\noindentilde{\sigma}_{X_p}(p)=p$, $\sigma_{x_p} \circ \noindentilde{\sigma}_{X_p} = \sigma_{X_p}$ and $\noindentilde{\sigma}_{X_p}^2=Id$. 
Observe that $S_X=\mathrm{Fix}(\noindentilde{\sigma}_X)$ and if $\noindentilde{p}\notin{S_X}$ then $\noindentilde{\sigma}_X(\noindentilde{p})$ is the point where the trajectory of $X$ passing through $\noindentilde{p}$ meets $\Sigma$. \mbox{\boldmath $e$}gin{enumerate} \item[(2.1)] If $p\in{S_X^i}$ then $g(x_1,x_2)$ is conjugate to $(x_1,x_2)\longmapsto{x}_1^2$, $\noindentilde{p}=(\noindentilde{x_1},\noindentilde{x_2},\noindentilde{x_3})\in{\Sigma^+}$ iff $\noindentilde{x}_3\geq {\noindentilde{x}_1}^2$ (Case (b) of Figure \ref{figure7}) and so for all $\noindentilde{p}\longrightarrow p$ with $\noindentilde{p}\in{\Sigma^+}$ there exists a unique $t(\noindentilde{p})\geq 0$ such that the orbit-solution $t\longrightarrow \phi_X(\noindentilde{p},t)$ of $X$ through $\noindentilde{p}$ meets $\Sigma$, at a point $\noindentilde{q}=\phi_X(\noindentilde{p},t(\noindentilde{p}))$, so $$\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=\lim_{\noindentilde{p}\noindento p} t(\noindentilde{p})=t(p)=0=t_X^+(p).$$ \item[(2.2)] Now, assume that $p\in{S_X^v}$ then $g(x_1,x_2)$ is conjugate to $(x_1,x_2)\longmapsto-{x_1}^2$, $\noindentilde{p}=(\noindentilde{x_1},\noindentilde{x_2},\noindentilde{x_3})\in{\Sigma^+}$ iff $\noindentilde{x}_3\geq -{\noindentilde{x}_1}^2$ (Case (c) of Figure \ref{figure7}). 
Reduce $\mathcal{U}$, if necessary, assume that $\gamma$ there are not internal tangencies in $\mathcal{U}$ then $\mathcal{U}_{\Sigma^+}=\mathcal{U}_1\cup \mathcal{U}_2\cup \mathcal{U}_3$ with $\mathcal{U}_1=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \gamma_X^+(\noindentilde{p}) \cap \Sigma_X^- \neq \emptyset\}$, $\mathcal{U}_2=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \gamma_X^+(\noindentilde{p})\cap S_X \neq \emptyset \noindentext{ or } \gamma_X^+(\noindentilde{p})\cap \Sigma = \emptyset \}$ and $\mathcal{U}_3=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \gamma_X^-(\noindentilde{p})\cap \Sigma_X^+ \neq \emptyset\}$ then for the points $\noindentilde{p}\in{\mathcal{U}_1}$, $\gamma_X^+(\noindentilde{p}) \cap \Sigma_X^- \neq \emptyset$ and so $\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=0$. Again, if we assume that $Xh(q)<0$ and there are no internal tangencies in the arc trajectory $\gamma$ so by long tubular flow theorem, reduce $F$, if necessary, for to not have internal tangencies in $F$ then for all $\noindentilde{p}\in{\mathcal{U}}$ there exists a unique $t(\noindentilde{p})\geq 0$ such that the orbit-solution $t\longrightarrow \phi_X(\noindentilde{p},t)$ of $X$ through $\noindentilde{p}$ meets $\Sigma$ (around of $q$) and for the points $\noindentilde{p} \in {\mathcal{U}_2 \cap \mathcal{U}_ 3}$ we have that $t(\noindentilde{p})=t_X^+(\noindentilde{p})$ so $\lim\sup_{\noindentilde{p}\noindento p} {t_X^+{(\noindentilde{p})}}=\inf\{\sup\{t_X^+(\noindentilde{p}); \ \noindentilde{p}\in{(\mathcal{U}_1\cup \mathcal{U}_2 \cup \mathcal{U}_3)\setminus{\{p\}}})\}\}\leq t_X^+(p).$ \newline Now, assume that $Xh(q)<0$ but $\gamma$ there are, in fact finites, internal tangencies and let $p_1\in{\gamma}$ ($p_1\neq p$) the first point in $\gamma$ such that $p_1\in{S_X^v}$. 
Let $t_1>0$ such that $\varphi_X(p,t_1)=p_1$ then $t_1\ll t_X^+(p)$ and $\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=t_1$ for the points $\noindentilde{p}\in{\mathcal{U}_3}$ and consequently $t_X^+$ is upper semi-continuous at $p$ in this cases. \end{enumerate} \mbox{\boldmath $e$}gin{figure}[H] \includegraphics{figure7} \caption{$X$ as a “straight” vector field and a “twisted” boundary.} \label{figure7} \end{figure} \item Assume that $p$ is a singularity tangency of order three, then $p$ is a cusp of $\sigma_X$. \mbox{\boldmath $e$}gin{enumerate} \item[(3.1)] If $X^3h(p)<0$ then $g(x_1,x_2)$ is conjugate to $(x_1,x_2)\longmapsto{x_1}^3+x_1x_2$, $\noindentilde{p}=(\noindentilde{x_1},\noindentilde{x_2},\noindentilde{x_3})\in{\Sigma^+}$ iff $\noindentilde{x}_3\geq {\noindentilde{x}}_1^3+\noindentilde{x}_1\noindentilde{x}_2$ (Case (d) of Figure \ref{figure7}) then $\gamma^+(\noindentilde{p})\cap \mathrm{int}({U \cap\Sigma^-}) \neq \empty$ and so $\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=t_X^+(p)$. \item[(3.2)]If $X^3h(p)>0$ then $g(x_1,x_2)$ is conjugate to $(x_1,x_2)\longmapsto-{x_1}^3-x_1x_2$ (Case (e) of Figure \ref{figure7}) then $\mathcal{U}_{\Sigma^+}={\rm V}_1\cup {\rm V}_2\cup {\rm V}_3$ with ${\rm V}_1=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \ \noindentilde{p}\in{S_X^i} \noindentext{ or } \gamma_X^+(\noindentilde{p}) \cap \Sigma_X^- \neq \emptyset \}$, ${\rm V}_2=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \ \gamma_X^+(\noindentilde{p})\in{S_X^v} \}$ and ${\rm V}_3=\{\noindentilde{p}\in{\mathcal{U}_{\Sigma^+}}; \ \noindentilde{p} \notin{ {\rm V}_1 \cup V_2} \}$. 
For the points in ${\rm V}_1$ by definition of $t_X^+$ we have that $\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=0$, for the other points in $U_{\Sigma}$, by long tubular flow theorem there exists a tubular flow $(F,f)$ of $X$ such that $F\supset \gamma$ and consequently, to use Case $2.2$ above for the points in ${\rm V}_2 \cup {\rm V}_3$ for show that $\lim_{\noindentilde{p}\noindento p} t_X^+(\noindentilde{p})=t_X^+(\noindentilde{p})$, so $t_X^+$ is upper semi-continuous at $p$. \end{enumerate} \end{enumerate} Now, if $p\in{\Lambda_X^+}$ but $p\notin{\Sigma}$ then we using the long tubular flow theorem for $\gamma$ (and using some the Cases 1), 2) or 3), if it is necessary) for show the assertion. And so $t_X^+$ is upper semi-continuous at for all $p\in{\Sigma^+}$ therefore it is a function upper semi-continuous. \end{proof} The following remark is a result notable of the before proposition. For each point in the manifold, there are only three possibilities for the limit superior of the function $t_X^+$. \mbox{\boldmath $e$}gin{rem}\label{cortx2} If $p\in{\Sigma^+}$ and $\noindentilde{p}\longrightarrow p$ then $\displaystyleplaystyle\lim\sup_{\noindentilde{p}\noindento p} {t_X^+{(\noindentilde{p})}}$ is $0$, $t_X^+(p)$ or the time $t>0$ such that $\varphi_X(p,t)$ is the first point of internal tangency of $\gamma$ depending of the cases $1)$, $2)$ or $3)$ of the proof of Proposition \ref{propo3.5}. \end{rem} A natural question is to ask whether function $t_X^+$ can be continuous. The answer is true in some cases. Using Proposition \ref{propo3.5} and Remark \ref{cortx2} we have the following corollary. \mbox{\boldmath $e$}gin{cor}\label{cortx} The application $t_X^{+}:\Sigma^+\rightarrow \mathbb{R}^+\cup{\{\infty\}}$ such that $p \mapsto t_X^{+}(p)$ is continuous whenever in $\Sigma=\Sigma_X\cup S_X^i$. Analogously for $t_{Z^s}^+$ and $t_Y^+$. 
\end{cor} \begin{proof} This follows from the long tubular flow theorem applied to the trajectory arc $\gamma=\varphi_X(p,[0,t_X^+(p)])$ for $p\in{\Lambda_X^+}$. \end{proof} Now, we build the hypotheses needed to apply the gluing lemma and create the semi-dynamical system. Again, we assume that $M$ is a closed $3$-dimensional $C^1$ manifold, and that the discontinuity manifold has crossing regions and sliding regions. The boundary of these regions is composed of tangency points. We consider the points in $\Sigma$ classified in two types $\mathrm{A}$ and $\mathrm{B}$, as follows: \begin{itemize} \item [Case A.] Singularity points of type singular-regular; \begin{itemize} \item [A1.] $p\in{S_X^v}$ and $Yh(p)> 0$ (or $p\in{S_Y^v}$ and $Xh(p)< 0$); \item [A2.] $p\in{S_X^i}$ and $Yh(p)> 0$ (or $p\in{S_Y^i}$ and $Xh(p)<0$); \item [A3.] $p\in{S_X^{ic}}$ and $Yh(p)> 0$ (or $p\in{S_Y^{ic}}$ and $Xh(p)<0$); \item [A4.] $p\in{S_X^{vc}}$ and $Yh(p)> 0$ (or $p\in{S_Y^{vc}}$ and $Xh(p)<0$). \end{itemize} \begin{figure}[h] \includegraphics[scale=1.2]{figure8} \caption{Local behavior of the points in case $\mathrm{A}$.} \end{figure} \item [Case B.] Singularity points of type singular-singular; \begin{itemize} \item [B1.] $p\in{S_X^v \cap S_Y^i}$ (or $p\in{S_X^i \cap S_Y^v}$) with $p\in{\partial \Sigma^{c+}\cap \partial \Sigma^{c-}}$; \item [B2.] $p\in{S_X^{ic}}$ and $p\in{S_Y^i}$ (or $p\in{S_Y^{ic}}$ and $p\in{S_X^i}$) with $S_X$ and $S_Y$ tangent at $p$. \end{itemize} \begin{figure}[h] \includegraphics[scale=1.2]{figure9} \caption{Local behavior of the points in case $\mathrm{B}$.} \end{figure} \end{itemize} We also denote by $A_1$ to $B_2$ the sets of points in the cases $A_1$ to $B_2$. Note that, by Definition \ref{slide}, if $p$ is a point of case $\mathrm{B2}$ then $Xh(p)=Yh(p)=0$ and so $Z^sh(p)=0$. 
With the following lemma, we guarantee that the positive local trajectory through the tangency points in the cases $\mathrm{A1}$ to $\mathrm{B2}$ is unique. \begin{lem}\label{traj1} The positive local trajectory $\varphi_{Z}(p,t)$ of a $\mathsf{PSVF}$ of the form (\ref{spvf}) through a point $p\in S_Z$ in some of the cases $\mathrm{A1}$ to $\mathrm{B2}$ is unique. \end{lem} \begin{proof} In fact, by Definition \ref{traj} and Lemma \ref{cor3.5}, \begin{enumerate} \item [1)] for $p$ in $\mathrm{A1}$, $\mathrm{A4}$ and $\mathrm{B1}$, the trajectory is given by $\varphi_{Z}(p,t)=\varphi_{X}(p,t)$ (respectively $\varphi_{Z}(p,t)=\varphi_{Y}(p,t)$); \item [2)] for $p$ in $\mathrm{A2}$ and $\mathrm{A3}$, the trajectory is given by $\varphi_{Z}(p,t)=\varphi_{Z^s}(p,t)$; \item [3)] for $p$ in $\mathrm{B2}$, the trajectory is given by $\varphi_{Z}(p,t)=\varphi_{Z^s}(p,t)=p$ \end{enumerate} for $t\in{I\cap \mathbb{R}^+}$ and $0\in{I}$. \end{proof} In the following definition, for each $p\in{M}$ we build a sequence of points on the discontinuity manifold using the function $t_X^+$, together with a sequence of fields $X$, $Y$ and $Z^s$ and closed intervals of positive time. The purpose of these sequences is to restrict the domains of the flows generated by the fields in question. \begin{definition}\label{omegax} Let $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ with $\Sigma=\Sigma^c \cup \Sigma^s\cup S_Z$ and $p\in S_Z$ in some of the cases $\mathrm{A1}$ to $\mathrm{B2}$. 
If $p\in{M}$ then denoted $p_0=p$ and $p_i= \varphi_{Z_{{i-1}(p)}}(p_{i-1},t_{Z_{{i-1}(p)}}^+(p_{i-1}))$ for $i\in{\mathbb{Z}^+}$, where $Z_{0(p)}=X$ if $p\in{\mathrm{int}(\Sigma^+)}$, $Z_{0(p)}=Y$ if $p\in{\mathrm{int}(\Sigma^-)}$ and \mbox{\boldmath $e$}gin{equation*} Z_{i(p)} = \left\{ \mbox{\boldmath $e$}gin{array}{rcl} X && \noindentext{if } p_{i}\in\Sigma^{c+} \noindentext{or } p_{i}\in{( \mathrm{A1} \cup \mathrm{A4} \cup \mathrm{B1})\cap(S_X^v \cup S_X^{vc}) },\\ Z^s && \noindentext{if } p_{i}\in\Sigma^{s} \noindentext{or } p_{i}\in {\mathrm{A2}\cup \mathrm{A3} \cup \mathrm{B2}},\\ Y && \noindentext{if } p_i\in{\Sigma^{c-}} \noindentext{or } p_{i}\in{( \mathrm{A1} \cup \mathrm{A4} \cup \mathrm{B1})\cap(S_Y^v \cup S_Y^{vc}) }. \end{array}\right. \end{equation*} Let $\Delta_X^+(p)=\{i\in{\mathbb{Z}^+}; \ Z_{i(p)}=X\}$ and $\Omega_X=\{(p,t)\in{M\noindentimes \mathbb{R}^+}; \ t\in{I_X(p)}\}$ whit $$I_X(p)=\bigcup_{i\in{\Delta_X^+}(p)} I_X^i(p)$$ where $I_X^0(p) = \left[ 0,a_{0(p)} \right] $, $I_X^i(p) = \left[ a_{{i-1}(p)},a_{i(p)} \right] $ for $i>0$ such that $a_{i(p)}=\displaystyleplaystyle\sum_{j=0}^{i} t_{Z_{j(p)}}^+(p_j)$. Analogously, we defined $\Omega_Y$ and $\Omega_{Z^s}$ for the vector fields $Y$ and $Z^s$, respectively. \end{definition} Now, we define the functions to be glued together. \mbox{\boldmath $e$}gin{definition}\label{semiflow} Let $\varphi_X$, $\varphi_Y$ and $\varphi_{Z^s}$ the flows of the vector fields $X\in{M}$, $Y\in{M}$ and $Z^s\in{\mathfrak{X}(\Sigma^S)}$, respectively. Denote by the restriction of the domains to $\Omega_X$, $\Omega_Y$ or $\Omega_{Z^s}$ correspond $$\phi_{X}: \Omega_X \longrightarrow \Sigma^+,$$ $$\phi_{Y}: \Omega_Y \longrightarrow \Sigma^- \noindentext{ and }$$ $$\phi_{Z^s}: \Omega_{Z^s} \longrightarrow \Sigma^S.$$ For $(p,t)\in{\Omega_X}$ then $t\in I_X^i(p)$ with $i\in{\Delta_X^+(p)}$ and $\phi_X(p,t)=\varphi_X(p_i,t-a_{i-1}(p))$. Analogously, for $\phi_Y$ and $\phi_{Z^s}$. 
\end{definition} \mbox{\boldmath $e$}gin{rem}\label{rem3.14} The $\phi_{X}: \Omega_X \longrightarrow M $ is a continuous function, in fact $\phi_X=\mathfrak{i}\circ \varphi_X|_{\Omega_X} $ with $\mathfrak{i}$ is the inclusion function of $\Sigma^+$ in $M$. Analogously, for $\phi_Y$ and $\phi_{Z^s}$. \end{rem} The following lemma is a significant result for the proof that the collage of the limits points of the domain of the functions for the gluing is correct. \mbox{\boldmath $e$}gin{lem}\label{lem3.15} If $(p,s)\in{\overline{\Omega_X}}$ then $\phi_{Z_{i(p)}}(p,s)\in{\Sigma^+}$ where $s\in{I_{Z_{i(p)}}^i(p)}$. \end{lem} \mbox{\boldmath $e$}gin{proof} Assume by contra-position that $q=\phi_{Z_{i(p)}}(p,s)\notin{\Sigma^+}$ then $q\in{\mathrm{int}(\Sigma^-)}$. Let $V$ a open neighborhood of $\mathrm{int}(\Sigma^-)$ such that $q\in{V}$ then by Remark \ref{rem3.14} $\phi_Y^{-1}(V)$ is a open neighborhood of $M\noindentimes \mathbb{R^+}$ such that $(p,s)\in{\phi_Y^{-1}(V)}$ and for all $(\noindentilde{p},\noindentilde{s})\in{\phi_Y^{-1}(V)}$, $\noindentilde{s}\in{I_Y(p)\setminus I_X(p)}$ and so $\phi_Y^{-1}(V)\cap \Omega_X =\emptyset$ that is $ (p,s)\notin{\overline{\Omega_X}}$. \end{proof} For $(p,s)\in{M\noindentimes\mathbb{R^+}}$ let $\gamma_i(p)=\varphi_{Z_{i(p)}}(p_i,[0,t_{Z_{i(p)}}^+(p_i)])$ for each $i\in{\Delta_Z^+(p)}$ and $\Gamma$ the concatenation of for all $\gamma_i(p)$. Let $\Delta_Z^+(p)=\Delta_X^+(p)\cup\Delta_Y^+(p)\cup\Delta_{Z^s}^+(p)$. \mbox{\boldmath $e$}gin{prop}\label{prop3.15} Let $(p,s)\in\overline{\Omega_X}$ with $s\in{I_{Z_{i(p)}}^i(p)}$ and $i\in{\Delta_Z^+(p)}$ then $$\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \phi_X(\noindentilde{p},\noindentilde{s})=\phi_{Z_{i(p)}}(p,s)$$ whenever $(\noindentilde{p},\noindentilde{s})\in{(V\noindentimes I)\cap\Omega_X}$ with $V\noindentimes I $ is a neighborhood of $(p,s)$ in $M\noindentimes\mathbb{R^+}$. 
Analogously for $(p,s)$ in $\overline{\Omega_Y}$ and $\overline{\Omega_{Z^s}}$. \end{prop} \mbox{\boldmath $e$}gin{proof} If $(p,s)\in{\Omega_X}$ follow by continuity of $\phi_X$. For $(p,s)\notin{\Omega_X}$ then $s\in{(I_Y(p)\cup I_{Z^s}(p))\setminus I_X(p)}$, that is, there exists $i\in{\Delta_Z(p)^+}$ such that $s\in{I_{Z_{i(p)}}^i(p)}$ with $Z_{i(p)} \neq X $. Fixing $p\in{M}$, we proceed by induction on $i$. \newline \noindentextit{Induction basis:} Assume that $i=0$ then $(p,s)\in{M\noindentimes\mathbb{R^+}}$ is such that $s\in{I_{Z_{0(p)}}^0(p)}$, note that, $p\in{\Sigma^-}$. Firstly, assume that $p\in{\mathrm{int}(\Sigma^-)}$ then $Z_{0(p)}=Y$ and by Lemma \ref{lem3.15} $s\neq 0$. Then $\gamma_0(p)=\varphi_Y(p,[0,a_{0(p)}])=\varphi_Y(p,[0,t_Y^+(p)])$ and so we have the following cases: \mbox{\boldmath $e$}gin{enumerate} \item For $s<a_{0(p)}$, this is $\gamma_0(p)$ has internal tangencies. The first internal tangency is a adherent point of $\Omega_X$ if, and only if, these is a point of case $\mathrm{B1}$ then let $q=\phi_Y(p,s)=\varphi_Y(p,s)$ this point, so $q\in{S_X^i\cap S_Y^v}$ and also $q\in{\partial \Sigma^{c+} \cap \partial \Sigma^{c-}}$. We are going to show that $\phi_X(\noindentilde{p},\noindentilde{s})\longrightarrow q$ when $\noindentilde{p}\longrightarrow p$ and $\noindentilde{s}\longrightarrow s$. 
Note that (Case (a) of Figure \ref{figure10}) $\noindentilde{s}\in{I_X^1(\noindentilde{p})}$ and as $p\in{\mathrm{int}(\Sigma^-)}$ and $\gamma_0(p)$ has internal tangencies by Remark \ref{cortx2} we have that $\displaystyleplaystyle\lim_{\noindentilde{p}\noindento p}t_Y^+(\noindentilde{p})=s$ and $$\lim_{\noindentilde{p} \noindento p } \noindentilde{p}_1= \lim_{\noindentilde{p} \noindento p} \varphi_Y(\noindentilde{p},t_Y^+(\noindentilde{p}))=\varphi_Y(\lim_{\noindentilde{p}\noindento p} \noindentilde{p},\lim_{\noindentilde{p}\noindento p} t_Y^+(\noindentilde{p}))=\varphi_Y(p,s)=q,$$ and so \mbox{\boldmath $e$}gin{align*} \lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \phi_X(\noindentilde{p},\noindentilde{s}) &=\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \varphi_X(\noindentilde{p}_1,\noindentilde{s}-a_{0(\noindentilde{p})}) \\ &=\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \varphi_X(\noindentilde{p}_1,\noindentilde{s}-t_Y^+(\noindentilde{p})) \\ &= \varphi_X \left( \lim_{\noindentilde{p}\noindento p} \noindentilde{p}_1,\lim_{\noindentilde{s}\noindento s}\noindentilde{s}-\lim_{\noindentilde{p}\noindento p} t_Y^+(\noindentilde{p})\right) \\ &= \varphi_X(q, s-s)=q. 
\end{align*} \mbox{\boldmath $e$}gin{minipage}{\linewidth} \mbox{\boldmath $e$}gin{minipage}{0.47\linewidth} \mbox{\boldmath $e$}gin{figure}[H] \includegraphics[scale=1.2]{figure10} \caption{$q=\phi_Y(p,s)$ is a point of case $\mathrm{B1}$ and $\noindentilde{p}_1\in{\Sigma^{c+}}$.} \label{figure10} \end{figure} \end{minipage} \hspace{0.02\linewidth} \mbox{\boldmath $e$}gin{minipage}{0.47\linewidth} \mbox{\boldmath $e$}gin{figure}[H] \includegraphics[scale=1.2]{figure11} \caption{ $q=\phi_Y(p,s)$ is a point of case $\mathrm{A3}$, $\noindentilde{p}_1\in{\Sigma^{s}}$ and $\noindentilde{p}_2\in{\partial\Sigma^{s}}$.} \label{figure11} \end{figure} \end{minipage} \end{minipage} \newline Now, if $\varphi_Y(p,s)$ is not the first internal tangency, the proof follow by induction in $k$ where $k$ is the position of internal tangency, in fact $\gamma_0(p)$ has finite tangencies with $\Sigma$. \newline \item For $s=a_{0(p)}$, assume that $\gamma_{0(p)}$ has no internal tangencies. In this case, $q=\phi_Y(p,s)=\phi_Y(p,a_{0(p)})=\phi_Y(p,t_Y^+(p)) =p_1$ is a point of case either $\mathrm{A1}$ or $\mathrm{A3}$. For $q \in{\mathrm{A1}}$ the proof is analogous to case (1), it remains to prove to $q\in{\mathrm{A3}}$. As $(p,s)$ is a adherent point of $\Omega_X$ then either $\noindentilde{s}\in{I_X^1(p)}$ if $\noindentilde{p}_1\in{\Sigma^{c+}\cup\partial\Sigma^s}$ (is analogous to case $(1)$) or $\noindentilde{s}\in{I_X^2(p)}$ if $\noindentilde{p}_1\in{\Sigma^s}$ (Case (b) of Figure \ref{figure11}). 
For $\noindentilde{s}\in{I_X^2(p)}$ then $\noindentilde{p}_1\longrightarrow q$ when $\noindentilde{p}\longrightarrow p$ and by using the part $2.2$ of the proof of Proposition \ref{propo3.5} but with the map $t_{Z^s}^+:\Sigma^S\longrightarrow \mathbb{R^+}$ we have that $\displaystyleplaystyle\lim_{\noindentilde{p}_1\noindento q}t_{Z^s}^+(\noindentilde{p}_1)=0$, in fact $\gamma_1(\noindentilde{p})=\varphi_{Z^s}(\noindentilde{p}_1,[0,t_{Z^s}^+(\noindentilde{p}_1)])$ has no internal tangencies with $\partial\Sigma^s$ and so $$\lim_{\noindentilde{p}\noindento p} \noindentilde{p}_2= \lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{p}_1\noindento q}} \varphi_{Z^s}(\noindentilde{p}_1,t_{Z^s}^+(\noindentilde{p}_1))=\varphi_{Z^s}(\lim_{\noindentilde{p}\noindento p} \noindentilde{p}_1,\lim_{\noindentilde{p}_1\noindento q} t_{Z^s}^+(\noindentilde{p}_1))=\varphi_{Z^s}(q,0)=q,$$ and so \mbox{\boldmath $e$}gin{align*} \lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \phi_X(\noindentilde{p},\noindentilde{s}) &=\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \varphi_X(\noindentilde{p}_2,\noindentilde{s}-a_1(\noindentilde{p})) \\ &=\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \varphi_X(\noindentilde{p}_2,\noindentilde{s}-(t_Y^+(\noindentilde{p})+t_{Z^s}^+(\noindentilde{p}_1))) \\ &= \varphi_X(\lim_{\noindentilde{p}\noindento p} \noindentilde{p}_1,\lim_{\noindentilde{s}\noindento s}\noindentilde{s}-(\lim_{\noindentilde{p}\noindento p} t_Y^+(\noindentilde{p}) + \lim_{\noindentilde{p}_1\noindento q} t_{Z^s}^+(\noindentilde{p}_1) )) \\ &= \varphi_X(q, s-(s+0))=q. 
\end{align*} If $\gamma_{0(p)}$ has internal tangencies and $(p,a_{0(p)})\in\overline{\Omega_X}$ then this internal tangencies are points of case $\mathrm{B1}$ and so use to item (1) and then the item (2) but instead of $\noindentilde{p}$ use to $\noindentilde{p}_j$ with $j\in{\Delta_Z^+{\noindentilde{p}}}$ such that $\noindentilde{p}_j \noindento \noindentilde{q}$ where $p^*$ is the last of the internal tangencies of $\gamma_{0(p)}$ such that $(p,s^*)\in\overline{\Omega_X}$ with $\phi_{Z_{0(p)}}(p,s^*)=\phi_Y(p,s^*)=q^*$. \end{enumerate} Now, assume that $p\in{\Sigma}$. As $s\in{I_{Z_{0(p)}}^0(p)}$ then either $s=0$ or $0<s\leq t_{Z_{0(p)}}^+(p)$. If $s=0$ then $p$ is a point of cases $A2$, $A3$ or $B1$ and doing a similar analysis to the previous items $(1)$ and $(2)$ we have that $\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento 0}} \phi_X(\noindentilde{p},\noindentilde{s})=p$. If $0<s\leq t_{Z_{0(p)}}^+(p)$ then $\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \phi_X(\noindentilde{p},\noindentilde{s})$ is either a internal tangency of $\gamma_0(p)$ ( with $\Sigma$ when $Z_{0(p)}=Y$ or with $\partial\Sigma^s$ when $Z_{0(p)}=Z^s$ ) or $p_1$, and the proof is analogous to the previous items $(1)$ and $(2)$. Analogously for $(p,s)$ in $\overline{\Omega_Y}$ and $\overline{\Omega_{Z^s}}$ with $s\in{I_{Z_{0(p)}}^0(p)}$. 
\newline \noindentextit{Inductive step:} We assume that the proposition is true for all $(p,s)\in{\overline{\Omega_X}}$ ($(p,s)\in{\overline{\Omega_Y}}$ or $(p,s)\in{\overline{\Omega_{Z^s}}}$) with $s\in{I_{Z_{i(p)}}^i(p)=[a_{{i-1}(p)},a_{i(p)}]}$ thus for $s=a_{i(p)}$, if $(p,a_{i(p)})\in{\overline{\Omega_{Z^*}}}$ with $Z^*=X,$ $Y$ or $Z^s$ so $$\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento a_{i(p)}}} \phi_{Z^*}(\noindentilde{p},\noindentilde{s})=\phi_{Z_{i(p)}}(p,a_{i(p)})=p_{i+1},$$ then there exists $j\in{\Delta_Z^+(\noindentilde{p})}$ such that $\noindentilde{p}_j\longrightarrow p_{i+1}$ and by continuity of $\varphi_{Z_{{j-1}(\noindentilde{p})}}$; $a_{{j-1}(\noindentilde{p})}\longrightarrow a_{i(p)}$ whenever $\noindentilde{p}\longrightarrow p$. First assume that $a_{i(p)}<s<a_{{i+1}(p)}$, this is, $\gamma_{i+1}(p)\subset{\Sigma}$ has internal tangencies \mbox{\boldmath $e$}gin{enumerate} \item [(a)] if $Z_{{i+1}(p)}=Z^s$ so $\gamma_{i+1}(p)\subset{\Sigma}$ and $q=\phi_{Z^s}(p,s)\in{\mathrm{A3}}$ with $q$ cusp visible point for $X$, Figure \ref{figure12} when $p_{i+1}\in{\Sigma^s}$. 
As $V$ a neighborhood of $M$ contain $p$ by inductive hypothesis $\noindentilde{s}\in{I_{Z_{{j+1}(\noindentilde{p})}}^{j+1}(\noindentilde{p})}$ with $Z_{{j+1}(\noindentilde{p})}=X$ for each $(\noindentilde{p},\noindentilde{s})\in{(V\noindentimes I)\cap \Omega_X}$ so $\displaystyleplaystyle\lim_{\noindentilde{p}_j\noindento p_{i+1}} t_{Z^s}^+(\noindentilde{p}_j)=s-a_{i(p)}$, $\displaystyleplaystyle\lim_{\noindentilde{p}\noindento p} \noindentilde{p}_{j+1}= \lim\limits_{\noindentilde{p}_j\noindento p_{i+1}} \varphi_{Z^s}(\noindentilde{p}_j,t_{Z^s}^+(\noindentilde{p}_j))=\varphi_{Z^s}(p_{i+1},s-a_{i(p)})=\phi_{Z^s}(p,s)=q$ and $\displaystyleplaystyle\lim_{\noindentilde{p}\noindento p}a_{j(\noindentilde{p})}=\displaystyleplaystyle\lim_{\noindentilde{p}\noindento p}a_{{j-1}(\noindentilde{p})} +\displaystyleplaystyle\lim_{\noindentilde{p}_{j-1}\noindento p_{i+1}} t_{Z^s}^+(\noindentilde{p}_j)=a_{i(p)}+(s-a_{i(p)})=s$ and therefore and so \mbox{\boldmath $e$}gin{align*} \lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \phi_X(\noindentilde{p},\noindentilde{s}) &=\lim\limits_{\substack{\noindentilde{p} \noindento p \\ \noindentilde{s}\noindento s}} \varphi_X(\noindentilde{p}_{j+1},\noindentilde{s}-a_{j(\noindentilde{p})}) \\ &= \varphi_X(\lim_{\noindentilde{p}\noindento p} \noindentilde{p}_{j+1},\lim_{\noindentilde{s}\noindento s}\noindentilde{s}-\lim_{\noindentilde{p}\noindento p} a_{j(\noindentilde{p})}) \\ &= \varphi_X(q,0)=q. \end{align*} \mbox{\boldmath $e$}gin{figure}[H] \includegraphics[scale=1.6]{figure12} \caption{$q=\phi_Y(p,s)$ with $a_{i(p)}<s<a_{{i+1}(p)}$ is a point of case $\mathrm{A3}$, $\noindentilde{p}_j\in{\Sigma^{s}}$ and $\noindentilde{p}_{j+1}\in{\partial\Sigma^{s}}$.} \label{figure12} \end{figure} \item [(b)] if $Z_{{i+1}(p)}=Y$ or $Z_{{i+1}(p)}=X$ so the process is analogous to item $(1)$ of the induction basis using the inductive hypothesis as in the below item (b). 
\end{enumerate} For $(p,s)\in{\overline{\Omega_X}}$ with $s=a_{{i+1}(p)}$ the process is analogous to item $(2)$ of the induction basis, using the inductive hypothesis as in item (b). Analogously for $(p,s)$ in $\overline{\Omega_Y}$ and $\overline{\Omega_{Z^s}}$. \end{proof} It is essential to highlight the following consequence. \begin{cor}\label{coromegax} The map $\phi_X:\overline{\Omega_X}\longrightarrow \Sigma^+$ is continuous and if $(p,s)\in{\overline{\Omega_X}\setminus \Omega_X }$ with $s\in{I_{Z_{i(p)}}^i(p)}$ then $\phi_X(p,s)=\phi_{Z_{i(p)}}(p,s)$. Analogously for the maps $\phi_Y:\overline{\Omega_Y}\longrightarrow \Sigma^-$ and $\phi_{Z^s}:\overline{\Omega_{Z^s}}\longrightarrow \Sigma^S$. \end{cor} Now, we are ready to construct the semi-dynamical system for a $\mathsf{PSVF}$. \begin{thm}\label{semiflow1} Assume that $M$ is a closed 3-dimensional $C^1$ manifold and $Z\in{\mathfrak{X}(M,h)}$. If $\Sigma=\Sigma^{c}\cup\Sigma^{s}\cup S_Z$ and every $p \in{S_Z}$ is in some of the cases $\mathrm{A1}$ to $\mathrm{B2}$, then the trajectories of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ generate a semi-dynamical system $(M,\phi_{Z})$. \end{thm} For the proof of Theorem \ref{semiflow1}, we first show the following lemmas. \begin{lem}\label{sf1} Under the conditions of Theorem \ref{semiflow1}: \begin{enumerate} \item [(a)] For all $(p,t)\in \overline{\Omega_X} \cap \overline{\Omega_{Z^s}}$ we have that $\phi_X(p,t)=\phi_{Z^s}(p,t)$; \item [(b)] For all $(p,t)\in \overline{\Omega_Y} \cap \overline{\Omega_{Z^s}}$ we have that $\phi_Y(p,t)=\phi_{Z^s}(p,t)$ and \item [(c)] For all $(p,t)\in \overline{\Omega_X} \cap \overline{\Omega_Y}$ we have that $\phi_X(p,t)=\phi_Y(p,t)$. 
\end{enumerate} \end{lem} \begin{proof} Let $(p,t)\in \overline{\Omega_X} \cap \overline{\Omega_{Z^s}}$. Then \begin{itemize} \item [(1)] If $(p,t)\in{\Omega_X\cap\Omega_{Z^s}}$ then there exists $i\in{\Delta_Z^+(p)}$ such that $t\in{I_X^i(p)\cap I_{Z^s}^{i+1}(p)}$ (similarly for $t\in{I_{Z^s}^i(p)\cap I_{X}^{i+1}(p)}$), so $t=a_{i(p)}$ and $$\phi_X(p,t)=\varphi_X(p_{i+1},t-a_{i(p)})=\varphi_X(p_{i+1},0)=p_{i+1}=\varphi_{Z^s}(p_{i+1},0)=\phi_{Z^s}(p,t).$$ \item [(2)] If $(p,t)\in (\overline{\Omega_X} \cap \overline{\Omega_{Z^s}})\setminus \Omega_X $ (similarly for $(p,t)\in (\overline{\Omega_X} \cap \overline{\Omega_{Z^s}})\setminus \Omega_{Z^s} $) then there exists $i\in{\Delta_{Z^s}(p)}$ such that $t\in{I_{Z^s}^i(p)}$ and by Corollary \ref{coromegax} $\phi_X(p,t)=\phi_{Z^s}(p,t)$. \item [(3)] If $(p,t)\in (\overline{\Omega_X} \cap \overline{\Omega_{Z^s}})\setminus ( \Omega_X \cap \Omega_{Z^s})$ then $t=0$ and $p$ is a point of case $\mathrm{A1}$, so $\phi_X(p,t)=\varphi_X(p,0)=p=\varphi_{Z^s}(p,0)=\phi_{Z^s}(p,t)$. \end{itemize} The proofs of (b) and (c) are analogous. \end{proof} \begin{lem}\label{sf2} Under the same hypotheses as Theorem \ref{semiflow1}, the map $\phi_Z:M \times \mathbb{R^+} \longrightarrow M$ such that \begin{equation*} \phi_Z(p,t) = \left\{ \begin{array}{rcl} \phi_X(p,t)&& \text{if } (p,t)\in{\overline{\Omega_X}}, \\ \phi_{Z^s}(p,t)&& \text{if } (p,t)\in{\overline{\Omega_{Z^s}}}, \\ \phi_Y(p,t)&&\text{if }(p,t)\in{\overline{\Omega_Y}} \end{array}\right. \end{equation*} is well defined for all $(p,t)\in{M \times\mathbb{R^+}}$. 
\end{lem} \begin{proof} For all $p\in{M}$, applying Definition \ref{traj} and Lemma \ref{traj1} we have that \begin{itemize} \item [(a)] if $p\in{\mathrm{int}(\Sigma^+)}$ then $\phi_Z(p,t)=\phi_X(p,t)$ for all $t\in{[0,a_{0(p)}]}$; \item [(b)] if $p\in{\mathrm{int}(\Sigma^-)}$ then $\phi_Z(p,t)=\phi_Y(p,t)$ for all $t\in{[0,a_{0(p)}]}$; \item [(c)] if $p\in{\Sigma^{c+}}$ then $\phi_Z(p,t)=\phi_X(p,t)$ for all $t\in{[0,a_{0(p)}]}$; \item [(d)] if $p\in{\Sigma^{c-}}$ then $\phi_Z(p,t)=\phi_Y(p,t)$ for all $t\in{[0,a_{0(p)}]}$; \item [(e)] if $p\in{\Sigma^s}$ then $\phi_Z(p,t)=\phi_{Z^s}(p,t)$ for all $t\in{[0,a_{0(p)}]}$; \item [(f)] if $p$ is in the cases $\mathrm{A1}$, $\mathrm{A4}$ or $\mathrm{B1}$ and is a fold or cusp visible point for $X$, then $\phi_Z(p,t)=\varphi_X(p,t)$ for all $t\in{[0,a_{0(p)}]}$, \item [(g)] if $p$ is in the cases $\mathrm{A2}$, $\mathrm{A3}$ or $\mathrm{B2}$ then $\phi_Z(p,t)=\phi_{Z^s}(p,t)$ for all $t\in{[0,a_{0(p)}]}$, \item [(h)] if $p$ is in the cases $\mathrm{A1}$, $\mathrm{A4}$ or $\mathrm{B1}$ and is a fold or cusp visible point for $Y$, then $\phi_Z(p,t)=\varphi_Y(p,t)$ for all $t\in{[0,a_{0(p)}]}$. \end{itemize} If $a_{0(p)}$ is a real number then $\phi_{Z_{1(p)}}(p,a_{0(p)})\in{\Sigma}$; hence we apply items (c) to (h) above for all $t\in[a_{0(p)}, a_{1(p)}]$, and do it again if $a_{1(p)}=t_{Z_{0(p)}}^+(p_0)+t_{Z_{1(p)}}^+(p_1)<\infty$, and so forth. 
\end{proof} \mbox{\boldmath $e$}gin{proof}[Proof of Theorem \ref{semiflow1}] Applying the gluing lemma for $\phi_X:\overline{\Omega_X}\longrightarrow M$, $\phi_Y:\overline{\Omega_Y}\longrightarrow M$ and $\phi_{Z^s}:\overline{\Omega_{Z^s}}\longrightarrow M$, by Remark \ref{rem3.14}, Proposition \ref{prop3.15}, Lemmas \ref{sf1} and \ref{sf2} we have that $\phi_Z:M \noindentimes \mathbb{R^+} \longrightarrow M$ such that \mbox{\boldmath $e$}gin{equation*} \phi_Z(p,t) = \left\{ \mbox{\boldmath $e$}gin{array}{rcl} \phi_X(p,t)&& \noindentext{if } (p,t)\in{\overline{\Omega_X}}, \\ \phi_{Z^s}(p,t)&& \noindentext{if } (p,t)\in{\overline{\Omega_{Z^s}}}, \\ \phi_Y(p,t)&&\noindentext{if }(p,t)\in{\overline{\Omega_Y}} \end{array}\right. \end{equation*} is continuous.\\ Now, we prove the items 1) and 2) of Definition \ref{semiflowdef}. The initial value property is satisfied since $\varphi_X$, $\varphi_{Z^s}$ and $\varphi_Y$ are flows. In order to prove the semi-group property, let $p\in{M}$ and $t,s\in{\mathbb{R^+}}$ and we prove that $\phi_Z(\phi_Z(p,t),s)=\phi_Z(p,t+s)$. Assume that $t\in{{\rm I}_{Z_{j(p)}}^j(p)}$ and let \mbox{\boldmath $e$}gin{equation} q=\phi_Z(p,t)=\phi_{Z_{j(p)}}(p,t)=\varphi_{Z_{j(p)}}(p_j,t-a_{{j-1}(p)}), \end{equation} assume also that $s\in{I_{Z_{i(q)}}^i(q)}$ then $t+s\in{I_{Z_{{j+i}(p)}}^{j+i}(p)}$ and $Z_{i(q)}=Z_{{j+i}(p)}$ so we use induction on $i\in{\Delta_Z^+(q)}$ for prove that \mbox{\boldmath $e$}gin{enumerate} \item $q_i=p_{j+i}$ for $i>0$, \item $a_{{i-1}(q)}=a_{{j+i-1}(p)}-t$ for $i>0$ and finally that \item $\phi_Z(\phi_Z(p,t),s)=\phi_Z(p,t+s)$. 
\end{enumerate} \noindentextit{Induction basis:} Assume that $i=0$ then $s\in{I_{Z_{0(q)}}^0(q)}$, $t+s\in{I_{Z_{j(q)}}^j(q)}$ and $Z_{0(q)}=Z_{j(p)}$ and so \mbox{\boldmath $e$}gin{align*} \phi_Z(\phi_Z(p,t),s) &=\phi_Z(q,s)=\phi_{Z_{0(q)}}(q,s) \\ &= \varphi_{Z_{0(q)}}(\varphi_{Z_{j(p)}}(p_j,t-a_{j(p)}),s) \\ &= \varphi_{Z_{j(p)}}(\varphi_{Z_{j(p)}}(p_j,t-a_{j(p)}),s) \\ &= \varphi_{Z_{j(p)}}(p_j,t-a_{j(p)}+s) \\ &= \varphi_{Z_{j(p)}}(p_j,(t+s)-a_{j(p)}) \\ &= \varphi_{Z_{j(p)}}(p,t+s) \\ &= \phi_Z(p,t+s). \end{align*} \noindentextit{Inductive step:} For $i\in{\Delta_Z(q)}$ and $s\in{I_{Z_{i(q)}}^i(q)}$ assume true that $q_i=p_{j+i}$, $a_{i-1}(q)=a_{j+i-1}(p)-t$ and $\phi_Z(\phi_Z(p,t),s)=\phi_Z(p,t+s)$ for all $t\in{{\rm I}_{Z_{j(p)}}^j(p)}$ and $s\in{{\rm I}_{Z_{i(q)}}^i(q)}$ then for $i+1\in{\Delta_Z(q)}$ and $s\in{I_{Z_{{i+1}(q)}}^{i+1}(q)}$ \mbox{\boldmath $e$}gin{enumerate} \item $q_{i+1}=\varphi_{Z_{i(q)}}(q_i,t_{Z_{i(q)}}^+(q_i))=\varphi_{Z_{{j+i}(p)}}(p_{j+i},t_{Z_{{j+i}(p)}}^+(p_{j+i}))= p_{j+i+1}$ , \item $a_{i(q)}=a_{{i-1}(q)} + t_{Z_{i(q)}}^+(q_i)=(a_{{j+i-1}(p)}-t)+t_{Z_{{j+i}(p)}}^+(p_{j+i})=a_{{j+i}(p)}-t $ and so \item \mbox{\boldmath $e$}gin{align*} \phi_Z(\phi_Z(p,t),s) &=\phi_Z(q,s)=\phi_{Z_{{i+1}(q)}}(q_{i+1},s-a_{i(q)}) \\ &= \varphi_{Z_{{j+i+1}(p)}}(p_{j+i+1},s-(a_{{j+i}(p)}-t)) \\ &= \varphi_{Z_{{j+i+1}(p)}}(p_{j+i+1},(t+s)-a_{{j+i}(p)}) \\ &= \phi_{Z_{{j+i+1}(p)}}(p,t+s) \\ &= \phi_Z(p,t+s). \end{align*} \end{enumerate} Therefore $(M,\phi_Z)$ with $\Sigma=\Sigma^{c+}\cup \Sigma^{c-}\cup\Sigma^s\cup S_Z $ with $p \in{S_Z}$ is in some of the cases $\mathrm{A1}$ to $\mathrm{B2}$ is a semi-dynamical system. \end{proof} As an outcome of Theorem \ref{semiflow1}, we obtain two significant corollaries. \mbox{\boldmath $e$}gin{cor} Assume $M$ is closed 2-dimensional $C^1$ manifold and $Z\in{\mathfrak{X}(M,h)}$. 
If $\Sigma=\Sigma^{c}\cup\Sigma^{s}\cup S_Z$ and every $p \in{S_Z}$ is in some of the cases $\mathrm{A1}$, $\mathrm{A2}$ or $\mathrm{B1}$, then the trajectories of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ generate a semi-dynamical system $(M,\phi_{Z})$. \end{cor} \begin{figure}[H] \includegraphics[scale=1.4]{figure13} \caption{Example points of cases $\mathrm{A}$ and $\mathrm{B}$ in dimension 2.} \end{figure} \begin{cor} Assume that $M$ is a closed 3-dimensional $C^1$ manifold and $Z\in{\mathfrak{X}(M,h)}$. If $\Sigma=\Sigma^{c}\cup \mathrm{B1}$ then the trajectories of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ generate a dynamical system $(M,\varphi_{Z})$. \end{cor} \begin{proof} It suffices to note that, in the proof of Theorem \ref{semiflow1}, if $\Sigma^s$ does not exist then we can work with $\varphi_X$ and $\varphi_Y$ instead of $\phi_X$ and $\phi_Y$ since, in this case, it is possible to extend Definitions \ref{tx} and \ref{omegax} to $\mathbb{R}^-$. For $X \in{\mathfrak{X}(M)}$, let $\Lambda_X^{-}=\{p\in{\Sigma^+}; \ \varphi_X(p,(-\infty,0]) \nsubseteq \Sigma^+\}$ and $t_X^-:\Sigma^+\longrightarrow \mathbb{R}^-\cup\{-\infty\}$ such that \begin{equation*} t_X^-(p) = \left\{ \begin{array}{lll} \ -\infty && \text{if } p\notin{\Lambda_X^-},\\ \begin{array}{r@{}} \sup \{t<0; \ \varphi_X(p,t)\in{S_X^{ic} \cup \Sigma_X^-} \} \end{array} && \text{if } \varphi_X(p,[t,0])\subset{\Sigma^+} \text{ for } t\in{I\cap\mathbb{R}^-} \text{ with } t\neq 0,\\ \ 0 && \text{otherwise}. \end{array}\right. \end{equation*} Analogously for $Y\in{\mathfrak{X}(M)}$. 
If $p\in{M}$ then denoted $p_0=p$ and $p_i= \varphi_{Z_{({i-1})(p)}^-}(p_{i-1},t_{Z_{{i-1}(p)}}^-(p_{i-1}))$ for $i\in{\mathbb{Z}^-}$, where $Z_{0(p)}=X$ if $p\in{\mathrm{int}(\Sigma^+)}$, $Z_{0(p)}=Y$ if $p\in{\mathrm{int}(\Sigma^-)}$ and \mbox{\boldmath $e$}gin{equation*} Z_{i(p)} = \left\{ \mbox{\boldmath $e$}gin{array}{rcl} X && \noindentext{if } p_{i}\in\Sigma^{c+} \noindentext{or } p_{i}\in{ \mathrm{B1}\cap S_X^v} ,\\ Y && \noindentext{if } p_i\in{\Sigma^{c-}} \noindentext{or } p_{i}\in{ \mathrm{B1}\cap S_Y^v}. \end{array}\right. \end{equation*} Let $\Delta_X^-(p)=\{i\in{\mathbb{Z}^-}; \ Z_{i(p)}=X\}$ and $\Omega_X=\{(p,t)\in{M\noindentimes \mathbb{R}}; \ t\in{I_X(p)}\}$ whit $$I_X(p)=\bigcup_{i\in{\Delta_X^+ \cup \Delta_X^-}(p)} I_X^i(p)$$ where $I_X^0(p) = \left[ a_{0(p)}^-, a_{0(p)} \right] $, $I_X^i(p) = \left[ a_{{i-1}(p)}^-,a_{i(p)}^- \right] $ for $i<0$ such that $a_{i(p)}^-=\displaystyleplaystyle\sum_{j=0}^{i} t_{Z_{j(p)}}^-(p_j)$. Analogously, we defined $\Omega_Y$ for the vector fields $Y$. \end{proof} Finally, the following theorem provides very general conditions under which an isolated invariant set must contain a periodic orbit in a $\mathsf{PSVF}$. \mbox{\boldmath $e$}gin{thm}\label{teo3.21} Assume $M$ is closed 3-dimensional $C^1$ manifold and $Z\in{\mathfrak{X}(M,h)}$. If $\Sigma=\Sigma^{c}\cup\Sigma^{s}\cup S_Z$ with $p \in{S_Z}$ is in some of the cases $\mathrm{A1}$ to $\mathrm{B2}$ and $\phi_Z:M\noindentimes [0,\infty)\rightarrow M$ is the semiflow generate by the trajectories of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$. 
If $N$ is an isolating neighborhood for $\varphi$ which admits a Poincar\'{e} section $\varXi$ and either \begin{equation} \dim CH^{2n}(N,\varphi)=\dim CH^{2n+1}(N,\varphi) \qquad \text{for } n\in{\mathbb{Z}^+} \end{equation} or \begin{equation} \dim CH^{2n}(N,\varphi)=\dim CH^{2n-1}(N,\varphi) \qquad \text{for } n\in{\mathbb{Z}^+}, \end{equation} where not all the above dimensions are zero, then $\varphi$ has a periodic trajectory in $N$. \end{thm} \begin{cor}\label{cor3.22} Under the hypotheses of Theorem \ref{teo3.21}, if $N$ has the Conley index of a hyperbolic periodic orbit, then $\mathrm{inv}(N)$ contains a periodic orbit. \end{cor} The following theorem is an $n$-dimensional version of Theorem \ref{teo3.21}, where we assume that the system has only crossing and sliding regions, without tangency points. \begin{thm}\label{teo3.23} Assume that $M$ is a closed $n$-dimensional $C^1$ manifold and $Z\in{\mathfrak{X}(M,h)}$. Suppose that $\Sigma=\Sigma^{c}\cup\Sigma^{s}$ and that $\phi_Z:M\times [0,\infty)\rightarrow M$ is the semiflow generated by the trajectories of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$. If $N$ is an isolating neighborhood for $\varphi$ which admits a Poincar\'{e} section $\varXi$ and either \begin{equation} \dim CH^{2n}(N,\varphi)=\dim CH^{2n+1}(N,\varphi) \qquad \text{for } n\in{\mathbb{Z}^+} \end{equation} or \begin{equation} \dim CH^{2n}(N,\varphi)=\dim CH^{2n-1}(N,\varphi) \qquad \text{for } n\in{\mathbb{Z}^+}, \end{equation} where not all the above dimensions are zero, then $\varphi$ has a periodic trajectory in $N$. \end{thm} \section{Some applications} \subsection{Regularization} In this subsection, we provide an immediate consequence of the fact that the Conley index is robust under perturbation; we use Proposition \ref{prop2.11} and the concept of regularization of discontinuous vector fields, introduced by J. Sotomayor and Marco A. Teixeira \cite{SotomayorTeixeira1}. 
By a transition function we mean a $C^{\infty}$ function $\varphi:\mathbb{R}\longrightarrow\mathbb{R}$ such that: $\varphi(t)=0$ if $t\leq -1$, $\varphi(t)=1$ if $t\geq 1$ and $\varphi^{\prime}>0$ if $t\in(-1,1)$. \begin{definition} A $\varphi_{\epsilon}$-regularization of $Z=(X,Y)$ is the one-parameter family of vector fields $Z_{\epsilon}$ in $\mathfrak{X}(M)$ given by $$Z_{\epsilon}(q)=\left(1- \varphi_{\epsilon}(h(q))\right) Y(q) + \varphi_{\epsilon}(h(q))X(q),$$ where $\varphi_{\epsilon}(t)=\varphi(\frac{t}{\epsilon})$. \end{definition} A fundamental characteristic of the Conley index is its homotopy invariance, and this yields the robustness of Theorem \ref{teo3.21}. Using the homotopy invariance of the Conley index and Proposition \ref{prop2.11} we have the following proposition. \begin{prop} Let $\gamma$ be a periodic orbit in an isolating neighborhood $N$ of the semiflow generated by the trajectories of $Z=(X,Y)$. Then there exists $\epsilon_0>0$ such that, for every $\epsilon\leq\epsilon_0$, $Z_{\epsilon}$ has a periodic orbit in $N$. \end{prop} \subsection{Closed poly-trajectories} Piecewise-smooth vector fields defined on the plane present a type of solutions called closed poly-trajectories, which are a particular case of the periodic orbits defined in \cite{Sotomayor1}. This subsection provides some necessary conditions to guarantee the existence of closed poly-trajectory solutions in a disk of $M$ when the $\mathsf{PSVF}$ has sliding motion. \begin{definition} Consider $M$ a closed 2-dimensional $C^1$ manifold and $Z=(X,Y)\in{\mathfrak{X}(M,h)}$. 
\begin{enumerate} \item A curve $\Gamma$ is a closed poly-trajectory if $\Gamma$ is closed and \begin{itemize} \item $\Gamma$ contains regular arcs of at least two of the vector fields $X|\Sigma^+$, $Y|\Sigma^-$, $Z^e$ and $Z^s$ or is composed of a single regular arc of either $Z^s$ or $Z^e$; \item the transition between arcs of $X$ and arcs of $Y$ happens at sewing points (and vice versa); \item the transition between arcs of $X$ (or $Y$) and arcs of $Z^s$ or $Z^e$ happens through fold points or regular points in the escape or sliding arc, respecting the orientation. Moreover, if $\Gamma\neq \Sigma$ then there exists at least one visible fold point on each connected component of $\Gamma\cap\Sigma$. \end{itemize} \item Let $\Gamma$ be a canard cycle of $Z$. We say that $\Gamma$ \begin{itemize} \item is a closed poly-trajectory of kind I if $\Gamma$ meets $\Sigma$ just in sewing points; \item is a closed poly-trajectory of kind II if $\Gamma=\Sigma$; \item is a closed poly-trajectory of kind III if $\Gamma$ contains at least one visible fold point of $Z$. \end{itemize} Poly-trajectories of kind I, II and III appear in Figure \ref{figure14}. \item Let $\Gamma$ be a closed poly-trajectory. We say that $\Gamma$ is hyperbolic if \begin{itemize} \item it is of kind I and $\eta'(p)\neq 1$, where $\eta$ is the first return map defined on a segment $T$ with $p\in{T\cap\Gamma}$; \item it is of kind II; \item it is of kind III and either $\Gamma\cap\Sigma\subseteq\Sigma^c\cup\Sigma^s$ or $\Gamma\cap\Sigma\subseteq\Sigma^c\cup\Sigma^e$. 
\end{itemize} \end{enumerate} \end{definition} \begin{figure}[H] \includegraphics[scale=0.9]{figure14} \caption{Closed poly-trajectories of kind I (left), II (center) and III (right).} \label{figure14} \end{figure} Let $(pq)_{Z^*}$ be an arc of $Z^*$, where $Z^*=X,Y$, joining the visible fold point $p$ to the point $q\in{\Sigma_{Z^*}}$. We say that $(pq)_{Z^*}$ has \textit{focal kind} if there are no fold points between $p$ and $q$ (see Figure \ref{figure15}) and we say that $(pq)_{Z^*}$ has \textit{graphic kind} if it has only one fold point between $p$ and $q$ (see Figure \ref{figure16}). \begin{minipage}{\linewidth} \centering \begin{minipage}{0.35\linewidth} \begin{figure}[H] \includegraphics[width=\linewidth]{figure15} \caption{Focal kind arc.} \label{figure15} \end{figure} \end{minipage} \hspace{0.05\linewidth} \begin{minipage}{0.35\linewidth} \begin{figure}[H] \includegraphics[width=\linewidth]{figure16} \caption{Graphic kind arc.} \label{figure16} \end{figure} \end{minipage} \end{minipage} So, we can use our main result to show the subsequent proposition. \begin{prop}\label{prop4.5} Let $M$ be a closed 2-dimensional $C^1$ manifold and $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ such that $\Sigma=\Sigma^{c}\cup\Sigma^{s}\cup S_Z$, where for all $p \in{S_Z}$, $p$ is in one of the cases $\mathrm{A1}$, $\mathrm{A2}$ or $\mathrm{B1}$; moreover, assume that $X$ is a linear vector field and $Yh(x)>0$ for all $x\in \mathcal{U}\cap\Sigma$. 
Let $\mathcal{U}$ be a disk of $M$ that belongs to the same chart of $M$ and such that: \begin{itemize} \item [(1)] $\textrm{int}(\mathcal{U})\cap \Sigma\neq\emptyset$, \item [(2)] $\mathcal{U}\cap\textrm{int}(\Sigma^+)$ contains only one equilibrium point $\tilde{x}$, which is an unstable focus, and the intersection of the unstable manifold of $\tilde{x}$ with $\Sigma$ is an arc in $\Sigma$, and \item [(3)] there are no pseudo-equilibrium points in $\mathcal{U}\cap\Sigma$. \end{itemize} Then, in $\mathcal{U}$ there exists a hyperbolic poly-trajectory of kind $III$. \end{prop} \begin{proof} A closed poly-trajectory of $Z=(X,Y)\in{\mathfrak{X}(M,h)}$ is a periodic orbit for the semiflow $\phi_Z$; thus, we use Theorem \ref{teo3.21}.\newline Let $\tilde{x}$ be the unstable focus in $\mathcal{U}\cap\textrm{int}(\Sigma^+)$ and $p$ the visible fold point for $X$ such that $\gamma_0(p)=p\cdot[0,t_X^+(p)]$ is the focal kind arc. Let $\tilde{p}$ and $\tilde{q}$ be the intersections of $\partial \mathcal{U}$ and $\Sigma$ such that $Xh(x)>0$ for all $x\in{(\tilde{p}p)_{\Sigma}\setminus{\{p\}}}$, where $(\tilde{p}p)_{\Sigma}$ is the arc in $\Sigma$ from the point $\tilde{p}$ to $p$, and $Xh(x)<0$ for all $x\in{(p\tilde{q})_{\Sigma}\setminus{\{p\}}}$. Take $q\in{\textrm{int}(\tilde{p}p)_{\Sigma}}$ such that $q_1\in{\textrm{int}(p\tilde{q})_{\Sigma}}$ and $\gamma_0(q)\subset{\mathcal{U}}$. Consider $\mu <0$ such that $\Sigma_{\mu}=h^{-1}(\mu)$ is parallel to $\Sigma$ and $\Sigma_{\mu}\cap\textrm{int}(\mathcal{U})\neq \emptyset$, and consider the points $v, \tilde{v}\in{\Sigma_{\mu}}$ satisfying $q=\tilde{v}_1$ and $q_1=v_1$. Let $\tilde{N}$ be the region bounded by the curve $(v\tilde{v})_{\Sigma_{\mu}}\cup\gamma_0(\tilde{v})\cup\gamma_0(q) \cup \gamma_0(v)$, see Figure \ref{figure17}. 
\begin{figure}[H] \includegraphics[scale=0.9]{figure17} \caption{Construction of the isolating neighborhood.} \label{figure17} \end{figure} Now, let $\Xi$ be a local section for $\varphi_X$ crossing the stable manifold of $\tilde{x}$ but such that $\Xi\cap\Sigma=\emptyset$, as in Figure \ref{figure18}. If $a\pm bi$ are the eigenvalues of $X$ associated with $\tilde{x}$, then let $r\in{\Xi}$ be such that $\theta(r)<90^\circ$ when $b<0$ or $90^\circ<\theta(r)<180^\circ$ when $b>0$, where $\theta(r)$ is the angle that the vector $X(r)$ makes with $\Sigma$. As $\tilde{x}$ is an unstable focus, there exists $\tilde{r}\in{\Xi}$; let $\tilde{\gamma}_{r}$ be the arc from $r$ to $\tilde{r}$ given by the flow $\varphi_X$, so that $\tilde{\gamma}_{r}\cup (r\tilde{r})_{\Xi}$ is homeomorphic to $S^1$. Thus, let $N=\tilde{N}\setminus \left(\tilde{\gamma}_{r}\cup (r\tilde{r})_{\Xi}\right) $. \begin{figure}[H] \includegraphics[scale=1.2]{figure18} \caption{Poincaré Section.} \label{figure18} \end{figure} Note that $\mathrm{inv}(N)=\gamma_0(p)\cup (pp_1)_{\Sigma}$ and so $\mathrm{inv}(N)\subset \mathrm{int}(N)$. The exit set $L$ is empty; in fact, if $x\in N\cap \Sigma^+$ then by hypothesis (3) either $x\in{\Sigma^s}$ or there exists $t_X^+(x)\in{(0,\infty)}$ such that $x \cdot t_X^+(x)\in{\Sigma^s}$, and by hypothesis (5), there is $s_x\geq 0$ such that $x \cdot (t_X^+(x) +s_x)\in{\mathrm{inv}(N)}$; analogously if $x\in N\cap \Sigma^-$. In Figure \ref{figure18} one can see that the homotopy type of $N$ is the same as that of a stable periodic orbit; thus \begin{equation*} CH^k(N) \approx \left \{ \begin{matrix} \mathbb{Z} & \mbox{ }k=0, 1, \\ 0 & \mbox{ otherwise}. \end{matrix}\right. 
\end{equation*} \begin{figure}[H] \includegraphics[scale=0.6]{figure19} \caption{The homotopy type of $N/L$.} \label{figure19} \end{figure} \textit{Poincaré Section.} Let $\Xi$ be a local section for $\varphi_X$ crossing the stable manifold of $\tilde{x}$ as in Figure \ref{figure19}. We claim that $\Xi$ is the required Poincar\'{e} section for $N$. It is closed, and it is transverse to the semiflow. Finally, we must show that the forward orbit of every point in $N$ intersects $\Xi$. If $x\in{N\cap\Sigma^+}$ then, by hypothesis (3), $x\in{\Lambda_X^{+}}$ and there is $t_X^+(x)>0$ such that $x_1= x\cdot t_X^+(x)\in{\Sigma\cap N}$; moreover, by hypothesis (a), $x_1\in{\Sigma^s}$. By hypothesis (5), $x_1\in{\Lambda_{Z^+}^{+}}$, so there is $t_{Z^s}^+(x_1)>0$ such that $x_2= x_1\cdot t_{Z^s}^+(x_1)=p$ and the forward orbit of $p$ intersects $\Xi$. If $x\in{N\cap\Sigma^-}$ then, by hypothesis (4) and by the construction of $N$, there is $t_Y^+(x)>0$ such that $x_1= x\cdot t_Y^+(x)\in{\Sigma\subset\Sigma^+}$ and we continue as before. Thus $\Xi$ is a Poincar\'{e} section for the semiflow $\phi_Z$ in $N$. \end{proof} Using the ideas of Proposition \ref{prop4.5}, we can show that in a disk $\mathcal{U}$ with dynamics as in Figure \ref{figure20}, there exists a hyperbolic poly-trajectory of kind $III$. \begin{figure}[H] \includegraphics[scale=0.85]{figure20} \caption{In both cases, there exists a hyperbolic poly-trajectory of kind $III$ contained in $\mathcal{U}$.} \label{figure20} \end{figure} \section{Closing remarks and future directions} The results in this paper bring us one step closer to generalizing the tool of C. McCord, K. Mischaikow, and M. Mrozek in \cite{Mccord} to piecewise smooth vector fields using the Filippov convention. The next step is to include tangential singularities with orders greater than $3$ in systems defined in a manifold of dimension $n>3$. 
Forthcoming works include applying the results to guarantee the existence of periodic orbits in a piecewise smooth system that models an intermittent treatment of the human immunodeficiency virus, see \cite{de2020global}. Another application is to find periodic orbits in Hill models in biology using $\mathsf{PSVF}$ to define switch systems as in Dynamic Signatures Generated by Regulatory Networks ($\mathsf{DSGRN}$), see \cite{cummins2016combinatorial}. \section*{Acknowledgment} The first author is supported by CAPES (the Coordena{\c c}{\~a}o de Aperfei{\c c}oamento de Pessoal de N{\'i}vel Superior-Brasil) and affiliated with IME-UFG (Instituto de Matem\'atica e Estat\'istica, Universidade Federal de Goi\'as). The second author is affiliated with DIMACS (the Center for Discrete Mathematics and Theoretical Computer Science), Rutgers University, and IME-UFG (Instituto de Matem\'atica e Estat\'istica, Universidade Federal de Goi\'as) and would like to acknowledge the support of the National Science Foundation under grant HDR TRIPODS 1934924. \nocite{*} \end{document}
\begin{document} \title[A new cyclic sieving phenomenon for Catalan objects]{A new cyclic sieving phenomenon for Catalan objects} \author{Marko Thiel} \address{Department of Mathematics, University of Zürich, Winterthurerstrasse 190, 8050 Zürich, Switzerland} \begin{abstract} Based on computational experiments, Jim Propp and Vic Reiner suspected that there might exist a sequence of combinatorial objects $X_n$, each carrying a natural action of the cyclic group $C_{n-1}$ of order $n-1$ such that the triple $\left(X_n,C_{n-1},\frac{1}{[n+1]_q}{2n \brack n}_q\right)$ exhibits the cyclic sieving phenomenon. We prove their suspicion right. \end{abstract} \maketitle \section{Introduction} \subsection{The Cyclic Sieving Phenomenon} Reiner, Stanton and White have observed that the following situation often occurs: one has a combinatorial object $X$, a cyclic group $C$ that acts on $X$ and a ``nice'' polynomial $X(q)$ whose evaluations at $|C|$-th roots of unity encode the cardinalities of the fixed point sets of the elements of $C$ acting on $X$. They termed this the cyclic sieving phenomenon. \begin{definition}[\protect{\cite{reiner04cyclic}}] Let $X$ be a finite set carrying an action of a cyclic group $C$ and let $X(q)$ be a polynomial in $q$ with nonnegative integer coefficients. Fix an isomorphism $\omega$ from $C$ to the set of $|C|$-th roots of unity, that is an embedding $\omega:C\hookrightarrow\mathbb{C}^*$. We say that the triple $(X,C,X(q))$ exhibits the \defn{cyclic sieving phenomenon} (CSP) if \[|\{x\in X:c(x)=x\}|=X(q)_{q=\omega(c)}\text{ for every }c\in C.\] \end{definition} \noindent In particular, if $(X,C,X(q))$ exhibits the CSP then $|X|=X(1)$. So $X(q)$ is a \defn{$q$-analogue} of $|X|$. 
\subsection{Catalan numbers} One of the most famous number sequences in combinatorics is the sequence $1,1,2,5,14,42,132,\ldots$ of \defn{Catalan numbers} given by the formula \[C_n:=\frac{1}{n+1}\binom{2n}{n}.\] A vast variety of combinatorial objects are counted by the Catalan number $C_n$, for example the set of triangulations of a convex $(n+2)$-gon and the set of noncrossing matchings of $\{1,2,\ldots,2n\}$. The (MacMahon) \defn{$q$-Catalan number} $C_n(q)$ is the natural $q$-analogue of $C_n$, defined as \[C_n(q):=\frac{1}{[n+1]_q} {2n \brack n}_q,\] where $[n]_q:=1+q+q^2+\ldots+q^{n-1}$, $[n]_q!:=[1]_q[2]_q\cdots[n]_q$ and ${n \brack k}_q:=\frac{[n]_q!}{[n-k]_q![k]_q!}$. It is a polynomial in $q$ with nonnegative integer coefficients.\\ \\ The $q$-Catalan number has the distinction of occurring in two entirely different CSPs for Catalan objects: \begin{theorem}[\protect{\cite[Theorem 7.1]{reiner04cyclic}}] Let $\Delta_n$ be the set of triangulations of a convex $(n+2)$-gon and let $C_{\Delta_n}$ be the cyclic group of order $n+2$ acting on $\Delta_n$ by rotation. Then $(\Delta_n,C_{\Delta_n},C_n(q))$ exhibits the cyclic sieving phenomenon. \end{theorem} \begin{theorem}[\protect{\cite[Theorems 1.4 and 1.5]{petersen09promotion}}]\label{matching} Let $M_n$ be the set of noncrossing matchings of $[2n]:=\{1,2,\ldots,2n\}$ and let $C_{M_n}$ be the cyclic group of order $2n$ acting on $M_n$ by rotation. Then $(M_n,C_{M_n},C_n(q))$ exhibits the cyclic sieving phenomenon. \end{theorem} \noindent Computational experiments by Jim Propp and Vic Reiner suggested that substituting an $(n-1)$-th root of unity into $C_n(q)$ always yields a positive integer. So they suspected that there might also be a cyclic sieving phenomenon involving $C_n(q)$ and a cyclic group of order $n-1$. The main result of this note proves that their suspicion is correct. 
\begin{theorem}\label{main} For any $n>0$, there exists an explicit set $X_n$ that carries an action of the cyclic group $C_{X_n}$ of order $n-1$ such that the triple $(X_n,C_{X_n},C_n(q))$ exhibits the cyclic sieving phenomenon. \end{theorem} \section{Proof of Theorem \ref{main}} The first order of business is to define the set $X_n$. Call a subset of $[m]$ a \defn{ball} if it has cardinality $1$ and an \defn{arc} if it has cardinality $2$. Define a \defn{$(1,2)$-configuration} on $[m]$ as a set of pairwise disjoint balls and arcs. Say that a $(1,2)$-configuration $F$ has a \defn{crossing} if it contains arcs $\{i_1,i_2\}$ and $\{j_1,j_2\}$ with $i_1<j_1<i_2<j_2$. If $F$ has no crossing it is called \defn{noncrossing}. \begin{figure} \caption{The noncrossing $(1,2)$-configuration $F=\{ \{1,3\} \end{figure}\\ For $n>0$, define $X_n$ to be the set of noncrossing $(1,2)$-configurations of $[n-1]$. This is a corrected variant of $(\mathrm{e}^8)$ in Stanley's Catalan addendum \cite{stanleyaddendum}. \begin{theorem}\label{cn} $|X_n|=C_n$ for all $n>0$. \begin{proof} To choose a noncrossing $(1,2)$-configuration $F$ of $[n-1]$, first pick the number $a$ of arcs in it. Then pick the subset $A$ of $[n-1]$ to be covered by arcs in one of $\binom{n-1}{2a}$ ways. Then choose a noncrossing matching of $A$ in one of $C_a=\frac{1}{a+1}\binom{2a}{a}$ ways. Finally choose the set of balls in $F$ from $[n-1]\backslash A$ in one of $2^{n-1-2a}$ ways. Thus \[|X_n|=\sum_{a\geq0}\binom{n-1}{2a}\frac{1}{a+1}\binom{2a}{a}2^{n-1-2a}=\frac{1}{n+1}\binom{2n}{n}.\] The last equality can be proven in many ways, for example using ``snake oil'' \cite{wilf06generating}. \end{proof} \end{theorem} Define $C_{X_n}$ as the cyclic group of order $n-1$ acting on $[n-1]$ by cyclically permuting its elements. The corresponding action of $C_{X_n}$ on the set of $(1,2)$-configurations on $[n-1]$ preserves crossings, so it restricts to an action on $X_n$. 
\begin{proof}[Proof of Theorem \ref{main}] We proceed by direct computation. Let \begin{align*} g:[n-1]&\rightarrow[n-1]\\ i&\mapsto i+1\text{ if }i\neq n-1\\ n-1&\mapsto 1 \end{align*} be a generator of $C_{X_n}$ and let $\omega:g^k\mapsto e^{\frac{2\pi ik}{n-1}}$ be an embedding $C_{X_n}\hookrightarrow\mathbb{C}^*$. In order to show that \begin{equation}\label{CSP} |\{x\in X_n:g^k(x)=x\}|=C_n(q)_{q=e^{\frac{2\pi ik}{n-1}}}\text{ for every }k \end{equation} we simply compute both sides. Without loss of generality, we may assume that $k$ divides $n-1$, say $dk=n-1$.\\ \\ First we compute the right-hand side of (\ref{CSP}). If $d=1$, it equals $C_n(1)=C_n$. If $d=2$, it equals $\binom{n}{\frac{n-1}{2}}$ using $C_n(q)=\frac{1}{[n]_q}{2n \brack n+1}_q$ and \cite[Proposition 4.2 (iii)]{reiner04cyclic}. If $d\neq 1,2$ it equals $\binom{2k}{k}$ using $C_n(q)=\frac{[2n]_q}{[n]_q[n+1]_q}{2n-1 \brack n}_q$ and \cite[Proposition 4.2 (iii)]{reiner04cyclic}.\\ \\ Next we compute the left-hand side of (\ref{CSP}). To choose a noncrossing $(1,2)$-configuration $F$ of $[n-1]$ that is fixed by $g^k$, first pick the number $a$ of points in $[k]$ that are covered by arcs of $F$. Then pick the subset of $[k]$ covered by arcs of $F$ in one of $\binom{k}{a}$ ways. The $g^k$-invariance of $F$ then determines the entire subset $A$ of $[n-1]$ covered by arcs of $F$. In particular $|A|=da$. Next choose a $g^k$-invariant noncrossing matching of $A$. These are in natural bijection with the $c^{a}$-invariant noncrossing matchings of $[da]$ (where $c$ is the generator of the natural cyclic action on $[da]$). So using Theorem \ref{matching} their number is $C_{\frac{da}{2}}(q)_{q=e^{\frac{2\pi ia}{da}}}$ (taken to be $0$ if $da$ is odd). Finally, choose the balls of $F$ in $[k]$ in one of $2^{k-a}$ ways. By $g^k$-invariance these determine all the balls of $F$. 
Putting it all together we have \begin{equation} |\{x\in X_n:g^k(x)=x\}|=\sum_{a\geq0}\binom{k}{a}C_{\frac{da}{2}}(q)_{q=e^{\frac{2\pi ia}{da}}}2^{k-a} \end{equation} If $d=1$, then \[|\{x\in X_n:g^k(x)=x\}|=\sum_{a\geq0}\binom{n-1}{2a}\frac{1}{a+1}\binom{2a}{a}2^{n-1-2a}=\frac{1}{n+1}\binom{2n}{n}\] as in Theorem \ref{cn}.\\ \\ Now consider the case $d>1$. If $2\mid a$, then \[C_{\frac{da}{2}}(q)_{q=e^{\frac{2\pi ia}{da}}}=\binom{a}{\frac{a}{2}}\] using \cite[Proposition 4.2 (ii)]{reiner04cyclic}. If $2\nmid a$, then using ${2n\brack n}_q-{2n\brack n+1}_q=q^nC_n(q)$ and \cite[Proposition 4.2 (ii)]{reiner04cyclic} gives \[C_{\frac{da}{2}}(q)_{q=e^{\frac{2\pi ia}{da}}}=\binom{a}{\frac{a-1}{2}}\text{ if }d=2,\] and \[C_{\frac{da}{2}}(q)_{q=e^{\frac{2\pi ia}{da}}}=0\text{ if }d>2.\] So we calculate that for $d=2$ we have \begin{align*} &|\{x\in X_n:g^k(x)=x\}|\\ &=\sum_{a\geq0}\binom{\frac{n-1}{2}}{2a}\binom{2a}{a}2^{\frac{n-1}{2}-2a} +\sum_{a\geq0}\binom{\frac{n-1}{2}}{2a+1}\binom{2a+1}{a}2^{\frac{n-1}{2}-2a-1}\\ &=\binom{n}{\frac{n-1}{2}}. \end{align*} For $d>2$ we have \[|\{x\in X_n:g^k(x)=x\}|=\sum_{a\geq0}\binom{k}{2a}\binom{2a}{a}2^{k-2a}=\binom{2k}{k}\] as required. \end{proof} \end{document}
\begin{document} \newtheorem{theorem}{\hspace{\parindent} T{\scriptsize HEOREM}}[section] \newtheorem{proposition}[theorem] {\hspace{\parindent }P{\scriptsize ROPOSITION}} \newtheorem{corollary}[theorem] {\hspace{\parindent }C{\scriptsize OROLLARY}} \newtheorem{lemma}[theorem] {\hspace{\parindent }L{\scriptsize EMMA}} \newtheorem{definition}[theorem] {\hspace{\parindent }D{\scriptsize EFINITION}} \newtheorem{problem}[theorem] {\hspace{\parindent }P{\scriptsize ROBLEM}} \newtheorem{conjecture}[theorem] {\hspace{\parindent }C{\scriptsize ONJECTURE}} \newtheorem{example}[theorem] {\hspace{\parindent }E{\scriptsize XAMPLE}} \newtheorem{remark}[theorem] {\hspace{\parindent }R{\scriptsize EMARK}} \renewcommand{\thetheorem}{\arabic{section}.\arabic{theorem}} \renewcommand{\labelenumi}{(\roman{enumi})} \renewcommand{\theenumi}{(\roman{enumi})} \title{Apollonius ``circle'' in Hyperbolic Geometry} \begin{abstract} In Euclidean geometry the circle of Apollonius is the locus of points in the plane from which two collinear adjacent segments are perceived as having the same length. In Hyperbolic geometry, the analog of this locus is an algebraic curve of degree four which can be bounded or ``unbounded''. We study this locus and give a simple description of this curve using the half-plane model. In the end, we give the motivation for our investigation and calculate the probability that three collinear adjacent segments can be seen as of the same positive length under some natural assumptions about the setting of the randomness considered. 
\end{abstract} \noindent \section{Introduction} In most of the textbooks the Circle of Apollonius is discussed in conjunction with the Angle Bisector Theorem: \emph{``The angle bisector in a triangle divides the opposite sides into a ratio equal to the ratio of the adjacent sides."} Once one realizes that the statement can be equally applied to the exterior angle bisector, then the Circle of Apollonius appears naturally (Figure~\ref{locus1}), since the two angle bisectors are perpendicular. \begin{figure} \caption{The Circle $x^2+y^2=4$ } \label{locus1} \end{figure} \noindent For instance, an easy exercise in algebra shows that the circle of equation $x^2+y^2=4$ is equivalent to $$\frac{\sqrt{x^2 +(y-4)^2}}{\sqrt{x^2 +(y-1)^2}}=\frac{BA}{BC}=\frac{DA}{DC}=2,$$ \noindent taking $A(0,4)$, $B(0,2)$, $C(0,1)$ and $D(0,-2)$. Similar calculations can be employed to treat the general situation, i.e., taking $A(0,a)$, $B(0,b)$ and $C(0,c)$ with real positive numbers $a$, $b$ and $c$ such that $a>b>c$. Then we can state the well-known result: \begin{theorem}[Apollonius]\label{apollonius} Given points $A$, $B$ and $C$ as above, the set of points $P(x,y)$ in the plane characterized by the equality $\angle APB\equiv \angle BPC$ is \par (i) the line of equation $y=(a+c)/2$ if $b=(a+c)/2$; \par (ii) the circle of equation $x^2+y^2=b^2$, if $b<(a+c)/2$ and $b^2=ac$. \end{theorem} \noindent Let us observe that the statement of this theorem does not reduce the generality since the coordinate of point $D$ can be shown to be $y_D=(2ac-bc-ab)/(a+c-2b)$ and one can take the origin of coordinates to be the midpoint of $\overline{BD}$. This turns out to happen precisely when $b^2=ac$. What is the equivalent of this result in Hyperbolic Geometry? We are going to use the half-plane model, $\mathbb H$, to formulate our answer to this question (see Anderson \cite{anderson} for the terminology and notation used). 
Without loss of generality we may assume as before that the three points are on the $y$-axis: $A(0,a)$, $B(0,b)$ and $C(0,c)$, with real positive numbers $a$, $b$ and $c$ such that $a>b>c$. \begin{theorem}\label{theorem1} Given points $A$, $B$ and $C$ as above, the set of points $P(x,y)$ in the half-plane $\mathbb H$, characterized by the equality $\angle APB\equiv \angle BPC$ in $\mathbb H$ is the curve given in polar coordinates by \begin{equation}\label{eq1} r^4(2b^2-a^2-c^2)=2r^2(a^2c^2-b^4)\cos (2\theta)+b^2(2a^2c^2-a^2b^2-c^2b^2). \end{equation} Moreover, \par (i) if $b=\left(\frac{a^2+c^2}{2}\right)^{\frac{1}{2}}$, this curve is half of the hyperbola of equation $$r^2\cos (2\theta)+b^2=0, \ \ \theta\in (\frac{\pi}{4},\frac{3\pi}{4}),$$\par (ii) if $b=\sqrt{ac}$ this curve is the semi-circle $r=b$, $\theta\in (0,\pi)$,\par (iii) if $b=\left(\frac{a^{-2}+c^{-2}}{2}\right)^{-\frac{1}{2}}$, this curve is half of the lemniscate of equation $$r^2+b^2\cos (2\theta)=0, \ \ \theta\in (\frac{\pi}{4},\frac{3\pi}{4}).$$\par \end{theorem} It is interesting that all these particular cases in Theorem~\ref{theorem1}, can be accomplished using integer values of $a$, $b$ and $c$. This is not surprising for the Diophantine equation $b^2=ac$ since one can play with the prime decomposition of $a$ and $c$ to get $ac$ a perfect square. For the equation $2b^2=a^2+c^2$ one can take a Pythagorean triple and set $a=|m^2+2mn-n^2|$, $c=|m^2-2mn-n^2|$ and $b=m^2+n^2$ for $m,n\in \mathbb N$. Perhaps it is quite intriguing for some readers that the last Diophantine equation $2a^2c^2=a^2b^2+c^2b^2$ is satisfied by the product of some quadratic forms, namely $$\begin{array}{c} a=(46m^2+24mn+n^2)(74m^2+10mn+n^2), \\ b=(46m^2+24mn+n^2)(94m^2+4mn-n^2), \\ \text{and}\ \ c=(94m^2+4mn-n^2)(74m^2+10mn+n^2), \ \text{for}\ \ m,n\in \mathbb Z. 
\end{array}$$ Another surprising fact is that Theorem~\ref{theorem1} appears as a more general result than Theorem~\ref{apollonius} because of part (ii) and the observation that we can always assume that the axes of coordinates are chosen with the origin to be the center of the Apollonius circle. \begin{figure} \caption{The curve in polar coordinates (a=35, c=5) } \label{locus2} \end{figure} In Figure~\ref{locus2}, we included all of the possible shapes of the locus in Theorem~\ref{theorem1} except for the case $b<\left(\frac{a^{-2}+c^{-2}}{2}\right)^{-\frac{1}{2}}$ which is similar to the case $b>\left(\frac{a^2+c^2}{2}\right)^{\frac{1}{2}}$. We notice a certain symmetry of these cases showing that the hyperbola ($b=\left(\frac{a^2+c^2}{2}\right)^{\frac{1}{2}}$) is nothing else but a lemniscate in hyperbolic geometry. With this identification, it seems like the curves we get, resemble all possible shapes of the intersection of a plane with a torus. In the next section we will prove the above theorem and in the last section we will give the motivation of our work. \section{Proof of Theorem~\ref{theorem1}} \begin{proof} Let us consider a point $P$ of coordinates $(x,y)$ with the given property as in Figure~\ref{locus3} which is not on the line $\overline{AC}$, ($x\not =0$). Then the Hyperbolic lines determined by $P$ and the three points $A$, $B$ and $C$ are circles orthogonal on the $x$-axis. We denote their centers by $A'(a',0)$, $B'(b',0)$ and $C'(c',0)$. \begin{figure} \caption{The point $P$ and the lines determined by it with $A$, $B$ and $C$} \label{locus3} \end{figure} The point $A'$ can be obtained as the intersection of the perpendicular bisector of $\overline{PA}$ and the $x$-axis. Similarly we obtain the other two points $B'$ and $C'$. The equation of the perpendicular bisector of $\overline{PA}$ is $Y-\frac{y+a}{2}=-\frac{x}{y-a}(X-\frac{x}{2})$ and so $a'=\frac{x^2+y^2-a^2}{2x}$. 
Similar expressions are then obtained for $b'$ and $c'$, i.e., $b'=\frac{x^2+y^2-b^2}{2x}$ and $c'=\frac{x^2+y^2-c^2}{2x}$, which shows that the order of the points $A'$, $B'$ and $C'$ is reversed ($a'<b'<c'$). The angle between the Hyperbolic lines $\overset{\leftrightarrow}{PA}$ and $\overset{\leftrightarrow}{PB}$ is defined by the angle between the tangent lines to the two circles at $P$, which is clearly equal to the angle between the radii corresponding to $P$ in each of the two circles. So, $m_{\mathbb H}(\angle APB)=m(\angle A'PB')$ and $m_{\mathbb H}(\angle BPC)=m(\angle B'PC')$. This equality is characterized by the proportionality given by the Angle Bisector Theorem in the triangle $PA'C'$: \[\frac{PA'}{PC'}=\frac{A'B'}{B'C'}\Leftrightarrow \frac{\sqrt{(x^2-y^2+a^2)^2+4x^2y^2}}{\sqrt{(x^2-y^2+c^2)^2+4x^2y^2}}=\frac{a^2-b^2}{b^2-c^2}.\] Using polar coordinates, $x=r\cos \theta$ and $y=r\sin \theta$, we observe that $x^2-y^2=r^2\cos 2\theta$ and $2xy=r^2\sin 2\theta$. Hence the above equality is equivalent to \[(r^4+2a^2r^2\cos 2\theta+a^4)(b^2-c^2)^2=(r^4+2c^2r^2\cos 2\theta+c^4)(a^2-b^2)^2.\] One can check that a factor of $(a^2-c^2)$ can be simplified out and in the end we obtain (\ref{eq1}).\end{proof} \section{Four points ``equally'' spaced and the motivation} Our interest in this locus was motivated by Problem 11915 in this Monthly (\cite{kidwell&Meyerson}). This problem stated: \emph{Given four (distinct) points $A$, $B$, $C$ and $D$ in (this) order on a line in Euclidean space, under what conditions will there be a point $P$ off the line such that the angles $\angle APB$, $\angle BPC$, and $\angle CPD$ have equal measure?} It is not difficult to show, using two Apollonius circles, that the existence of such a point $P$ is characterized by the inequality involving the cross-ratio \begin{equation}\label{eq2} [A,B;C,D]=\frac{\frac{BC}{BA}}{\frac{DC}{DA}}<3. 
\end{equation} We were interested in finding a similar description for the same question in Hyperbolic space. One can think of using the same idea of the locus that replaces the Apollonius circle in Euclidean geometry, and that is why we looked into finding what this locus is. Having the description of this locus, one can see that the intersection of two curves as in Theorem~\ref{theorem1} is difficult to predict. Fortunately, we can use the calculation done in the proof of Theorem~\ref{theorem1} and formulate a possible answer in the new setting. Given four points $A$, $B$, $C$ and $D$ in (this) order on a line in the Hyperbolic space, we can use an isometry to transform them onto the line $x=0$, having coordinates $A(0,a)$, $B(0,b)$, $C(0,c)$ and $D(0,d)$ with $a>b>c>d$. Then the existence of a point $P$ off the line $x=0$, such that the angles $\angle APB$, $\angle BPC$, and $\angle CPD$ have equal measure in the Hyperbolic space, is equivalent to the existence of $P$ in Euclidean space corresponding to the points $A'$, $B'$, $C'$ and $D'$ as constructed in the proof of Theorem~\ref{theorem1}. Therefore the answer is in terms of a similar inequality \begin{equation}\label{eq3} [A',B';C',D']=\frac{\frac{B'C'}{B'A'}}{\frac{D'C'}{D'A'}}<3 \Leftrightarrow \frac{(b^2-c^2)(a^2-d^2)}{(a^2-b^2)(c^2-d^2)}<3. 
\end{equation} To have a different take on what (\ref{eq2}) means, we will translate it into a geometric probability which is not difficult to compute: \emph{if two points are randomly selected (uniform distribution) on the segment $\overline{AD}$, then the probability that a point $P$ off the line $\overline{AD}$ such that the angles $\angle APB$, $\angle BPC$, and $\angle CPD$ have equal measure exists (where the two points are denoted by $B$ and $C$, $B$ being the closest to $A$), is equal to \[P_e=\frac{15-16\ln 2}{9}\approx 0.4345.\]} The inequality (\ref{eq3}) gives us the similar probability in the Hyperbolic space: \[P_h=\frac{2\sqrt{5}\ln(2+\sqrt{5})-5}{5\ln 2}\approx 0.4201514924,\] \noindent where the uniform distribution here means that it is calculated with respect to the measure $\frac{1}{y}dy$ along the $y$-axis. \section{Spherical Geometry and the perfect setting} Both of our problems must have an even more interesting answer in the setting of spherical geometry. Due to the infinite nature of both Euclidean and Hyperbolic spaces, the geometric probability question makes sense only in limiting situations. In this case we can simply ask: \textbf{Problem 1:} \emph{What is the equivalent of the circle of Apollonius in spherical geometry?} \textbf{Problem 2:} \emph{Given a line in spherical geometry and four points on it, chosen at random with uniform distribution, what is the probability that the points look equidistant from a point on the sphere that is not on that line?} We find the problems more fascinating because there is no clear order in this geometry. Given three collinear points, any of them can be thought of as being between the other two. So, we may expect three different curves as a result. If three points $A$, $B$ and $C$ are already equidistant on a line $\ell$, the three lines perpendicular to $\ell$ through these points satisfy the locus requirement and they are concurrent. 
Is there such a point (in fact at least two if one exists) of concurrency, for a general position of three points on a line? Perhaps not, if the three points are close to one another; but there must be quite a variety of situations when it is possible. Also, the probability in question, we anticipate to be somewhat bigger. \end{document}
\begin{document} \setlength{\parindent}{0in} \setlength{\parskip}{2ex} \title{Monotone versions of $\delta$-normality} \author{Chris Good} \email{[email protected]} \author{Lylah Haynes} \email{[email protected]} \address{School of Mathematics, University of Birmingham, Birmingham B15 2TT, UK} \begin{abstract} We continue the study of properties related to monotone countable paracompactness, investigating various monotone versions of \(\delta\)-normality. We factorize monotone normality and stratifiability in terms of these weaker properties. \end{abstract} \maketitle \emph{Key words:} Monotonically normal, monotonically \(\delta\)-normal, coherently \(\delta\)-normal, stratifiable, \(\delta\)-stratifiable \emph{AMS subject classification:} 54E20, 54E30 \section{Introduction} Dowker \cite{d} proves that the product of a space $X$ and the closed unit interval $[0,1]$ is normal iff $X$ is both normal and countably paracompact. Mack \cite{mack2} proves that a space \(X\) is countably paracompact iff \(X \times [0,1]\) is \(\delta\)-normal and that every countably paracompact space is $\dlt$-normal (see below for definitions). In \cite{gks} and its sequels \cite{gy,gk}, the first author \textit{et al.} introduce and study a monotone version of countable paracompactness (MCP) closely related to stratifiability. In \cite{gh}, the current authors consider various other possible monotone versions of countable paracompactness and the notion of m\(\delta\)n (monotone $\delta$-normality) arises naturally in this study. It turns out that MCP and m\(\delta\)n are distinct properties and that, if \(X \times [0,1]\) is m\(\delta\)n, then \(X\) (and hence $X\times[0,1]$) is MCP. 
In this paper we take a closer look at monotone versions of $\dlt$-normality. Our notation and terminology are standard as found in \cite{eng} or \cite{kunen}. All spaces are assumed to be \(T_1\) and regular. \section{Monotone versions of $\dlt$-normality} \begin{defn} Let $X$ be a space. A subset \(D\) of $X$ is said to be a regular $G_\delta$-set iff there exist open sets \(U_n\), $n\in\w$, such that \(D \subseteq U_n\) for each \(n\) and \(D=\bigcap_{n \in \omega} \overline{U}_n\). \end{defn} Clearly, a set \(D\) is a regular $G_\delta$-set iff there exist open sets \(U_n\), \(n \in \omega\), such that \(D = \bigcap_{n \in \omega} {U}_n =\bigcap_{n \in \omega} \overline{U}_n\). \begin{defn} $X$ is said to be \emph{$\dlt$-normal} \cite{mack2} iff any two disjoint closed sets, one of which is a regular $G_\delta$-set, can be separated by open sets. $X$ is said to be \emph{weakly $\dlt$-normal} \cite{sigma} iff any two disjoint regular $G_\delta$-sets can be separated by open sets. \end{defn} We note in passing the following facts about regular $G_\delta$-sets. Finite unions and countable intersections of regular $G_\delta$-sets are again regular $G_\dlt$. If \(X\) is \(T_3\), for every \(x \in X\) and every open neighbourhood \(V\) of \(x\) there exists a regular $G_\delta$-set \(K\) such that \(x \in K \subseteq V\). In any space \(X\), the zero-sets are regular $G_\delta$-sets and so in a normal space \(X\), if \(C\) is a closed set contained in an open set \(U\), then there exists an open set \(W\) such that \(W\) is the complement of a regular $G_\delta$-set and \(C \subseteq W \subseteq \overline{W} \subseteq U\).
If \(E\) is a regular $G_\delta$-set in \(X\), then \(E \times \{\alpha\}\) is a regular $G_\delta$-set in \(X \times M\) for any infinite compact metrizable space \(M\) and \(\alpha \in M\). If \(Y\) is any compact space, since the projection map is both closed and open, then the projection of a regular $G_\delta$-set in \(X \times Y\) is itself a regular $G_\delta$-set in \(X\). On the other hand, a regular $G_\delta$-subset of a regular $G_\delta$-subset of $X$ is not necessarily a regular $G_\delta$-set in $X$: for example, the \(x\)-axis, $A$, is a regular $G_\delta$-subset of the Moore plane and every subset of $A$ is a regular $G_\delta$-subset in $A$. Let us make the following definition. \begin{defn} Let $X$ be a space and $\mathcal{C}$ be a collection of pairs of disjoint closed sets. We shall say that $H$ is a $\mathcal{C}$-mn operator on $X$ iff $H$ assigns to each pair $(C,D)\in\mathcal{C}$ an open set $H(C,D)$ such that \begin{enumerate} \item $C\subseteq H(C,D)\subseteq \overline{H(C,D)}\subseteq X\setminus D$, \item if $C\subseteq C'$ and $D'\subseteq D$, then $H(C,D)\subseteq H(C',D')$. \end{enumerate} \end{defn} \begin{defn} Let $H$ be a $\mathcal{C}$-mn operator on $X$. \begin{enumerate} \item If $\mathcal{C}$ is the collection of pairs of disjoint closed subsets of $X$, then $X$ is monotonically normal. \item \label{deflmdn} If $\mathcal{C}$ is the collection of disjoint closed subsets $(C,D)$ such that $C$ is a regular $G_\dlt$-set, then $X$ is left monotonically $\dlt$-normal or lm\(\delta\)n. \item If $\mathcal{C}$ is the collection of pairs of disjoint closed subsets of $X$ at least one of which is a regular $G_\dlt$-set, then $X$ is monotonically $\dlt$-normal or m\(\delta\)n. \item If $\mathcal{C}$ is the collection of pairs of disjoint regular $G_\delta$-subsets of $X$, then $X$ is m\(\delta\delta\)n.
\end{enumerate} \end{defn} It can easily be shown that right monotone $\dlt$-normality (where $D$, rather than $C$, is assumed to be a regular $G_\delta$-set) is equivalent to lm\(\delta\)n. Note that, replacing $H(C,D)$ with $H(C,D)\smallsetminus \overline{H(D,C)}$ if necessary, we may assume that $H(C,D)\cap H(D,C)=\varnothing$ whenever $H$ is an mn, m\(\delta\)n or m\(\delta\delta\)n operator. There are a number of characterizations of monotone normality, amongst them the equivalence of conditions (1) and (2) in Theorem \ref{mngr2} (see \cite{g}) (the proof of the extension stated here is routine). Mimicking the proof of this characterization, we obtain the hierarchy of monotone versions of \(\delta\)-normality listed in Theorem \ref{ul1}. \begin{thm} \label{mngr2} The following are equivalent for a space \(X\): \newcounter{romcount} \begin{enumerate} \item \(X\) is monotonically normal. \item \label{part2} There is an operator \(\psi\) assigning to each open set \(U\) in \(X\) and \(x \in U\), an open set \(\psi(x,U)\) such that \begin{enumerate} \item \(x \in \psi(x,U)\), \item if \(\psi(x,U) \cap \psi(y,V) \neq \varnothing\), then either \(x \in V\) or \(y \in U\). \end{enumerate} \item There is an operator \(\psi\) as in (2) such that, in addition, \(\psi(x,U) \subseteq U\). \item There is an operator \(\psi\) as in (2) such that, in addition, \(\overline{\psi(x,U)} \subseteq U\). \end{enumerate} \end{thm} In Theorem \ref{mngr2}, monotone normality is characterized in terms of an operator assigning an open set to each point \(x\) and open neighbourhood $U$ of \(x\). We define several new properties, analogous to these characterizations, by considering an operator acting on a regular $G_\delta$-set \(L\) and an open set containing \(L\).
\begin{defn} \label{defnuln} A space \(X\) is \textit{weakly coherently} \(\delta\)\textit{-normal (wc\(\delta\)n)} iff there is an operator \(\phi\) assigning to each regular $G_\delta$-set \(L\) and open set \(U\) containing \(L\), an open set \(\phi(L,U)\) such that \begin{enumerate} \item \(L \subseteq \phi(L,U)\), \item if \(\phi(L,U) \cap \phi(K,V) \neq \varnothing\) then either \(L \cap V \neq \varnothing\) or \(K \cap U \neq \varnothing\). \end{enumerate} \(X\) is \textit{coherently \(\delta\)-normal (c\(\delta\)n)} if in addition, \begin{enumerate} \item[(3)] \(L\subseteq \phi(L,U)\subseteq \overline{\phi(L,U)} \subseteq U\). \end{enumerate} \(X\) is \textit{monotonically coherently \(\delta\)-normal (mc\(\delta\)n)} if in addition, \begin{enumerate} \item[(4)] if \(L \subseteq L'\) and \(U \subseteq U'\) then \(\phi(L,U) \subseteq \phi(L',U')\). \end{enumerate} \end{defn} If $\phi$ is an operator witnessing that $X$ is wc\(\delta\)n, there is no assumption that $\phi(L,U)$ is monotone in $L$ or $U$ nor that it is a subset of $U$. We have the following proposition. \begin{prop} \label{monul} Suppose that \(X\) is wc\(\delta\)n. Then there is a wc\(\delta\)ns operator $\phi$ on $X$ such that: \begin{enumerate} \item \(L \subseteq \phi(L,U) \subseteq U\) and \item if \(L \subseteq L'\) and \(U \subseteq U'\), then \(\phi(L,U) \subseteq\phi(L',U')\). \end{enumerate} \end{prop} \begin{proof} Suppose \(\psi\) is a wc\(\delta\)ns operator on \(X\) and let \(L\) be a regular $G_\delta$-set contained in an open set \(U\).
Define \[\varphi(L,U) = U\cap\bigcup \{\psi(J,W) \colon J \subseteq L, \mbox{ \(J\) is regular } G_{\delta}, \mbox{ \(W\) is open}, \mbox{ } J \subseteq W \subseteq U\}.\] Then \(\varphi(L,U)\) is open and \(L \subseteq \varphi(L,U) \subseteq U\) and clearly \(\varphi(L,U) \subseteq\varphi(L',U')\) whenever \(L \subseteq L'\) and \(U \subseteq U'\). It remains to verify that $\varphi$ is, indeed, a wc\(\delta\)ns operator. So suppose that \(\varphi(L,U) \cap \varphi(K,V) \neq \varnothing\). Then for some regular $G_\delta$-sets $L'$ and $K'$, and open sets $U'$ and $V'$, such that $L'\subseteq L$, $K'\subseteq K$, $L'\subseteq U'\subseteq U$ and $K'\subseteq V'\subseteq V$, we have $\psi(L',U')\cap\psi(K',V')\neq\varnothing$. Hence either $\varnothing\neq L'\cap V'\subseteq L\cap V$ or $\varnothing\neq K'\cap U'\subseteq K\cap U$, as required. \end{proof} On the other hand, it is not clear whether c\(\delta\)ns implies mc\(\delta\)n. In light of Theorem \ref{mngr2}, we might expect there to be a relationship between m\(\delta\)n, wc\(\delta\)ns and c\(\delta\)n. Indeed, we have the following theorem. \begin{thm} \label{ul1} Each of the following properties of a space \(X\) implies the next: \begin{enumerate} \item \label{onei} Monotonically normal, \item \label{twoii} m\(\delta\)n, \item \label{threeiii} mc\(\delta\)n, \item \label{fouriv} c\(\delta\)n, \item \label{fivev} wc\(\delta\)n, \item \label{sixvi} m\(\delta\delta\)n.
\end{enumerate} Moreover, every mc\(\delta\)ns space is lm\(\delta\)n and every lm\(\delta\)n space is m\(\delta\delta\)n. \end{thm} \begin{proof} The proofs of (\ref{onei}) \(\rightarrow\) (\ref{twoii}), (\ref{threeiii}) \(\rightarrow\) (\ref{fouriv}), (\ref{fouriv}) \(\rightarrow\) (\ref{fivev}) and the fact that lm\(\delta\)n implies m$\dlt\dlt$n are trivial. (\ref{twoii}) \(\rightarrow\) (\ref{threeiii}): We modify the proof of Theorem \ref{mngr2}. Suppose \(H\) is an m\(\delta\)n operator for \(X\) with \(H(L,K) \cap H(K,L) = \varnothing\). Let \(L\) be a regular $G_\delta$-set and \(U\) an open set such that \(L \subseteq U\) and define \(\psi(L,U) = H(L, X \smallsetminus U)\). Then \(L \subseteq \psi(L,U) \subseteq \overline{\psi(L,U)} \subseteq U\). Assume \(L \cap V = \varnothing\) and \(K \cap U = \varnothing\) where \(K\) is a regular $G_\delta$-set contained in an open set \(V\). Then \(L \subseteq X \smallsetminus V\) and \(K \subseteq X \smallsetminus U\). So by monotonicity, \(\psi(L,U) \subseteq H(L,K)\). Similarly, \(\psi(K,V) \subseteq H(K,L)\). Therefore \(\psi(L,U) \cap \psi(K,V) = \varnothing\). Monotonicity of the operator \(\psi\) follows from the monotonicity of \(H\), hence \(\psi\) is an mc\(\delta\)ns operator for \(X\). (\ref{fivev}) \(\rightarrow\) (\ref{sixvi}): Again we modify the proof of Theorem \ref{mngr2}. Suppose \(\psi\) is a wc\(\delta\)ns operator for \(X\) and let \(L\) and \(K\) be disjoint regular $G_\delta$-sets in \(X\).
Define \[H(L,K) = \bigcup \{\psi(J,U) \colon J \subseteq L \cap U, \mbox{ } J \mbox{ is regular \(G_{\delta}\)}, \mbox{ } U \mbox{ is open, } U \cap K = \varnothing\}.\] Then \(H(L,K)\) is open with \(L \subseteq H(L,K)\). We show that \(\overline{H(L,K)} \subseteq X \smallsetminus K\). Since \(X\) is wc\(\delta\)n, if \(U\) is open with \(U \cap K = \varnothing\) and \(J\) is any regular $G_\delta$-set contained in \(L \cap U\), then \(\psi(K, X \smallsetminus L) \cap \psi(J,U) = \varnothing\). Hence \(\psi(K, X \smallsetminus L) \cap H(L,K) = \varnothing\) and so \(K \cap \overline{H(L,K)} = \varnothing\). It is routine to show that the operator \(H\) is monotone. To see that mc\(\delta\)ns implies lm\(\delta\)n, assume \(\psi\) is an mc\(\delta\)ns operator for \(X\). Let \(C\) and \(D\) be disjoint closed sets, \(C\) a regular $G_\delta$-set. Define \(H(C,D) = \psi(C,X \smallsetminus D)\). Then \(C \subseteq H(C,D) \subseteq \overline{H(C,D)} \subseteq X \smallsetminus D\). Suppose \(C \subseteq C'\) and \(D' \subseteq D\). Then \(X \smallsetminus D \subseteq X \smallsetminus D'\), hence \(H(C,D) \subseteq H(C',D')\). \end{proof} The proof of the following is routine. \begin{prop} Let \(M\) be a compact metrizable space. If $X\times M$ satisfies any of the properties listed in Theorem \ref{ul1}, then so does $X$. \end{prop} \section{Factorizations of monotone normality} Kohli and Singh \cite{sigma} factorize normality in terms of various weak normality properties.
They define a space to be \(\Sigma\)-normal if for each closed set \(C\) contained in an open set \(U\), there exists a set \(W\) that is the complement of a regular $G_\delta$-set such that \(C \subseteq W \subseteq U\) and show that a space is normal iff it is both weakly \(\delta\)-normal and \(\Sigma\)-normal. There is an obvious monotone version of this result that factorizes monotone normality into monotone $\Sigma$-normality and m\(\delta\delta\)n. However, it turns out that we can do better than this in the monotone case. \begin{defn} A space \(X\) is monotonically \(\Sigma\)-normal, or m$\Sigma$n, iff there is an operator \(W\) assigning to each closed set $C$ and each open set \(U\) containing $C$, an open set \(W(C,U)\) such that \begin{enumerate} \item $X \smallsetminus W(C,U)$ is a regular $G_\dlt$-set, \item \(C \subseteq W(C,U) \subseteq U\) and \item if \(C \subseteq C'\) and \(U \subseteq U'\), then \(W(C,U) \subseteq W(C',U')\). \end{enumerate} \end{defn} \begin{prop} \label{prop1} \(X\) is m$\Sigma$n iff there are operators \(D\) and \(W\) assigning to each closed set $C$ and open set $U$, containing $C$, sets $D(C,U)$ and $W(C,U)$ such that \begin{enumerate} \item $D(C,U)$ and $X \smallsetminus W(C,U)$ are regular $G_\delta$-sets, \item \(C \subseteq D(C,U) \subseteq W(C,U) \subseteq U\), \item $D(C,U)\cap W(X\smallsetminus U,X\smallsetminus C)=\varnothing$, \item if \(C \subseteq C'\) and \(U \subseteq U'\), then \(D(C,U) \subseteq D(C',U')\) and \(W(C,U) \subseteq W(C',U')\). \end{enumerate} \end{prop} \begin{proof} Suppose the conditions of the theorem hold; then clearly \(X\) is m$\Sigma$n. Conversely, suppose \(V\) is an m$\Sigma$n operator for \(X\) and that \(C\subseteq U\). Define $D'(C,U)=X \smallsetminus V(X\smallsetminus U,X\smallsetminus C)$, so that $C\subseteq D'(C,U)\subseteq U$ and $D'(C,U)$ is a regular $G_\dlt$, and define $W(C,U)=V(D'(C,U),U)$.
It is routine to check conditions (1), (2) and (4). Now define $D(C,U)=D'(C,U) \smallsetminus W(X\smallsetminus U,X\smallsetminus C)$, which is the intersection of two regular $G_\dlt$-sets. Since $W(X\smallsetminus U,X\smallsetminus C)=V\big(X\smallsetminus V(C, U),X\smallsetminus C\big)$ and $C\cap W(X\smallsetminus U,X\smallsetminus C)=\varnothing$, we have operators $D$ and $W$ satisfying all four conditions. \end{proof} \begin{prop} \label{prop3} Every monotonically normal space and every perfectly normal space is m$\Sigma$n. \end{prop} \begin{proof} To show that every monotonically normal space is m$\Sigma$n, we extend the proof that every normal space is \(\Sigma\)-normal \cite{sigma} and use the monotone version of Urysohn's lemma \cite{stares}. Suppose \(X\) is perfectly normal. Then every open set is the complement of a regular $G_\delta$-set and defining \(W(C,U) = U\) shows that $X$ is m$\Sigma$n. \end{proof} It turns out that a weaker property (that might be termed monotone $\Sigma$ Hausdorff) is all that is needed to factorize monotone normality in terms of m\(\delta\delta\)n. \begin{defn} A space \(X\) has \textit{property \((\star)\)} iff there are operators $D$ and $E$ assigning to every $x\in X$ and open set \(U\) containing $x$, disjoint sets \(D(x,U)\) and \(E(x,U)\) such that \begin{enumerate} \item \(D(x,U)\) and \(E(x,U)\) are regular $G_\dlt$-sets, \item \(x \in D(x,U) \subseteq U\) and \item for every open set \(V\) and \(y \in V\), if \(x \notin V\) and \(y \notin U\), then \(D(y,V) \subseteq E(x,U)\). \end{enumerate} \end{defn} Of course, if $X$ is a regular space we can, without loss of generality, drop the assumption that $D(x,U)\subseteq U$.
\begin{prop} A space \(X\) has \textit{property \((\star)\)} iff there are operators $D$ and $W$ assigning to each $x\in X$ and each open $U$ containing $x$, sets \(D(x,U)\) and \(W(x,U)\) such that \begin{enumerate} \item $D(x,U)$ and $X \smallsetminus W(x,U)$ are regular $G_\dlt$-sets, \item \(x \in D(x,U) \subseteq W(x,U) \subseteq U\) and \item for every open set \(V\) and \(y \in V\), if \(x \notin V\) and \(y \notin U\), then \(D(y,V) \cap W(x,U)= \varnothing\). \end{enumerate} \end{prop} \begin{proof} If $D$ and $E$ witness that \(X\) has property \((\star)\), define $W(x,U)=X \smallsetminus E(x,U)$ for each $x\in U$. If \(z \notin U\) and \(\hat{V} = X \smallsetminus D(x,U)\), then \(x \notin \hat{V}\) and so \(z \in D(z,\hat{V}) \subseteq E(x,U)\). Hence \(X \smallsetminus U \subseteq E(x,U)\) and so \(W(x,U) \subseteq U\). Since \(D(x,U)\) and \(E(x,U)\) are disjoint, \(D(x,U) \subseteq W(x,U)\) and condition (3) is clear. The converse follows just as easily. \end{proof} Property $(\star)$ is relatively easy to achieve. \begin{thm} \label{propstar} Every m$\Sigma$n space and every Tychonoff space with $G_\dlt$ points has property $(\star)$. Hence every monotonically normal space, every perfectly normal space, every first countable Tychonoff space and every Tychonoff space with a $G_\dlt$-diagonal has property $(\star)$. \end{thm} \begin{proof} Suppose that $X$ is m$\Sigma$n. Let $D$ and $W$ satisfy the conditions of Proposition \ref{prop1}. Suppose that $U$ and $V$ are open sets and that $x\in U\smallsetminus V$ and $y\in V\smallsetminus U$. By (4), $D(\{y\},V)\cap W(\{x\},U)\subseteq D(\{y\},X\smallsetminus\{x\})\cap W(\{x\},X \smallsetminus \{y\})$, which is empty by (3). Hence $D(\{x\},U)$ and $W(\{x\},U)$ define operators satisfying property $(\star)$.
Suppose now that $X$ is Tychonoff and has $G_\dlt$ points. Let $x\in U$. Since $\{x\}$ is a $G_\dlt$-set, regularity implies that it is a regular $G_\delta$-set. Since $X$ is Tychonoff, there is a continuous function \(f \colon X \to [0,1]\) such that \(f(x)=1\) and \(f(X \smallsetminus U) = 0\). Define \(D(x,U) = \{x\}\) and \(E(x,U) =f^{-1}(0)\). Then $D(x,U)$ and $E(x,U)$ are disjoint regular $G_\dlt$-sets such that $x\in D(x,U)\subseteq U$ and $X \smallsetminus U\subseteq E(x,U)$, so that $D(y,V)\subseteq E(x,U)$ whenever $y\in V\smallsetminus U$. \end{proof} \begin{exmp} Assuming $\clubsuit^*$, there is a space with property $(\star)$ that is not m$\Sigma$n. \end{exmp} \begin{proof} $\clubsuit^*$ asserts the existence of a sequence $R_\alp=\{\bt_{\alp,n} \colon n\in\w\}$ for every limit ordinal $\alp\in\w_1$ that is cofinal in $\alp$ such that, whenever $X$ is an uncountable subset of $\w_1$, $\{\alp\in\w_1 \colon X\cap R_\alp\text{ is cofinal in }\alp\}$ contains a closed unbounded set. $\clubsuit^*$ holds, for example, in any model of $V=L$. Let $X=\w_1\times2$. For each limit $\alp$ and $n\in\w$, define $B(\alp,n)=\{(\alp,1)\}\cup\{(\bt_{\alp,k},0) \colon n\leqslant k\}$.
Let $\T$ be the topology on $X$ generated by the collection \[ \big\{\{(\alp,i)\} \colon \alp\text{ is a successor or }i=0\big\} \cup\big\{B(\alp,n) \colon \alp\text{ is a limit}, n\in\w\big\}. \] With this topology, $X$ is zero-dimensional, hence Tychonoff, and first countable, so has property $(\star)$. If $U$ is an open set containing an uncountable subset of $\w_1\times\{0\}$, for closed unboundedly many $\alp$, $R_\alp\cap\{\bt \colon (\bt,0)\in U\}$ is cofinal in $\alp$, so that $\{\alp \colon (\alp,1)\in\overline{U}\}$ contains a closed unbounded subset. Since the intersection of countably many closed unbounded subsets of $\w_1$ is, again, closed and unbounded, it follows that every uncountable regular $G_\delta$-set in $X$ contains a closed unbounded subset of $\w_1\times\{1\}$. Hence, if $C$ is any uncountable, co-uncountable subset of $\w_1\times\{1\}$, $U=C\cup \left(\w_1\times\{0\}\right)$ and $D$ is any regular $G_\dlt$-set containing $C$, then $C\subseteq U$, $U$ is open but $D\not\subseteq U$. Hence $X$ is not m$\Sigma$n. \end{proof} Interestingly, property \((\star)\) is enough to push m\(\delta\delta\)n up to monotone normality. \begin{thm} \label{starequivs} A space is monotonically normal iff it has property $(\star)$ and is m\(\delta\delta\)n. \end{thm} \begin{proof} Suppose \(H\) is an m\(\delta\delta\)n operator for \(X\) such that \(H(E,F) \cap H(F,E) = \varnothing\). Let \(U\) be an open set with \(x \in U\).
By property \((\star)\), there exist disjoint regular $G_\delta$-sets \(D(x,U)\) and \(E(x,U)\) such that \(x \in D(x,U) \subseteq U\) and for any open set \(V\) with \(x \notin V\), if \(y \in V \smallsetminus U\) then \(D(y,V) \subseteq E(x,U)\). Define \(\psi(x,U) = H(D(x,U), E(x,U))\). Then \(D(x,U) \subseteq \psi(x,U)\), so \(x \in \psi(x,U)\). Suppose \(x \notin V\) and \(y \in V \smallsetminus U\). Then by monotonicity of \(H\), \(H(D(y,V), E(y,V)) \subseteq H(E(x,U),D(x,U))\). It follows that \(H(D(y,V),E(y,V)) \cap H(D(x,U), E(x,U)) = \varnothing\). Hence \(\psi(y,V) \cap \psi(x,U) = \varnothing\). By Theorem \ref{mngr2}, \(X\) is monotonically normal. The converse is trivial given Theorems \ref{ul1} and \ref{propstar}. \end{proof} Hence, in any space with property $(\star)$, for example in a first countable Tychonoff space, each of the properties listed in Theorem \ref{ul1} is equivalent to monotone normality. \begin{thm} \label{corr20} \begin{enumerate} \item If every point of $X$ is a regular $G_\delta$-set, then $X$ is monotonically normal iff it is wc\(\delta\)n. \item $X$ is c\(\delta\)ns iff it is wc\(\delta\)ns and \(\delta\)-normal. \item If $X$ is normal, then $X$ is c\(\delta\)ns iff it is m\(\delta\delta\)n. \end{enumerate} \end{thm} \begin{proof} In each case one implication follows from Theorem \ref{ul1} and from the fact that a c\(\delta\)ns space is obviously $\dlt$-normal. To complete (1) and (2), suppose that $\psi$ satisfies conditions (1) and (2) of Definition \ref{defnuln}. If every $x\in X$ is a regular $G_\dlt$, then $\phi(x,U)=\psi(\{x\},U)$ satisfies condition (2) of Theorem \ref{mngr2} and $X$ is monotonically normal.
If $X$ is $\dlt$-normal and $L$ is a regular $G_\dlt$-subset of the open set $U$, then there is an open set $\phi(L,U)$ such that $L\subseteq\phi(L,U) \subseteq \overline{\phi(L,U)} \subseteq \psi(L,U) \subseteq U$. It is trivial to check that, in this case, $\phi$ is a c\(\delta\)ns operator. To complete (3), suppose \(H\) is an m\(\delta\delta\)n operator for \(X\) with \(H(L,K) \cap H(K,L) = \varnothing\). Let \(L\) be a regular $G_\delta$-set and \(U\) an open set such that \(L \subseteq U\). Since \(X\) is normal, there exists an open set \(W_L\) such that \(W_L\) is the complement of a regular $G_\delta$-set and \(L \subseteq W_L \subseteq U\). Define \(\psi(L,U) = H(L, X \smallsetminus W_L)\); then \(L \subseteq \psi(L,U) \subseteq \overline{\psi(L,U)} \subseteq W_L \subseteq U\). Now suppose \(L \cap V = \varnothing\) and \(K \cap U = \varnothing\) where \(K\) is a regular $G_\delta$-set contained in an open set \(V\). Then \(L \subseteq X \smallsetminus W_K\) and \(K \subseteq X \smallsetminus W_L\). By monotonicity, \(\psi(L,U) \subseteq H(L,K)\) and \(\psi(K,V) \subseteq H(K,L)\), hence \(\psi(L,U) \cap \psi(K,V) = \varnothing\). Therefore \(\psi\) is a c\(\delta\)ns operator for \(X\). \end{proof} \section{Products with compact metrizable spaces and stratifiability} A space $X$ is semi-stratifiable if there is an operator $U$ assigning to each $n\in \omega$ and closed set $D$ an open set $U(n,D)$ containing $D$ such that $\bigcap_{n \in \omega} U(n,D)=D$ and $U(n,D')\subseteq U(n,D)$ whenever $D'\subseteq D$. If, in addition, $\bigcap_{n \in \omega}\overline{U(n,D)}=D$, then $X$ is said to be stratifiable.
A space $X$ is stratifiable iff $X\times M$ is monotonically normal for any (or all) infinite compact metrizable $M$ iff $X$ is both semi-stratifiable and monotonically normal (see \cite{mn}). \begin{defn} A space \(X\) is \(\delta\)\textit{-semi-stratifiable} iff there is an operator \(U\) assigning to each \(n \in \omega\) and regular $G_\delta$-set \(D\) in \(X\), an open set \(U(n,D)\) containing \(D\) such that \begin{enumerate} \item[(1)] if \(E \subseteq D\), then \(U(n, E) \subseteq U(n, D)\) for each \(n \in \omega\) and \item[(2)] \(D = \bigcap _{n \in \omega} U(n,D)\). \end{enumerate} If in addition, \begin{enumerate} \item[(3)] \(D = \bigcap _{n \in \omega} \overline{U(n,D)}\), \end{enumerate} then \(X\) is \(\delta\)\textit{-stratifiable}. \end{defn} Just as for stratifiability, we may assume that the operator \(U\) is also monotonic with respect to \(n\), so that \(U(n+1,D) \subseteq U(n,D)\) for each \(n\) and regular $G_\delta$-set \(D\). The proof of the following is essentially the same as the proof of the corresponding results for stratifiability and monotone normality. \begin{thm} \label{dsmddn} \label{dstrat3} \begin{enumerate} \item If \(X\) is \(\delta\)-stratifiable, then $X$ is $\dlt$-semi-stratifiable and m\(\delta\delta\)n. \item If \(X\) is \(\delta\)-semi-stratifiable and lm\(\delta\)n, then it is \(\delta\)-stratifiable. \end{enumerate} \end{thm} \begin{thm}\label{dstratwo} Let \(M\) be any infinite compact metrizable space. \(X\) is \(\delta\)-stratifiable iff \(X \times M\) is \(\delta\)-stratifiable iff $X\times M$ is m\(\delta\delta\)n. \end{thm} \begin{proof} Let \(\pi \colon X \times M \to X\) be the projection map. Since $M$ is compact, $\pi$ is both open and closed. Suppose $X\times M$ is $\dlt$-stratifiable with \(\delta\)-stratifiability operator $W$.
By Theorem \ref{dsmddn}, $X\times M$ is m\(\delta\delta\)n. To see that $X$ is $\dlt$-stratifiable, let $D$ be a regular $G_\dlt$-subset of $X$. Fix some $r\in M$ and define \(U(n,D) = \pi(W(n,D \times \{r\}))\). It is routine to verify that $U$ is a $\dlt$-stratifiability operator for $X$. Now suppose that $X$ is $\dlt$-stratifiable with operator $U$ such that \(U(n, \varnothing) = \varnothing\) and satisfying \(U(n+1,E) \subseteq U(n,E)\) for each \(n\) and regular $G_\delta$-set \(E\). Suppose \(D\) is a regular \(G_{\delta}\)-set in \(X \times M\). Then \(D = \bigcap _{i \in \omega} \overline{U}_i\) where \(D \subseteq U_i\) and \(U_i\) is open in \(X \times M\) for each \(i\). Define \(D_r = D \cap (X \times \{r\})\) for each \(r \in M\). Then each \(D_r\) is a regular $G_\delta$-set since \(D_r = \bigcap _{i \in \omega} \overline{U_i \cap (X \times B_{1/2^i}(r))}\) and \(D_r \subseteq U_i \cap (X \times B_{1/2^i}(r))\) for all \(i \in \omega\). Clearly \(D = \bigcup _{r \in M} D_r\). Moreover \(\pi(D_r)\) is a regular $G_\delta$-set in \(X\) for each \(r \in M\). For each \(n \in \omega\) define \[H(n,D) = \bigcup_{r \in M} U(n, \pi(D_r)) \times B_{\frac{1}{2^n}}(r).\] We show that \(H\) is a \(\delta\)-stratifiability operator for \(X \times M\). Clearly \(H(n,D)\) is open for each regular $G_\delta$-set \(D\) and \(n \in \omega\). That \(H\) is monotone is clear from the monotonicity of \(U\). It is easily seen that \(D \subseteq H(n,D)\) for each \(n \in \omega\), so it remains to prove that \(\bigcap_{n \in \omega} \overline{H(n,D)} \subseteq D\). Suppose \((x,s) \in \bigcap_{n \in \omega} \overline{H(n,D)}\smallsetminus D\).
Then there exists a basic open set \(V \ni x\) and \(k \in \omega\) such that \((V \times B_{1/2^k}(s)) \cap D = \varnothing\) and so \((V \times B_{1/2^k}(s)) \cap (\pi(D_r) \times \{r\}) = \varnothing\) for all \(r \in B_{1/2^k}(s)\). Since \((x,s) \in \overline{H(n,D)}\) for each \(n \in \omega\), we may consider the following two cases: Case 1: Assume \((x,s) \in \overline{\bigcup_{r \in B_{1/2^k}(s)} U(n, \pi(D_r)) \times B_{1/2^n}(r)}\) for all \(n \geqslant k+1\). Then for all such \(n\), \((W \times B_{1/2^m}(s)) \cap \bigcup_{r \in B_{1/2^k}(s)} U(n, \pi(D_r)) \times B_{1/2^n}(r) \neq \varnothing\) for all basic open sets \(W \ni x\), \(m \in \omega\). It follows that for some \(t \in B_{1/2^k}(s)\), \(V \cap U(n, \pi(D_t)) \neq \varnothing\) for each \(n \geqslant k+1\). Then, since \(U\) is monotonic with respect to \(n\), \(V \cap \bigcap_{n \in \omega} U(n, \pi(D_t)) \neq \varnothing\). Therefore \(V \cap \pi(D_t) \neq \varnothing\), a contradiction. Case 2: Assume \((x,s) \in \overline{\bigcup_{r \notin B_{1/2^k}(s)} U(n, \pi(D_r)) \times B_{1/2^n}(r)}\) for all \(n \geqslant k+1\). Then for some \(p \notin B_{1/2^k}(s)\), \((W \times B_{1/2^m}(s)) \cap (U(n, \pi(D_p)) \times B_{1/2^n}(p)) \neq \varnothing\) for all basic open sets \(W \ni x\), \(m \in \omega\) and \(n \geqslant k+1\). Thus, for all such \(m\) and \(n\), \(B_{1/2^m}(s) \cap B_{1/2^n}(p) \neq \varnothing\).
However, \(B_{1/2^{k+1}}(s) \cap B_{1/2^n}(p) = \varnothing\) for all \(n \geqslant k+1\), a contradiction. Therefore \(D = \bigcap_{n \in \omega} \overline{H(n,D)}\) as required. To complete the proof we wish to show that if $X\times M$ is $\mathcal{D}(\mathbb R)dn$, then $X$ is $\dlt$-stratifiable. Note first that we may assume that $X\times\Omega$ is $\mathcal{D}(\mathbb R)dn$, where $\Omega=\w+1$ is the convergent sequence. To see this note that if $W$ is a subspace of $M$ that is homeomorphic to $\Omega$, then any regular $G_\dlt$-subset of $X\times W$ is in fact a regular $G_\dlt$-subset of $X\times M$, so that $X\times W$ is also $\mathcal{D}(\mathbb R)dn$. The proof is now familiar. Let $H$ be an $\mathcal{D}(\mathbb R)dn$ operator for $X\times\Omega$ such that $H(C,D)\cap H(D,C)=\varnothing$ for any regular $G_\dlt$-sets $C$ and $D$. For each $n\in\w$, let $\Omega_n=(\w+1)\smallsetminus\{n\}$ and let $\pi \colon X\times \Omega\to X$ be the projection map. If $E$ is a regular $G_\dlt$-subset of $X$ define $$U(n,E)=\pi\big(H(E\times \{n\},X\times\Omega_n)\big).$$ Clearly \(E \subseteq U(n,E)\) for each \(n\). Suppose that $z\in \bigcap_{n\in \omega}\overline{U(n,E)}\smallsetminus E$. Then, as $E$ is closed, there is a regular $G_\dlt$-set $D$ such that $z\in D\subseteq X\smallsetminus E$. 
Hence $K=D\cap \bigcap_{n\in \omega}\overline{U(n,E)}$ is a regular $G_\dlt$-set such that $z\in K$, $K\cap E=\varnothing$ and $K\subseteq\bigcap_{n\in \omega}\overline{U(n,E)}$, from which it follows that $$K\times\{\w\}\subseteq \overline{\bigcup_{n\in\w}\overline{H(E\times\{n\},X\times\Omega_n)}} =\overline{\bigcup_{n\in\w} H(E\times\{n\},X\times\Omega_n)}.$$ Therefore, for some $n\in \w$, we have $$\varnothing\neq H\big(K\times\{\w\},E\times\Omega\big)\cap H\big(E\times\{n\},X\times\Omega_n\big),$$ but, by monotonicity, this implies that $$\varnothing\neq H\big(K\times\{\w\},E\times\Omega\big)\cap H\big(E\times\Omega,K\times\{\w\}\big),$$ which is a contradiction and it follows that $\bigcap_{n\in \omega}\overline{U(n,E)}=E$. \end{proof} Clearly property \((\star)\) will have an effect on $\dlt$-stratifiability although it is not clear that it is productive. Obviously, by Theorem \ref{propstar}, if \(X\) and \(Y\) are Tychonoff with $G_\dlt$ points, in particular if $Y$ is a compact metrizable space, then \(X \times Y\) has property \((\star)\). Furthermore, if the product of a space with some compact metrizable space does not have property \((\star)\), then the space is not stratifiable. \begin{cor} \label{stdststar} Let \(M\) be any infinite compact metrizable space. If $X\times M$ has property $(\star)$, in particular if $X$ is a Tychonoff space with $G_\dlt$ points, then $X$ is stratifiable iff $X$ is $\dlt$-stratifiable iff $X\times M$ is m\(\delta\delta\)n. \end{cor} \section{Examples} The following lemma gives some simple sufficient conditions on the regular $G_{\delta}$-subsets of a space for it to be wc\(\delta\)ns or c\(\delta\)n. \begin{lem} Let $X$ be a space. 
\begin{enumerate} \item If, whenever \(L\) and \(K\) are disjoint regular $G_{\delta}$-subsets, at least one of them is clopen, then $X$ is wc\(\delta\)n. \item If every regular $G_{\delta}$-subset of $X$ is clopen, then $X$ is both c\(\delta\)ns and $\dlt$-stratifiable. \end{enumerate}\label{makes it ul} \end{lem} \begin{proof} (1) For any regular $G_{\delta}$-set \(L\) contained in an open set \(U\), define \(\psi\) as follows: \[\psi(L,U)= \begin{cases} L & \mbox{if \(L\) is clopen} \\ U & \mbox{if \(L\) is not clopen.} \end{cases}\] Suppose \(L\) is clopen. Then \(\psi(L,U) = L\) and \(\psi(K,V) \subseteq V\), where \(K\) is a regular $G_{\delta}$-set contained in an open set \(V\). Hence if \(L \cap V = \varnothing\) and \(K \cap U = \varnothing\), then \(\psi(L,U) \cap \psi(K,V) = \varnothing\). (2) follows immediately by defining $\phi(L,U)=L$ and $U(n,L)=L$ for any $n\in\omega$ and regular $G_{\delta}$-set $L$. \end{proof} Given a cardinal $\kp$, let $\mathbb L_\kp$ denote the space $\kp+1$ with the topology generated by isolating each $\alp\in \kp$ and declaring basic open neighbourhoods of $\kp$ to take the form $\mathbb L_\kp \smallsetminus C$, where $C$ is some countable subset of $\kp$. Note that, if $\kp$ is uncountable, then any regular $G_\dlt$-subset of $\mathbb L_\kp$ containing the point $\kp$ is clopen and co-countable and that a regular $G_\dlt$-set that does not contain $\kp$ is countable. \begin{exmp} \label{Lindel1} $\mathbb L_{\w_1}$ is monotonically normal and \(\delta\)-stratifiable, but not semi-stratifiable. Moreover $\mathbb L_{\w_1}\times (\w+1)$ is m\(\delta\delta\)n. \end{exmp} \begin{proof} By Lemma \ref{makes it ul} (2), $\mathbb L_{\w_1}$ is $\dlt$-stratifiable. 
By Theorem \ref{mngr2}, defining \(\psi(x,U) = U\), if $x=\w_1$, and \(\psi(x,U) = \{x\}\), otherwise, whenever $x$ is in the open set $U$, we see that $\mathbb L_{\w_1}$ is monotonically normal. However, since $\{\w_1\}$ is not a $G_{\delta}$-subset, $\mathbb L_{\w_1}$ is not semi-stratifiable. That $\mathbb L_{\w_1}\times (\w+1)$ is m\(\delta\delta\)n follows by Theorem \ref{dstratwo}. \end{proof} \begin{exmp} Let $\mathbb S$ be the Sorgenfrey line. $\mathbb S$ is monotonically normal but not $\dlt$-stratifiable and $\mathbb S\times(\w+1)$ is not m\(\delta\delta\)n. \end{exmp} \begin{proof} Since $\mathbb S\times(\w+1)$ is first countable and Tychonoff, it has property $(\star)$. Since $\mathbb S$ is not stratifiable, $\mathbb S\times(\w+1)$ is not monotonically normal and therefore not m\(\delta\delta\)n. \end{proof} \begin{exmp} \label{Lindomega1} $X=\big[\mathbb L_{\w_1}\times(\w+1)\big] \smallsetminus \{(\w_1,\w)\}$ is wc\(\delta\)n, but neither c\(\delta\)ns nor l$\mathcal{D}(\mathbb R)n$. \end{exmp} \begin{proof} Let $T=\{(\alp,\w) \colon \alp\in\w_1\}$ and $R=\{(\w_1,k) \colon k\in\w\}$. To see that $X$ is not c\(\delta\)n, note that $T$ is a regular $G_\dlt$-set and that $U=X \smallsetminus R$ is an open set containing $T$. If $\phi(T,U)$ is any open set such that $T\subseteq \phi(T,U)\subseteq X \smallsetminus R$, then, for some $k\in \w$, $\{(\alp,k) \colon (\alp,k)\in\phi(T,U)\}$ is uncountable, so that $(\w_1,k)\in\overline{\phi(T,U)}$, but $(\w_1,k)\notin U$. The same argument shows that $X$ is not l$\mathcal{D}(\mathbb R)n$ either. To see that $X$ is wc\(\delta\)n, let \(L\) be a regular $G_{\delta}$-subset of the open set \(U\). 
First note that if $(\w_1,k)\in L$, then $L\cap (\mathbb L_{\w_1}\times\{k\})$ is a clopen subset of $X$. For each $(x,\w)\in L$, there is a least $k_x\in \w$ such that $\{(x,j) \colon k_x\leqslant j\}$ is a subset of $U$. Let $B(x,U)=\{(x,\w)\}\cup\{(x,j) \colon k_x\leqslant j\}$. Define $$ \psi(L,U) = L \cup \bigcup \{B(x,U) \colon (x,\w)\in L\}. $$ Then $L\subseteq \psi(L,U)\subseteq U$ and $\psi(L,U)$ is open. Suppose that $L$ and $K$ are regular $G_{\delta}$-sets, $U$ and $V$ are open sets and that $L\subseteq U\smallsetminus V$ and $K\subseteq V\smallsetminus U$. Then \begin{align*}\psi(L,U)& \cap \psi(K,V)\\ &= \big(L \cup \bigcup \{B(x,U) \colon (x,\w)\in L\}\big) \cap\big(K \cup \bigcup \{B(x,V) \colon (x,\w)\in K\}\big)\\ &=\bigcup \{B(x,U) \colon (x,\w)\in L\} \cap \bigcup \{B(x,V) \colon (x,\w)\in K\}=\varnothing, \end{align*} since otherwise, if $(x,k)\in\psi(L,U) \cap \psi(K,V)$, then $(x,\w)\in L\cap K$. 
\end{proof} \begin{exmp} $X=\left[\mathbb L_{\w_1}\times\mathbb L_{\w_2}\right] \smallsetminus \{(\w_1,\w_2)\}$ is c\(\delta\)ns and $\dlt$-stratifiable, but not $\mathcal{D}(\mathbb R)n$. \end{exmp} \begin{proof} Let $L$ be a regular $G_{\delta}$-subset of $X$ containing $(\w_1,\alp)$ (or $(\alp,\w_2)$). Then $L$ contains a clopen neighbourhood of $(\w_1,\alp)$ (or $(\alp,\w_2)$). Hence every regular $G_{\delta}$-subset of $X$ is clopen and by Lemma \ref{makes it ul}, $X$ is c\(\delta\)ns and $\dlt$-stratifiable. To see that $X$ is not $\mathcal{D}(\mathbb R)n$, suppose to the contrary that $H$ is an $\mathcal{D}(\mathbb R)n$ operator such that $H(C,D)\cap H(D,C)=\varnothing$. For each $\alp\in \w_1$ and $\bt\in \w_2$, let \begin{align*} &C_\alp=\big\{(\alp,\w_2)\big\}, &D_\alp=X \smallsetminus \big(\{\alp\}\times\mathbb L_{\w_2}\big),\\ &E_\bt=\big\{(\w_1,\bt)\big\}, &F_\bt=X \smallsetminus \big(\mathbb L_{\w_1}\times\{\bt\}\big). \end{align*} Notice that $C_\alp\cap D_\alp=E_\bt\cap F_\bt=\varnothing$, $C_\alp\subseteq F_\bt$, $E_\bt\subseteq D_\alp$, $H(C_\alp,D_\alp)\subseteq\{\alp\}\times\mathbb L_{\w_2}$, and $H(E_\bt,F_\bt)\subseteq\mathbb L_{\w_1}\times\{\bt\}$. Hence $H(C_\alp,D_\alp)\subseteq H(F_\bt,E_\bt)$, so that $H(C_\alp,D_\alp)\cap H(E_\bt,F_\bt)=\varnothing$. Now, for each $\bt \in \w_2$, there are no more than countably many $\alp\in\w_1$ such that $(\alp,\bt)\notin H(E_\bt,F_\bt)$. This implies that there is a subset $W$ of $\w_2$ with cardinality $\w_2$ and some $\alp_0\in\w_1$ such that $(\alp_0,\w_1]\times\{\bt\}$ is a subset of $H(E_\bt,F_\bt)$ for each $\bt\in W$. 
It follows that for any $\alp_0\leqslant \alp\in \w_1$ and any $\bt\in W$, $(\alp,\bt)\notin H(C_\alp,D_\alp)$, so that $H(C_\alp,D_\alp)$ is not open, which is the required contradiction. \end{proof} \end{document}
\begin{document} \title{A generalization of the Clunie--Sheil-Small theorem} \author{Ma{\l}gorzata Michalska, Andrzej M. Michalski} \address{ Ma{\l}gorzata Michalska, \newline Institute of Mathematics, \newline Maria Curie-Sk{\l}odowska University, \newline pl. M. Curie-Sk{\l}odowskiej 1, \newline 20-031 Lublin, Poland} \email{[email protected]} \address{ Andrzej M. Michalski, \newline Department of Complex Analysis, \newline The John Paul II Catholic University of Lublin, \newline ul. Konstantyn\'{o}w 1H, \newline 20-950 Lublin, Poland} \email{[email protected]} \date{\today} \subjclass[2010]{31A05, 30C55, 30C45} \keywords{harmonic mappings, convex in one direction, shear construction} \maketitle \begin{abstract} In 1984, a simple and useful univalence criterion for harmonic functions was given by Clunie and Sheil-Small, which is usually called the shear construction. However, the application of this theorem is limited to the planar harmonic mappings convex in the horizontal direction. In this paper, a natural generalization of the shear construction is given. More precisely, our results are obtained under the hypothesis that the image of a harmonic mapping is a sum of two sets convex in the horizontal direction. \end{abstract} \baselineskip1.4\baselineskip \section{Introduction} Let $\mathbb{D}:=\{z\in\mathbb{C}:|z|<1\}$ be the open unit disk in the complex plane $\mathbb{C}$. A function $f:\mathbb{D}\to\mathbb{C}$ is said to be harmonic, if its real and imaginary parts are real harmonic, i.e. they satisfy the Laplace equation. Since $\mathbb{D}$ is simply connected it is well-known that $f$ can be written in the form \begin{equation}\label{f_sum_hol} f(z)=h(z)+\overline{g(z)},\quad z\in\mathbb{D}, \end{equation} where $h$ and $g$ are analytic in $\mathbb{D}$. The Jacobian $J_f$ of $f$ in terms of $h$ and $g$ is given by \begin{equation}\label{f_jacobian} J_f(z)=|h'(z)|^2-|g'(z)|^2,\quad z\in\mathbb{D}. 
\end{equation} Among all the harmonic functions in $\mathbb{D}$ one can distinguish those with non-vanishing Jacobian. In fact, it is proved that such harmonic functions are locally 1-1. If the Jacobian of a harmonic function in $\mathbb{D}$ is positive, it means that this function is locally 1-1 and sense-preserving. More information about basics of harmonic functions can be found e.g. in \cite{Duren1}. Clunie and Sheil-Small in \cite{ClunieSheilSmall1} gave the following theorem, known as the shear construction. {\renewcommand{B}{A} \begin{theorem}\label{shear_construction} A function $f=h+\overline{g}$ harmonic in $\mathbb{D}$ with positive Jacobian is 1-1 sense-preserving mapping of $\mathbb{D}$ onto a domain convex in the direction of the real axis if, and only if, $h-g$ is an analytic 1-1 mapping of $\mathbb{D}$ onto a domain convex in the direction of the real axis. \end{theorem} \addtocounter{theorem}{-1}} It appeared to have many applications as an univalence criterion and as a method of constructing harmonic mappings (see, e.g., \cite{DorffNowakWoloszkiewicz, DorffSzynal, DriverDuren, GanczarWidomski, GrigorianSzapiel, HengartnerSchober, KlimekSmetMichalski, Livingston}). In this paper we generalize the theorem of Clunie and Sheil-Small. In Section 2 we show some auxiliary results. In Section 3 we use results from Section 2 to give new conditions for univalence of the planar harmonic mappings. \section{Topological properties}\setcounter{equation}{0} The proof of Theorem \ref{shear_construction} of Clunie and Sheil-Small relies on the following lemma, which will be also useful in our considerations. {\renewcommand{B}{B} \begin{lemma}\label{shear_lemma} Let $D$ be a domain convex in the direction of the real axis and let $p$ be a continuous real-valued function in $D$. Then the mapping $D\ni w\mapsto w+p(w)$ is 1-1 in $D$ if, and only if, it is locally 1-1. In this case the image of $D$ is convex in the direction of the real axis. 
\end{lemma} \addtocounter{theorem}{-1}} Using this lemma we will prove more general results and apply them to obtain new univalence criteria for harmonic mappings. For a given set $D$ in the complex plane $\mathbb{C}$ it will be convenient to define \begin{equation}\label{set_projection} P_y(D):=\left\{a\in\mathbb{R}:\exists_{z\in D}\mathop{\rm Im} z=a\right\}. \end{equation} Such defined set $P_y(D)$ has several immediate properties, which we formulate in the following lemma for convenience in further use. \begin{lemma}\label{projection_lemma} Let $D_1$ and $D_2$ be the domains with nonempty intersection such that $D_1\cup D_2$ is simply connected. Then $P_y(D_1)$, $P_y(D_2)$ and $P_y(D_1\cap D_2)$ are open intervals. \end{lemma} \begin{proof} The Janiszewski theorem \cite[p.~268, Theorem~2]{Kuratowski1} yields the connectedness of the set $D_1\cap D_2$, which clearly is also open. Thus $D_1\cap D_2$ is a nonempty domain as well as $D_1$ and $D_2$. Hence, obviously, $P_y(D_1)$, $P_y(D_2)$ and $P_y(D_1\cap D_2)$ are open and connected subset of the real line $\mathbb{R}$, which completes the proof. \end{proof} Using this lemma we can prove the following theorem. \begin{theorem}\label{thm_1} Let $D_1$, $D_2$ be the domains convex in the direction of the real axis and let $q:D_1\cup D_2\to\mathbb{C}$ be a continuous function for which Jacobian $J_q$ exists, and such that $\mathop{\rm Im} q(z)=\mathop{\rm Im} z$ for all $z\in D_1\cup D_2$. Then $q$ is 1-1 if, and only if, $J_q\neq 0$ and $$P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2)).$$ \end{theorem} \begin{proof} If $D_1$, $D_2$ are two disjoint domains convex in the direction of the real axis then our claim follows immediately from Theorem \ref{shear_construction}. Hence, we consider the case $D_1\cap D_2\not=\emptyset$. Assume that $q$ is 1-1 in $D_1\cup D_2$. We show that Jacobian is not equal to $0$ and $P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2))$. 
It is clear that if $q$ is 1-1, then it is locally 1-1 and thus $J_q\neq 0$. It is also clear that $P_y(D_1\cap D_2)\subset P_y(q(D_1)\cap q(D_2))$. We show the inverse inclusion. Let $a\in \mathbb{R}\setminus P_y(D_1\cap D_2)$ be fixed. Then for any choice of $z_1\in D_1$ and $z_2\in D_2$ such that $\mathop{\rm Im} z_1=a$ and $\mathop{\rm Im} z_2=a$ we have $q(z_1)\neq q(z_2)$, since $q$ is 1-1. Thus we deduce that $a\notin P_y(q(D_1)\cap q(D_2))$, which means that $P_y(q(D_1)\cap q(D_2))\subset P_y(D_1\cap D_2)$. Hence we get $P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2))$. To prove the converse we assume that $J_q\neq 0$ and $P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2))$ and we show that $q$ is 1-1. The property $\mathop{\rm Im} q(z)=\mathop{\rm Im} z$ for all $z\in D_1\cup D_2$ together with Lemma \ref{shear_lemma} ensure that $q$ is 1-1 in $(D_1\cup D_2)\cap\{z\in\mathbb{C}:\mathop{\rm Im} z\in P_y(D_1\cap D_2)\}$. Assume that $q$ be not 1-1 in $$\widetilde{D}:=(D_1\cup D_2)\cap\{z\in\mathbb{C}:\mathop{\rm Im} z\notin P_y(D_1\cap D_2)\}.$$ Then, there exist $a\in \widetilde{D}$ and $z_1,z_2\in D_1\cup D_2$ such that $a=\mathop{\rm Im} z_1=\mathop{\rm Im} z_2$ and $q(z_1)=q(z_2)$. But the last equality means that $a\in P_y(q(D_1)\cap q(D_2))$ and by the definition of $\widetilde{D}$ we have $a\notin P_y(D_1\cap D_2)$, which is a contradiction to the assumption that $P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2))$. Thus, $q$ is 1-1 in $\widetilde{D}$. Now, the property $\mathop{\rm Im} q(z)=\mathop{\rm Im} z$ for all $z\in D_1\cup D_2$ implies that $q$ is 1-1 in $D_1\cup D_2$ and this completes the proof. \end{proof} Replacing the univalence condition in Theorem \ref{thm_1} by the condition that the sets $D_1\cup D_2$ and $q(D_1)\cup q(D_2)$ are simply connected we get. 
\begin{theorem}\label{thm_2} Let $D_1$, $D_2$ be the domains convex in the direction of the real axis with nonempty intersection and let $q:D_1\cup D_2\to\mathbb{C}$ be a continuous function, such that $J_q$ exists and it is not equal to $0$, and $\mathop{\rm Im} q(z)=\mathop{\rm Im} z$ for all $z\in D_1\cup D_2$. If $D_1\cup D_2$ and $q(D_1)\cup q(D_2)$ are simply connected then $$P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2)).$$ \end{theorem} \begin{proof} First, observe that the inclusion \begin{equation}\label{incl} P_y(D_1\cap D_2)\subset P_y(q(D_1)\cap q(D_2)) \end{equation} is valid for all domains $D_1$, $D_2$ and for all functions $q$ satisfying assumptions of Theorem \ref{thm_2}. Now, we prove the inverse inclusion. We can assume that the Jacobian $J_q$ is positive. Notice, that if $D_1\cup D_2$ is simply connected then, by Lemma \ref{projection_lemma}, the set $P_y(D_1\cap D_2)$ is connected. By the same Lemma \ref{projection_lemma} and obvious equality $q(D_1\cup D_2)=q(D_1)\cup q(D_2)$ we deduce that $P_y(q(D_1)\cap q(D_2))$ is connected since $q(D_1\cup D_2)$ is simply connected. Moreover, $P_y(D_1\cap D_2)$ and $P_y(q(D_1)\cap q(D_2))$ are open since $D_1\cap D_2$ and $q(D_1)\cap q(D_2)$ are open, hence $P_y(D_1\cap D_2)$ and $P_y(q(D_1)\cap q(D_2))$ are nonempty open intervals. Next, assume that there exists a real number $a$ such that $a\in P_y(q(D_1)\cap q(D_2))$ and $a\notin P_y(D_1\cap D_2)$. Then, there exist $\tilde{a}\in P_y(q(D_1)\cap q(D_2))\setminus P_y(D_1\cap D_2)$ and $\varepsilon>0$ such that the sets $$\widetilde{A}_\varepsilon:=(\tilde{a}-\varepsilon,\tilde{a}+\varepsilon)\cap P_y(D_1\cap D_2) \quad\text{ and }\quad (\tilde{a}-\varepsilon,\tilde{a}+\varepsilon)\setminus (P_y(D_1\cap D_2)\cup\{\tilde{a}\})$$ are nonempty open intervals. Indeed, this follows from the properties of $P_y(D_1\cap D_2)$ and $P_y(q(D_1)\cap q(D_2))$ as the open and nonempty intervals. 
Now, since $\tilde{a}\in P_y(q(D_1)\cap q(D_2))$ and $q(D_1)\cap q(D_2)$ is open, we can find points $w_1,w_2\in q(D_1)\cap q(D_2)$ such that $$\mathop{\rm Re} w_1<\mathop{\rm Re} w_2 \quad\text{ and }\quad \mathop{\rm Im} w_1=\mathop{\rm Im} w_2=\tilde{a}.$$ Recall, that $\tilde{a}\notin P_y(D_1\cap D_2)$, thus there exist points $$\eta_1,\eta_2\in D_1 \quad \text{ and }\quad \zeta_1,\zeta_2\in D_2 $$ such that $q(\eta_1)=q(\zeta_1)=w_1$ and $q(\eta_2)=q(\zeta_2)=w_2$ and, by Lemma \ref{shear_lemma}, they are unique. Moreover, the assumption that the Jacobian $J_q$ is positive implies either \begin{eqnarray} && \mathop{\rm Re} \eta_1<\mathop{\rm Re} \eta_2 < \mathop{\rm Re} \zeta_1<\mathop{\rm Re} \zeta_2,\nonumber \\ \text{or}&& \nonumber\\ && \mathop{\rm Re} \zeta_1<\mathop{\rm Re} \zeta_2 < \mathop{\rm Re} \eta_1<\mathop{\rm Re} \eta_2. \nonumber \end{eqnarray} Now, since $D_1$ and $D_2$ are open sets and $\widetilde{A}_\varepsilon$ is a nonempty, open interval, then there exist sequences $$\mathbb{N}\ni n\mapsto \eta_{1,n}\in D_1 ,\ \eta_{1,n}\to\eta_1, \quad\text{ and }\quad \mathbb{N}\ni n\mapsto \eta_{2,n}\in D_1 ,\ \eta_{2,n}\to\eta_2,$$ and the sequences $$\mathbb{N}\ni n\mapsto \zeta_{1,n}\in D_2 ,\ \zeta_{1,n}\to\zeta_1 \quad\text{ and }\quad \mathbb{N}\ni n\mapsto \zeta_{2,n}\in D_2 ,\ \zeta_{2,n}\to\zeta_2,$$ with $\mathop{\rm Im}\eta_{1,n}=\mathop{\rm Im}\eta_{2,n}=\mathop{\rm Im}\zeta_{1,n}=\mathop{\rm Im}\zeta_{2,n}\in \widetilde{A}_{\varepsilon}$ and such that either \begin{eqnarray}\label{real_part_ineq_sq} && \mathop{\rm Re} \eta_{1,n}<\mathop{\rm Re} \eta_{2,n} < \mathop{\rm Re} \zeta_{1,n}<\mathop{\rm Re} \zeta_{2,n},\nonumber \\ \text{or}&& \\ && \mathop{\rm Re} \zeta_{1,n}<\mathop{\rm Re} \zeta_{2,n} < \mathop{\rm Re} \eta_{1,n}<\mathop{\rm Re} \eta_{2,n},\nonumber \end{eqnarray} for sufficiently large $n$. 
Next, from the continuity of $q$ we deduce that $$q(\eta_{1,n})\to w_1, \qquad q(\eta_{2,n})\to w_2 \quad \text{ and } \quad q(\zeta_{1,n})\to w_1,\qquad q(\zeta_{2,n})\to w_2.$$ Thus, by the assumption that $J_q>0$ and \eqref{real_part_ineq_sq} we have either \begin{eqnarray*} && \mathop{\rm Re} q(\eta_{2,n}) < \mathop{\rm Re} q(\zeta_{1,n}) \quad\text{or}\quad \mathop{\rm Re} q(\zeta_{2,n}) < \mathop{\rm Re} q(\eta_{1,n}), \end{eqnarray*} for sufficiently large $n$, which implies $ \mathop{\rm Re} w_2 \leq \mathop{\rm Re} w_1 $. But this is a contradiction to the assumption $\mathop{\rm Re} w_1<\mathop{\rm Re} w_2$. Thus, we have the inclusion $P_y(q(D_1)\cap q(D_2))\subset P_y(D_1\cap D_2)$, which together with \eqref{incl} yields $P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2))$, and this completes the proof. \end{proof} \begin{corollary}\label{cor_1} Let $D_1$, $D_2$ be the domains convex in the direction of the real axis with nonempty intersection, such that $D_1\cup D_2$ is simply connected and let $q:D_1\cup D_2\to\mathbb{C}$ be a continuous function for which the Jacobian $J_q$ exists, such that $\mathop{\rm Im} q(z)=\mathop{\rm Im} z$ for all $z\in D_1\cup D_2$ and $q(D_1)\cup q(D_2)$ is simply connected. Then $J_q\neq 0$ if, and only if, $q$ is 1-1. \end{corollary} \begin{proof} It is an immediate consequence of Theorem \ref{thm_1} and Theorem \ref{thm_2}. \end{proof} \section{Harmonic mappings}\setcounter{equation}{0} In this section we apply the results obtained in the previous section to the theory of harmonic mappings. We start with the definition which will simplify our considerations. For a given set $D$ let \begin{equation}\label{set_B} \Lambda_y(D):=\left\{a\in\mathbb{R}:(D\cap \{z\in\mathbb{C}:\mathop{\rm Im} z=a\})\ \text{is a nonempty and connected set}\right\}. \end{equation} We will see the set $\Lambda_y$ is as much convenient in the following investigations as the set $P_y$, defined by \eqref{set_projection}, was in the previous section. 
Thus, we need the following lemma describing a connection between $P_y$ and $\Lambda_y$. \begin{lemma}\label{A_B_conection} Let $D_1$, $D_2$ be the domains convex in the direction of the real axis with nonempty intersection. Then $P_y(D_1\cap D_2)=\Lambda_y(D_1\cup D_2)$. \end{lemma} \begin{proof} Let $D_1$, $D_2$ be the domains convex in the direction of the real axis with nonempty intersection. We will show both inclusions $$P_y(D_1\cap D_2)\subset\Lambda_y(D_1\cup D_2)\quad \text{ and } \quad \Lambda_y(D_1\cup D_2)\subset P_y(D_1\cap D_2).$$ Assume first, that $a\in P_y(D_1\cap D_2)$. Then, there exists $w\in D_1\cap D_2$ such that $\mathop{\rm Im} w=a$. This means, that $$w\in (D_1\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\})\cap(D_2\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}).$$ Next, observe that the sets $D_1 \cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}$ and $D_2 \cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}$ are nonempty and connected, since both domains $D_1$ and $D_2$ are convex in the direction of the real axis, and in addition they have nonempty intersection. Thus, the set $$(D_1\cup D_2)\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}$$ is nonempty and connected, and consequently $a\in \Lambda_y(D_1\cup D_2)$. Now, we prove the second inclusion. Let $a\in \Lambda_y(D_1\cup D_2)$, then the set $$(D_1\cup D_2)\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}$$ is nonempty and connected. Next, observe that $$D_1\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}\quad \text{ and } \quad D_2\cap\{z\in\mathbb{C}:\mathop{\rm Im} z=a\}$$ are open and connected intervals since $D_1$ and $D_2$ are open and convex in the direction of the real axis. Hence, there exists $w\in D_1\cap D_2$, such that $\mathop{\rm Im} w=a$ and thus, $a\in P_y(D_1\cap D_2)$, which completes the proof. \end{proof} Now, we can apply results obtained in Section 2 to harmonic mappings. \begin{theorem}\label{thm_3} Let $f=h+\overline{g}$ be a harmonic function in $\mathbb{D}$ such that $J_f>0$ in $\mathbb{D}$. 
If $\Lambda_y((h-g)(\mathbb{D}))=\Lambda_y(f(\mathbb{D}))$ then the following statements are equivalent \begin{enumerate} \item[(1)]{$f$ is 1-1 mapping and $f(\mathbb{D})$ is a sum of two non-disjoint domains convex in the direction of the real axis.} \item[(2)]{$h-g$ is 1-1 analytic mapping and $(h-g)(\mathbb{D})$ is a sum of two non-disjoint domains convex in the direction of the real axis.} \end{enumerate} \end{theorem} \begin{proof} Let $f=h+\overline{g}$ be a harmonic function in the unit disk and such that $J_f$ is positive in $\mathbb{D}$, and let $\Lambda_y((h-g)(\mathbb{D}))=\Lambda_y(f(\mathbb{D}))$. We show that $(1)=>(2)$ and $(2)=>(1)$. $(1)=>(2)$. Assume that $f$ is 1-1 in the unit disk and that $f(\mathbb{D})=D_1\cup D_2$, where $D_1,D_2\subset\mathbb{C}$ are domains convex in the direction of the real axis with nonempty intersection. Then there exists $f^{-1}:D_1\cup D_2\to \mathbb{D}$ and the composition $q:=(h-g)\circ f^{-1}$ is well defined continuous function. Observe, that $q(w)=(h-g)(f^{-1}(w))=w-2\mathop{\rm Re} g(f^{-1}(w))$ for all $w\in D_1\cup D_2$. Moreover, by Lemma \ref{A_B_conection} we have \begin{equation}\label{proj_f} \Lambda_y(f(\mathbb{D}))=\Lambda_y(D_1\cup D_2)=P_y(D_1\cap D_2). \end{equation} Similarly, by Lemma \ref{A_B_conection} and by equality $q(D_1\cup D_2)=q(D_1)\cup q(D_2)$ we have \begin{equation}\label{proj_q} \Lambda_y((h-g)(\mathbb{D})) =\Lambda_y(q(D_1\cup D_2))=\Lambda_y(q(D_1)\cup q(D_2))=P_y(q(D_1)\cap q(D_2)). \end{equation} The formulae \eqref{proj_f} and \eqref{proj_q}, together with the hypothesis $\Lambda_y((h-g)(\mathbb{D}))=\Lambda_y(f(\mathbb{D}))$, yield \begin{equation}\label{proj_f_q} P_y(D_1\cap D_2)=P_y(q(D_1)\cap q(D_2)). \end{equation} Thus, the assumptions of Theorem \ref{thm_1} are satisfied and in consequence we obtain that $q$ is 1-1 in $\mathbb{D}$. Hence, $h-g$ is 1-1 in $\mathbb{D}$, since $f$ is. 
Additionally, both sets $q(D_1)$ and $q(D_2)$ are domains convex in the direction of the real axis, by Lemma \ref{shear_lemma}, and their intersection is not empty by \eqref{proj_f_q}. $(2)=>(1)$. Now, assume that $h-g$ is 1-1 in the unit disk and that $(h-g)(\mathbb{D})=\Omega_1\cup \Omega_2$, where $\Omega_1,\Omega_2\subset\mathbb{C}$ are domains convex in the direction of the real axis with nonempty intersection. Then there exists $(h-g)^{-1}:\Omega_1\cup \Omega_2\to \mathbb{D}$ and the composition $q:=f\circ (h-g)^{-1}$ is well defined continuous function. Observe, that we have $q(w)=f((h-g)^{-1}(w))=w+2\mathop{\rm Re} g((h-g)^{-1}(w))$ for all $w\in \Omega_1\cup \Omega_2$. Reasoning similar to the one used in previous case and the use of Lemma \ref{A_B_conection} give us equality \begin{equation}\label{proj_om_f_q} P_y(\Omega_1\cap \Omega_2)=P_y(q(\Omega_1)\cap q(\Omega_2)). \end{equation} Again, the assumptions of Theorem \ref{thm_1} are satisfied and in consequence we obtain that $q$ is 1-1 in $\mathbb{D}$, thus $f$ is 1-1 in $\mathbb{D}$, since $h-g$ is. Finally, $q(\Omega_1)$ and $q(\Omega_2)$ are domains convex in the direction of the real axis, by Lemma \ref{shear_lemma}, and their intersection is not empty by \eqref{proj_om_f_q}. \end{proof} As a consequence of Theorem \ref{thm_3} we obtain a generalization of Theorem \ref{shear_construction} of Clunie and Sheil-Small. \begin{theorem}\label{thm_4} Let $f=h+\overline{g}$ be a harmonic function in $\mathbb{D}$ such that $J_f>0$ in $\mathbb{D}$. 
If $(h-g)(\mathbb{D})$ and $f(\mathbb{D})$ are nonempty simply connected domains then the following statements are equivalent \begin{enumerate} \item[(1)]{$f$ is 1-1 mapping and $f(\mathbb{D})$ is a sum of two non-disjoint domains convex in the direction of the real axis.} \item[(2)]{$h-g$ is 1-1 analytic mapping and $(h-g)(\mathbb{D})$ is a sum of two non-disjoint domains convex in the direction of the real axis.} \end{enumerate} \end{theorem} \begin{proof} Observe, that if $f$ is 1-1 in $\mathbb{D}$ and $f(\mathbb{D})=D_1\cup D_2$, where $D_1,D_2\subset\mathbb{C}$ are domains convex in the direction of the real axis with nonempty intersection, then the function $$D_1\cup D_2\ni w\mapsto q_{f}(w):=(h-g)(f^{-1}(w))=w-2\mathop{\rm Re} g(f^{-1}(w))$$ is well-defined and continuous in $D_1\cup D_2$. The same is true if we assume that $h-g$ is 1-1 in $\mathbb{D}$ and $(h-g)(\mathbb{D})=\Omega_1\cup \Omega_2$, where $\Omega_1,\Omega_2\subset\mathbb{C}$ are domains convex in the direction of the real axis with nonempty intersection, that is the function $$D_1\cup D_2\ni w\mapsto q_{h-g}(w):=f((h-g)^{-1}(w))=w+2\mathop{\rm Re} g((h-g)^{-1}(w))$$ is well-defined and continuous in $\Omega_1\cup \Omega_2$. Since $(h-g)(\mathbb{D})$ and $f(\mathbb{D})$ are nonempty simply connected domains then by Theorem \ref{thm_2} and Lemma \ref{A_B_conection}, the proof follows from Theorem \ref{thm_3}. \end{proof} If one omits in Theorem \ref{thm_4} the assumption that both $f(\mathbb{D})$ and $(h-g)(\mathbb{D})$ are simply connected, then the Theorem \ref{thm_4} is no longer true which is shown in the following example. \begin{example}\rm Consider vertical shear of the rotated Koebe function with dilatation $\omega(z):=iz$. 
From the equations \begin{align} &h(z)-g(z)=\frac{z}{(1-iz)^2}\nonumber \\ &g^\prime(z)=izh^\prime(z)\nonumber \end{align} we get \begin{align} &h(z)=\frac{-6iz-3z^2+iz^3}{6(i+z)^3},\nonumber \\ &g(z)=\frac{3z^2+iz^3}{6(i+z)^3},\nonumber \end{align} and $$f(z)=h(z)+\overline{g(z)}=\frac{-6iz-3z^2+iz^3}{6(i+z)^3}+\overline{\left(\frac{3z^2+iz^3}{6(i+z)^3}\right)}.$$ Now, using transformation $$w=u+iv:=\frac{1+iz}{1-iz},$$ which maps the unit disk onto the right half-plane, i.e. $\{w\in\mathbb{C}:\mathop{\rm Re} w>0\}$ we get \begin{align} &h(z)-g(z)=\frac{1}{4i}(w^2-1),\nonumber \\ &h(z)+g(z)=\frac{1}{6i}(w^3-1),\nonumber \end{align} and consequently $$f(z)=\mathop{\rm Re}(h(z)+g(z))+i\mathop{\rm Im}(h(z)-g(z))=-\frac{1}{6}\mathop{\rm Im}(w^3-1)-\frac{i}{4}\mathop{\rm Re}(w^2-1).$$ After some calculations we obtain \begin{equation} \label{map_f} f(z)=-\frac{1}{6}v(3u^2-v^2)-\frac{i}{4}(u^2-v^2-1), \end{equation} where $u>0$ and $v\in \mathbb{R}$. Clearly, the function $h(z)-g(z)$ maps the unit disk onto the plane with the slit along the imaginary axis, more precisely onto $\mathbb{C}\setminus \{z\in\mathbb{C}: \mathop{\rm Im} z\geq \frac{1}{4}\text{ and }\mathop{\rm Re} z=0\}$, which is a simply connected domain. On the other hand, the formula \eqref{map_f}, allows us to find the image of the unit disk via the map $f(z)$, by studying which parts of the vertical lines of the complex plane belong to $f(\mathbb{D})$. First, observe that $\mathop{\rm Re} f(z)=0$ if and only if $v=0$ or $v^2=3u^2$. Thus, we have $\mathop{\rm Im} f(z)=\frac{1}{4}-\frac{u^2}{4}$, with $u>0$, if $v=0$ and $\mathop{\rm Im} f(z)=\frac{1}{4}+\frac{u^2}{2}$, with $u>0$, if $v^2=3u^2$, and consequently we get that the point $\frac{i}{4}$ do not belong to $f(\mathbb{D})$. Now, assume that $\mathop{\rm Re} f(z)=c$ with $c\neq 0$. 
Then, since $v\neq 0$, we have $u^2=\frac{v^2}{3}-\frac{2c}{v}$ and $$\mathop{\rm Im} f(z)=\frac{2v^3+3v+6c}{12v}, \quad\text{where } v\in(-\infty,0)\cup(0,+\infty).$$ If $c>0$ and $v\in (-\infty,0)$ then \begin{align} &\lim_{v\to -\infty} \frac{2v^3+3v+6c}{12v}=+\infty,\nonumber\\ &\lim_{v\to 0^-} \frac{2v^3+3v+6c}{12v}=-\infty,\nonumber \end{align} and the whole vertical line $w=c$ belongs to $f(\mathbb{D})$. Analogously, if $c<0$ and $v\in (0,+\infty)$ then \begin{align} &\lim_{v\to +\infty} \frac{2v^3+3v+6c}{12v}=+\infty,\nonumber\\ &\lim_{v\to 0^+} \frac{2v^3+3v+6c}{12v}=-\infty,\nonumber \end{align} and in that case the whole vertical line $w=c$ belongs to $f(\mathbb{D})$, too. Hence, we get $f(\mathbb{D})=\mathbb{C}\setminus\{\frac{i}{4}\}$, which is not a simply connected domain. The function $f$ fails to satisfy the assumptions of Theorem \ref{thm_3}, and straightforward calculations show that $f(\frac{\sqrt{3}}{3})=f(-\frac{\sqrt{3}}{3}) =\frac{3i}{8}$, thus $f$ is not univalent in $\mathbb{D}$. \end{example} \begin{remark} Recall that Theorem \ref{shear_construction} can be reformulated and it remains valid for a function convex in any fixed direction. Notice that our results can also be rewritten in this fashion. \end{remark} \end{document}
\begin{document} \title{Algebraic cycles and Tate classes\\ on Hilbert modular varieties} \author{Jayce R. Getz} \author{Heekyoung Hahn} \address{Department of Mathematics\\ Duke University\\ Durham, NC 27708} \email{[email protected]} \address{Department of Mathematics\\ Duke University\\ Durham, NC 27708} \email{[email protected]} \subjclass[2010]{Primary 11F41} \thanks{The authors are thankful for partial support provided by NSERC Discovery Grants. } \begin{abstract} Let $E/\mathbb{Q}$ be a totally real number field that is Galois over $\mathbb{Q}$, and let $\pi$ be a cuspidal, nondihedral automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$ that is in the lowest weight discrete series at every real place of $E$. The representation $\pi$ cuts out a ``motive'' $M_{\mathrm{\textrm{\'{e}t}}}(\pi^{\infty})$ from the $\ell$-adic middle degree intersection cohomology of an appropriate Hilbert modular variety. If $\ell$ is sufficiently large in a sense that depends on $\pi$ we compute the dimension of the space of Tate classes in $M_{\mathrm{\textrm{\'{e}t}}}(\pi^{\infty})$. Moreover if the space of Tate classes on this motive over all finite abelian extensions $k/E$ is at most of rank one as a Hecke module, we prove that the space of Tate classes in $M_{\mathrm{\textrm{\'{e}t}}}(\pi^{\infty})$ is spanned by algebraic cycles. \end{abstract} \maketitle \section{Introduction and a statement of the result}\label{intro} Let $E/\mathbb{Q}$ be a totally real number field of absolute degree $d$: \begin{equation}\label{d} d:=[E:\mathbb{Q}]. \end{equation} For each compact open subgroup $U \leq \mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2(\mathbb{A}^{\infty})=\mathrm{GL}_2(\mathbb{A}_E^{\infty})$ one has a Shimura variety $$ Y^U:=\mathrm{Sh}(\mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2,(\mathbb{C}-\mathbb{R})^d)^U. $$ Here as usual $\mathbb{A}$ (resp. 
$\mathbb{A}_E$) is the adeles of $\mathbb{Q}$ (resp. $E$), $\mathrm{R}es$ is the Weil restriction of scalars, and $\mathbb{A}^{\infty}$ (resp. $\mathbb{A}_E^{\infty}$) denotes the finite adeles of $\mathbb{Q}$ (resp. $E$). If $U$ is a neat subgroup then $Y^U$ is a quasi-projective (non-compact) smooth scheme over $\mathbb{Q}$. We denote by $X^U$ the Baily-Borel compactification of $Y^U$. It is a projective scheme over $\mathbb{Q}$ with isolated singularities. Any cohomology group of $Y^U$ or $X^U$ with coefficients in a ring $R$ comes equipped with an action of the Hecke algebra \mathbf Egin{align} C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U):=C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U,R) \end{align} of $U$-biinvariant compactly supported smooth functions with coefficients in $R$; it acts via correspondences. Let ${\sf p}i$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$, let $\ell$ be a prime, let $\overline{\mathbb{Q}}_{\ell}$ be an algebraic closure of $\mathbb{Q}_{\ell}$ and let $\iota:\textf{C}C \to \overline{\mathbb{Q}}_{\ell}$ be an isomorphism. One can use these Hecke operators to cut out a subrepresentation $M_{\mathrm{\textrm{\'{e}t}},\iota}^U({\sf p}i^{\infty})$ of the representation $H^d_{\mathrm{\textrm{\'{e}t}}}(Y^U \times \overline{\mathbb{Q}},\overline{\mathbb{Q}}_{\ell})$ of $\mathrm{Gal}_{\mathbb{Q}}$ that is ${\sf p}i^{\infty}$-isotypic\footnote{Here and below ${\sf p}i= {\sf p}i_{\infty} \otimes {\sf p}i^{\infty}$ where ${\sf p}i_{\infty}$ (resp.~${\sf p}i^{\infty}$) is an admissible representation of $\mathrm{GL}_2(E_{\infty})$ (resp.~$\mathrm{GL}_2(\mathbb{A}_E^{\infty})$).} under the Hecke algebra. Here $\mathrm{Gal}_k:=\mathrm{Gal}(\overline{\mathbb{Q}}/k)$ is the absolute Galois group of number field $k$ with respect to a fixed choice of algebraic closure $\overline{\mathbb{Q}}$ of $\mathbb{Q}$. 
For a precise definition of $M_{\mathrm{\textrm{\'{e}t}}, \iota}^U({\sf p}i^{\infty})$ we refer the reader to \S \ref{coho} below. We use the symbol $M$ because we think of this Galois representation as the \'etale realization of a motive, although we will not prove that this is the case. \mathbf Egin{rem} If ${\sf p}i$ is cuspidal, then the ${\sf p}i$-isotypic component of $H^j_{\mathrm{\textrm{\'{e}t}}}(Y^U \times \overline{\mathbb{Q}},\overline{\mathbb{Q}}_{\ell})$ is nonzero only if $j =d$ (see, for example, \cite{Harder} and \cite{GG}). If ${\sf p}i$ is noncuspidal, the ${\sf p}i$-isotypic component is explained in terms of either K\"ahler forms or Eisenstein series (see \cite{Harder}), and to address such classes would take us too far afield. \end{rem} In this paper, following the tradition of \cite{HLR}, \cite{MR} and \cite{RamaHil} our goal is to investigate how much of $M_{\mathrm{\textrm{\'{e}t}}, \iota}^U({\sf p}i^{\infty})$ is explained by the fundamental classes of algebraic cycles. Assume that $d$ is even. A suitable extension of the Tate conjecture \cite{Tate} to this non-compact setting is that for each finite extension $k/\mathbb{Q}$ the space $M_{\mathrm{\textrm{\'{e}t}}, \iota}^U({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k}$ is spanned by the classes of algebraic cycles on $Y^U \times k$. Here $(n)$ denotes the $n$-fold Tate twist. Harder, Langlands and Rapoport \cite{HLR} proved this when $d=2$ in the nondihedral case, Murty and Ramakrishnan \cite{MR} dealt with the dihedral\footnote{Recall that an automorphic representation ${\sf p}i$ of $\mathrm{GL}_2(\mathbb{A}_E)$ is \emph{dihedral} if there is a quadratic extension $L/E$ and a Hecke character $\chi:\mathbb{A}_L^{\times} \to \textf{C}C^{\times}$ such that ${\sf p}i=AI(\chi)$ is the automorphic induction of $\chi$.} case when $d=2$, and Ramakrishnan \cite{RamaHil} provided some results when $d=4$. 
We prove the following modest extension of the results of \cite{HLR}: \begin{thm}\label{thm-main} Let $U \leq \mathrm{GL}_2(\mathbb{A}_E^{\infty})$ be a compact open subgroup and let $\pi$ be nondihedral. Suppose that the rank of $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U, ss}(\pi^{\infty})(d/2)^{\mathrm{Gal}_{k}}$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$ module is at most $1$ for all abelian extensions $k/E$. If $\ell$ is sufficiently large in a sense depending on $\pi$ then $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U,ss}(\pi^{\infty})(d/2)^{\mathrm{Gal}_k}$ is spanned by algebraic cycles for all finite extensions $k/\mathbb{Q}$. \end{thm} Here $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U, ss}(\pi^{\infty})$ denotes the semisimplification of $M_{\mathrm{\textrm{\'{e}t}},\iota}^U(\pi^{\infty})$ as a $\mathrm{Gal}_{\mathbb{Q}}$-representation. \begin{remarks} \item In the course of proving Theorem \ref{thm-main} we compute $M_{\mathrm{\textrm{\'{e}t}}, \iota}^{U,ss}(\pi^{\infty})(d/2)^{\mathrm{Gal}_{k}}$ over sufficiently large finite extensions $k/\mathbb{Q}$ if $\pi$ is not dihedral (see Proposition \ref{one}). This proposition, and perhaps the main theorem, may be well-known to experts, but they have not appeared in the literature and seem a useful springboard to further investigations of Tate classes on Hilbert modular varieties and more general Shimura varieties. \item In order to prove our theorem we need to assume that the algebraic envelope of the Galois representation $\rho_{\pi,\iota}$ attached to $\pi$ by Blasius and Rogawski \cite{BlR} and Taylor \cite{T} is large. We can deduce this from the results of \cite{Dimi} provided that $\ell$ is sufficiently large, and this is the reason for our assumption that $\ell$ is sufficiently large. If one could strengthen the results of \cite{Dimi} as in \cite{Ribet}, then one could dispense with this assumption. 
Alternately, if one knew the automorphy (or perhaps even potential automorphy) of certain tensor product representations (compare \S \ref{sec-tens-prod}) one might place $\rho_{{\sf p}i,\iota}$ in a compatible system and then try to prove that the dimension of the space of Tate classes is independent of $\ell$. \end{remarks} We now outline the contents of this paper. In the next section we compute the set of one dimensional subrepresentations of tensor products of the standard representation of the algebraic group $\mathrm{GL}_2$. In \S \ref{sec-alg-env} and \S \ref{sec-tens-prod} we use this result together with some ideas of Serre and Ribet to compute the number of one dimensional subrepresentations of certain tensor products of rank two Galois representations. We set notation for Asai representations in \S \ref{sec-Asai} and state how the \'etale cohomology of Hilbert modular varieties is described as a Galois representation in terms of these Asai representations in \S \ref{coho}. We recall Hirzebruch-Zagier cycles and their twists in \S \ref{sec-twist} and \S \ref{sec-HZ} and also recall how their cohomological nontriviality is linked to Asai $L$-functions. Finally in \S \ref{sec-main} we put these pieces together to prove Theorem \ref{main}, which is a restatement of Theorem \ref{thm-main}. \mathbb{S}ection{Counting one dimensional constituents of representations of $\mathrm{GL}_2$} \label{sec-alg-reps} Let $R:\mathrm{GL}_{2} \to \mathrm{GL}_{2}$ denote the standard representation, where we regard $\mathrm{GL}_2$ as an algebraic group over a characteristic zero field. Each irreducible representation of $\mathrm{GL}_{2}$ is isomorphic to $\mathrm{Sym}^kR\otimes (\widetilde{\varepsilon}dge^2R)^{\otimes m}$ for some nonnegative integer $k$ and some integer $m$. This is the representation of highest weight $(k,m)$. 
In this representation the diagonal torus $\begin{pmatrix}a &0\\0& a^{-1}b\end{pmatrix}$ acts with weights \begin{equation}\label{km} a^kb^m, a^{k-2}b^{1+m}, \dots, a^{-k}b^{k+m}. \end{equation} \begin{lem} \label{even} For any $n \in \mathbb{Z}_{> 0}$ the number of one dimensional subrepresentations of $\otimes_{i=1}^{2n}R$ is equal to \begin{equation} \binom{2n}{n}-\binom{2n}{ n-1}. \end{equation} Each one dimensional subrepresentation is isomorphic to the irreducible representation of highest weight $(0,n)$, namely $\det(R)^{n}$. \end{lem} \begin{proof} Consider the binomial expansion \begin{align} (a+a^{-1}b)^{2n} =&\binom{2n}{0}a^{2n}+\binom{2n}{1}a^{2n-2}b+\cdots+\binom{2n}{n-1}a^{2}b^{n-1}+\binom{2n}{n}b^n\nonumber\\ &+\binom{2n}{n+1}a^{-2}b^{n+1}+\cdots+\binom{2n}{2n}a^{-2n}b^{2n}.\label{remaining} \end{align} With a little thought, one sees that the weights that occur in $R^{\otimes 2n}$ are precisely the weights in this binomial expansion, each occurring with multiplicity equal to the given binomial coefficients. In particular, the highest weights that occur are \begin{equation}\label{list} (2n, 0), (2n-2, 1), \dots, (2, n-1), (0,n) \end{equation} and, as recalled above, the diagonal torus acts with weights \begin{equation}\label{ith} a^{2n-2i}b^{i}, a^{2n-2(i+1)}b^{1+i}, \dots, b^n, \dots, a^{-2n+2i}b^{2n+i} \end{equation} in each representation of highest weight $(2n-2i,i)$. In particular, we see that any one dimensional subrepresentation is the representation of highest weight $(0,n)$, corresponding to $\det(R)^n$. We are left with computing how many times the highest weight $(0,n)$ occurs. First observe from \eqref{remaining} that the weight $b^n$ appears $\binom{2n}{n}$ times in total. Second observe from \eqref{ith} that it appears in each highest weight of the list \eqref{list}. 
Note that the weights appearing in the second half of the binomial expansion in \eqref{remaining} have already occurred in all the subrepresentations of highest weight not equal to $(0, n)$, namely $(2n, 0), (2n-2, 1), \dots, (2, n-1)$. Therefore by \eqref{ith} the number of copies of the weight $b^n$ appearing only in the highest weight $(0,n)$ is $$ \binom{2n}{n}-\binom{2n}{n-1}. $$ \end{proof} \begin{lem}\label{odd} For any $n \in \mathbb{Z}_{\geq 0}$ there are no one dimensional subrepresentations of $\otimes_{i=1}^{2n+1}R$. \end{lem} \begin{proof} As in the proof of Lemma \ref{even}, the weights that occur in $R^{\otimes (2n+1)}$ are precisely the weights occurring in the binomial expansion \begin{align} (a+a^{-1}b)^{2n+1} =&\binom{2n+1}{0}a^{2n+1}+\binom{2n+1}{1}a^{2n-1}b+\cdots+\binom{2n+1}{n}ab^{n}\nonumber\\ &+\binom{2n+1}{n+1}a^{-1}b^{n+1}+\cdots+\binom{2n+1}{2n+1}a^{-(2n+1)}b^{2n+1}. \end{align} Note that $R^{\otimes(2n+1)}$ does not have weights of the form $b^k$ for any $k$. This implies that $R^{\otimes(2n+1)}$ does not have one dimensional subrepresentations. \end{proof} \section{Algebraic envelopes of rank two Galois representations} \label{sec-alg-env} Let $F$ be a number field, let $\ell$ be a rational prime and let $\overline{\mathbb{Q}}_{\ell}$ be a choice of algebraic closure of $\mathbb{Q}_{\ell}$. Let $G$ be a $\overline{\mathbb{Q}}_{\ell}$-algebraic group. Recall that the \textbf{algebraic envelope} of an abstract group $A \leq G(\overline{\mathbb{Q}}_{\ell})$ is the smallest algebraic subgroup of $G$ whose $\overline{\mathbb{Q}}_{\ell}$-points contain $A$. Similarly the algebraic envelope of a representation $\rho:\mathrm{Gal}_F \to \mathrm{GL}_n(\overline{\mathbb{Q}}_{\ell})$ is the algebraic envelope of $\rho(\mathrm{Gal}_F)$. Here as above $\mathrm{Gal}_F$ is the absolute Galois group of $F$; all representations of this group will be assumed to be continuous. 
For a representation $\rho:\mathrm{Gal}_F \to \mathrm{GL}_n(\overline{\mathbb{Q}}_{\ell})$ we denote by $\overline{\rho} : \mathrm{Gal}_F\to \mathrm{GL}_n(\overline{\mathbb{F}}_{\ell})$ its reduction modulo $\ell$; it depends on the choice of a $\mathrm{Gal}_F$-stable $\overline{\mathbb{Z}}_{\ell}$-lattice (but only up to semi-simplification). \begin{lem} \label{lem-sl2} Suppose that $\overline{\rho}(\mathrm{Gal}_F)$ contains $\mathrm{SL}_2(\mathbb{F}_{\ell})$ for some $\ell\geq5$. Then the algebraic envelope of $\rho$ contains $\mathrm{SL}_2$. \end{lem} \begin{proof} Let $\ell\geq 5$ be such that $\overline{\rho}(\mathrm{Gal}_F)$ contains $\mathrm{SL}_2(\mathbb{F}_{\ell})$. It then follows from \cite[Theorem 2.1]{Ribet} that $\rho(\mathrm{Gal}_F)$ contains $\mathrm{SL}_2(\mathbb{Z}_{\ell})$. Thus the Lie algebra of $\mathrm{SL}_2(\mathbb{Z}_{\ell})$ is contained in the Lie algebra of the $\overline{\mathbb{Q}}_{\ell}$-points of the algebraic envelope of $\rho$ which implies by dimension considerations that the algebraic envelope of $\rho$ contains $\mathrm{SL}_2$. \end{proof} Let \begin{align} \label{rho-i} \rho_{ i}: \mathrm{Gal}_F \longrightarrow \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell}),\quad 1\leq i\leq n, \end{align} be a set of Galois representations. One has the tensor product \begin{align} \label{tens-prod} \otimes_{i=1}^{n}\rho_i: \mathrm{Gal}_F \longrightarrow \mathrm{Aut}\big(\otimes_{i=1}^{n}V_{i}\big)(\overline{\mathbb{Q}}_{\ell}), \end{align} where $V_{i}=\mathbb{Q}^2$ and we view $\mathrm{Aut}\big(\otimes_{i=1}^{n}V_{ i}\big)$ as an algebraic group over $\mathbb{Q}$ (or by base change as an algebraic group over $\overline{\mathbb{Q}}_{\ell}$). 
One has a natural homomorphism \mathbf Egin{equation}\label{phi} {\sf p}hi: \mathrm{Gal}_F \longrightarrow {\sf p}rod_{i=1}^{n}\mathbb{A}ut(V_i)(\overline{\mathbb{Q}}_{\ell}) \end{equation} such that $\otimes_{i=1}^{n} \rho_{ i}$ is the composite \mathbf Egin{align} \mathrm{Gal}_F \longrightarrow {\sf p}rod_{i=1}^{n}\mathbb{A}ut(V_i)(\overline{\mathbb{Q}}_{\ell}) \longrightarrow \mathbb{A}ut\big(\otimes_{i=1}^{n}V_i\big)(\overline{\mathbb{Q}}_{\ell}), \end{align} where the first map is ${\sf p}hi$ and the second is the tensor product. Here we are viewing $\mathrm{Aut}(V_i)$ for each $i$ as an algebraic group over $\mathbb{Q}$. \mathbf Egin{lem}\label{image_phi} Suppose that for each $j$ the group $\overline{\rho}_{ j}(\mathrm{Gal}_F)$ contains $\mathrm{SL}_2(\mathbb{F}_{\ell})$ and that for each $i \neq j$ there is no character $\chi:\mathrm{Gal}_F \to \mathrm{GL}_1(\overline{\mathbb{Q}}_{\ell})$ such that $$ \rho_{ i}\cong \rho_{ j} \otimes \chi. $$ Then the algebraic envelope of ${\sf p}hi(\mathrm{Gal}_F)$ contains ${\sf p}rod_{i=1}^{n}\mathrm{SL}_2$. \end{lem} \mathbf Egin{proof} Let $H$ be the algebraic envelope of ${\sf p}hi(\mathrm{Gal}_F)$ inside ${\sf p}rod_{k=1}^{n}\mathrm{Aut}(V_k)$. For each $i \neq j$ let \mathbf Egin{align}\label{proj} P_{i j} : {\sf p}rod_{k=1}^{n}\mathbb{A}ut(V_k) \longrightarrow \mathbb{A}ut(V_i)\times \mathbb{A}ut(V_j) \nonumber \end{align} be the natural projections. By Lemma \ref{lem-sl2} the projection of $P_{ij}(H)$ to either $\mathrm{Aut}(V_i)$ or $\mathrm{Aut}(V_j)$ contains $\mathrm{SL}_2$. By Goursat's lemma (in the context of algebraic groups or Lie algebras, compare \cite[Lemma 3.2]{Ribet} and \cite[Exercise 1.4.8]{Bourbaki}) and our assumption on the $\rho_{ i}$ together with the fact that all (algebraic) automorphisms of $\mathrm{SL}_2$ are inner we conclude that the projection $P_{ij}(H)$ contains $\mathrm{SL}_2 \times \mathrm{SL}_2$. 
Note that $\mathrm{SL}_2(\overline{\mathbb{Q}}_{\ell})$ is equal to its commutator subgroup as an abstract group. With this in mind, in spite of the fact that $\mathrm{SL}_2(\overline{\mathbb{Q}}_{\ell})$ is infinite, the proof of \cite[Lemma 3.3]{Ribet} is still valid in the present context and allows us to conclude the proof of the lemma. \end{proof} \mathbb{S}ection{Tensor products of Galois conjugates} \label{sec-tens-prod} Let $E/F$ be a Galois extension of number fields and let $\rho:\mathrm{Gal}_E \to \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell})$ be a Galois representation. We now investigate the number of one dimensional subrepresentations of the tensor product $\otimes_{\text{\sffamily{\bf\textsf{z}}}eta\in\mathrm{Gal}(E/F)} \rho^{\text{\sffamily{\bf\textsf{z}}}eta}$. For fixed $\ell$, define an equivalence relation $\mathbb{S}im$ on the set of $\ell$-adic Galois representations $\rho:\mathrm{Gal}_E \to \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell})$ by $\rho_{ 1}\mathbb{S}im \rho_{ 2}$ if $P\rho_{ 1}\cong P\rho_{ 2}$ as projective representations, where $P\rho_{ i}$ is the composite $$ \mathbf Egin{CD} P\rho_{ i}: \mathrm{Gal}_E@> {\rho_{i}}>>\mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell})@> {P}>>\textrm{PGL}_2(\overline{\mathbb{Q}}_{\ell}) \end{CD} $$ with $P$ the natural projection. Note that $\mathrm{Gal} (E/F)$ acts on the set of equivalence classes. For each $\rho : \mathrm{Gal}_E \longrightarrow \mathrm{GL}_2(\overline\mathbb{Q}_{\ell})$, let $\mathrm{Gal}(E/F)_{\rho}$ denote the stabilizer of the equivalence class of $\rho$. Thus there are characters $\chi(\xi):\mathrm{Gal}_E \to \mathrm{GL}_1(\overline{\mathbb{Q}}_{\ell})$ indexed by $\xi \in \mathrm{Gal}(E/F)_{\rho}$ such that $$ \rho^{\xi} \cong \rho \otimes \chi(\xi). $$ Thus \mathbf Egin{equation}\label{stable} \bigotimes_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\rho^{\xi} \cong \rho^{\otimes |\mathrm{Gal}(E/F)_{\rho}|}{\sf p}rod_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\chi(\xi). 
\end{equation} \mathbf Egin{prop}\label{one} Suppose that the algebraic envelope of $\rho$ contains $\mathrm{SL}_2$. Let $$ m=|\mathrm{Gal}(E/F)_{\rho}|. $$ Then for any finite extension $k/E$ the number of one dimensional subrepresentations of $$\otimes_{\text{\sffamily{\bf\textsf{z}}}eta\in\mathrm{Gal}(E/F)}\rho^{\text{\sffamily{\bf\textsf{z}}}eta}|_{\mathrm{Gal}_k}$$ is zero if $m$ is odd and otherwise is equal to $$ \left(\binom{m}{m/2}-\binom{m}{m/2-1}\right)^{[E:F]/m}. $$ In the latter case each one dimensional subrepresentation is isomorphic to \mathbf Egin{align*} {\sf p}rod_{\mu \in \mathrm{Gal}(E/F)/\mathrm{Gal}(E/F)_{\rho}}\left(\det(\rho)^{m/2}{\sf p}rod_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\chi(\xi)\right)^{\mu}\mathbf{B}igg|_{\mathrm{Gal}_k}. \end{align*} \end{prop} Before proving the proposition we state a corollary of this result and Lemma \ref{lem-sl2}: \mathbf Egin{cor}\label{one-K} Suppose that $\rho:\mathrm{Gal}_E \to \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell})$, $\ell \mathfrak{g}eq 5$, has the property that $\overline{\rho}(\mathrm{Gal}_E)$ contains $\mathrm{SL}_2(\mathbb{F}_{\ell})$. If there is a one dimensional subrepresentation of $\otimes_{\text{\sffamily{\bf\textsf{z}}}eta \in\mathrm{Gal}(E/F)} \rho^{\text{\sffamily{\bf\textsf{z}}}eta}$ then there is a subfield $E \mathfrak{g}eq K \mathfrak{g}eq F$ with $[E:K]=2$, $\mathrm{Gal}(E/K)=\langle \mathbb{S}igma \rangle$ and a character $\chi: \mathrm{Gal}_E \to \mathrm{GL}_1(\overline{\mathbb{Q}}_{\ell})$ such that $\rho^{\mathbb{S}igma} \cong \rho \otimes \chi$. 
{\sf q}ed \end{cor} \mathbf Egin{proof}[Proof of Proposition \ref{one}] Since $\mathrm{Gal}(E/F)$ acts on the set of equivalence classes of $\rho$, using the notation of \eqref{stable} we write {\allowdisplaybreaks \mathbf Egin{align*} \otimes_{\text{\sffamily{\bf\textsf{z}}}eta \in\mathrm{Gal}(E/F)} \rho^{\text{\sffamily{\bf\textsf{z}}}eta}=&\otimes_{\mu \in \mathrm{Gal}(E/F)/\mathrm{Gal}(E/F)_{\rho}}\left(\otimes_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\rho^{\xi}\right)^{\mu}\\ =&\otimes_{\mu \in \mathrm{Gal}(E/F)/\mathrm{Gal}(E/F)_{\rho}}\left(\rho^{\otimes |\mathrm{Gal}(E/F)_{\rho}|}{\sf p}rod_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\chi(\xi)\right)^{\mu}\\ =&\otimes_{\mu \in \mathrm{Gal}(E/F)/\mathrm{Gal}(E/F)_{\rho}}\left(\rho^{\otimes m}{\sf p}rod_{\xi \in \mathrm{Gal}(E/F)_{\rho}}\chi(\xi)\right)^{\mu}. \end{align*}} \noindent By Lemma \ref{image_phi} and this decomposition we see that the number of one dimensional subrepresentations of $\otimes_{\text{\sffamily{\bf\textsf{z}}}eta\in\mathrm{Gal}(E/F)}\rho^{\text{\sffamily{\bf\textsf{z}}}eta}$ is equal to the number of one dimensional subrepresentations of $\rho^{\otimes m}$ taken to the $|\mathrm{Gal}(E/F)|/m$ power. Hence the result follows from Lemma \ref{even} and Lemma \ref{odd}. \end{proof} \mathbb{S}ection{Asai $L$-functions} \label{sec-Asai} Let $E/F$ be an extension of number fields. Fix $n\mathfrak{g}eq 1$. 
One has the \emph{Asai representation} \mathbf Egin{align} \mathrm{As}_{E/F}:{}^L\mathrm{Res}_{E/F}\mathrm{GL}_n=\mathrm{GL}_n(\overline{\mathbb{Q}}_{\ell})^{\mathrm{Hom}_{F}(E,\overline{F})} \rtimes \mathrm{Gal}_F \longrightarrow \mathrm{GL}((\overline{\mathbb{Q}}_{\ell}^n)^{\otimes \mathrm{Hom}_{F}(E,\overline{F})}) \end{align} defined by stipulating that \mathbf Egin{align*} \mathrm{As}_{E/F}(((g_{\mathbb{S}igma})_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})},1))(\otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})}v_{\mathbb{S}igma} )=\otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})}g_{\mathbb{S}igma}v_{\mathbb{S}igma}\\ \mathrm{As}_{E/F}((1)_{\mathbb{S}igma \in \mathrm{Hom}_F(E,\overline{F})},\tau)(\otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E, \overline{F})}v_{\mathbb{S}igma})=\otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})}v_{\tau \circ \mathbb{S}igma}. \end{align*} A representation $$ \rho:\mathrm{Gal}_E \longrightarrow \mathrm{GL}_n(\overline{\mathbb{Q}}_{\ell}) $$ extends uniquely to a homomorphism $$ \rho:\mathrm{Gal}_F \longrightarrow {}^L\mathrm{Res}_{E/F}\mathrm{GL}_n $$ commuting with the projections to $\mathrm{Gal}_F$ on the $L$-group side. Thus to each such $\rho$ we can associate the representation $$ \mathrm{As}_{E/F}(\rho):=\mathrm{As}_{E/F}\circ\rho :\mathrm{Gal}_F \longrightarrow \mathrm{GL}((\overline{\mathbb{Q}}_{\ell}^{n})^{\otimes \mathrm{Hom}_{F}(E,\overline{F})}). $$ We note that for all field extensions $L \mathfrak{g}eq E$ one has $$ \mathrm{As}_{E/F}(\rho)\big|_{\mathrm{Gal}_L} \cong \otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})}\rho^{\mathbb{S}igma}\big|_{\mathrm{Gal}_L}. $$ Thus the Asai representation $\mathrm{As}_{E/F}(\rho)$ is a canonical extension of $\otimes_{\mathbb{S}igma \in \mathrm{Hom}_{F}(E,\overline{F})}\rho^{\mathbb{S}igma}$ to $\mathrm{Gal}_{F}$. 
\mathbb{S}ection{Certain cohomology groups}\label{coho} For the basic results on Shimura varieties used without further comment in this section we refer to the reader to \cite{Deligne}. We view the pair $(\mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2,(\textf{C}C-\mathrm{R}R)^d)$ as a Shimura datum in the usual manner \cite[\S 5.1]{GG}. Thus we have, for each compact open subgroup $U \leq \mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_{2}(\mathbb{A}^{\infty})=\mathrm{GL}_2(\mathbb{A}_E^{\infty})$ a (finite level) Shimura variety $$ Y^U=\mathrm{Sh}(\mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2,(\textf{C}C-\mathrm{R}R)^d)^U. $$ This is a quasi-projective scheme over $\mathbb{Q}$. If $U$ is neat, then $Y^U$ is smooth. We denote by $X^U$ the Bailey-Borel compactification of $Y^U$; it is a projective scheme over $\mathbb{Q}$ with isolated singularities. Consider the cohomology groups \mathbf Egin{align} M_{\mathrm{\textrm{\'{e}t}}}^U:&=\mathrm{Im}\left(H^{d}_{\mathrm{\textrm{\'{e}t}},c}(Y^U \times\overline{\mathbb{Q}},\overline{\mathbb{Q}}_{\ell}) \longrightarrow H^{d}_{\mathrm{\textrm{\'{e}t}}}(Y^U \times\overline{\mathbb{Q}},\overline{\mathbb{Q}}_{\ell})\right), \label{Met}\\ M_B^U:&=\mathrm{Im}\left(H^{d}_{B,c}(Y^U(\textf{C}C), \textf{C}C) \longrightarrow H^{d}_{B}(Y^U(\textf{C}C),\textf{C}C)\right),\label{MB} \end{align} where the subscript $B$ denotes the Betti or singular cohomology. These vector spaces over $\overline{\mathbb{Q}}_{\ell}$ and $\textf{C}C$, respectively, are endowed with actions of the Hecke algebras $$ C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U,\overline{\mathbb{Q}}_{\ell}) {\sf q}uad\textrm{and}{\sf q}uad C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}^{\infty}_E)//U,\textf{C}C), $$ respectively. 
Upon choosing an isomorphism $\iota: \textf{C}C \to \overline{\mathbb{Q}}_{\ell}$ one obtains canonical comparison isomorphisms \mathbf Egin{align} \label{comparison} M_{B}^U \longrightarrow M_{\mathrm{\textrm{\'{e}t}}}^U, \end{align} compatible with the action of Hecke operators. We use the symbol $M^U$ because we think of the objects above as motives, although we will not verify that they are motives in any rigorous sense. To ease notation, we will henceforth omit the $\overline{\mathbb{Q}}_{\ell}$ and $\textf{C}C$ from our notation for Hecke algebras; the coefficient ring will be clear from the context. We note that $M_B^U$ can be viewed as an $L^2$-cohomology group, that is, $$ M_B^U \cong H_{(2)}^d(Y^U(\textf{C}C),\textf{C}C) \cong IH^d(X^U(\textf{C}C),\textf{C}C) $$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module, where $IH$ denotes intersection cohomology with middle perversity \cite[\S 7.2]{GG}. Moreover, \mathbf Egin{align} M_{\mathrm{\textrm{\'{e}t}}}^U \cong IH_{\mathrm{\textrm{\'{e}t}}}^d(X^U \times \overline{\mathbb{Q}},\overline{\mathbb{Q}}_{\ell}) \end{align} as $\mathrm{Gal}_{\mathbb{Q}} \times C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-modules (this follows from the result above and comparison isomorphisms in the context of \'etale intersection cohomology \cite[\S 6.1]{FP}). For an admissible representation ${\sf p}i^{\infty}$ of $\mathrm{GL}_2(\mathbb{A}_{E}^{\infty})$ denote by $M^U_{B}({\sf p}i^{\infty}) \leq M^U_B$ the ${\sf p}i^{\infty}$-isotypic component under the Hecke algebra. Note that there is a decomposition $$ M_B^U=\bigoplus_{{\sf p}i}M_B^U({\sf p}i^{\infty}) $$ where the sum is over all automorphic representations ${\sf p}i$ of $\mathrm{GL}_2(\mathbb{A}_E)$ such that $H^{d}(\mathfrak{g}, U_{\infty}; {\sf p}i_{\infty})\neq 0$ and ${\sf p}i$ is either cuspidal or the determinant mapping followed by a character \cite[\S 7.2]{GG}. 
Here as usual, $\mathfrak{g}$ is the complexification of the Lie algebra of $\mathrm{GL}_2(E_{\infty})$ and $U_{\infty}=(\mathrm{R}R_{>0} \mathrm{SO}_2(\mathrm{R}R))^d$ (to see this use \cite{BoCa}). Applying the comparison isomorphisms in \'etale cohomology, the choice of $\iota$ induces an isomorphism \mathbf Egin{align} M_B^U({\sf p}i^{\infty}) \longrightarrow M_{\mathrm{\textrm{\'{e}t}},\iota}^U({\sf p}i^{\infty}):=\iota(M_B^U({\sf p}i^{\infty})). \end{align} If ${\sf p}i$ is a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$ with $H^{d}(\mathfrak{g}, U_{\infty}; {\sf p}i_{\infty})\neq 0$ denote by \mathbf Egin{align} \rho_{{\sf p}i,\iota}:\mathrm{Gal}_E \longrightarrow \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell}) \end{align} the associated Galois representation \cite{BlR, T}. It has the property that $\det(\rho_{{\sf p}i,\iota})$ is the cyclotomic character times a finite order character. The following theorem \cite{BrL} gives a description of $M^U_{\mathrm{\textrm{\'{e}t}},\iota}({\sf p}i^{\infty})$: \mathbf Egin{thm}[Brylinski-Labesse] As a $\mathrm{Gal}_{\mathbb{Q}} \times C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module one has that the semisimplifications of the representations $$ M_{\mathrm{\textrm{\'{e}t}},\iota}^U({\sf p}i^{\infty}){\sf q}uad \textrm{and} {\sf q}uad\mathrm{As}_{E/\mathbb{Q}}(\rho_{{\sf p}i,\iota}) \otimes {\sf p}i^{\infty U} $$ are isomorphic. \end{thm} Here the superscript $U$ means the vectors fixed by $U$. Thus $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U}({\sf p}i^{\infty})$ is of rank $2^{[E:\mathbb{Q}]}$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module. We also remark that by ``semisimplification'' we mean semisimplification as a $\mathrm{Gal}_{\mathbb{Q}}$-module. As in the introduction, we use a superscript ``${}^{ss}$'' to denote the semisimplification of a representation of $\mathrm{Gal}_{k}$ for number fields $k/\mathbb{Q}$. 
\mathbf Egin{prop}\label{one-motive} Assume that ${\sf p}i$ is not a dihedral representation, that $\ell$ is sufficiently large (in a sense depending on ${\sf p}i$), and let $m=|\mathrm{Gal}(E/\mathbb{Q})_{\rho_{{\sf p}i,\iota}}|$. For even $d$, there is an abelian extension $k$ of $E$ with the property that for all finite extensions $k'/k$ one has that \mathbf Egin{align} M_{\mathrm{\textrm{\'{e}t}},\iota}^{U,ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_{k'}} \end{align} has rank $0$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module if $m$ is odd and otherwise has rank \mathbf Egin{align} \label{dim23} \left(\binom{m}{m/2}-\binom{m}{m/2-1}\right)^{d/m}. \end{align} \end{prop} \noindent In other words, if $m$ is even, \eqref{dim23} is the rank of the space of Tate cycles as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module. \mathbf Egin{proof} By \cite[Proposition 3.8]{Dimi} for all but finitely many $\ell$ the reduction of the image of $\rho_{{\sf p}i,\iota}$ modulo $\ell$ contains $\mathrm{SL}_2(\mathbb{F}_{\ell})$. Thus the proposition follows from Proposition \ref{one} together with the fact that $\det(\rho_{{\sf p}i,\iota})$ is the cyclotomic character times a finite order character. \end{proof} \mathbb{S}ection{Twisting by characters}\label{sec-twist} As explained by Murty and Ramakrishnan \cite{MR}, there is another natural family of correspondences on $M_?^U$, $?\in \{\mathrm{\textrm{\'{e}t}}, B\}$, besides the Hecke correspondences, namely the twisting correspondences. In this section we recall their construction following the exposition of \cite[\S 9.3]{GG}. Fix an ideal $\mathfrak{c}\mathbb{S}ubset \mathcal{O}_E$ and denote the congruence subgroup of integral matrices of ``Hecke type" by $$ U_{1}(\mathfrak{c}):=\left\{\mathbf Egin{pmatrix}a &b\\c&d\end{pmatrix}\in\mathrm{GL}_2(\widehat{\mathcal{O}}_E) \,:\, d-1, \, c\in\mathfrak{c}\widehat{\mathcal{O}}_E \right\}. 
$$ Let $Y_{1}(\mathfrak{c})$ denote the corresponding Hilbert modular variety, that is $$ Y_{1}(\mathfrak{c}):=Y^{U_{1}(\mathfrak{c})}. $$ Similarly we use $X_{1}(\mathfrak{c})$ for its Baily--Borel compactification. Let $\theta : E^{\times}\backslash \mathbb{A}_E^{\times}\longrightarrow\mathbb{C}^{\times}$ be a finite order Hecke character with conductor $\mathfrak{b}$. To ease notation, we fix an isomorphism $\iota:\mathbb{C} \to \overline{\mathbb{Q}}_{\ell}$ and sometimes view $\theta$ as taking values in $\overline{\mathbb{Q}}_{\ell}$; in other words we sometimes identify $\theta$ and $\iota \circ \theta$. Suppose that $\mathfrak{b}\neq \mathcal{O}_E$. Let $b$ be a finite id\`{e}le with $[b]=\mathfrak{b}$. Define the fractional ideal $\Upsilon=b^{-1}\widehat{\mathcal{O}}_E$ of $\prod_{\mathfrak{p}_v|\mathfrak{b}}E_v\times\prod_{\mathfrak{p}_v\nmid\mathfrak{b}}\mathcal{O}_v$ by $$ \Upsilon:=\left\{t=(t_v)\in\prod_{\mathfrak{p}_v|\mathfrak{b}}E_v\times\prod_{\mathfrak{p}_v\nmid\mathfrak{b}}\mathcal{O}_v: \mathrm{ord}_v(t_v)\geq-\mathrm{ord}_v(b) \text{ whenever } \mathfrak{p}_v|\mathfrak{b}\right\}. $$ Let $\widetilde{\Upsilon}=b^{-1}\widehat{\mathcal{O}}_E/\widehat{\mathcal{O}}_E$ be a set of representatives for $\Upsilon$ modulo $\widehat{\mathcal{O}}_E=\prod_v\mathcal{O}_v$. Denote by $\theta_{\mathfrak{b}}:\widetilde{\Upsilon}\longrightarrow\mathbb{C}$ the map defined by setting $$ \theta_{\mathfrak{b}}(t)=\begin{cases} \theta(t)\quad \text{if } t\in \widetilde{\Upsilon}^{\times}:=\{\frac{x}{b} \in \widetilde{\Upsilon}: x \in \mathcal{O}_E^{\times}\}\\0\quad\text{otherwise.}\end{cases} $$ For $t\in \Upsilon$ define $u_t=u(t)\in \mathrm{GL}_2(\mathbb{A}_E)$ by $u(t)_v=\big(\begin{smallmatrix}1&0\\0&1\end{smallmatrix}\big)$ if $v\nmid \mathfrak{b}$ and $u(t)_v=\big(\begin{smallmatrix}1&t_v\\0&1\end{smallmatrix}\big)$ if $v|\mathfrak{b}$. 
Then there is a correspondence \mathbf Egin{equation}\label{corres} \mathbf Egin{CD} Y_{1}(\mathfrak{c}\mathfrak{b}^2) @>{\cdot u_t}>> Y_{1}(\mathfrak{c}\mathfrak{b}^2) @> {\sf p}i >> Y_{1}(\mathfrak{c}), \end{CD} \end{equation} where the second map ${\sf p}i$ is the projection map. If $\mathfrak{b}=\mathcal{O}_E$ then we replace this correspondence by the identity map $Y_{1}(\mathfrak{c}) \to Y_{1}(\mathfrak{c})$. Let $\textf{C}C_X$ be the locally constant sheaf with stalk $\textf{C}C$ on $X$ for topological spaces $X$. If $X$ is a quasi-projective scheme over $\mathbb{Q}$, $w$ is a place above a rational prime $\ell$, $\varpi_w$ is a uniformizer of $\mathcal{O}_w$, and $n$ is an integer, let ${(\mathcal{O}_w/{\varpi_w^n})}_X$ be the locally constant sheaf with stalk $\mathcal{O}_w/\varpi_w^n$ on the \'etale site of $X \times \overline{\mathbb{Q}}$. Assume that $w$ is chosen so that $\theta$ has values in $\mathcal{O}_w$. Then we have the following lemma (see \cite[Lemma 9.3]{GG}): \mathbf Egin{lem}\label{betty} For $\textbf{R} \in \{\textf{C}C,\mathcal{O}_w/\varpi_w^n\}$ and $t\in\Upsilon$, the mapping (which we denote by $\cdot u_t$) \mathbf Egin{equation}\label{ut} [g, v]\longrightarrow [g u_t, \theta (\det g)\theta_{\mathfrak{b}}(t)^{-1}v] \end{equation} gives a well-defined, canonical isomorphism $$ \textbf{R}_{Y_{1}(\mathfrak{c}\mathfrak{b}^2)}\longrightarrow \textbf{R}_{Y_{1}(\mathfrak{c}\mathfrak{b}^2)}. $$ Equivalently, the mapping $P={\sf p}i\circ (\cdot u_t)$ defines a mapping $$ P : \textbf{R}_{Y_{1}(\mathfrak{c}\mathfrak{b}^2)}\longrightarrow \textbf{R}_{Y_{1}(\mathfrak{c})}. $$ {\sf q}ed \end{lem} Here when $\mathfrak{b}=\mathcal{O}_E$ we set $\theta_{\mathfrak{b}}(t)=1$, let $u_t$ be the identity matrix and we replace $P$ by the identity mapping from $Y_{1}(\mathfrak{c})$ to itself. It may not be evident that this definition makes sense in the \'etale setting because it appears to depend on $\det(x_{\infty})$. 
To explain this, recall that for all $U \leq \mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2(\mathbb{A}^{\infty})$ the determinant $\det:\mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2 \to \mathrm{Res}_{E/\mathbb{Q}}\mathbb{G}_m$ induces a morphism of Shimura varieties $$ Y^U \longrightarrow {\sf p}i_0(Y^U) $$ where ${\sf p}i_0(Y^U)$ is the scheme of connected components of $Y^U$. Letting $E_+^{\times}$ denote the group of totally positive elements of $E^{\times}$, the definition of the lift of $P$ in fact only depends on the image of $\det(x)$ in $$ E^{\times} \mathrm{R}R^{[E:\mathbb{Q}]}_{>0} \backslash \mathbb{A}_E^{\times}/\det(U_{1}(\mathfrak{c}\mathfrak{b}^2))=E_+^{\times} \backslash \mathbb{A}_E^{\infty \times}/\det(U_{1}(\mathfrak{c}\mathfrak{b}^2))={\sf p}i_0(Y_{1}(\mathfrak{c}\mathfrak{b}^2))(\textf{C}C). $$ In other words, the coefficients $\theta(x)$ depend only on the connected component that $x$ lies in, and these are all defined over a suitable finite extension of $\mathbb{Q}$. Given a finite order Hecke character $\theta$ with conductor $\mathfrak{b}$, define the {\it twisting correspondence} $\mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)_*$ to be the disjoint union over $t\in \widetilde{\Upsilon}$ of the correspondences \eqref{corres}. In the case $\textbf{R}=\textf{C}C$ this yields a map \mathbf Egin{align} \mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)_*:M_B^{U_{1}(\mathfrak{c}\mathfrak{b}^2)} \longrightarrow M_B^{U_{1}(\mathfrak{c})}. \end{align} In the \'etale case after passing to the limit over $n$ and tensoring with $\overline{k}_w \cong \overline{\mathbb{Q}}_{\ell}$, where $k_w$ is the fraction field of $\mathcal{O}_{w}$, we obtain \mathbf Egin{align} \mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)_*:M_{\mathrm{\textrm{\'{e}t}}}^{U_{1}(\mathfrak{c}\mathfrak{b}^2)} \longrightarrow M_{\mathrm{\textrm{\'{e}t}}}^{U_{1}(\mathfrak{c})}. \end{align} These correspondences are compatible with the comparison isomorphisms \eqref{comparison}. 
As noted above, the component set is equipped with a canonical isomorphism $\pi_0(Y^U)(\mathbb{C}) = E^{\times}_+ \backslash \mathbb{A}_E^{\infty \times}/\det(U)$. We therefore have a composite map $$ \theta: Y_{1}(\mathfrak{c}\mathfrak{b}^2)(\mathbb{C}) \longrightarrow \pi_0(Y_{1}(\mathfrak{c}\mathfrak{b}^2))(\mathbb{C}) \longrightarrow \mathbb{C}. $$ The field of definition $E_{\theta}$ of $\theta$ is therefore well defined. Using the description of the $\mathrm{Gal}(\mathbb{C}/\mathbb{Q})$ action on $\pi_0(Y_{1}(\mathfrak{c}\mathfrak{b}^2))$ (see \cite[\S 13, p. 349]{Mi} for details) we see that it is an abelian extension of $E$ contained in the narrow ring class field of conductor $\mathfrak{c}\mathfrak{b}^2$. Using this fact, one checks the following lemma: \begin{lem} The map $\mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)$ may be viewed as a finite $\overline{\mathbb{Q}}_{\ell}$-linear combination of correspondences from $Y_{1}(\mathfrak{c}\mathfrak{b}^2) \times E_{\theta}$ to $Y_{1}(\mathfrak{c}) \times E_{\theta}$ that extend to correspondences on the Baily--Borel compactification. Thus $\mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)$ descends to a homomorphism $$ \mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)_*:(M_{\text{\'et}}^{U_{1}(\mathfrak{c}\mathfrak{b}^2)})^{\mathrm{Gal}_{E_{\theta}}} \longrightarrow (M_{\text{\'et}}^{U_{1}(\mathfrak{c})})^{\mathrm{Gal}_{E_{\theta}}}. 
$$ {\sf q}ed \end{lem} We note in addition that for each automorphic representation ${\sf p}i$ of $\mathrm{GL}_2(\mathbb{A}_E)$ the twisting correspondence induces push-forward and pull-back maps \mathbf Egin{align*} \mathcal{T}_{\theta}(\mathfrak{c} \mathfrak{b}^2)_*: M_B^{U_{1}(\mathfrak{c}\mathfrak{b}^2)}({\sf p}i^{\infty} \otimes \theta^{\infty}) & \longrightarrow M_B^{U_{1}(\mathfrak{c})}({\sf p}i^{\infty}) \\ \mathcal{T}_{\theta}(\mathfrak{c} \mathfrak{b}^2)^*:M_B^{U_{1}(\mathfrak{c})}({\sf p}i^{\infty}) & \longrightarrow M_B^{U_{1}(\mathfrak{c}\mathfrak{b}^2)}({\sf p}i^{\infty} \otimes \theta^{\infty}) \end{align*} and hence \mathbf Egin{align*} \mathcal{T}_{\theta}(\mathfrak{c} \mathfrak{b}^2)_*:M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c}\mathfrak{b}^2)}({\sf p}i^{\infty} \otimes \theta^{\infty})^{\mathrm{Gal}_{E_{\theta}}} &\longrightarrow M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c})}({\sf p}i^{\infty})^{\mathrm{Gal}_{E_{\theta}}}\\ \mathcal{T}_{\theta}(\mathfrak{c} \mathfrak{b}^2)^*:M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c})}({\sf p}i^{\infty} )^{\mathrm{Gal}_{E_{\theta}}} &\longrightarrow M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1} (\mathfrak{c}\mathfrak{b}^2)}({\sf p}i^{\infty} \otimes \theta^{\infty})^{\mathrm{Gal}_{E_{\theta}}} \end{align*} (compare \cite[Proposition 9.4]{GG}). \mathbb{S}ection{Twisted Hirzebruch-Zagier cycles} \label{sec-HZ} Our goal in this section is to define twisted Hirzebruch-Zagier cycles and give a criterion for their nontriviality. Suppose that $K \leq E$ is a quadratic subfield. Using the definitions from above we obtain a Shimura variety $$ Y_K^{U_K} $$ for all compact open subgroups $U_K \leq \mathrm{Res}_{K/\mathbb{Q}}\mathrm{GL}_2(\mathbb{A}^{\infty})$; it is again a quasi-projective scheme over $\mathbb{Q}$. 
Fix $U \leq \mathrm{Res}_{E/\mathbb{Q}}\mathrm{GL}_2(\mathbb{A}^{\infty})$, set $U_K:=U \cap \mathrm{Res}_{K/\mathbb{Q}}\mathrm{GL}_2(\mathbb{A}^{\infty})$, and consider the natural inclusion morphism \mathbf Egin{equation}\label{iota} Y_K^{U_K} \longrightarrow Y^U. \end{equation} It induces Gysin maps \mathbf Egin{align} H_{\mathrm{\textrm{\'{e}t}},c}^{0}(Y_K^{U_K} \times\overline{\mathbb{Q}},\,\overline{\mathbb{Q}}_{\ell}) &\longrightarrow H_{\mathrm{\textrm{\'{e}t}},c}^{d}(Y^U \times \overline{\mathbb{Q}},\,\overline{\mathbb{Q}}_{\ell}(d/2)),\label{Gys-et}\\ H_{B,c}^{0}(Y_K^{U_K} (\textf{C}C), \,\textf{C}C) &\longrightarrow H_{B,c}^{d}(Y^U (\textf{C}C),\,\textf{C}C(d/2)).\label{Gys-B} \end{align} The images admit natural maps to $M_{\mathrm{\textrm{\'{e}t}}}^U$ and $M_{B}^U$ respectively. We will denote the image of a fundamental class under the composition of \eqref{Gys-et} (resp. \eqref{Gys-B}) and the maps to $M_{?}^U(d/2)$ by \mathbf Egin{equation}\label{HZcycle} [Z^K] \in M_?^U(d/2) \end{equation} and refer to it as a {\it Hirzebruch-Zagier cycle}. Now for $\mathfrak{c}, \mathfrak{b} \mathbb{S}ubset\mathcal{O}_E$, let $U=U_{1}(\mathfrak{c} \mathfrak{b}^2)$ and let $\theta:E^{\times} \backslash \mathbb{A}_E^{\times} \to \textf{C}C^{\times}$ be a finite order Hecke character of conductor $\mathfrak{b}$. We let \mathbf Egin{equation}\label{tHZcycle} [Z^{K,\theta}]=\mathcal{T}_{\theta}(\mathfrak{c}\mathfrak{b}^2)_*[Z^K] \in M^{U_{1}(\mathfrak{c})}_?(d/2) \end{equation} and refer to it as a {\it twisted Hirzebruch-Zagier cycle}. Let ${\sf p}i$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$ satisfying $H^{d}(\mathfrak{g},U_{\infty};{\sf p}i_{\infty}) \neq 0$ and let $\mathfrak{c}$ be the conductor of ${\sf p}i$ (for unexplained notation, see \S \ref{coho}). 
Let $K \leq E$ be a quadratic subfield and let $\theta:E^{\times} \backslash \mathbb{A}_E^{\times} \to \mathbb{C}^{\times}$ be a finite order Hecke character of conductor $\mathfrak{b}$. We now state our basic criterion for the projection of $[Z^{K,\theta}]$ to $M_{\text{\'et}, \iota}^{U_{1}(\mathfrak{c})}(\pi^{\infty})(d/2)$ to be nontrivial. \begin{thm}\label{nontrivial} Suppose that $\pi$ is a base change of a cuspidal automorphic representation $\pi_K$ of $\mathrm{GL}_2(\mathbb{A}_K)$ with central character $\omega_K$ and that $\theta|_{\mathbb{A}_{K}^{\times}}=\omega_K \eta$, where $\langle \eta \rangle =\mathrm{Gal}(E/K)^{\wedge}$. Then $[Z^{K,\theta}]$ projects nontrivially onto $M_{\text{\'et},\iota}^{U_{1}(\mathfrak{c})}(\pi^{\infty})(d/2)$. \end{thm} \noindent The statement that $\langle \eta \rangle=\mathrm{Gal}(E/K)^{\wedge}$ is simply the statement that $\eta$ is the Hecke character of $K$ corresponding to $E/K$ by class field theory. \begin{proof} In view of the compatibility of the construction of $[Z^{K,\theta}]$ with the comparison isomorphism $M_B^{U_{1}(\mathfrak{c})}(\pi^{\infty}) \cong M_{\text{\'et},\iota}^{U_{1}(\mathfrak{c})}(\pi^{\infty})$ it suffices to show that under the given assumption $[Z^{K,\theta}]$ is nontrivial in $M_B^{U_{1}(\mathfrak{c})}(\pi^{\infty})(d/2)$. There is a canonical Hecke-equivariant isomorphism $$ M_B^{U_{1}(\mathfrak{c})}(d/2) \cong IH^d(X_{1}(\mathfrak{c})(\mathbb{C}),\mathbb{C}(d/2)) $$ (see the remark below \cite[Theorem 7.1]{GG}). 
Therefore it suffices to show that the canonical class attached to $[Z^{K,\theta}]$ in the intersection homology group $IH_d(X_{1}(\mathfrak{c})(\textf{C}C),\textf{C}C(d/2))$ constructed in \cite[Chapter 9]{GG} pairs nontrivially with the ${\sf p}i^{\infty}$-isotypic component of $IH^d(X_{1}(\mathfrak{c})(\textf{C}C),\textf{C}C(d/2))$ under the given assumption. By \cite[Theorem 10.1]{GG} the canonical class does pair nontrivially with the ${\sf p}i^{\infty}$-isotypic component and this completes the proof of the theorem. \end{proof} \mathbb{S}ection{Statement and proof of main theorem} \label{sec-main} Let $E/\mathbb{Q}$ be a totally real number field with \textit{even} degree $d$. Let ${\sf p}i$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$ with $H^d(\mathfrak{g},U_{\infty}; {\sf p}i^{\infty}) \neq 0$, and let $\rho_{{\sf p}i, \iota}:\mathrm{Gal}_E \to \mathrm{GL}_2(\overline{\mathbb{Q}}_{\ell})$ be the $\ell$-adic Galois representation attached to ${\sf p}i$ and the isomorphism $\iota:\textf{C}C \to \overline{\mathbb{Q}}_{\ell}$. We are interested in providing algebraic cycles to account for classes in $$ M_{\mathrm{\textrm{\'{e}t}},\iota}^{U,ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k} $$ where $k/\mathbb{Q}$ is a finite extension and $U \leq \mathrm{GL}_2(\mathbb{A}_E^{\infty})$ is a compact open subgroup. The Tate conjecture (suitably generalized to the non-projective case) implies that this space should be spanned by (classes) of algebraic cycles over $k$. If $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U, ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k} =0$, then it is trivially spanned by algebraic cycles. We deal with the next simplest case: \mathbf Egin{thm}\label{main} Let $U \leq \mathrm{GL}_2(\mathbb{A}_E^{\infty})$ be a compact open subgroup and let ${\sf p}i$ be nondihedral. 
Suppose that the rank of $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U, ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_{k}}$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$ module is at most $1$ for all abelian extensions $k/E$. If $\ell$ is sufficiently large in a sense depending on ${\sf p}i$ then $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U,ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k}$ is spanned by algebraic cycles for all finite extensions $k/\mathbb{Q}$. \end{thm} We will prove the theorem by showing that the only way for the space of Tate classes to be nonzero is if it is spanned by a Hirzebruch-Zagier cycle or one of its twists. \mathbf Egin{proof}[Proof of Theorem \ref{main}] Suppose that as a $C_{c}^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$ module, $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U,ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k}$ is of rank at most $1$ for all abelian extensions $k/E$. Then, assuming $\ell$ is sufficiently large, from Proposition \ref{one-motive} we have that it is of rank $0$ for all finite extensions $k/E$ or it is of rank $1$ for all sufficiently large finite extensions $k/E$. In the former case we have nothing to prove, so assume that it is of rank $1$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module for some $k/E$. From Proposition \ref{one-motive} we then have that $\mathrm{Gal}(E/\mathbb{Q})_{\rho_{{\sf p}i,\iota}}$ has order $2$. Let $\mathbb{Q}\leq K\leq E$ be the subfield of $E$ fixed by $\mathrm{Gal}(E/\mathbb{Q})_{\rho_{{\sf p}i,\iota}}$ and let $\mathrm{Gal}(E/K)=\langle\mathbb{S}igma\rangle$. Choose a character $\chi: \mathrm{Gal}_E \to \mathrm{GL}_1(\overline{\mathbb{Q}}_{\ell})$ such that $\rho_{{\sf p}i,\iota}^{\mathbb{S}igma} \cong \rho_{{\sf p}i ,\iota} \otimes \chi$. Applying \cite[Theorem 2]{LapRog} and our assumption that ${\sf p}i$ is not dihedral we can and do write $\chi=\mu\mu^{-\mathbb{S}igma}$ for some character $\mu$. 
Therefore $$ \rho_{{\sf p}i,\iota}\otimes (\mu\mu^{-\mathbb{S}igma})\cong\rho_{{\sf p}i,\iota}^{\mathbb{S}igma} $$which implies in turn that $$ \rho_{{\sf p}i,\iota}\otimes \mu\cong (\rho_{{\sf p}i,\iota}\otimes\mu)^{\mathbb{S}igma}. $$ This implies, by cyclic base change \cite[\S 2]{Langlands}, that there exists a cuspidal automorphic representation ${\sf p}i_0$ of $\mathrm{GL}_2(\mathbb{A}_K)$ such that $${\sf p}i \otimes \mu\cong{\sf p}i_{0 E},$$ where ${\sf p}i_{0 E}$ is the base change of ${\sf p}i_0$ to $E$. Choose a finite order character $\theta$ of $\mathbb{A}_E^{\times}$ such that $\theta|_{\mathbb{A}_K^{\times}}=\omega_{{\sf p}i_0}\mathrm{\textrm{\'{e}t}}a$, where $\omega_{{\sf p}i_0}$ is the central character of ${\sf p}i_0$ and $\mathrm{\textrm{\'{e}t}}a$ is the character attached to $E/K$ by class field theory (note that such a character exists by \cite[Lemma 2.1]{Hida}). Let $\mathfrak{c}_0$ denote the conductor of ${\sf p}i_{0 E}$. Then $[Z^{K,\theta}]$ projects nontrivially to $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c}_0)}({\sf p}i_{0 E}^{\infty})(d/2)$ by Theorem \ref{nontrivial}. By newform theory, the fact that ${\sf p}i_{0E}$ is a twist of ${\sf p}i$, and our assumptions, we have that $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c}_0),ss}({\sf p}i_{0E}^{\infty})(d/2)^{\mathrm{Gal}_k}$ is of rank $1$ for all sufficiently large number fields $k$ and any element of it is a generator for $M_{\mathrm{\textrm{\'{e}t}},\iota}^{U, ss}({\sf p}i_0^{\infty})(d/2)^{\mathrm{Gal}_k}$ as a $C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_E^{\infty})//U)$-module for any compact open subgroup $U \leq \mathrm{GL}_2(\mathbb{A}_E^{\infty})$. Now let $\mathfrak{c}$ be the conductor of ${\sf p}i$, let $k$ be a sufficiently large number field, and let ${\sf p}hi \in M_{\mathrm{\textrm{\'{e}t}},\iota}^{U_{1}(\mathfrak{c}),ss}({\sf p}i^{\infty})(d/2)^{\mathrm{Gal}_k}$ be a generator for this one-dimensional space. 
If $\mathfrak{b}$ denotes the conductor of $\mu$, we have that \begin{align} 0 \neq \mathcal{T}_{\mu}(\mathfrak{c} \mathfrak{b}^2)^*\phi \in M_{\text{\'et},\iota}^{U_{1}(\mathfrak{c} \mathfrak{b}^2),ss}(\pi_{0 E}^{\infty})(d/2)^{\mathrm{Gal}_k}. \end{align} Thus $$ \mathcal{T}_{\mu}(\mathfrak{c} \mathfrak{b}^2)_*\mathcal{T}_{\mu}(\mathfrak{c} \mathfrak{b}^2)^*\phi, $$ which is a scalar multiple of $\phi$, is the class of an algebraic cycle. Applying newform theory again we see that $M_{\text{\'et},\iota}^{U,ss}(\pi^{\infty})(d/2)^{\mathrm{Gal}_k}$ is spanned by algebraic cycles for all sufficiently large number fields $k$ and all compact open subgroups $U \leq \mathrm{GL}_2(\mathbb{A}_E^{\infty})$. Since there is a compatible action of $\mathrm{Gal}_{\overline{\mathbb{Q}}}$ on the space of algebraic cycles and the space of Tate classes, the theorem follows. \end{proof} \section*{Acknowledgement} The authors thank the referee for useful comments on this paper. \begin{thebibliography}{} \bibitem[BBD]{FP} A.~A.~Beilinson, J.~Bernstein, and P.~Deligne, \emph{Faisceaux pervers}, \textbf{Analyse et topologie sur les espaces singuliers}, Ast\'erisque {\bf 100}, 1982. \bibitem[BlR]{BlR} D.~Blasius and J.~D.~Rogawski, \emph{Motives for Hilbert modular forms}, Invent. Math. \textbf{114} (1993), 55--87. \bibitem[BoCa]{BoCa} A.~Borel and W.~Casselman, \emph{$L^2$-cohomology of locally symmetric manifolds of finite volume}, Duke Math.~J. {\bf 50} No.~3 (1983), 625--647. \bibitem[Bou]{Bourbaki} N.~Bourbaki, \textbf{Elements of Mathematics: Algebra I Chapters 1--3}, Springer, 2006. \bibitem[BrL]{BrL} J.-L. Brylinski and J.-P. Labesse, \emph{Cohomologie d'intersection et fonctions $L$ de certaines vari\'et\'es de Shimura}, Ann. Sci. ENS, \textbf{17} Issue 3 (1984), 361--412. \bibitem[Del]{Deligne} P.~Deligne, \emph{Travaux de Shimura}, S\'eminaire Bourbaki (1970/71), LNM {\bf 244} (1971), 123--165. 
\bibitem[Di]{Dimi} M.~Dimitrov, \emph{Galois representations modulo $p$ and cohomology of Hilbert modular varieties}, Ann. Sci. ENS, \textbf{38} Issue 4 (2005), 505--551. \bibitem[GG]{GG} J.~R.~Getz and M.~Goresky, \textbf{Hilbert modular forms with coefficients in intersection homology and quadratic base change}, Progress in Mathematics \textbf{298}, Birkh\"auser Verlag, 2012. \bibitem[Ha]{Harder} G.~Harder, \emph{Eisenstein cohomology of arithmetic groups. The case $\mathrm{GL}_2$}, Invent. Math., \textbf{89} No.~1 (1987), 37--118. \bibitem[HLR]{HLR} G.~Harder, R.~P.~Langlands, and M.~Rapoport, \emph{Algebraische Zyklen auf Hilbert-Blumenthal-Fl\"achen}, J. Reine Angew. Math. {\bf 366} (1986), 53--120. \bibitem[Hi]{Hida} H.~Hida, \emph{Non-critical values of adjoint $L$-functions for $\mathrm{SL}(2)$}, Proc. Symp. Pure Math. \textbf{66} Part I (1999), 123--175. \bibitem[LaR]{LapRog} E.~Lapid and J.~Rogawski, \emph{On twists of cuspidal representations of $\mathrm{GL}(2)$}, Forum Mathematicum {\bf 10} (1998), 175-197. \bibitem[L]{Langlands} R.~P.~Langlands, \textbf{Base Change for $\mathrm{GL}(2)$}, Annals of Mathematics Studies {\bf 96}, Princeton University Press, 1980. \bibitem[Mi]{Mi} J.~S.~Milne, \emph{Introduction to Shimura varieties}, Harmonic analysis, the trace formula and Shimura varieties, Clay Math. Proc. \textbf{4}, 2003. \bibitem[MR]{MR} V.~K.~Murty and D.~Ramakrishnan, \emph{Period relations and the Tate conjecture for Hilbert modular surfaces}, Invent. Math. \textbf{89} (1987), 319--345. \bibitem[Ra]{RamaHil} D.~Ramakrishnan, \emph{Algebraic cycles on Hilbert modular fourfolds and poles of $L$-functions}, \textbf{Algebraic groups and arithmetic}, Tata Inst. Fund. Res., Mumbai (2005), 221--274. \bibitem[Ri]{Ribet} K.~A.~Ribet, \emph{On $\ell$-adic representations attached to modular forms}, Invent. Math. \textbf{28} (1975), 245--275. 
\bibitem[T]{Tate} J.~Tate, \emph{Algebraic cycles and poles of zeta functions}, \textbf{Arithmetical Algebraic Geometry} (Proc. Conf. Purdue Univ., 1963), 93--110, Harper and Row, New York, 1965. \bibitem[Ta]{T} R.~Taylor, \emph{On Galois representations associated to Hilbert modular forms}, Invent. Math. \textbf{98} (1989), 265--280. \end{thebibliography} \end{document}
\begin{document} \title[FIDL-modules: representation and duality] {Modules with fusion and implication based over distributive lattices: representation and duality} \author[Ismael Calomino \and William J. Zuluaga Botero] {Ismael Calomino* \and William J. Zuluaga Botero**} \newcommand{\nlindent}{\newline\indent} \address{\llap{*\,}CIC and \newline\indent Departamento de Matem\'{a}ticas \newline\indent Facultad de Ciencias Exactas \newline\indent Universidad Nacional del Centro \newline\indent Tandil, ARGENTINA} \email{[email protected]} \address{\llap{**\,}Laboratoire J. A. Dieudonn\'{e} \newline\indent Universit\'{e} C\^ote d'Azur \newline\indent Nice, FRANCE \newline\indent and \newline\indent Departamento de Matem\'{a}ticas \newline\indent Facultad de Ciencias Exactas \newline\indent Universidad Nacional del Centro \newline\indent Tandil, ARGENTINA} \email{[email protected]} \thanks{This work was supported by the CONICET under Grant PIP 112-201501-00412} \subjclass[2010]{Primary 06D50, 06D05; Secondary 06D75} \keywords{Distributive lattice, module, Priestley-like duality} \begin{abstract} In this paper we study the class of {\it{modules with fusion and implication based over distributive lattices}}, or {\it{FIDL-modules}}, for short. We introduce the concepts of FIDL-subalgebra and FIDL-congruence as well as the notions of simple and subdirectly irreducible FIDL-modules. We give a bi-sorted Priestley-like duality for FIDL-modules and, moreover, as an application of such a duality, we provide a topological bi-spaced description of the FIDL-congruences. This result will allow us to characterize the simple and subdirectly irreducible FIDL-modules. \end{abstract} \maketitle \section{Introduction} \label{Introduction} Bounded distributive lattices with additional operators occur often as algebraic models of non-classical logics. 
This is the case of Boolean algebras which are the algebraic semantics of classical logic, Heyting algebras which model intuitionistic logic, BL-algebras which correspond to algebraic semantics of basic propositional logic (\cite{Hohle}), MTL-algebras which are the algebraic semantics of the basic fuzzy logic of left-continuous t-norms (\cite{Esteva-Godo,Cabrer-Celani}), Modal algebras which model propositional modal logics (\cite{Chagrov,Venema}), to name a few. In all these cases, the binary operations $\vee$ and $\wedge$ model logical disjunction and conjunction and the additional operations are usually interpretations of other logical connectives such as the modal necessity ($\Box$) or modal possibility ($\Diamond$), or various types of implication. All these operations has as a common property: the preservation of some part of the lattice structure, for example, the necessity modal operator satisfies the conditions $\Box1=1$ and $\Box(x\wedge y)=\Box (x) \wedge \Box (y) $, or the possibility modal operator $\Diamond 0 =0$ and $\Diamond (x \vee y)= \Diamond (x) \vee \Diamond (y)$. In some sense, the aforementioned may suggest that these ideas can be treated as a more general phenomenon which can be studied by employing tools of universal algebra. Some papers in which this approach is used are \cite{Goldblatt} and \cite{Stokkermans}. Nevertheless, in an independent way, a more concrete treatment of the preservation of the lattice structure by two additional connectives in a distributive lattice leads to the introduction of the class of distributive lattices with fusion and implication in \cite{Celani1}, which encompasses all the algebraic structures mentioned before. The aim of this paper is to introduce the class of {\it{modules with fusion and implication based over distributive lattices}}, for short, {\it{FIDL-modules}}. 
The FIDL-modules generalize both distributive lattices with fusion and implication and modal distributive lattices, giving a different approach to study these structures. A bi-sorted Priestley-like duality is developed for FIDL-modules, extending the dualities given in \cite{Celani1} for distributive lattices with fusion and implication and in \cite{Urquhart} for algebras of relevant logics. This duality enables us to describe the congruences of a FIDL-module and also to give a topological characterization of the simple and subdirectly irreducible FIDL-modules. The paper is organized as follows. In Section \ref{Preliminaries} we give some definitions and introduce the notations which are needed for the rest of the paper. In Section \ref{FIDL-modules} we introduce the class of modules with fusion and implication based over distributive lattices, or simply FIDL-modules. Also the concept of FIDL-subalgebra is developed and studied. In Section \ref{Representation of FIDL-modules} we study the notion of FIDL-homomorphism and we exhibit a representation theorem for FIDL-modules by means of relational structures. In Section \ref{Topological duality for FIDL-modules} we use the representation theorem and together with a suitable extension of the Priestley duality, we obtain a duality for FIDL-modules as certain topological bi-spaces. Finally, in Section \ref{Congruences of FIDL-modules} we introduce the notion of congruence of FIDL-modules and as an application of the duality, we obtain a topological bi-spaced description for the simple and subdirectly irreducible FIDL-modules. \section{Preliminaries} \label{Preliminaries} Given a poset $\langle X, \leq \rangle$, a subset $U \subseteq X$ is said to be {\it{increasing}} ({\it{decreasing}}), if for every $x,y \in X$ such that $x \in U$ ($y \in U$) and $x \leq y$, then $y \in U$ ($x \in U$). The set of all increasing subsets of $X$ is denoted by $\mathcal{P}_{i}(X)$. 
For each $Y \subseteq X$, the increasing (decreasing) set generated by $Y$ is $[Y)=\{ x \in X \colon \exists y \in Y (y \leq x) \}$ ($(Y]=\{ x \in X \colon \exists y \in Y (x \leq y) \}$). If $Y=\{y\}$, then we will write $[y)$ and $(y]$ instead of $[\{y\})$ and $(\{y\}]$, respectively. Given a bounded distributive lattice ${\bf{A}} = \langle A, \vee, \wedge, 0, 1 \rangle$, a set $F \subseteq A$ is called a {\it{filter}} if $1 \in F$, $F$ is increasing, and if $a,b \in F$, then $a \wedge b \in F$. The {\it{filter generated by a subset $X \subseteq A$}} is the set \begin{equation*} {\rm{Fig}}_{\bf{A}}(X)= \{ x \in A \colon \exists x_{1}, \hdots ,x_{n} \in X {\hspace{0.1cm}} {\text{such that}} {\hspace{0.1cm}} x_{1} \wedge \hdots \wedge x_{n} \leq x \}. \end{equation*} If $X=\{a\}$, then ${\rm{Fig}}_{\bf{A}}(\{a\}) = [a)$. Denote by ${\rm{Fi}}(\textbf{A})$ the set of all filters of ${\bf{A}}$. A proper filter $P$ is {\it{prime}} if for every $a,b \in A$, $a \vee b \in P$ implies $a \in P$ or $b \in P$. We write $\mathcal{X}(\bf{A})$ for the set of all prime filters of $\bf{A}$. Similarly, a set $I \subseteq A$ is called an {\it{ideal}} if $0 \in I$, $I$ is decreasing, and if $a,b \in I$, then $a \vee b \in I$. Then the {\it{ideal generated by a subset $X \subseteq A$}} is the set \begin{equation*} {\rm{Idg}}_{\bf{A}}(X)= \{ x \in A \colon \exists x_{1}, \hdots ,x_{n} \in X {\hspace{0.1cm}} {\text{such that}} {\hspace{0.1cm}} x \leq x_{1} \vee \hdots \vee x_{n} \}. \end{equation*} In particular, if $X=\{a\}$, then ${\rm{Idg}}_{\bf{A}}(\{a\}) = (a]$. Denote by ${\rm{Id}}(\textbf{A})$ the set of all ideals of ${\bf{A}}$. Let $\beta_{\bf{A}} \colon A \to \mathcal{P}_{i}(\mathcal{X}(\bf{A}))$ be the map defined by $\beta_{\bf{A}}(a)=\{ P \in \mathcal{X}({\bf{A}}) \colon a \in P\}$. 
Then the family $\beta_{\bf{A}}[A]=\{ \beta_{\bf{A}}(a) \colon a \in A\}$ is closed under unions, intersections, and contains $\mathrm{e}mptyset$ and $A$, i.e., it is a bounded distributive lattice. Moreover, $\beta_{\bf{A}}$ establishes an isomorphism between ${\bf{A}}$ and $\beta_{\bf{A}}[A]$. A {\it{Priestley space}} is a triple $\langle X, \leq, \tau \rangle$ where $\langle X, \leq \rangle$ is a poset and $\langle X, \tau \rangle$ is a compact totally order-disconnected topological space. A morphism between Priestley spaces is a continuous and monotone function between them. If $\langle X, \leq, \tau \rangle$ is a Priestley space, then the family of all clopen increasing sets is denoted by $\mathcal{C}(X)$, and it is well known that $\mathcal{C}(X)$ is a bounded distributive lattice. The Priestley space of a bounded distributive lattice ${\bf{A}}$ is the triple $\langle \mathcal{X}({\bf{A}}), \subseteq_{\bf{A}}, \tau_{\bf{A}} \rangle$, where $\tau_{\bf{A}}$ is the topology generated by taking as a subbase the family $\{ \beta_{\bf{A}}(a) \colon a \in A \} \cup \{ \beta_{\bf{A}}(a)^{c} \colon a \in A \}$, where $\beta_{\bf{A}}(a)^{c} = \mathcal{X}({\bf{A}}) - \beta_{\bf{A}}(a)$. Therefore, ${\bf{A}}$ and $\mathcal{C}(\mathcal{X}({\bf{A}}))$ are isomorphic. If $\langle X, \leq, \tau \rangle$ is a Priestley space, then the map $\mathrm{e}psilon_{X} \colon X \to \mathcal{X}(\mathcal{C}(X))$ defined by $\mathrm{e}psilon_{X}(x) = \{ U \in \mathcal{C}(X) \colon x \in U \}$, for every $x \in X$, is a homeomorphism and an order-isomorphism. 
On the other hand, if $Y$ is a closed set of $\mathcal{X}({\bf{A}})$, then the relation \begin{equation} \label{congruence-closed} \theta(Y)=\{(a,b)\in A\times A \colon \beta_{\textbf{A}}(a)\cap Y = \beta_{\textbf{A}}(b) \cap Y \} \mathrm{e}nd{equation} is a congruence of ${\bf{A}}$ and the correspondence $Y\rightarrow \theta(Y)$ establishes an anti-isomorphism between the lattice of closed subsets of $\mathcal{X}({\bf{A}})$ and the lattice of congruences of $\textbf{A}$. If $h \colon A \to B$ is a homomorphism between bounded distributive lattices ${\bf{A}}$ and ${\bf{B}}$, then the map $h^{*} \colon \mathcal{X}({\bf{B}}) \to \mathcal{X}({\bf{A}})$ defined by $h^{*}(P)=h^{-1}(P)$, for each $P \in \mathcal{X}({\bf{B}})$, is a continuous and monotone function. Conversely, if $\langle X, \leq_{X}, \tau_{X} \rangle$ and $\langle Y, \leq_{Y}, \tau_{Y} \rangle$ are Priestley spaces and $f \colon X \to Y$ is a continuous and monotone function, then the map $f^{*} \colon \mathcal{C}(Y) \to \mathcal{C}(X)$ defined by $f^{*}(U)=f^{-1}(U)$, for each $\mathcal{C}(Y)$, is a homomorphism between bounded distributive lattices. Furthermore, there is a duality between the algebraic category of bounded distributive lattices with homomorphisms and the category of Priestley spaces with continuous and monotone functions (\cite{Priestley,CLP,Cignoli}). \section{FIDL-modules} \label{FIDL-modules} In this section we present the class of \mathrm{e}mph{modules with fusion and implication based over distributive lattices}, or \mathrm{e}mph{FIDL-modules}, for short. These structures can be considered as bi-sorted distributive lattices endowed with two operations which preserve some of the lattice structure. We introduce the notion of FIDL-subalgebra and we exhibit a characterization of those in terms of some relations. \begin{definition} \label{def_FIDL-modules} Let ${\bf{A}}$, ${\bf{B}}$ be two bounded distributive lattices. 
A structure $\langle {\bf{A}}, {\bf{B}}, f \rangle$ is called a {\rm{FDL-module}}, if $f \colon A \times B \to A$ is a function such that for every $x,y \in A$ and every $b,c \in B$ the following conditions hold: \begin{itemize} \item[(F1)] $f(x \vee y, b)=f(x,b) \vee f(y,b)$, \item[(F2)] $f(x, b \vee c)=f(x,b) \vee f(x,c)$, \item[(F3)] $f(0,b)=0$, \item[(F4)] $f(x,0)=0$. \mathrm{e}nd{itemize} A structure $\langle {\bf{A}}, {\bf{B}}, i \rangle$ is called an {\rm{IDL-module}}, if $i \colon B \times A \to A$ is a function such that for every $x,y \in A$ and every $b,c \in B$ the following conditions hold: \begin{itemize} \item[(I1)] $i(b, x \wedge y)=i(b,x) \wedge i(b,y)$, \item[(I2)] $i(b \vee c, x)=i(b,x) \wedge i(c,x)$, \item[(I3)] $i(b,1)=1$. \mathrm{e}nd{itemize} Moreover, a structure $\mathcal{M}=\langle {\bf{A}}, {\bf{B}}, f, i \rangle$ is called a {\rm{FIDL-module}}, if $\langle {\bf{A}}, {\bf{B}}, f \rangle$ is a FDL-module and $\langle {\bf{A}}, {\bf{B}}, i \rangle$ is an IDL-module. \mathrm{e}nd{definition} \begin{remark} \label{Fusion and impliction as unary operations} Let $\mathcal{M}$ be a FIDL-module. Then the function $f$ determines and it is determined by a unique family $\mathcal{F}_{\bf{B}} = \{ f_{b} \colon A \to A \mid b \in B \}$ of unary operations on $\bf{A}$ such that for every $x,y \in A$ and every $b,c \in B$ the following conditions hold: \begin{itemize} \item[(F1')] $f_{b}(x \vee y)=f_{b}(x) \vee f_{b}(y)$, \item[(F2')] $f_{b \vee c}(x)=f_{b}(x) \vee f_{c}(x)$, \item[(F3')] $f_{b}(0)=0$, \item[(F4')] $f_{0}(x)=0$. 
\mathrm{e}nd{itemize} Analogously, the function $i$ determines and it is determined by a unique family $\mathcal{I}_{\bf{B}} = \{ i_{b} \colon A \to A \mid b \in B \}$ of unary operations on $\bf{A}$ such that for every $x,y \in A$ and every $b,c \in B$ the following conditions hold: \begin{itemize} \item[(I1')] $i_{b}(x \wedge y)=i_{b}(x) \wedge i_{b}(y)$, \item[(I2')] $i_{b \vee c}(x)=i_{b}(x) \wedge i_{c}(x)$, \item[(I3')] $i_{b}(1)=1$. \mathrm{e}nd{itemize} Hence the FIDL-module $\mathcal{M}$ is equivalent to the structure $\langle {\bf{A}}, \mathcal{F}_{\bf{B}}, \mathcal{I}_{\bf{B}} \rangle$. Therefore, along this paper we will use the families $\mathcal{F}_{\bf{B}}$ and $\mathcal{I}_{\bf{B}}$ and its corresponding functions $f$ and $i$ indistinctly. \mathrm{e}nd{remark} The following are important examples of FIDL-modules. \begin{example} An algebra $\langle {\bf{A}}, \circ, \to \rangle$ is a {\it{bounded distributive lattice with fusion and implication}} (\cite{Celani1,Cabrer-Celani}), if ${\bf{A}}$ is a bounded distributive lattice and $\circ$ and $\to$ are binary operations defined on ${\bf{A}}$ such that for all $x,y,z \in A$ the following conditions hold: \begin{enumerate} \item $x \circ (y \vee z) = (x \circ y) \vee (x \circ z)$, \item $(x \vee y) \circ z = (x \circ z) \vee (y \circ z)$, \item $x \circ 0 = 0 \circ x = 0$, \item $x \to 1 = 1$, \item $(x \to y) \wedge (x \to z) = x \to (y \wedge z)$, \item $(x \to z) \wedge (y \to z) = (x \vee y) \to z$. \mathrm{e}nd{enumerate} Notice that if $\mathcal{M}$ is a FIDL-module such that $B=A$ and we consider the functions $x \circ_{f} y = f(x,y)$ and $x \to_{i} y = i(x,y)$, then $\langle {\bf{A}}, \circ , \to \rangle$ is a bounded distributive lattice with fusion and implication. Moreover, if $\mathcal{M}$ satisfies the condition $f(x,y) \leq z$ if and only if $x \leq i(y,z)$, then the structure $\langle {\bf{A}}, \circ , \to \rangle$ is a residuated lattice (\cite{JipsenTsinakis}). 
\mathrm{e}nd{example} \begin{example} Recall that an algebra $\langle {\bf{A}}, \Box, \Diamond \rangle$ is a {\it{modal distributive lattice}}\footnote{Also called in \cite{Petrovich} \mathrm{e}mph{distributive lattices with join and meet-homomorphisms}.}, or {\it{$\Box \Diamond$-lattice}}, if ${\bf{A}}$ is a bounded distributive lattice and $\Box$ and $\Diamond$ are unary operations defined on ${\bf{A}}$ such that for every $x,y \in A$ we have $\Box 1 = 1$, $\Box(x \wedge y)= \Box(x) \wedge \Box(y)$, $\Diamond 0 = 0$ and $\Diamond (x \vee y) = \Diamond (x) \vee \Diamond (y)$ (\cite{Chagrov,Venema,C2005}). If $\mathcal{M}$ is a FIDL-module and $B=\{0,1\}$, we can consider the functions $\Diamond_{f} (x) = f(x,1)$ and $\Box_{i} (x) = i(1,x)$ such that $\langle {\bf{A}}, \Diamond, \Box \rangle$ is a $\Box \Diamond$-lattice. \mathrm{e}nd{example} \begin{example} Let $\langle {\bf{A}}, \to \rangle$ be a Heyting algebra, where ${\bf{A}}$ is its bounded lattice reduct. Let $X$ be a non-empty set and let ${\bf{A}}^{X} = \langle A^{X}, \vee, \wedge, 0, 1 \rangle$ be the bounded distributive lattice of functions from $X$ to $A$ with the operations defined pointwise. Then, by following the notation of Remark \ref{Fusion and impliction as unary operations}, if we consider the families of functions $\mathcal{F}_{\bf{A}}=\{f_{a} \colon A^{X} \to A^{X} \mid a \in A\}$ and $\mathcal{I}_{\bf{A}}=\{i_{a} \colon A^{X} \to A^{X} \mid a \in A\}$ defined for every $a \in A$ by $f_{a}(g)(x) = a \wedge g(x)$ and $i_{a}(g)(x) = a \to g(x)$, respectively, it is the case that $\langle {\bf{A}}^{X}, \mathcal{F}_{\bf{A}}, \mathcal{I}_{\bf{A}} \rangle$ is a FIDL-module. \mathrm{e}nd{example} The following results are inspired by \cite{Celani1}. \begin{prop} \label{propo_1} Let $\mathcal{M}$ be a FIDL-module. Then for every $x,y \in A$ and every $b,c \in B$, if $x \leq y$ and $b \leq c$, then $f(x,b) \leq f(y,c)$ and $i(c,x) \leq i(b,y)$. 
\end{prop} \begin{proof} Since $y=y \vee x$ and $c=b \vee c$, then by (F1) and (F2) of Definition \ref{def_FIDL-modules} \begin{equation*} f(y,c)=f(y \vee x, b \vee c)=f(y,b) \vee f(y,c) \vee f(x,b) \vee f(x,c) \geq f(x,b), \end{equation*} i.e., $f(x,b) \leq f(y,c)$. Analogously, as $x= x \wedge y$, by (I1) and (I2) of Definition \ref{def_FIDL-modules} we have \begin{equation*} i(c,x)=i(b \vee c, x \wedge y)=i(b,x) \wedge i(b,y) \wedge i(c,x) \wedge i(c,y) \leq i(b,y) \end{equation*} and $i(c,x) \leq i(b,y)$. \end{proof} Let $\mathcal{M}$ be a FIDL-module. Let $G \in {\rm{Fi}}(\bf{A})$ and $H \in {\rm{Fi}}(\bf{B})$. We define the following subsets: \begin{equation*} f(G,H)=\{x \in A \colon \exists (g,h) \in G \times H {\hspace{0.1cm}} {\text{such that}} {\hspace{0.1cm}} f(g,h) \leq x \} \end{equation*} and \begin{equation*} i(H,G)=\{x \in A \colon \exists (h,g) \in H \times G {\hspace{0.1cm}} {\text{such that}} {\hspace{0.1cm}} g \leq i(h,x) \}. \end{equation*} \begin{prop} Let $\mathcal{M}$ be a FIDL-module. If $G \in {\rm{Fi}}(\bf{A})$ and $H \in {\rm{Fi}}(\bf{B})$, then $f(G,H), i(H,G) \in {\rm{Fi}}(\bf{A})$. \end{prop} \begin{proof} We prove that $f(G,H) \in {\rm{Fi}}(\bf{A})$. It is clear that $1 \in f(G,H)$ and $f(G,H)$ is increasing. If $x,y \in f(G,H)$, then there exist $(g,h), (\hat{g}, \hat{h}) \in G \times H$ such that $f(g,h) \leq x$ and $f(\hat{g}, \hat{h}) \leq y$. Since $G$ and $H$ are filters, $\bar{g}=g \wedge \hat{g} \in G$ and $\bar{h}=h \wedge \hat{h} \in H$. By Proposition \ref{propo_1}, $f(\bar{g}, \bar{h}) \leq x$ and $f(\bar{g}, \bar{h}) \leq y$. So, $f(\bar{g}, \bar{h}) \leq x \wedge y$ and $x \wedge y \in f(G,H)$. Then $f(G,H) \in {\rm{Fi}}(\bf{A})$. The proof for $i(H,G) \in {\rm{Fi}}(\bf{A})$ is similar. \end{proof} \begin{theorem} \label{theo_1} Let $\mathcal{M}$ be a FIDL-module. 
Let $G \in {\rm{Fi}}(\bf{A})$, $H \in {\rm{Fi}}(\bf{B})$ and $P \in {\mathcal{X}}({\bf{A}})$. Then: \begin{enumerate} \item If $f(G,H) \subseteq P$, then there exist $Q \in {\mathcal{X}}({\bf{A}})$ and $R \in {\mathcal{X}}({\bf{B}})$ such that $G \subseteq Q$, $H \subseteq R$ and $f(Q,R) \subseteq P$. \item If $i(H,G) \subseteq P$, then there exist $R \in {\mathcal{X}}({\bf{B}})$ and $Q \in {\mathcal{X}}({\bf{A}})$ such that $H \subseteq R$, $G \subseteq Q$ and $i(R,Q) \subseteq P$. \mathrm{e}nd{enumerate} \mathrm{e}nd{theorem} \begin{proof} We prove only $(1)$ because the proof of $(2)$ is analogous. Let us consider the family \begin{equation*} \mathcal{J} = \{ (K,W) \in {\rm{Fi}}({\bf{A}}) \times {\rm{Fi}}({\bf{B}}) \colon G \subseteq K, H \subseteq W \hspace{0.1cm} {\text{and}} \hspace{0.1cm} f(K,W) \subseteq P \}. \mathrm{e}nd{equation*} Since $(G,H) \in \mathcal{J}$, then $\mathcal{J} \neq \mathrm{e}mptyset$. Observe that the union of a chain of elements of $\mathcal{J}$ is also in $\mathcal{J}$. So, by Zorn's Lemma, there is a maximal element $(Q,R) \in \mathcal{J}$. We see that $(Q,R) \in {\mathcal{X}}({\bf{A}}) \times {\mathcal{X}}({\bf{B}})$. Let $x,y \in A$ be such that $x \vee y \in Q$. Suppose that $x,y \notin Q$. Consider the filters $F_{x} = {\rm{Fig}}_{{\bf{A}}}(Q \cup \{x\})$ and $F_{y} = {\rm{Fig}}_{{\bf{A}}}(Q \cup \{y\})$. Then $Q \subset F_{x}$ and $Q \subset F_{y}$, and since $(Q,R)$ is maximal in $\mathcal{J}$, it follows that $f\left( F_{x}, R \right) \nsubseteq P$ and $f\left( F_{y}, R \right) \nsubseteq P$, i.e., there is $z \in f(F_{x}, R)$ such that $z \notin P$ and there is $t \in f(F_{y}, R)$ such that $t \notin P$. Then there exist $(f_{1}, r_{1}) \in F_{x} \times R$ and $(f_{2}, r_{2}) \in F_{y} \times R$ such that $f(f_{1}, r_{1}) \leq z$ and $f(f_{2}, r_{2}) \leq t$. So, there are $q_{1}, q_{2} \in Q$ such that $q_{1} \wedge x \leq f_{1}$ and $q_{2} \wedge y \leq f_{2}$. 
We take $q=q_{1} \wedge q_{2} \in Q$ and $r=r_{1} \wedge r_{2} \in R$. By Proposition \ref{propo_1}, we have $f(q \wedge x, r) \leq z$ and $f(q \wedge y, r) \leq t$. Thus, \begin{equation*} f(q \wedge x, r) \vee f(q \wedge y, r) = f \left( (q \wedge x) \vee (q \wedge y), r \right) = f \left( q \wedge (x \vee y), r \right) \leq z \vee t. \mathrm{e}nd{equation*} As $q, x \vee y \in Q$, then $q \wedge (x \vee y) \in Q$ and $z \vee t \in f(Q,R)$. On the other hand, since $f(Q,R) \subseteq P$, we have $z \vee t \in P$. As $P$ is prime, $z \in P$ or $t \in P$ which is a contradiction. Then $Q \in {\mathcal{X}}({\bf{A}})$. The proof for $R \in {\mathcal{X}}({\bf{B}})$ is similar. It follows that there exist $Q \in {\mathcal{X}}({\bf{A}})$ and $R \in {\mathcal{X}}({\bf{B}})$ such that $G \subseteq Q$, $H \subseteq R$ and $f(Q,R) \subseteq P$. \mathrm{e}nd{proof} Let $\mathcal{M}$ be a FIDL-module. We define the following relations $R_{\mathcal{M}} \subseteq {\mathcal{X}}({\bf{A}}) \times {\mathcal{X}}({\bf{B}}) \times {\mathcal{X}}({\bf{A}})$ and $T_{\mathcal{M}} \subseteq {\mathcal{X}}({\bf{B}}) \times {\mathcal{X}}({\bf{A}}) \times {\mathcal{X}}({\bf{A}})$ by \begin{equation} \label{relation_R_{A}} (Q,R,P) \in R_{\mathcal{M}} \Longleftrightarrow f(Q,R) \subseteq P, \mathrm{e}nd{equation} and \begin{equation} \label{relation_T_{A}} (R,P,Q) \in T_{\mathcal{M}} \Longleftrightarrow i(R,P) \subseteq Q. \mathrm{e}nd{equation} \begin{lemma} \label{lem_1} Let $\mathcal{M}$ be a FIDL-module. Let $x \in A$, $b \in B$ and $P \in {\mathcal{X}}({\bf{A}})$. Then: \begin{enumerate} \item $f(x,b) \in P$ if and only if there exist $Q \in {\mathcal{X}}({\bf{A}})$ and $R \in {\mathcal{X}}({\bf{B}})$ such that $(Q,R,P) \in R_{\mathcal{M}}$, $x \in Q$ and $b \in R$. \item $i(b,x) \in P$ if and only if for every $R \in {\mathcal{X}}({\bf{B}})$ and every $Q \in {\mathcal{X}}({\bf{A}})$, if $(R,P,Q) \in T_{\mathcal{M}}$ and $b \in R$, then $x \in Q$. 
\end{enumerate} \end{lemma} \begin{proof} $(1)$ Suppose $f(x,b) \in P$. We see that $f\left( [x), [b) \right) \subseteq P$. If $y \in f\left( [x), [b) \right)$, then there exists $(g,h) \in [x) \times [b)$ such that $f(g,h) \leq y$. So, $x \leq g$ and $b \leq h$, and by Proposition \ref{propo_1}, $f(x,b) \leq f(g,h) \leq y$. Since $P$ is a filter, $y \in P$ and $f\left( [x), [b) \right) \subseteq P$. So, by Theorem \ref{theo_1}, there exist $Q \in {\mathcal{X}}({\bf{A}})$ and $R \in {\mathcal{X}}({\bf{B}})$ such that $[x) \subseteq Q$, $[b) \subseteq R$ and $f(Q,R) \subseteq P$, i.e., $x \in Q$, $b \in R$ and $(Q,R,P) \in R_{\mathcal{M}}$. Conversely, if there exist $Q \in {\mathcal{X}}({\bf{A}})$ and $R \in {\mathcal{X}}({\bf{B}})$ such that $f(Q,R) \subseteq P$, $x \in Q$ and $b \in R$, because $(x,b) \in Q \times R$, we have $f(x,b) \in f(Q,R)$ and $f(x,b) \in P$. $(2)$ Suppose $i(b,x) \in P$. Let $R \in {\mathcal{X}}(\textbf{B})$ and $Q \in {\mathcal{X}}(\textbf{A})$ be such that $i(R,P) \subseteq Q$ and $b \in R$. Then $(b, i(b,x)) \in R \times P$ and $x \in i(R,P)$. So, $x \in Q$. Reciprocally, suppose $i(b,x) \notin P$. We prove that $i\left( [b), P \right) \cap (x] = \emptyset$. Otherwise, there is $y \in i \left( [b), P \right)$ such that $y \in (x]$. Thus, there exists $(z,p) \in [b) \times P$ such that $p \leq i(z,y)$. Since $y \leq x$ and $b \leq z$, by Proposition \ref{propo_1}, we have $i(z,y) \leq i(b,x)$. Then $p \leq i(b,x)$ and $i(b,x) \in P$, which is a contradiction. So, $i\left( [b), P \right) \cap (x] = \emptyset$ and since $i\left( [b), P \right) \in {\rm{Fi}}(\textbf{A})$, by the Prime Filter Theorem there exists $Q \in {\mathcal{X}}(\textbf{A})$ such that $i \left( [b), P \right) \subseteq Q$ and $x \notin Q$. 
Then, by Theorem \ref{theo_1}, there exist $R \in {\mathcal{X}}(\textbf{B})$ and $\hat{P} \in {\mathcal{X}}(\textbf{A})$ such that $[b) \subseteq R$, $P \subseteq \hat{P}$ and $i(R, \hat{P}) \subseteq Q$. It is clear that $i(R,P) \subseteq i(R, \hat{P})$. Summarizing, there exist $R \in {\mathcal{X}}(\textbf{B})$ and $Q \in {\mathcal{X}}(\textbf{A})$ such that $(R,P,Q) \in T_{\mathcal{M}}$, $b \in R$ and $x \notin Q$, which contradicts the hypothesis. Therefore, $i(b,x) \in P$. \mathrm{e}nd{proof} Now, we introduce the concept of subalgebra of a FIDL-module. \begin{definition} Let $\mathcal{M}$ be a FIDL-module. Let $\hat{\bf{A}}$ be a bounded sublattice of $\bf{A}$ and $\hat{\bf{B}}$ a bounded sublattice of $\bf{B}$. \begin{itemize} \item[(S1)] A structure $\langle \hat{\bf{A}}, \hat{\bf{B}}, f \rangle$ is called a {\rm{FDL-subalgebra of $\langle {\bf{A}}, {\bf{B}}, f \rangle$}}, if for every ${\hat{x}} \in {\hat{A}}$ and every ${\hat{b}} \in {\hat{B}}$, we have $f( \hat{x},\hat{b}) \in {\hat{A}}$. \item[(S2)] A structure $\langle \hat{\bf{A}}, \hat{\bf{B}}, i \rangle$ is called an {\rm{IDL-subalgebra of $\langle {\bf{A}}, {\bf{B}}, i \rangle$}}, if for every ${\hat{x}} \in {\hat{A}}$ and every ${\hat{b}} \in {\hat{B}}$, we have $i({\hat{b}},\hat{x}) \in {\hat{A}}$. \mathrm{e}nd{itemize} Moreover, a structure $\hat{\mathcal{M}}=\langle \hat{\bf{A}}, \hat{\bf{B}}, f, i \rangle$ is called a {\rm{FIDL-subalgebra of $\mathcal{M}$}}, if $\langle \hat{\bf{A}}, \hat{\bf{B}}, f \rangle$ is a FDL-subalgebra and $\langle \hat{\bf{A}}, \hat{\bf{B}}, i \rangle$ is an IDL-subalgebra. \mathrm{e}nd{definition} We conclude this section with a characterization of FIDL-subalgebras by means of the relations defined in (\ref{relation_R_{A}}) and (\ref{relation_T_{A}}). \begin{theorem} Let $\mathcal{M}$ be a FIDL-module. Let $\hat{\bf{A}}$ be a bounded sublattice of $\bf{A}$ and $\hat{\bf{B}}$ a bounded sublattice of $\bf{B}$. 
Then: \begin{enumerate} \item $\langle \hat{\bf{A}}, \hat{\bf{B}}, f \rangle$ is a FDL-subalgebra of $\langle {\bf{A}}, {\bf{B}}, f \rangle$ if and only if for all $P, Q, Q_{1} \in {\mathcal{X}}({\bf{A}})$ and for all $R_{1} \in {\mathcal{X}}({\bf{B}})$, if $(Q_{1}, R_{1}, P)\in R_{\mathcal{M}}$ and $P \cap \hat{A} \subseteq Q$, then there exist $Q_{2} \in {\mathcal{X}}({\bf{A}})$ and $R_{2} \in {\mathcal{X}}({\bf{B}})$ such that $Q_{1} \cap \hat{A} \subseteq Q_{2}$, $R_{1} \cap \hat{B} \subseteq R_{2}$ and $(Q_{2}, R_{2}, Q)\in R_{\mathcal{M}}$. \item $\langle \hat{\bf{A}}, \hat{\bf{B}}, i \rangle$ is an IDL-subalgebra of $\langle {\bf{A}}, {\bf{B}}, i \rangle$ if and only if for all $P, Q, Q_{1} \in {\mathcal{X}}({\bf{A}})$ and for all $R_{1} \in {\mathcal{X}}({\bf{B}})$, if $(R_{1}, Q, Q_{1})\in T_{\mathcal{M}}$ and $P \cap \hat{A} \subseteq Q$, then there exist $Q_{2} \in {\mathcal{X}}({\bf{A}})$ and $R_{2} \in {\mathcal{X}}({\bf{B}})$ such that $Q_{2} \cap \hat{A} \subseteq Q_{1}$, $R_{1} \cap \hat{B} \subseteq R_{2}$ and $(R_{2}, P,Q_{2})\in T_{\mathcal{M}}$. \end{enumerate} We conclude that the structure $\hat{\mathcal{M}}=\langle \hat{\bf{A}}, \hat{\bf{B}}, f, i \rangle$ is a FIDL-subalgebra of $\mathcal{M}$ if and only if it satisfies conditions $(1)$ and $(2)$. \end{theorem} \begin{proof} $(1)$ Let $P, Q, Q_{1} \in {\mathcal{X}}({\bf{A}})$ and $R_{1} \in {\mathcal{X}}({\bf{B}})$ be such that $(Q_{1}, R_{1}, P)\in R_{\mathcal{M}}$ and $P \cap \hat{A} \subseteq Q$. Then, $f(Q_{1}, R_{1}) \subseteq P$. Consider the filters $F_{Q_{1}} = {\rm{Fig}}_{\bf{A}} (Q_{1} \cap \hat{A})$ and $F_{R_{1}} = {\rm{Fig}}_{\bf{B}} (R_{1} \cap \hat{B})$. It follows that $f \left( F_{Q_{1}}, F_{R_{1}}\right) \subseteq Q$. Indeed, if $x \in f \left( F_{Q_{1}}, F_{R_{1}} \right)$, then there exists $(g,h) \in F_{Q_{1}} \times F_{R_{1}}$ such that $f(g,h) \leq x$. 
So, there is $q_{1} \in Q_{1} \cap \hat{A}$ such that $q_{1} \leq g$ and there is $r_{1} \in R_{1} \cap \hat{B}$ such that $r_{1} \leq h$. By Proposition \ref{propo_1}, $f(q_{1}, r_{1}) \leq f(g,h) \leq x$. So, $f(q_{1}, r_{1}) \in f(Q_{1}, R_{1})$ and $f(q_{1}, r_{1}) \in P$. On the other hand, since $\langle \hat{\mathbf{A}}, \hat{\mathbf{B}}, f \rangle$ is a FDL-subalgebra, $f(q_{1}, r_{1}) \in \hat{A}$. Thus, $f(q_{1}, r_{1}) \in Q$ and $x \in Q$. Therefore, $f \left( F_{Q_{1}}, F_{R_{1}}\right) \subseteq Q$ and by Theorem \ref{theo_1}, there exist $Q_{2} \in {\mathcal{X}}({\bf{A}})$ and $R_{2} \in {\mathcal{X}}({\bf{B}})$ such that $Q_{1} \cap \hat{A} \subseteq Q_{2}$, $R_{1} \cap \hat{B} \subseteq R_{2}$ and $(Q_{2}, R_{2}, Q)\in R_{\mathcal{M}}$. Conversely, suppose there exist $\hat{x} \in \hat{A}$ and $\hat{b} \in \hat{B}$ such that $f(\hat{x}, \hat{b}) \notin \hat{A}$. We prove that ${\rm{Fig}}_{\bf{A}} ( [f(\hat{x}, \hat{b})) \cap \hat{A} ) \cap (f(\hat{x}, \hat{b})] = \mathrm{e}mptyset$. Otherwise, there is $y \in A$ such that $y \in {\rm{Fig}}_{\bf{A}} ( [f(\hat{x}, \hat{b})) \cap \hat{A})$ and $y \leq f(\hat{x}, \hat{b})$. Then there exists $z \in [f(\hat{x}, \hat{b})) \cap \hat{A}$ such that $z \leq y$. It follows that $f(\hat{x}, \hat{b})=z$. Since $z \in \hat{A}$, we have $f(\hat{x}, \hat{b}) \in \hat{A}$ which is a contradiction. Then ${\rm{Fig}}_{\bf{A}} ( [f(\hat{x}, \hat{b})) \cap \hat{A} ) \cap (f(\hat{x}, \hat{b})] = \mathrm{e}mptyset$ and by the Prime Filter Theorem there exists $Q \in {\mathcal{X}}({\bf{A}})$ such that $[f(\hat{x}, \hat{b})) \cap \hat{A}\subseteq Q$ and $f(\hat{x}, \hat{b}) \notin Q$. It is easy to see that $[f(\hat{x}, \hat{b})) \cap {\rm{Idg}}_{\bf{A}} ( Q^{c} \cap \hat{A} ) = \mathrm{e}mptyset$. Then there exists $P \in {\mathcal{X}}({\bf{A}})$ such that $f(\hat{x}, \hat{b}) \in P$ and $P \cap {\rm{Idg}}_{\bf{A}} ( Q^{c} \cap \hat{A} ) = \mathrm{e}mptyset$, i.e., $P \cap \hat{A} \subseteq Q$. 
Since $f(\hat{x}, \hat{b}) \in P$, by Lemma \ref{lem_1} there exist $Q_{1} \in {\mathcal{X}}({\bf{A}})$ and $R_{1} \in {\mathcal{X}}({\bf{B}})$ such that $f(Q_{1},R_{1}) \subseteq P$, $\hat{x} \in Q_{1}$ and $\hat{b} \in R_{1}$. So $(Q_{1},R_{1},P)\in R_{\mathcal{M}}$. By assumption, there exist $Q_{2} \in {\mathcal{X}}({\bf{A}})$ and $R_{2} \in {\mathcal{X}}({\bf{B}})$ such that $Q_{1} \cap \hat{A} \subseteq Q_{2}$, $R_{1} \cap \hat{B} \subseteq R_{2}$ and $f(Q_{2}, R_{2}) \subseteq Q$. Thus, $(Q_{2},R_{2},Q)\in R_{\mathcal{M}}$. Since $\hat{x} \in \hat{A}$ and $\hat{b} \in \hat{B}$, we have $\hat{x} \in Q_{2}$ and $\hat{b} \in R_{2}$. Hence, $f(\hat{x}, \hat{b}) \in f (Q_{2}, R_{2})$ and $f(\hat{x}, \hat{b}) \in Q$, which is a contradiction. Therefore, $f(\hat{x}, \hat{b}) \in \hat{A}$ and we conclude that $\langle \hat{\bf{A}}, \hat{\bf{B}}, f \rangle$ is a FDL-subalgebra. $(2)$ Let $P, Q, Q_{1} \in {\mathcal{X}}(\textbf{A})$ and $R_{1} \in {\mathcal{X}}(\textbf{B})$ be such that $(R_{1}, Q,Q_{1})\in T_{\mathcal{M}}$ and $P \cap \hat{A} \subseteq Q$. So $i(R_{1}, Q) \subseteq Q_{1}$. We see that \begin{equation*} {\rm{Fig}}_{\textbf{A}} ( i ( {\rm{Fig}}_{\textbf{B}} (R_{1} \cap \hat{B}), P ) \cap \hat{A} ) \subseteq Q_{1}. \end{equation*} If $x \in {\rm{Fig}}_{\textbf{A}} ( i ( {\rm{Fig}}_{\textbf{B}} (R_{1} \cap \hat{B}), P ) \cap \hat{A} )$, then there is $y \in i ( {\rm{Fig}}_{\textbf{B}} (R_{1} \cap \hat{B}), P ) \cap \hat{A}$ such that $y \leq x$. So, there are $r \in R_{1} \cap \hat{B}$ and $p \in P$ such that $p \leq i(r,y)$. Thus, $i(r,y) \in P$. On the other hand, as $y \in \hat{A}$, $r \in \hat{B}$ and $\langle \hat{\mathbf{A}}, \hat{\mathbf{B}}, i \rangle$ is an IDL-subalgebra, $i(r,y) \in \hat{A}$. Then $i(r,y) \in P \cap \hat{A}$ and $i(r,y) \in Q$. It follows that $y \in i(R_{1},Q)$ and $y \in Q_{1}$. Then $x \in Q_{1}$. 
Now, let us consider the family \begin{equation*} \mathcal{J} = \{ F \in {\rm{Fi}}(\textbf{A}) \colon i ( {\rm{Fig}}_{\textbf{B}} (R_{1} \cap \hat{B}), P ) \subseteq F \hspace{0.1cm} {\text{and}} \hspace{0.1cm} F \cap \hat{A} \subseteq Q_{1} \}. \end{equation*} Then $\mathcal{J} \neq \emptyset$ and by Zorn's Lemma there exists a maximal element $Q_{2} \in \mathcal{J}$. We prove that $Q_{2} \in {\mathcal{X}}(\textbf{A})$. Let $x,y \in A$ be such that $x \vee y \in Q_{2}$ and suppose $x,y \notin Q_{2}$. We take the filters $F_{x} = {\rm{Fig}}_{\textbf{A}} ( Q_{2} \cup \{x\} )$ and $F_{y} = {\rm{Fig}}_{\textbf{A}} ( Q_{2} \cup \{y\} )$. Then $F_{x} \cap \hat{A} \nsubseteq Q_{1}$ and $F_{y} \cap \hat{A} \nsubseteq Q_{1}$, i.e., there exist $z \in F_{x} \cap \hat{A}$ and $t \in F_{y} \cap \hat{A}$ such that $z,t \notin Q_{1}$. So, there are $q_{1}, q_{2} \in Q_{2}$ such that $q_{1} \wedge x \leq z$ and $q_{2} \wedge y \leq t$. Then $(q_{1} \wedge q_{2}) \wedge (x \vee y) \leq z \vee t$ and $z \vee t \in Q_{2}$. Since $\hat{A}$ is a sublattice, $z \vee t \in Q_{2} \cap \hat{A}$ and $z \vee t \in Q_{1}$, which is a contradiction because $Q_{1}$ is prime and $z, t \notin Q_{1}$. Hence, $Q_{2} \in {\mathcal{X}}(\textbf{A})$. As $i ( {\rm{Fig}}_{\textbf{B}} (R_{1} \cap \hat{B}), P ) \subseteq Q_{2}$ and $Q_{2} \cap \hat{A} \subseteq Q_{1}$, by Theorem \ref{theo_1} there exists $R_{2} \in {\mathcal{X}}(\textbf{B})$ such that $R_{1} \cap \hat{B} \subseteq R_{2}$ and $(R_{2},P,Q_{2})\in T_{\mathcal{M}}$. Reciprocally, suppose there exist $\hat{x} \in \hat{A}$ and $\hat{b} \in \hat{B}$ such that $i(\hat{b}, \hat{x}) \notin \hat{A}$. In order to prove our claim, first we show that ${\rm{Idg}}_{\textbf{A}} ( (i(\hat{b}, \hat{x})] \cap \hat{A} ) \cap [i(\hat{b}, \hat{x})) = \emptyset$. 
If there is $y \in {\rm{Idg}}_{\textbf{A}} ( (i(\hat{b}, \hat{x})] \cap \hat{A} )$ such that $i(\hat{b}, \hat{x}) \leq y$, then there exists $z \in (i(\hat{b}, \hat{x})] \cap \hat{A}$ such that $y \leq z$. Thus, $i(\hat{b}, \hat{x})=z$ and $i(\hat{b}, \hat{x}) \in \hat{A}$, which is a contradiction. Then ${\rm{Idg}}_{\textbf{A}} ( (i(\hat{b}, \hat{x})] \cap \hat{A} ) \cap [i(\hat{b}, \hat{x})) = \mathrm{e}mptyset$ and consequently from the Prime Filter Theorem, there exists $P \in {\mathcal{X}}(\textbf{A})$ such that $i(\hat{b}, \hat{x}) \in P$ and ${\rm{Idg}}_{\textbf{A}} ( (i(\hat{b}, \hat{x})] \cap \hat{A} ) \cap P = \mathrm{e}mptyset$. It is easy to prove that $(i(\hat{b}, \hat{x})] \cap {\rm{Fig}}_{\textbf{A}}(P \cap \hat{A}) = \mathrm{e}mptyset$. Then again by the Prime Filter Theorem, there is $Q \in {\mathcal{X}}(\textbf{A})$ such that $P \cap \hat{A} \subseteq Q$ and $i(\hat{b}, \hat{x}) \notin Q$. By Lemma \ref{lem_1}, there exist $R_{1} \in {\mathcal{X}}(\textbf{B})$ and $Q_{1} \in {\mathcal{X}}(\textbf{A})$ such that $i(R_{1},Q) \subseteq Q_{1}$, $\hat{b} \in R_{1}$ and $\hat{x} \notin Q_{1}$. So, by hypothesis, there exist $Q_{2} \in {\mathcal{X}}(\textbf{A})$ and $R_{2} \in {\mathcal{X}}(\textbf{B})$ such that $Q_{2} \cap \hat{A} \subseteq Q_{1}$, $R_{1} \cap \hat{B} \subseteq R_{2}$ and $i(R_{2}, P) \subseteq Q_{2}$. Thus, $(R_{2}, P, Q_{2})\in T_{\mathcal{M}}$. As $\hat{b} \in R_{1}$, we have $\hat{b} \in R_{2}$. On the other hand, as $i(\hat{b}, \hat{x}) \in P$, we have $\hat{x} \in i(R_{2}, P)$ and $\hat{x} \in Q_{2}$. Then $\hat{x} \in Q_{2} \cap \hat{A}$ and $\hat{x} \in Q_{1}$ which is a contradiction. Hence, $i(\hat{b}, \hat{x}) \in \hat{A}$ and $\langle \hat{\bf{A}}, \hat{\bf{B}}, i \rangle$ is an IDL-subalgebra. 
\mathrm{e}nd{proof} \section{Representation for FIDL-modules} \label{Representation of FIDL-modules} The main purpose of this section is to show a representation theorem for FIDL-modules in terms of certain relational structures consisting of bi-posets endowed with two relations. We start by defining a category whose objects are FIDL-modules. So, we need to describe first, the notion of homomorphism between FIDL-modules. Recall that for every pair of functions $\alpha \colon A \to \hat{A}$ and $\gamma \colon B \to \hat{B}$ we can consider the map $\alpha \times \gamma \colon A \times B \to \hat{A} \times \hat{B}$ which is defined by $\left( \alpha \times \gamma \right) (x,y) = \left( \alpha(x), \gamma(y) \right)$. \begin{definition} \label{Definition FIDL-homomorphism} Let $\mathcal{M}=\langle {\bf{A}}, {\bf{B}}, f, i \rangle$ and $\hat{\mathcal{M}}=\langle \hat{\bf{A}}, \hat{\bf{B}}, \hat{f}, \hat{i} \rangle$ be two FIDL-modules. We shall say that a pair $\left( \alpha, \gamma \right) \colon \mathcal{M} \to \hat{\mathcal{M}}$ is a {\rm{FIDL-homomorphism}}, if $\alpha \colon A \to \hat{A}$ and $\gamma \colon B \to \hat{B}$ are homomorphisms between bounded distributive lattices and the following diagrams commute: \begin{displaymath} \begin{tabular}{ccc} \xymatrix{ A \times B \ar[r]^-{f} \ar[d]_-{\alpha \times \gamma} & A \ar[d]^-{\alpha} \\ \hat{A} \times \hat{B} \ar[r]_-{\hat{f}} & \hat{A} } & & \xymatrix{ B \times A \ar[r]^-{i} \ar[d]_-{\gamma \times \alpha} & A \ar[d]^-{\alpha} \\ \hat{B} \times \hat{A} \ar[r]_-{\hat{i}} & \hat{A} } \mathrm{e}nd{tabular} \mathrm{e}nd{displaymath} \mathrm{e}nd{definition} \begin{remark} \label{Remark_f and i} Notice that from Remark \ref{Fusion and impliction as unary operations}, the diagrams of Definition \ref{Definition FIDL-homomorphism} are commutative if and only if for every $b \in B$, the following diagrams commute: \begin{displaymath} \begin{tabular}{ccc} \xymatrix{ A \ar[r]^-{f_{b}} \ar[d]_-{\alpha} & A \ar[d]^-{\alpha} 
\\ \hat{A} \ar[r]_-{{\hat{f}}_{\gamma(b)}} & \hat{A} } & & \xymatrix{ A \ar[r]^-{i_{b}} \ar[d]_-{\alpha} & A \ar[d]^-{\alpha} \\ \hat{A} \ar[r]_-{{\hat{i}}_{\gamma(b)}} & \hat{A} } \mathrm{e}nd{tabular} \mathrm{e}nd{displaymath} We stress that for the rest of the paper we will use the functions ${\hat{f}}_{\gamma(b)}$ and ${\hat{i}}_{\gamma(b)}$ as well as the notation of Definition \ref{Definition FIDL-homomorphism} indistinctly. \mathrm{e}nd{remark} \begin{example} Let $\mathcal{M}$ be a FIDL-module. Let ${\bf{C}}$ be a bounded distributive lattice and $h \colon C \to B$ be a lattice homomorphism. If we define the functions $\hat{f} \colon A \times C \to A$ by $\hat{f}(x,c)=f(x,h(c))$ and $\hat{i} \colon C \times A \to A$ by $\hat{i}(c,x)=i(h(c),x)$, then the structure $\mathcal{N} = \langle {\bf{A}}, {\bf{C}}, \hat{f}, \hat{i} \rangle$ is a FIDL-module and the pair $\left( id_{A}, h \right) \colon \mathcal{N} \to \mathcal{M}$ is a FIDL-homomorphism. \mathrm{e}nd{example} Let $\mathcal{M}=\langle {\bf{A}}, {\bf{B}}, f, i \rangle$, ${\hat{\mathcal{M}}}=\langle \hat{\bf{A}}, \hat{\bf{B}}, \hat{f}, \hat{i} \rangle$ and ${\bar{\mathcal{M}}} = \langle \bar{\bf{A}}, \bar{\bf{B}}, \bar{f}, \bar{i} \rangle$ be FIDL-modules. Consider the FIDL-homomorphisms $\left( \alpha, \gamma \right) \colon \mathcal{M} \to {\hat{\mathcal{M}}}$ and $\left( \delta, \lambda \right) \colon {\hat{\mathcal{M}}} \to {\bar{\mathcal{M}}}$. Then we define the composition $\left( \delta, \lambda \right) \left( \alpha, \gamma \right) \colon \mathcal{M} \to {\bar{\mathcal{M}}}$ as the pair $\left( \delta \alpha, \lambda \gamma \right)$. It is clear that the FIDL-homomorphisms between FIDL-modules are closed by composition and that such a composition is associative. Moreover, we may define the identity of $\mathcal{M}$ as the pair $\left( id_{A}, id_{B} \right)$. So, we obtain that the class $\mathsf{FIMod}$ of FIDL-modules as objects and FIDL-homomorphisms as morphisms is a category. 
The following technical result will be useful later. \begin{lemma} \label{Isos FDL} Let $\mathcal{M}=\langle {\bf{A}}, {\bf{B}}, f, i \rangle$ and $\hat{\mathcal{M}}=\langle \hat{\bf{A}}, \hat{\bf{B}}, \hat{f}, \hat{i} \rangle$ be two FIDL-modules and $\left( \alpha, \gamma \right) \colon \mathcal{M} \to \hat{\mathcal{M}}$ a FIDL-homomorphism. Then the following conditions are equivalent: \begin{enumerate} \item $\left( \alpha, \gamma \right)$ is an isomorphism in the category ${\mathsf{FIMod}}$, \item $\alpha$ and $\gamma$ are isomorphisms of bounded distributive lattices. \mathrm{e}nd{enumerate} \mathrm{e}nd{lemma} \begin{proof} $(1) \Rightarrow (2)$ Immediate. $(2) \Rightarrow (1)$ Let us assume that $\alpha$ and $\gamma$ are isomorphism of bounded distributive lattices. We will show that the pair $\left( {\alpha}^{-1}, {\gamma}^{-1} \right) \colon \hat{\mathcal{M}} \to \mathcal{M}$ is a FIDL-homomorphism. Since ${\alpha}^{-1}$ and ${\gamma}^{-1}$ are isomorphisms of bounded distributive lattices, only remains to check the commutativity of the following diagram: \begin{displaymath} \xymatrix{ \hat{A} \times \hat{B} \ar[r]^-{{\hat{f}}} \ar[d]_-{{\alpha}^{-1} \times {\gamma}^{-1}} & \hat{A} \ar[d]^-{{\alpha}^{-1}} \\ A \times B \ar[r]_-{f} & A } \mathrm{e}nd{displaymath} By hypothesis, we have ${\hat{f}} \left( \alpha \times \gamma \right) = \alpha f$. So, ${\alpha}^{-1} {\hat{f}} \left( \alpha \times \gamma \right) = f$ and \begin{equation*} f \left( {\alpha}^{-1} \times {\gamma}^{-1} \right) = {\alpha}^{-1} {\hat{f}} \left( {\alpha} \times {\gamma} \right) \left( {\alpha}^{-1} \times {\gamma}^{-1} \right) = {\alpha}^{-1} {\hat{f}} \left( id_{\hat{A} \times \hat{B}} \right) = {\alpha}^{-1} {\hat{f}}, \mathrm{e}nd{equation*} i.e., $f \left( {\alpha}^{-1} \times {\gamma}^{-1} \right) = {\alpha}^{-1} {\hat{f}}$. 
Similarly, the commutativity of the following diagram is easily verified: \begin{displaymath} \xymatrix{ \hat{B} \times \hat{A} \ar[r]^-{{\hat{i}}} \ar[d]_-{{\gamma}^{-1} \times {\alpha}^{-1}} & \hat{A} \ar[d]^-{{\alpha}^{-1}} \\ B \times A \ar[r]_-{i} & A } \mathrm{e}nd{displaymath} By definition of the composition in ${\mathsf{FIMod}}$, it follows that $\left( \alpha, \gamma \right)^{-1} = \left( {\alpha}^{-1}, {\gamma}^{-1} \right)$ and $\left( \alpha, \gamma \right)$ is an isomorphism in the category ${\mathsf{FIMod}}$. \mathrm{e}nd{proof} Now, we introduce the class of relational structures needed to develop our representation theorem as well as the notion of morphisms between them. \begin{definition} \label{definition FI-frames} Let $\langle X, \leq_{X} \rangle$ and $\langle Y, \leq_{Y} \rangle$ be two posets. A structure $\langle X, Y, \leq_{X}, \leq_{Y}, R \rangle$ is called a {\rm{F-frame}}, if $R \subseteq X \times Y \times X$ is a relation such that: \begin{equation} \label{condition_R} \text{if} \hspace{0.1cm} (x,y,z) \in R, \bar{x} \leq_{X} x, \bar{y} \leq_{Y} y \hspace{0.1cm} \text{and} \hspace{0.1cm} z \leq_{X} \bar{z}, \hspace{0.1cm} \text{then} \hspace{0.1cm} (\bar{x},\bar{y},\bar{z}) \in R. \mathrm{e}nd{equation} A structure $\langle X, Y, \leq_{X}, \leq_{Y}, T \rangle$ is called an {\rm{I-frame}}, if $T \subseteq Y \times X \times X$ is a relation such that: \begin{equation} \label{condition_T} \text{if} \hspace{0.1cm} (y,x,z) \in T, \bar{y} \leq_{Y} y, \bar{x} \leq_{X} x \hspace{0.1cm} \text{and} \hspace{0.1cm} z \leq_{X} \bar{z}, \hspace{0.1cm} \text{then} \hspace{0.1cm} (\bar{y},\bar{x},\bar{z}) \in T. \mathrm{e}nd{equation} Moreover, a structure $\mathcal{F}=\langle X, Y, \leq_{X}, \leq_{Y}, R, T \rangle$ is called a {\rm{FI-frame}}, if $\langle X, Y, \leq_{X}, \leq_{Y}, R \rangle$ is a F-frame and $\langle X, Y, \leq_{X}, \leq_{Y}, T \rangle$ is an I-frame. 
\mathrm{e}nd{definition} \begin{definition} \label{Definition FI-morphism} Let $\mathcal{F}$ and $\hat{\mathcal{F}}$ be two FI-frames. We shall say that a pair $\left( g, h \right) \colon \mathcal{F} \to \hat{\mathcal{F}}$ is a {\rm{FI-morphism}}, if $g \colon X \to \hat{X}$ and $h \colon Y \to \hat{Y}$ are morphisms between posets and the following conditions hold: \begin{itemize} \item[(M1)] If $(x,y,z) \in R$, then $(g(x), h(y), g(z)) \in \hat{R}$. \item[(M2)] If $(\bar{x}, \bar{y}, g(z)) \in \hat{R}$, then there exist $x \in X$ and $y \in Y$ such that $(x,y,z) \in R$, $\bar{x} \leq_{\hat{X}} g(x)$ and $\bar{y} \leq_{\hat{Y}} h(y)$. \item[(N1)] If $(x,y,z) \in T$, then $(h(x), g(y), g(z)) \in \hat{T}$. \item[(N2)] If $(\bar{x}, g(y), \bar{z}) \in \hat{T}$, then there exist $x \in Y$ and $z \in X$ such that $(x,y,z) \in T$, $\bar{x} \leq_{\hat{Y}} h(x)$ and $g(z) \leq_{\hat{X}} \bar{z}$. \mathrm{e}nd{itemize} \mathrm{e}nd{definition} The composition of FI-morphisms is defined component-wise. It is clear from Definition \ref{Definition FI-morphism}, that such a composition is closed and associative and for every FI-Frame $\mathcal{F}$, the identity arrow is given by the pair $(id_{X},id_{Y})$. We write $\mathsf{FIFram}$ for the category of FI-frames and FI-morphisms. The following result is similar to Lemma \ref{Isos FDL} and will be useful at the moment of proving the main theorem of this section. \begin{lemma} \label{Isos FIF} Let $\mathcal{F}$ and $\hat{\mathcal{F}}$ be two FI-frames and $\left( g, h \right) \colon \mathcal{F} \to \hat{\mathcal{F}}$ a FI-morphism. Then the following conditions are equivalent: \begin{enumerate} \item $\left( g, h \right)$ is an isomorphism in the category ${\mathsf{FIFram}}$, \item $g$ and $h$ are isomorphisms of posets. \mathrm{e}nd{enumerate} \mathrm{e}nd{lemma} \begin{proof} $(1) \Rightarrow (2)$ Immediate. $(2) \Rightarrow (1)$ Since $g$ and $h$ are isomorphisms of posets, then there exist $g^{-1}$ and $h^{-1}$. 
It is clear that $(g,h)^{-1} = (g^{-1}, h^{-1})$. We need to check that $(g^{-1}, h^{-1}) \colon \hat{\mathcal{F}} \to \mathcal{F}$ is a FI-morphism. We prove (M1). Let $(\bar{x}, \bar{y}, \bar{z}) \in \hat{X} \times \hat{Y} \times \hat{X}$ such that $(\bar{x}, \bar{y}, \bar{z}) \in \hat{R}$ and suppose that $(g^{-1}(\bar{x}), h^{-1}(\bar{y}), g^{-1}(\bar{z})) \notin R$. Due to $(\bar{x}, \bar{y}, g(g^{-1}(\bar{z}))) \in \hat{R}$ and $(g,h)$ is a FI-morphism, there exist $x \in X$ and $y \in Y$ such that $(x,y,g^{-1}(\bar{z})) \in R$, $\bar{x} \leq_{\hat{X}} g(x)$ and $\bar{y} \leq_{\hat{Y}} h(y)$. On the other hand, since $g^{-1}$ and $h^{-1}$ are monotone, we have $g^{-1}(\bar{x}) \leq_{X} x$, $h^{-1}(\bar{y}) \leq_{Y} y$ and $g^{-1}(\bar{z}) \leq_{X} g^{-1}(\bar{z})$. It follows, by (\ref{condition_R}), that $(g^{-1}(\bar{x}), h^{-1}(\bar{y}), g^{-1}(\bar{z})) \in R$ which is a contradiction. We prove (M2). Let $(x,y,g^{-1}(\bar{z})) \in R$. As $x=g^{-1}(g(x))$ and $y=h^{-1}(h(y))$, then $(g^{-1}(g(x)), h^{-1}(h(y)), g^{-1}(\bar{z})) \in R$. Since $(g,h)$ is a FI-morphism, $(g(x), h(y), \bar{z}) \in \hat{R}$. Conditions (N1) and (N2) can be verified analogously. \mathrm{e}nd{proof} It is the moment to show how we build our representation. Let $\mathcal{F}=\langle X, Y, \leq_{X}, \leq_{Y}, R, T \rangle$ be a FI-frame. Then it follows that $\langle \mathcal{P}_{i}(X), \cup, \cap, \mathrm{e}mptyset, X \rangle$ and $\langle \mathcal{P}_{i}(Y), \cup, \cap, \mathrm{e}mptyset, Y \rangle$ are bounded distributive lattices. 
Let $U \in \mathcal{P}_{i}(X)$ and $V \in \mathcal{P}_{i}(Y)$, and let us consider the following subsets of $X$: \begin{equation} \label{f_in P} f_{\mathcal{F}}(U,V)=\{ z \in X \colon \exists (x,y) \in U \times V {\hspace{0.1cm}} {\text{such that}} {\hspace{0.1cm}} (x,y,z) \in R \} \end{equation} and \begin{equation} \label{i_in P} i_{\mathcal{F}}(V,U)=\{ y \in X \colon \forall x \in Y, \forall z \in X {\hspace{0.05cm}} (((x,y,z) \in T {\hspace{0.1cm}} {\text{and}} {\hspace{0.1cm}} x \in V) {\hspace{0.1cm}} {\text{implies}} {\hspace{0.1cm}} z \in U) \}. \end{equation} It is easy to prove that $f_{\mathcal{F}}(U,V), i_{\mathcal{F}}(V,U) \in \mathcal{P}_{i}(X)$. The proofs of the following two results are routine, so the details are left to the reader. \begin{lemma} \label{representation} Let $\mathcal{F}$ be a FI-frame. Then the structure \begin{equation*} \mathcal{M}_{\mathcal{F}}=\langle \mathcal{P}_{i}(X), \mathcal{P}_{i}(Y), f_{\mathcal{F}}, i_{\mathcal{F}} \rangle \end{equation*} is a FIDL-module, where $f_{\mathcal{F}}$ and $i_{\mathcal{F}}$ are given by (\ref{f_in P}) and (\ref{i_in P}), respectively. \end{lemma} \begin{lemma} \label{Representation of a FI-frame} Let $ \mathcal{M}$ be a FIDL-module. Then the structure \begin{equation*} \mathcal{F}_{\mathcal{M}}=\langle {\mathcal{X}}({\bf{A}}), {\mathcal{X}}({\bf{B}}), \subseteq_{\bf{A}}, \subseteq_{\bf{B}}, R_{\mathcal{M}}, T_{\mathcal{M}} \rangle \end{equation*} is a FI-frame, where $R_{\mathcal{M}}$ and $T_{\mathcal{M}}$ are given by (\ref{relation_R_{A}}) and (\ref{relation_T_{A}}), respectively. \end{lemma} \begin{lemma} \label{Representation of FIDL morphisms} Let $\mathcal{M}$ and $\hat{\mathcal{M}}$ be two FIDL-modules. If $( \alpha, \gamma) \colon \mathcal{M}\to \hat{\mathcal{M}}$ is a FIDL-homomorphism, then $(\alpha^{*}, \gamma^{*}) \colon \mathcal{F}_{\hat{\mathcal{M}}} \to \mathcal{F}_{\mathcal{M}}$ is a FI-morphism. 
\end{lemma} \begin{proof} We start by proving (M1). Let $(\hat{Q},\hat{R},\hat{P})\in R_{\hat{\mathcal{M}}}$. In order to prove $(\alpha^{\ast}(\hat{Q}),\gamma^{\ast}(\hat{R}),\alpha^{\ast}(\hat{P}))\in R_{\mathcal{M}}$, let $y\in f(\alpha^{\ast}(\hat{Q}),\gamma^{\ast}(\hat{R}))$. Then there exists $(a,b)\in \alpha^{\ast}(\hat{Q})\times \gamma^{\ast}(\hat{R})$ such that $f(a,b)\leq_{A} y$. Since $\alpha$ is monotone and $(\alpha,\gamma)$ is a FIDL-homomorphism, we have $\alpha(f(a,b))=\hat{f}(\alpha(a),\gamma(b))\leq_{\hat{A}} \alpha(y)$. Since $\alpha(a)\in \hat{Q}$ and $\gamma(b)\in \hat{R}$, then $\alpha(y)\in \hat{f}(\hat{Q},\hat{R})$. So, our assumption allows us to conclude that $\alpha(y)\in \hat{P}$. Hence $f(\alpha^{\ast}(\hat{Q}),\gamma^{\ast}(\hat{R}))\subseteq_{\mathcal{X}(\bf{A})} \alpha^{\ast}(\hat{P})$. The proof of (N1) is similar. Now we prove (M2). Let us assume that $(Q,R,\alpha^{\ast}(\hat{P}))\in R_{\mathcal{M}}$. Note that such an assumption allows us to say that $f(a,b)\in \alpha^{\ast}(\hat{P})$ for every $(a,b)\in Q\times R$. Then, since $(\alpha,\gamma)$ is a FIDL-homomorphism, it follows that $\hat{f}(\alpha(a),\gamma(b))\in \hat{P}$. From $(1)$ of Lemma \ref{lem_1}, there exist $\hat{Q}\in \mathcal{X}(\hat{\mathbf{A}})$ and $\hat{R}\in \mathcal{X}(\hat{\mathbf{B}})$ such that $\alpha(a)\in \hat{Q}$, $\gamma(b)\in \hat{R}$ and $(\hat{Q},\hat{R},\hat{P})\in R_{\hat{\mathcal{M}}}$. Hence (M2) holds. Finally, for proving (N2), let us assume that $(R,\alpha^{\ast}(\hat{P}),Q)\in T_{\mathcal{M}}$. Consider $F={\rm{Fig}}_{\hat{\bf{B}}}(\gamma(R))$ and $I={\rm{Idg}}_{\hat{\bf{A}}}(\alpha(Q^{c}))$. We see that $\hat{i}(F, \hat{P})\cap I=\emptyset$. Assume the contrary; then there exist $y\in \hat{A}$, $a\in \hat{P}$, $b\in \hat{B}$, $r\in R$ and $q\notin Q$ such that $a\leq_{\hat{A}} \hat{i}(b,y)$, $\gamma(r)\leq_{\hat{B}} b$ and $y\leq_{\hat{A}} \alpha(q)$. 
Since $(\alpha,\gamma)$ is a FIDL-homomorphism, by Proposition \ref{propo_1}, we obtain that $a\leq_{\hat{A}} \hat{i}(\gamma(r), \alpha(q))=\alpha(i(r,q))$. Hence $i(r,q)\in \alpha^{\ast}(\hat{P})$, so $q\in i(R,\alpha^{\ast}(\hat{P}))$ and therefore $q\in Q$, which is a contradiction. Then $\hat{i}(F, \hat{P})\cap I=\emptyset$ and by the Prime Filter Theorem, there exists $\hat{Q}\in \mathcal{X}(\hat{\mathbf{A}})$ such that $\hat{i}(F,\hat{P})\subseteq_{\mathcal{X}(\hat{\mathbf{A}})} \hat{Q}$ and $\hat{Q}\cap I=\emptyset$. On the other hand, from Theorem \ref{theo_1}, there exists $\hat{R}\in \mathcal{X}(\hat{\mathbf{B}})$ such that $\hat{i}(\hat{R},\hat{P})\subseteq_{\mathcal{X}(\hat{\mathbf{A}})} \hat{Q}$ and $R\subseteq_{\mathcal{X}(\mathbf{B})} \gamma^{\ast}(\hat{R})$. As $\alpha(Q^{c})\subseteq_{\mathcal{X}(\hat{\mathbf{A}})} I$, we get that $\hat{Q}\cap \alpha(Q^{c})=\emptyset$. It is not hard to see that the latter is equivalent to saying that $\alpha^{\ast}(\hat{Q})\subseteq_{\mathcal{X}(\mathbf{A})} Q$. \end{proof} \begin{lemma}\label{Representation of FI-frame morphisms} Let $\mathcal{F}$ and $\hat{\mathcal{F}}$ be two FI-frames. If $(g, h) \colon \mathcal{F} \to \hat{\mathcal{F}}$ is a FI-morphism, then $(g^{*}, h^{*}) \colon \mathcal{M}_{\hat{\mathcal{F}}} \to \mathcal{M}_{\mathcal{F}}$ is a FIDL-homomorphism. \end{lemma} \begin{proof} Let $\mathcal{M}_{\mathcal{F}}$ and $\mathcal{M}_{\mathcal{\hat{F}}}$ be the FIDL-modules that arise from Lemma \ref{representation}. In order to simplify notation, in this proof we write $f$ and $i$ instead of $f_{\mathcal{F}}$ and $i_{\mathcal{F}}$. Similarly, we write $\hat{f}$ and $\hat{i}$ instead of $f_{\mathcal{\hat{F}}}$ and $i_{\mathcal{\hat{F}}}$. This is with the aim of setting our proof within the context of Remark \ref{Remark_f and i}. 
It is clear that $g^{\ast}:\mathcal{P}_{i}(\hat{X})\rightarrow \mathcal{P}_{i}(X)$ and $h^{\ast}:\mathcal{P}_{i}(\hat{Y})\rightarrow \mathcal{P}_{i}(Y)$ are homomorphisms of bounded distributive lattices, so, in order to prove our claim, we proceed to check that for every $U\in \mathcal{P}_{i}(\hat{Y})$, the following diagrams \begin{displaymath} \begin{array}{ccc} \xymatrix{ \mathcal{P}_{i}(\hat{X}) \ar[d]_-{g^{\ast}} \ar[r]^-{\hat{f}_{U}} & \mathcal{P}_{i}(\hat{X}) \ar[d]^-{g^{\ast}} \\ \mathcal{P}_{i}(X) \ar[r]_-{f_{h^{\ast}(U)}} & \mathcal{P}_{i}(X) } & & \xymatrix{ \mathcal{P}_{i}(\hat{X}) \ar[d]_-{g^{\ast}} \ar[r]^-{\hat{i}_{U}} & \mathcal{P}_{i}(\hat{X}) \ar[d]^-{g^{\ast}} \\ \mathcal{P}_{i}(X) \ar[r]_-{i_{h^{\ast}(U)}} & \mathcal{P}_{i}(X) } \end{array} \end{displaymath} commute. For the diagram on the left, let $V\in \mathcal{P}_{i}(\hat{X})$ and $z\in g^{\ast}(\hat{f}_{U}(V))$. So, there exist $x'\in U$ and $y'\in V$ such that $(x',y',g(z))\in \hat{R}$. Since $(g,h)$ is a FI-morphism, then from (M2) of Definition \ref{Definition FI-morphism}, there exist $x\in Y$ and $y\in X$ such that $x'\leq_{\hat{Y}} h(x)$, $y'\leq_{\hat{X}} g(y)$ and $(x,y,z)\in R$. Hence $x\in h^{\ast}(U)$, $y\in g^{\ast}(V)$ and $(x,y,z)\in R$. That is to say, $z\in f_{h^{\ast}(U)}(g^{\ast}(V))$. The other inclusion is straightforward. For the diagram on the right, let $V\in \mathcal{P}_{i}(\hat{X})$ and suppose that $y\in i_{h^{\ast}(U)}(g^{\ast}(V))$. So, for every $x\in Y$ and $z\in X$, if $(x,y,z)\in T$ and $h(x)\in U$, then $g(z)\in V$. We recall that for showing $y\in g^{\ast}(\hat{i}_{U}(V))$ we need to prove that for every $x'\in \hat{Y}$ and $z'\in \hat{X}$, if $(x',g(y),z')\in \hat{T}$ and $x'\in U$, then $z'\in V$. Indeed, since $(g,h)$ is a FI-morphism, from (N2) of Definition \ref{Definition FI-morphism}, there exist $x\in Y$ and $z\in X$ such that $(x,y,z)\in T$, $x'\leq_{\hat{Y}} h(x)$ and $g(z)\leq_{\hat{X}} z'$. 
As $U\in \mathcal{P}_{i}(\hat{Y})$, it follows that $h(x)\in U$ and since $(x,y,z)\in T$ from assumption, then we obtain $g(z)\in V$ and $z' \in V$. The remaining inclusion is easy. \end{proof} Observe that Lemmas \ref{representation} and \ref{Representation of FI-frame morphisms} allow us to define a functor $\mathbb{G} \colon \mathsf{FIFram} \to \mathsf{FIMod}^{op}$ as follows: \begin{displaymath} \begin{array}{rcl} \mathcal{F} & \mapsto & \mathcal{M}_{\mathcal{F}} \\ (g,h) & \mapsto & (g^{\ast},h^{\ast}). \end{array} \end{displaymath} On the other hand, from Lemmas \ref{Representation of a FI-frame} and \ref{Representation of FIDL morphisms}, we can define a functor $\mathbb{F} \colon \mathsf{FIMod}^{op} \to \mathsf{FIFram}$ as follows: \begin{displaymath} \begin{array}{rcl} \mathcal{M} & \mapsto & \mathcal{F}_{\mathcal{M}} \\ (\alpha,\gamma) & \mapsto & (\alpha^{\ast},\gamma^{\ast}). \end{array} \end{displaymath} We conclude this section by proving our representation theorem for FIDL-modules. \begin{theorem} \label{Functor 2 DLFI} $\mathbb{G}$ is a left adjoint of $\mathbb{F}$ and the counit is an isomorphism. \end{theorem} \begin{proof} We start by showing that $\mathbb{G}$ is a left adjoint of $\mathbb{F}$. Let $\mathcal{F}=\langle X,Y,\leq_{X},\leq_{Y}, R,T\rangle$ be a FI-frame, $\mathcal{M}=\langle {\bf{A}}, {\bf{B}}, f, i \rangle$ a FIDL-module and $(g,h) \colon \mathcal{F} \to \mathcal{F}_{\mathcal{M}}$ a FI-morphism. Then $g \colon X \to \mathcal{X}(\textbf{A})$ and $h \colon Y \to \mathcal{X}(\textbf{B})$ are maps of posets satisfying the conditions of Definition \ref{Definition FI-morphism}. From Stone's representation theorem, there exists a unique pair of lattice homomorphisms $\overline{g} \colon A \to \mathcal{P}_{i}(X)$ and $\overline{h} \colon B \to \mathcal{P}_{i}(Y)$ defined by $\overline{g}(a) = \{y \in X \colon a \in g(y)\}$ and $\overline{h}(b) = \{x \in Y \colon b \in h(x)\}$. 
In order to prove that $(\overline{g}, \overline{h}) \colon \mathcal{M} \to \mathcal{M}_{\mathcal{F}}$ is a FIDL-homomorphism, we need to show the commutativity of the following diagrams \begin{displaymath} \begin{array}{ccc} \xymatrix{ A \ar[r]^-{f_{b}} \ar[d]_-{\overline{g}} & A \ar[d]^-{\overline{g}} \\ \mathcal{P}_{i}(X) \ar[r]_-{f_{{\mathcal{F}}_{\overline{h}(b)}}} & \mathcal{P}_{i}(X) } & & \xymatrix{ A \ar[r]^-{i_{b}} \ar[d]_-{\overline{g}} & A \ar[d]^-{\overline{g}} \\ \mathcal{P}_{i}(X) \ar[r]_-{i_{{\mathcal{F}}_{\overline{h}(b)}}} & \mathcal{P}_{i}(X) } \end{array} \end{displaymath} for every $b \in B$. We prove $\overline{g}(f_{b}(a)) = f_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$, for every $a \in A$. So, to check that $\overline{g}(f_{b}(a)) \subseteq f_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$, let $z\in \overline{g}(f_{b}(a))$. Thus, $f_{b}(a)\in g(z)$. By Lemma \ref{lem_1}, there exist $Q \in \mathcal{X}(\textbf{B})$ and $E \in \mathcal{X}(\textbf{A})$ such that $b \in Q$, $a \in E$ and $(Q,E, g(z)) \in R_{\mathcal{M}}$. From condition (M2), there exist $x\in Y$ and $y\in X$ such that $Q\subseteq_{\mathcal{X}(\textbf{B})} h(x)$, $E \subseteq_{\mathcal{X}(\textbf{A})} g(y)$ and $(x,y,z) \in R$. Hence, $x\in \overline{h}(b)$ and $y \in \overline{g}(a)$. Therefore $z \in f_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$. Conversely, if $z \in f_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$, then there exist $x \in \overline{h}(b)$ and $y \in \overline{g}(a)$ such that $(x,y,z)\in R$. So, $(h(x),g(y),g(z)) \in R_{\mathcal{M}}$ from (M1) and consequently, $f(h(x),g(y))\subseteq g(z)$. By Lemma \ref{lem_1}, $f_{b}(a) \in g(z)$, or equivalently, $z \in \overline{g}(f_{b}(a))$. Now we prove that $\overline{g}(i_{b}(a)) = i_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$, for every $a \in A$. Let $z \in \overline{g}(i_{b}(a))$. 
If $(x,y,z)\in T$ and $b \in h(x)$, then from condition (N1) we have $i(h(x),g(y)) \subseteq_{\mathcal{X}(\textbf{A})} g(z)$. Since $i_{b}(a) \in g(z)$, from Lemma \ref{lem_1}, we can conclude that $a \in g(z)$ and $z \in i_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$. On the other hand, assume that $z \in i_{\mathcal{F}_{\overline{h}(b)}}(\overline{g}(a))$. In order to prove that $z\in \overline{g}(i_{b}(a))$, let $R\in \mathcal{X}(\textbf{B})$ and $Q \in \mathcal{X}(\textbf{A})$ such that $(R,g(y),Q)\in T_{\mathcal{M}}$ and $b \in R$. So, $i(R,g(y))\subseteq_{\mathcal{X}(\textbf{A})} Q$. From condition (N2), there exist $x \in Y$ and $z\in X$ such that $(x,y,z)\in T$, $R\subseteq_{\mathcal{X}(\textbf{B})}h(x)$ and $g(z)\subseteq_{\mathcal{X}(\textbf{A})}Q$. So, from assumption we have $a \in g(z)$. Hence, by Lemma \ref{lem_1}, $z \in \overline{g}(i_{b}(a))$. For the last part, note that for each frame $\mathcal{F}=\langle X,Y,\leq_{X},\leq_{Y}, R,T\rangle$, the counit of the adjunction $\mathbb{G} \dashv \mathbb{F}$ is determined by the pair of monotone maps $\epsilon_{X} \colon X \to \mathcal{X}(\mathcal{P}_{i}(X))$ and $\epsilon_{Y} \colon Y \to \mathcal{X}(\mathcal{P}_{i}(Y))$, which are defined by $\epsilon_{X}(y) = \{V \in \mathcal{P}_{i}(X) \colon y\in V\}$ and $\epsilon_{Y}(x) = \{U \in \mathcal{P}_{i}(Y) \colon x\in U\}$. It is clear from Stone's representation theorem, that the maps $\epsilon_{X}$ and $\epsilon_{Y}$ are isomorphisms of posets. So, from Lemma \ref{Isos FIF}, the result follows. \end{proof} \section{Topological duality} \label{Topological duality for FIDL-modules} In this section we prove a duality for FIDL-modules by using some of the results of Section \ref{Representation of FIDL-modules} together with a suitable extension of Priestley duality for distributive lattices. The dual objects are certain topological bi-spaces endowed with two relations satisfying some particular properties. 
\begin{definition}\label{Associated Spaces} A structure $\mathcal{U}=\langle X,Y,\leq_{X}, \leq_{Y}, \tau_{X}, \tau_{Y}, R, T\rangle$ is called an {\rm{Urquhart space}}, if the following conditions hold: \begin{enumerate} \item $\langle X, \leq_{X}, \tau_{X} \rangle$ and $\langle Y, \leq_{Y}, \tau_{Y} \rangle$ are Priestley spaces, \item $R\subseteq X\times Y\times X$ and $T\subseteq Y\times X \times X$, \item For every $U\in \mathcal{C}(Y)$ and every $V \in \mathcal{C}(X)$, we have $f(V,U), i(U,V)\in \mathcal{C}(X)$, \item For every $x \in Y$ and every $y,z\in X$, if $f(\epsilon_{X}(y) ,\epsilon_{Y}(x))\subseteq \epsilon_{X}(z)$, then $(y, x, z)\in R$, \item For every $x\in Y$ and every $y,z\in X$, if $i(\epsilon_{Y}(x) ,\epsilon_{X}(y))\subseteq \epsilon_{X}(z)$, then $(x,y,z)\in T$. \end{enumerate} \end{definition} \begin{lemma}\label{Spaces are Frames} If $\mathcal{U}$ is an Urquhart space, then it is a FI-frame. \end{lemma} \begin{proof} Let $\mathcal{U}=\langle X,Y,\leq_{X}, \leq_{Y}, R,T\rangle$ be an Urquhart space. We need to check that $R$ and $T$ satisfy conditions (\ref{condition_R}) and (\ref{condition_T}) of Definition \ref{definition FI-frames}, respectively. Since both proofs are similar, we only prove that $R$ satisfies (\ref{condition_R}). Suppose $(y,x,z)\in R$, $y'\leq_{X} y$, $x' \leq_{Y} x$ and $z \leq_{X} z'$. Since $\epsilon_{X}$ and $\epsilon_{Y}$ are monotone, we have $\epsilon_{X}(y')\subseteq \epsilon_{X}(y)$ and $\epsilon_{Y}(x')\subseteq \epsilon_{Y}(x)$. So, from Proposition \ref{propo_1}, we obtain $f(\epsilon_{X}(y'), \epsilon_{Y}(x'))\subseteq f(\epsilon_{X}(y) ,\epsilon_{Y}(x))$. If $P\in f(\epsilon_{X}(y') ,\epsilon_{Y}(x'))$, then there exist $S\in \mathcal{C}(X)$ and $Q\in \mathcal{C}(Y)$ such that $y\in S$, $x\in Q$ and $f(S,Q)\subseteq P$. 
As $\mathcal{U}$ is an Urquhart space, then $f(S,Q)\in \mathcal{C}(X)$, and due to $(y,x,z)\in R$ from hypothesis, we obtain $z\in f(S,Q)$. Thus, $z\in P$. Because $P\in \mathcal{C}(X)$ and $z\leq_{X}z'$, we conclude that $f(\epsilon_{X}(y'),\epsilon_{Y}(x'))\subseteq \epsilon_{X}(z')$. Therefore, from $(4)$ of Definition \ref{Associated Spaces}, we get $(y',x',z')\in R$. \end{proof} \begin{definition} Let $\mathcal{U}=\langle X,Y,\leq_{X}, \leq_{Y}, \tau_{X}, \tau_{Y}, R, T\rangle$ and $\hat{\mathcal{U}}=\langle \hat{X},\hat{Y},\leq_{\hat{X}}, \leq_{\hat{Y}}, \tau_{\hat{X}}, \tau_{\hat{Y}}, \hat{R}, \hat{T}\rangle$ be Urquhart spaces. We shall say that a pair $(g,h) \colon \mathcal{U} \to \hat{\mathcal{U}}$ is a {\rm{U-map}}, if $g \colon X \to \hat{X}$ and $h \colon Y \to \hat{Y}$ are monotone and continuous maps, and satisfy the conditions (M1), (M2), (N1) and (N2) of Definition \ref{Definition FI-morphism}. \end{definition} We denote by $\mathsf{USp}$ the category of Urquhart spaces and U-maps. Let $\mathcal{M}$ be a FIDL-module and $\mathbb{F} \colon \mathsf{FIMod}^{op} \to \mathsf{FIFram}$ the functor of Theorem \ref{Functor 2 DLFI}. Notice that $\mathbb{F}(\mathcal{M})=\mathcal{F}_{\mathcal{M}}$ is an Urquhart space. 
The latter assertion relies on the following facts, which are immediate from Priestley duality: (1) $\langle\mathcal{X}(\textbf{A}),\subseteq_{\bf{A}}, \tau_{\textbf{A}}\rangle$ and $\langle\mathcal{X}(\textbf{B}), \subseteq_{\bf{B}}, \tau_{\textbf{B}}\rangle$ are Priestley spaces; (2) Since $\mathcal{C}(\mathcal{X}(\textbf{A}))=\{\beta_{\bf{A}}(a) \colon a\in A \}$ and $\mathcal{C}(\mathcal{X}(\textbf{B}))=\{\beta_{\bf{B}}(b) \colon b\in B\}$ then, for every $U\in \mathcal{C}(\mathcal{X}(\textbf{B}))$ and every $V \in \mathcal{C}(\mathcal{X}(\textbf{A}))$, there exist $a\in A$ and $b\in B$ such that $f(V,U)=f(\beta_{\bf{A}}(a), \beta_{\bf{B}}(b))=\beta_{\bf{A}}(f(a,b))$ and $i(U,V)=i(\beta_{\bf{B}}(b),\beta_{\bf{A}}(a))=\beta_{\bf{A}}(i(b,a))$. Hence $f(V,U), i(U,V)\in \mathcal{C}(\mathcal{X}(\textbf{A}))$; (3) Since $\epsilon_{\mathcal{X}(\textbf{A})}(P)=P$ and $\epsilon_{\mathcal{X}(\textbf{B})}(Q)=Q$, for every $P\in \mathcal{X}(\textbf{A})$ and every $Q\in \mathcal{X}(\textbf{B})$, it follows that conditions $(4)$ and $(5)$ of Definition \ref{Associated Spaces} hold. In addition, if $(\alpha,\gamma) \colon \mathcal{M} \to \hat{\mathcal{M}}$ is a FIDL-homomorphism between two FIDL-modules $\mathcal{M}$ and $\hat{\mathcal{M}}$, it is also clear from Priestley duality that $\mathbb{F}(\alpha, \gamma) = (\alpha^{\ast}, \gamma^{\ast})$ is a U-map between Urquhart spaces. On the other hand, if $\mathcal{U}$ is an Urquhart space, then from Priestley duality the structure \begin{equation*} \mathcal{M}_{\mathcal{U}}= \langle\mathcal{C}(X), \mathcal{C}(Y),f_{\mathcal{U}},i_{\mathcal{U}}\rangle \end{equation*} is a FIDL-module. 
These facts allow us to define an assignment $\mathbb{J} \colon \mathsf{USp} \rightarrow \mathsf{FIMod}^{op}$ as follows: \begin{displaymath} \begin{array}{rcl} \mathcal{U} & \mapsto & \mathcal{M}_{\mathcal{U}} \\ (g,h) & \mapsto & (g^{\ast},h^{\ast}), \end{array} \end{displaymath} where $f_{\mathcal{U}}$ and $i_{\mathcal{U}}$ are the operations defined in (\ref{f_in P}) and (\ref{i_in P}), respectively. Such an assignment is clearly functorial. Notice that, as a straightforward application of Priestley duality, it follows that $\mathbb{J}$ is the inverse functor of $\mathbb{F}$. Since this is routine, we leave the details of the proof of the following result to the reader. \begin{theorem} \label{Duality for DLFI-modules} The categories $\mathsf{FIMod}$ and $\mathsf{USp}$ are dually equivalent. \end{theorem} \section{Congruences of FIDL-modules} \label{Congruences of FIDL-modules} In this section we introduce the concept of congruence in the class of FIDL-modules and we show how, through the duality of Section \ref{Topological duality for FIDL-modules}, we can provide a characterization of these in terms of certain pairs of closed subsets of the associated Urquhart space. This result will allow us to give a topological bi-space characterization of the simple and subdirectly irreducible FIDL-modules. \begin{definition} \label{DLFI Congruences} Let $\mathcal{M}$ be a FIDL-module. Let $\theta_{\bf{A}}$ be a congruence of $\bf{A}$ and $\theta_{\bf{B}}$ a congruence of $\bf{B}$. \begin{itemize} \item[(C1)] A pair $\left( \theta_{\bf{A}}, \theta_{\bf{B}} \right) \subseteq A^{2} \times B^{2}$ is called a {\rm{FDL-congruence of $\langle {\bf{A}}, {\bf{B}}, f \rangle$}}, if for every $(a,c) \in \theta_{\bf{A}}$ and every $(b,d) \in \theta_{\bf{B}}$, we have $(f(a,b), f(c,d)) \in \theta_{\bf{A}}$. 
\item[(C2)] A pair $\left( \theta_{\bf{A}}, \theta_{\bf{B}} \right) \subseteq A^{2} \times B^{2}$ is called an {\rm{IDL-congruence of $\langle {\bf{A}}, {\bf{B}}, i \rangle$}}, if for every $(a,c) \in \theta_{\bf{A}}$ and every $(b,d) \in \theta_{\bf{B}}$, we have $(i(b,a), i(d,c)) \in \theta_{\bf{A}}$. \mathrm{e}nd{itemize} Moreover, a pair $\left( \theta_{\bf{A}}, \theta_{\bf{B}} \right) \subseteq A^{2} \times B^{2}$ is called a {\rm{FIDL-congruence of $\mathcal{M}$}}, if $\left( \theta_{\bf{A}}, \theta_{\bf{B}} \right)$ is a FDL-congruence and an IDL-congruence. \mathrm{e}nd{definition} If $\mathcal{M}$ is a FIDL-module, then we write $Con_{f}(\mathcal{M})$ for the set of all FDL-congruences, $Con_{i}(\mathcal{M})$ for the set of all IDL-congruences, and $Con(\mathcal{M})$ for the set of all FIDL-congruences of $\mathcal{M}$. It is not hard to see that $Con(\mathcal{M})$ is an algebraic lattice. We now proceed to introduce the topological notions required for our characterization. Let $\mathcal{U}$ be an Urquhart space. 
We define the following subsets of $X$ and $Y$: \begin{itemize} \item For every $x,z \in X$ and every $y \in Y$, we have \[R^{1}(y,z) = \{x \in X \colon x {\hspace{0.1cm}} \text{is maximal in $X$ and} {\hspace{0.1cm}} (x,y,z) \in R\},\] \[R^{2}(x,z) = \{y \in Y \colon y {\hspace{0.1cm}} \text{is maximal in $Y$ and} {\hspace{0.1cm}} (x,y,z) \in R\},\] \[T^{1}(x,z) = \{y \in Y \colon y {\hspace{0.1cm}} \text{is maximal in $Y$ and} {\hspace{0.1cm}} (y,x,z) \in T\},\] \[T^{3}(y,x) = \{z \in X \colon z {\hspace{0.1cm}} \text{is minimal in $X$ and} {\hspace{0.1cm}} (y,x,z)\in T\}.\] \item For every $x,z \in X$, we have \[Max(R^{-1}(z)) =\{ (x,y) \in X \times Y \colon x \in R^{1}(y,z) {\hspace{0.1cm}} \text{and} {\hspace{0.1cm}} y \in R^{2}(x,z)\},\] \[\mathcal{D}(x) =\{ (y,z) \in Y \times X \colon y \in T^{1}(x,z) {\hspace{0.1cm}} \text{and} {\hspace{0.1cm}} z \in T^{3}(y,x)\}.\] \mathrm{e}nd{itemize} \begin{definition} \label{strongly closed sets} Let $\mathcal{U}$ be an Urquhart space. Let $Z_{1}$ be a closed set of $X$ and $Z_{2}$ a closed set of $Y$. \begin{itemize} \item[(CL1)] A pair $(Z_{1},Z_{2}) \subseteq X \times Y$ is called a {\rm{$R$-closed set of $\mathcal{U}$}}, if for every $z \in Z_{1}$, we have $Max(R^{-1}(z)) \subseteq Z_{1} \times Z_{2}$. \item[(CL2)] A pair $(Z_{1},Z_{2}) \subseteq X \times Y$ is called a {\rm{$T$-closed set of $\mathcal{U}$}}, if for every $x \in Z_{1}$, we have $\mathcal{D}(x) \subseteq Z_{2} \times Z_{1}$. \mathrm{e}nd{itemize} Moreover, a pair $(Z_{1},Z_{2}) \subseteq X \times Y$ is called a {\rm{strongly closed set of $\mathcal{U}$}}, if $(Z_{1},Z_{2})$ is both a $R$-closed set and a $T$-closed set. 
\end{definition} If $\mathcal{U}$ is an Urquhart space, then we write $\mathcal{C}_{f}(\mathcal{U})$ for the set of all $R$-closed sets of $\mathcal{U}$, $\mathcal{C}_{i}(\mathcal{U})$ for the set of all $T$-closed sets of $\mathcal{U}$, and $\mathcal{C}_{s}(\mathcal{U})$ for the set of all strongly closed sets of $\mathcal{U}$. \begin{theorem} \label{Characterization of congruences} Let $\mathcal{M}$ be a FIDL-module and $\mathcal{F}_{\mathcal{M}}$ be the Urquhart space associated with $\mathcal{M}$. We consider the correspondence $(Z_{1},Z_{2}) \to (\theta(Z_{1}),\theta(Z_{2}))$ for every $Z_{1} \in \mathcal{C}( \mathcal{X}(\textbf{A}))$ and every $Z_{2} \in \mathcal{C}( \mathcal{X}(\textbf{B}))$, where $\theta(-)$ is given by (\ref{congruence-closed}). Then: \begin{enumerate} \item There exists an anti-isomorphism between $\mathcal{C}_{f}(\mathcal{F}_{\mathcal{M}})$ and $Con_{f}(\mathcal{M})$. \item There exists an anti-isomorphism between $\mathcal{C}_{i}(\mathcal{F}_{\mathcal{M}})$ and $Con_{i}(\mathcal{M})$. \item There exists an anti-isomorphism between $\mathcal{C}_{s}(\mathcal{F}_{\mathcal{M}})$ and $Con(\mathcal{M})$. \end{enumerate} \end{theorem} \begin{proof} Since $(3)$ is clearly a direct consequence of $(1)$ and $(2)$, we only prove such items. $(1)$ Let us assume that $(Z_{1},Z_{2})$ is a $R_{\mathcal{M}}$-closed set of $\mathcal{F}_{\mathcal{M}}$. We prove that $(\theta(Z_{1}),\theta(Z_{2}))$ is a FDL-congruence. Let $(x,y) \in \theta(Z_{1})$ and $(b,c) \in \theta(Z_{2})$. If $P \in \beta_{\bf{A}}(f(x,b)) \cap Z_{1}$, then $f(x,b) \in P$. By Lemma \ref{lem_1}, there exist $Q \in \mathcal{X}(\textbf{A})$ and $R \in \mathcal{X}(\textbf{B})$ such that $f(Q,R) \subseteq P$, $x \in Q$ and $b \in R$. Using Zorn's Lemma, it is easy to prove that there exist maximal $Q' \in \mathcal{X}(\textbf{A})$ and $R' \in \mathcal{X}(\textbf{B})$ such that $(Q',R') \in Max(R_{\mathcal{M}}^{-1}(P))$. 
Since $(Z_{1},Z_{2})$ is a $R_{\mathcal{M}}$-closed set of $\mathcal{F}_{\mathcal{M}}$ by assumption, then it follows that $Q' \in Z_{1}$ and $R' \in Z_{2}$. Thus, $f(Q',R') \subseteq P$, $y \in Q'$ and $c \in R'$. Then, by Lemma \ref{lem_1}, we have $f(y,c) \in P$. So, $P \in \beta_{\bf{A}}(f(y,c)) \cap Z_{1}$. The other inclusion is similar. Therefore $(f(x,b), f(y,c)) \in \theta(Z_{1})$. For the converse, let $(\theta(Z_{1}),\theta(Z_{2}))$ be a FDL-congruence and suppose that the pair $(Z_{1},Z_{2})$ is not a $R_{\mathcal{M}}$-closed set of $\mathcal{F}_{\mathcal{M}}$. Then, there exist $P \in Z_{1}$ and $(Q,R) \in \mathcal{X}(\textbf{A}) \times \mathcal{X}(\textbf{B})$ such that $(Q,R) \in Max(R_{\mathcal{M}}^{-1}(P))$ and $(Q,R) \notin Z_{1} \times Z_{2}$. Suppose that $Q \notin Z_{1}$. Since $Z_{1}$ is a closed set of $\mathcal{X}(\textbf{A})$, then there exist $a,b \in A$ such that $a \in Q$, $b \notin Q$ and $(a \wedge b,a) \in \theta (Z_{1})$. Let us consider the filter ${\rm{Fig}}_{\textbf{A}}( Q \cup \{b\})$. As $Q \in R^{1}_{\mathcal{M}}(R,P)$, then $Q$ is maximal and $f({\rm{Fig}}_{\textbf{A}}(Q \cup \{b\}), R) \nsubseteq P$. So, there exist $q \in Q$ and $r \in R$ such that $f(q \wedge b, r) \notin P$. Since $(\theta(Z_{1}),\theta(Z_{2}))$ is a FDL-congruence, it follows that $(f(a \wedge b \wedge q, r), f(a \wedge q, r)) \in \theta(Z_{1})$. Now, since $a \wedge q \in Q$, then $f(a \wedge q, r) \in f(Q,R) \subseteq P$. Hence $f(a \wedge b \wedge q, r) \in P$. Notice that $f(a \wedge b \wedge q, r) \leq f(b \wedge q, r )$, therefore $f(b \wedge q, r) \in P$, which is a contradiction. Then $Q \in Z_{1}$. The proof of $R \in Z_{2}$ is similar. So, $(Z_{1},Z_{2})$ is a $R_{\mathcal{M}}$-closed. $(2)$ Assume that $(Z_{1},Z_{2})$ is a $T_{\mathcal{M}}$-closed set of $\mathcal{F}_{\mathcal{M}}$. Let $(x,y) \in \theta(Z_{1})$ and $(b,c) \in \theta(Z_{2})$. We will see that $(\theta(Z_{1}), \theta(Z_{2}))$ is an IDL-congruence of ${\bf{A}}$. 
Let $P \in \mathcal{X}(\textbf{A})$. Suppose that $P \in \beta_{\bf{A}}(i(b,x)) \cap Z_{1}$ and $P \notin \beta_{\bf{A}}(i(c,y)) \cap Z_{1}$, i.e., $i(b,x) \in P$ and $i(c,y) \notin P$. By Lemma \ref{lem_1}, there exist $R \in \mathcal{X}(\textbf{B})$ and $Q \in \mathcal{X}(\textbf{A})$ such that $i(R,P) \subseteq Q$, $c \in R$ and $y \notin Q$. Note that from Zorn's Lemma it is not hard to see that there are $R' \in \mathcal{X}(\textbf{B})$ and $Q' \in \mathcal{X}(\textbf{A})$ such that $(R',Q') \in \mathcal{D}(P)$. Since $(Z_{1},Z_{2})$ is a $T_{\mathcal{M}}$-closed set, then $R' \in Z_{2}$ and $Q' \in Z_{1}$. Due to $R \subseteq R'$, we have $c \in R'$ and because $(b,c) \in \theta(Z_{2})$, it follows that $b \in R'$. On the other hand, since $i(b,x) \in P$, $i(R',P) \subseteq Q'$ and $b \in R'$, by Lemma \ref{lem_1} we have $x \in Q'$. Since $(x,y) \in \theta(Z_{1})$ and $Q' \in Z_{1}$, it follows that $y \in Q' \subseteq Q$, so $y \in Q$, which is a contradiction. We conclude that $(\theta(Z_{1}), \theta(Z_{2}))$ is an IDL-congruence. Conversely, we assume $(\theta(Z_{1}), \theta(Z_{2}))$ is an IDL-congruence. Suppose that $(Z_{1},Z_{2})$ is not a $T_{\mathcal{M}}$-closed set of $\mathcal{F}_{\mathcal{M}}$. Then there exist $P \in Z_{1}$, $Q \in \mathcal{X}(\textbf{A})$ and $R \in \mathcal{X}(\textbf{B})$ such that $(R,Q) \in \mathcal{D}(P)$ and $(R,Q) \notin Z_{2} \times Z_{1}$. If $R \notin Z_{2}$, then since $Z_{2}$ is a closed set of $\mathcal{X}(\textbf{B})$ there exist $b,c \in B$ such that $b \in R$, $c \notin R$ and $(b \wedge c, b) \in \theta(Z_{2})$. Let us consider ${\rm{Fig}}_{\textbf{B}}(R \cup \{c\})$. Since $R \in T_{\mathcal{M}}^{1}(P,Q)$, then $i({\rm{Fig}}_{\textbf{B}}(R \cup \{c\}), P) \nsubseteq Q$, i.e., there exists $z \in A$ such that $z \notin Q$ and $p \leq i(r \wedge c, z)$, for some $r \in R$ and $p \in P$. Hence $i(r \wedge c, z) \in P$ and by Proposition \ref{propo_1}, $i(r \wedge b \wedge c, z) \in P$. 
On the other hand, since $(\theta(Z_{1}), \theta(Z_{2}))$ is a congruence, we obtain that $(i(r \wedge b \wedge c, z), i(r \wedge b, z)) \in \theta(Z_{1})$ and therefore $i(r \wedge b, z) \in P$. So, $i(R,P) \subseteq Q$, $r \wedge b \in R$ and by Lemma \ref{lem_1} we have $z \in Q$, which is a contradiction. If $Q \notin Z_{1}$, then there exist $x,y \in A$ such that $x \in Q$, $y \notin Q$ and $(x \wedge y, x) \in \theta(Z_{1})$. Let us consider $I={\rm{Idg}}_{\textbf{A}}(Q^{c} \cup \{y\})$. Observe that $I \cap i(R,P) \neq \emptyset$, because otherwise from the Prime Filter Theorem, there would exist $H \in \mathcal{X}(\textbf{A})$ such that $i(R,P) \subseteq H$, $H \subseteq Q$ and $x \notin H$, which is absurd since $Q$ is minimal. Thus, there exists $a \in A$ such that $a \leq q \vee x$ and $p \leq i(r,a)$, for some $q \notin Q$, $r \in R$ and $p \in P$. So, by Proposition \ref{propo_1}, $p \leq i(r,a) \leq i(r, q \vee x)$ and $i(r, q \vee x) \in P$. Therefore, since $(\theta(Z_{1}), \theta(Z_{2}))$ is a congruence, it follows that $(i(r, q \vee (x \wedge y)), i(r, q \vee x)) \in \theta(Z_{1})$. Hence $i(r, q \vee (x \wedge y)) \in P$. Since $i(R,P) \subseteq Q$ and $r \in R$, then by Lemma \ref{lem_1} we get that $q \vee (x \wedge y) \in Q$, which is a contradiction because $Q$ is prime. Then $(Z_{1},Z_{2})$ is a $T_{\mathcal{M}}$-closed set. \end{proof} Let $\{\mathcal{M}_{k}\}_{k\in K}$ be a family of FIDL-modules, with $\mathcal{M}_{k}=\langle \textbf{A}_{k}, \textbf{B}_{k},f_{k},i_{k}\rangle$. Then \begin{equation*} \underset{k\in K}{\prod}\mathcal{M}_{k} = \left\langle \underset{k\in K}{\prod}{\bf{A}_{k}}, \underset{k\in K}{\prod}{\bf{B}_{k}}, f, i \right\rangle \end{equation*} has a FIDL-module structure, where $f(a,b)(k)=f_{k}(a(k),b(k))$ and $i(b,a)(k)=i_{k}(b(k),a(k))$, for every $k \in K$.
Let $\pi^{\bf{A}}_{k}: \underset{k\in K}{\prod}{\bf{A}_{k}} \rightarrow \bf{A}_{k}$ and $\pi^{\bf{B}}_{k}:\underset{k\in K}{\prod}{\bf{B}_{k}} \rightarrow \bf{B}_{k}$ be the projection homomorphisms. Note that the pair $(\pi^{\bf{A}}_{k}, \pi^{\bf{B}}_{k})$ is a FIDL-homomorphism, for every $k \in K$. It is not hard to see that $\underset{k\in K}{\prod}\mathcal{M}_{k}$ together with the family $\{(\pi^{\bf{A}}_{k}, \pi^{\bf{B}}_{k})\}_{k \in K}$ is in fact the categorical product of $\{\mathcal{M}_{k}\}_{k\in K}$. Let $(\alpha,\gamma)$ be a FIDL-homomorphism. We say that $(\alpha,\gamma)$ is a 1-1 FIDL-homomorphism if $\alpha$ and $\gamma$ are 1-1, and similarly, we say that $(\alpha,\gamma)$ is an onto FIDL-homomorphism if $\alpha$ and $\gamma$ are onto. If $\mathcal{M}$ is a FIDL-module, then we introduce the following concepts: \begin{itemize} \item We will say that $\mathcal{M}$ is a \emph{subdirect product} of a family $\{\mathcal{M}_{k}\}_{k\in K}$ of FIDL-modules, if there exists a 1-1 FIDL-homomorphism \begin{equation*} (\alpha, \gamma) \colon \mathcal{M} \to \underset{k\in K}{\prod}\mathcal{M}_{k} \end{equation*} such that $(\pi^{\bf{A}}_{k} \alpha, \pi^{\bf{B}}_{k} \gamma)$ is an onto FIDL-homomorphism, for every $k\in K$. \item We will say that $\mathcal{M}$ is \emph{subdirectly irreducible} if for every family of FIDL-modules $\{\mathcal{M}_{k}\}_{k\in K}$ and 1-1 FIDL-homomorphism \begin{equation*} (\alpha, \gamma) \colon \mathcal{M} \to \underset{k\in K}{\prod}\mathcal{M}_{k} \end{equation*} there exists a $k\in K$ such that $(\pi^{\bf{A}}_{k} \alpha, \pi^{\bf{B}}_{k} \gamma)$ is an isomorphism of FIDL-modules. \item We will say that $\mathcal{M}$ is \emph{simple} if the lattice of the FIDL-congruences has only two elements. \end{itemize} The following result is immediate from Theorem \ref{Characterization of congruences}.
\begin{corol} \label{subdirectly irreducible DLFI-modules} Let $\mathcal{M}$ be a FIDL-module. Then $\mathcal{M}$ is subdirectly irreducible if and only if $\mathcal{M}$ is trivial or there exists a minimal non-trivial FIDL-congruence in $\mathcal{M}$. \end{corol} If $\mathcal{U}$ is an Urquhart space, then from Theorem \ref{Characterization of congruences} it is clear that $\mathcal{C}_{s}(\mathcal{U})$ is an algebraic lattice. So, if $Z_{1} \times Z_{2} \subseteq X \times Y$, let ${\rm{cl}}_{\mathcal{C}_{s}}(Z_{1},Z_{2})$ be the smallest element of $\mathcal{C}_{s}(\mathcal{U})$ which contains $Z_{1} \times Z_{2}$. Let $(x,y) \in X \times Y$. If there is no risk of confusion, we write ${\rm{cl}}_{\mathcal{C}_{s}}(x,y)$ instead of ${\rm{cl}}_{\mathcal{C}_{s}}(\{x\},\{y\})$. \begin{prop} \label{Simple algebras} Let $\mathcal{M}$ be a FIDL-module and $\mathcal{F}_{\mathcal{M}}$ be the Urquhart space associated with $\mathcal{M}$. Then $\mathcal{M}$ is simple if and only if ${\rm{cl}}_{\mathcal{C}_{s}}(P,Q) = \mathcal{X}(\textbf{A}) \times \mathcal{X}(\textbf{B})$, for every $ (P,Q) \in \mathcal{X}(\textbf{A}) \times \mathcal{X}(\textbf{B})$. \end{prop} \begin{proof} Since $\mathcal{M}$ is simple if and only if $Con(\mathcal{M})=\{(\Delta^{\mathbf{A}}, \Delta^{\mathbf{B}}), (\nabla^{\mathbf{A}},\nabla^{\mathbf{B}})\}$, then by Theorem \ref{Characterization of congruences} this is equivalent to $\mathcal{C}_{s}(\mathcal{F}_{\mathcal{M}})=\{(\emptyset,\emptyset),(\mathcal{X}(\textbf{A}),\mathcal{X}(\textbf{B}))\}$ and the result follows. \end{proof} \begin{theorem} \label{Subdirectly irreducible algebras} Let $\mathcal{M}$ be a FIDL-module and $\mathcal{F}_{\mathcal{M}}$ be the Urquhart space associated with $\mathcal{M}$.
Then $\mathcal{M}$ is subdirectly irreducible but not simple if and only if the set \begin{equation*} \mathcal{J} = \{ (P,Q) \in \mathcal{X}(\textbf{A}) \times \mathcal{X}(\textbf{B}) \colon {\rm{cl}}_{\mathcal{C}_{s}}(P,Q)=( \mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B})) \} \end{equation*} is a non-empty open set distinct from $(\mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B}))$. \end{theorem} \begin{proof} Let us assume that $\mathcal{M}$ is subdirectly irreducible. Then $Con(\mathcal{M})-\{(\Delta^{\mathbf{A}},\Delta^{\mathbf{B}})\}$ has a minimum element. From Theorem \ref{Characterization of congruences}, $\mathcal{C}_{s}(\mathcal{F}_{\mathcal{M}})-(\mathcal{X}(\textbf{A}),\mathcal{X}(\textbf{B}))$ has a maximum element. Let $(Z_{1},Z_{2})$ be such an element. Then $Z_{1}$ and $Z_{2}$ are non-empty. We prove that $\mathcal{J}=(\mathcal{X}(\textbf{A}),\mathcal{X}(\textbf{B}))-(Z_{1},Z_{2})$. On the one hand, if $(P,Q)\notin (Z_{1},Z_{2})$, then $(Z_{1},Z_{2}) \subseteq (Z_{1},Z_{2}) \cup {\rm{cl}}_{\mathcal{C}_{s}}(P,Q)$. So it must be that ${\rm{cl}}_{\mathcal{C}_{s}}(P,Q)=( \mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B}))$, because if it is not the case, then $(Z_{1},Z_{2})$ would not be the maximum of $\mathcal{C}_{s}(\mathcal{F}_{\mathcal{M}})-(\mathcal{X}(\textbf{A}),\mathcal{X}(\textbf{B}))$, which is a contradiction. On the other hand, if $(P,Q) \in \mathcal{J} \cap (Z_{1},Z_{2})$, then ${\rm{cl}}_{\mathcal{C}_{s}}(P,Q)=( \mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B}))=(Z_{1},Z_{2})$, which is absurd from assumption. We conclude the proof by noticing that if $\mathcal{J}$ is a non-empty open set distinct from $(\mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B}))$, then it is easy to see that $(\mathcal{X}(\textbf{A}), \mathcal{X}(\textbf{B}))-\mathcal{J}$ is the maximum of $\mathcal{C}_{s}(\mathcal{F}_{\mathcal{M}})-(\mathcal{X}(\textbf{A}),\mathcal{X}(\textbf{B}))$.
Then the result is an immediate consequence of Theorem \ref{Characterization of congruences}. \end{proof} \begin{thebibliography}{99} \bibitem{Venema} \uppercase{Blackburn, P.---de Rijke, M.---Venema, Y.}: \textit{Modal Logic}, Cambridge University Press, 2001. \bibitem{Cabrer-Celani} \uppercase{Cabrer, L.---Celani, S.}: \textit{Priestley dualities for some lattice-ordered algebraic structures, including MTL, IMTL and MV-algebras}, Cent. Eur. J. Math. \textbf{4} (2006), 600--623. \bibitem{Celani1} \uppercase{Celani, S.}: \textit{Distributive lattices with fusion and implication}, Southeast Asian Bull. Math. \textbf{28} (2004), 999--1010. \bibitem{C2005} \uppercase{Celani, S.}: \textit{Simple and subdirectly irreducibles bounded distributive lattices with unary operators}, Int. J. Math. Math. Sci. \textbf{2006} (2006), 20 p. \bibitem{Chagrov} \uppercase{Chagrov, A.---Zakharyaschev, M.}: \textit{Modal Logic}, Oxford Logic Guides, Oxford University Press, 1997. \bibitem{CLP} \uppercase{Cignoli, R.---Lafalce, S.---Petrovich, A.}: \textit{Remarks on Priestley duality for distributive lattices}, Order \textbf{8} (1991), 299--315. \bibitem{Cignoli} \uppercase{Cignoli, R.}: \textit{Distributive lattice congruences and Priestley Spaces}, Proceedings of the first congress of mathematics ``Dr. Antonio A. R. Monteiro'', Bah\'{i}a Blanca, Argentina (1991), 81--84. \bibitem{Esteva-Godo} \uppercase{Esteva, F.---Godo, L.}: \textit{Monoidal t-norm based Logic: Towards a logic for left-continuous t-norms}, Fuzzy Sets Syst. \textbf{124} (2001), 271--288. \bibitem{Goldblatt} \uppercase{Goldblatt, R.}: \textit{Varieties of complex algebras}, Ann. Pure Appl. Logic \textbf{44} (1989), 173--242. \bibitem{Hohle} \uppercase{H\"ohle, U.}: \textit{Commutative, residuated l-monoids}, Proceedings of the 14th Linz seminar on fuzzy set theory held at Linz, Austria. Kluwer Academic Publishers (1995), 53--106.
\bibitem{JipsenTsinakis} \uppercase{Jipsen, P.---Tsinakis, C.}: \textit{A Survey of Residuated Lattices}, Ordered Algebraic Structures, Dordrecht. Kluwer Academic Publishers (2002), 19--56. \bibitem{Petrovich} \uppercase{Petrovich, A.}: \textit{Distributive lattices with an operator}, Stud. Log. \textbf{56} (1996), 205--224. \bibitem{Priestley} \uppercase{Priestley, H.}: \textit{Ordered topological spaces and the representation of distributive lattices}, Proc. London Math. Soc. \textbf{24} (1972), 507--530. \bibitem{Urquhart} \uppercase{Urquhart, A.}: \textit{Duality for algebras of relevant logics}, Stud. Log. \textbf{56} (1996), 263--276. \bibitem{Stokkermans} \uppercase{Sofronie-Stokkermans, V.}: \textit{Resolution-based decision procedures for the universal theory of some classes of distributive lattices with operators}, J. Symb. Comput. \textbf{36} (2003), 891--924. \end{thebibliography} \end{document}
\begin{document} \baselineskip 14pt \parindent.4in \catcode`\@=11 \begin{center} {\Huge \bf Convergence Relative to a Microstructure: Properties, Optimal Bounds and Application} \\[5mm] {\bf Tuhin GHOSH and Muthusamy VANNINATHAN }\\[4mm] \textit{Centre for Applicable Mathematics, Tata Institute of Fundamental Research, India.}\\[2mm] Email : \textit{[email protected]\ ,\ [email protected] } \end{center} \begin{abstract} \noindent In this work, we study a new notion involving convergence of microstructures represented by matrices $B^\epsilon$ related to the classical $H$-convergence of $A^\epsilon$. It incorporates the interaction between the two microstructures. This work is about its effects on various aspects: existence, examples, optimal bounds on emerging macro quantities, application etc. Five among them are highlighted below: $(1)$ The usual arguments based on translated inequality, $H$-measures, Compensated Compactness etc for obtaining optimal bounds are not enough. Additional compactness properties are needed. $(2)$ Assuming two-phase microstructures, the bounds define naturally four optimal regions in the phase space of macro quantities. The classically known single region in the self-interacting case, namely $B^\epsilon = A^\epsilon$, can be recovered from them, a result that indicates we are dealing with a true extension of the $\mathcal{G}$-closure problem. $(3)$ Optimality of the bounds is not immediate because of (a priori) non-commutativity of macro-matrices, an issue not present in the self-interacting case. Somewhat surprisingly though, commutativity follows a posteriori. $(4)$ From the application to ``Optimal Oscillation-Dissipation Problems'', it emerges that oscillations and dissipation can co-exist optimally and the microstructures behind them need not be the same though they are closely linked. Furthermore, optimizers are found among $N$-rank laminates with interfaces. This is a new feature.
$(5)$ Explicit computations in the case of canonical microstructures are performed, in which we make use of $H$-measure in a novel way. \end{abstract} \vskip .5cm\noindent {\bf Keywords:} Homogenization, Optimal Design Problem, Optimal Oscillation Dissipation Problem, Compensated Compactness, $H$-measure, Optimal bounds. \vskip .5cm \noindent {\bf Mathematics Subject Classification:} 35B; 49J; 62K05; 78M40; 74Q20; 78A48 \section{Introduction} \setcounter{equation}{0} In this work, we are concerned with a new notion of convergence related to the classical $H$-convergence of the homogenization theory. We begin by recalling the notion of $H$-convergence \cite{T}. Let $\mathcal{M}(\alpha, \beta;\Omega)$ with $0<\alpha<\beta$ denote the set of all real $N\times N$ symmetric matrices $A(x)$ of functions defined almost everywhere on a bounded open subset $\Omega$ of $\mathbb{R}^N$ such that if $A(x)=[a_{kl}(x)]_{1\leq k,l\leq N}$ then \begin{equation*} a_{kl}(x)=a_{lk}(x)\ \forall l, k=1,\dots,N \ \mbox{and }\ (A(x)\xi,\xi)\geq \alpha|\xi|^2,\ |A(x)\xi|\leq \beta|\xi|,\ \forall\xi \in \mathbb{R}^N,\ \mbox{ a.e. }x\in\Omega.\end{equation*} Let $A^\epsilon$ and $A^{*}$ belong to $\mathcal{M}(a_1,a_2;\Omega)$ with $0 < a_1 < a_2$. We say $A^\epsilon \xrightarrow{H} A^{*}$ or $H$-converges to a homogenized matrix $A^{*}$, if $A^\epsilon\nabla u^\epsilon \rightharpoonup A^{*}\nabla u$ in $L^2(\Omega)$ weak, for all test sequences $u^\epsilon$ satisfying \begin{equation}\begin{aligned}\label{ad13} u^{\epsilon} &\rightharpoonup u \quad\mbox{weakly in }H^1(\Omega)\\ -div(A^\epsilon\nabla u^\epsilon(x))& \mbox{ is strongly convergent in } H^{-1}(\Omega). \end{aligned}\end{equation} Convergence of the canonical energy densities follows as a consequence: \begin{equation}\label{ad12} A^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon \rightharpoonup A^{*}\nabla u\cdot\nabla u \mbox{ in } \mathcal{D}^\prime(\Omega).
\end{equation} Further, total energies over $\Omega$ converge if $u^\epsilon$ and $u$, for example, lie in $H^1_0(\Omega)$: \begin{equation}\label{ad15} \int_\Omega A^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon dx \rightarrow \int_\Omega A^{*}\nabla u\cdot\nabla u\ dx. \end{equation} In this work, we are concerned with other oscillating quadratic energy densities; more precisely, let us take another sequence of matrices $B^\epsilon$ and consider the corresponding energy density: \begin{equation}\label{bs14} B^\epsilon \nabla u^\epsilon\cdot \nabla u^\epsilon \end{equation} and study its behaviour as $\epsilon$ tends to zero. Just like \eqref{ad12}, we may expect the appearance of new macro quantities in its weak limit. This motivates the following notion: \begin{definition}\label{sid} Let $A^\epsilon$ $H$-converge to $A^{*}$. Let $B^\epsilon$ be given in $\mathcal{M}(b_1,b_2;\Omega)$ where $0 < b_1 < b_2$. We say $B^\epsilon$ converges to the matrix of functions $B^{\#}(x)$ relative to $A^\epsilon$ (denoted $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$) if for all test sequences $u^\epsilon$ satisfying \eqref{ad13} we have \begin{equation}\label{ad17} B^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon \rightharpoonup B^{\#}\nabla u\cdot\nabla u \mbox{ in } \mathcal{D}^\prime(\Omega). \end{equation} \end{definition} \noindent Further, analogous to \eqref{ad15}, we have as a consequence \begin{equation*} \int_\Omega B^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon dx \rightarrow \int_\Omega B^{\#}\nabla u\cdot\nabla u dx, \end{equation*} if $ u^\epsilon$ and $u$ belong to $H^1_0(\Omega)$ (see Remark \ref{ub19}). We will also show $B^{\#}\in\mathcal{M}(b_1,\widetilde{b_2};\Omega)$ with $\widetilde{b_2}=b_2\frac{a_2}{a_1}\geq b_2$ (see \eqref{Sd3}).\\ \\ The significance of the limit $B^{\#}$ in the context of Calculus of Variations will be clear in the application in Section \ref{qw10}.
In some sense, $B^{\#}$ can be interpreted as macro coefficients associated with the optimal dissipation of energy of oscillations associated with $A^{*}$. The main characteristic feature behind $B^{\#}$ is the interaction between microstructures of $A^\epsilon$ and $B^\epsilon$ through the triple product of oscillating quantities \eqref{bs14}. The case $A^\epsilon=B^\epsilon$ is classical and is referred to as the self-interacting case. In this case, comparing \eqref{ad12} with \eqref{ad17}, we have $B^{\#}=A^{*}$ and thus the new notion of relative convergence extends $H$-convergence. We may say that $H$-convergence is based on both flux and energy convergence whereas the new notion is energy-based. (In general the weak limit of $B^\epsilon\nabla u^\epsilon$ is not equal to $B^{\#}\nabla u$). It is worthwhile to note that $B^{\#}$ is nontrivial and interesting even if $B^\epsilon$ is independent of $\epsilon$. On the other hand, if $A^\epsilon$ is independent of $\epsilon$ then $B^{\#}$ is somewhat trivial because it coincides with the weak limit $\overline{B}$ of $B^\epsilon$. Since $B^{*}$ ($H$-limit of $B^\epsilon$) is defined using solely the canonical oscillating test functions of $B^\epsilon$ (not involving those of $A^\epsilon$), $B^{\#}$ is in general different from $B^{*}$. In fact, we give an expression of $B^{\#}$ using the oscillating test functions associated to $A^\epsilon$ in the next section (see Corollary \ref{Sd14}) which will show that $B^{\#}$ and $B^{*}$ need not be the same. There is another type of interaction between $A^\epsilon$ and $B^\epsilon$.
Indeed, it is possible to associate with $B^\epsilon$ macro quantities $B^{\#}_{FL}$ describing effective behaviour of fluxes relative to the given microstructure $A^\epsilon$; more precisely, $B^{\#}_{FL}$ possesses the following property: \begin{equation*}B^\epsilon\nabla u^\epsilon \rightharpoonup B^{\#}_{FL}\nabla u\ \mbox{ in } L^2(\Omega) \mbox{ weak, } \end{equation*} for all test sequences $u^\epsilon$ satisfying \eqref{ad13}. Existence of such macro coefficients is obtained using the correctors associated with $A^\epsilon$ (cf. Remark \ref{ad16}). It is immediately seen that $B^{\#}_{FL}$ coincides with $A^{*}$ if $B^\epsilon = A^\epsilon$ and on the other hand if $B^\epsilon=B$ is independent of $\epsilon$, then $B^{\#}_{FL} = B$. Unfortunately, the above matrix $B^{\#}_{FL}$ is not capable of describing the macro behaviour of energy density $B^\epsilon \nabla u^\epsilon \cdot \nabla u^\epsilon$; that is, we cannot say that the weak limit of $B^\epsilon \nabla u^\epsilon\cdot \nabla u^\epsilon$ is given by $B^{\#}_{FL} \nabla u\cdot \nabla u$. Since the applications that we envisage in Calculus of Variations involve energy densities in their objective functionals, $B^{\#}_{FL}$ does not seem to play any role in the examples of Section \ref{qw5}. In contrast, $B^{\#}$ will play a role, as we shall see below. For more elaborate study of $B^{\#}_{FL}$, see \cite{TG-MV}. In general, $B^{\#}$ is different from $B^{\#}_{FL}$, even for simple microstructures as shown below (see Remark \ref{ot10}). This can also be seen in more general terms: $B^{\#}_{FL}$ is born out of interaction between $B^\epsilon$ and the canonical oscillations associated with $A^\epsilon$ (namely, the correctors) in a linear fashion whereas $B^{\#}$ is an outcome of quadratic interaction with the correctors of $A^\epsilon$. This entire article is concerned with the effects of this latter interaction.
There are advantages in separating the macro behaviour of flux density and energy density which are ``mixed'' in $H$-convergence. For one thing, we can take $B^\epsilon$ more general and this has consequences in Calculus of Variations as shown in Section \ref{qw5}. Our first result is a compactness result proving the existence of $B^{\#}$. (See Theorem \ref{bs8}) The main difference with $H$-limit is that the oscillations in $\nabla u^\epsilon$ are restricted by conditions \eqref{ad13} in which $A^\epsilon$ appears and $B^\epsilon$ does not. The proof introduces adjoint state associated with the constraint \eqref{ad13}. Repeated application of div-curl lemma then enables us to prove the nice behaviour of the energy density \eqref{ad17}. This result implicit in \cite{KP,KV} is given a quick proof by different arguments. The point to note is that even if the entire sequence $A^\epsilon$ $H$-converges to $A^{*}$ and $B^\epsilon = B$ is independent of $\epsilon$, there may be several limit points $B^{\#}$ (see \eqref{FL19}). All such limit points lie on the fibre over $A^{*}$, an object defined in \eqref{kfab}. Basic objectives in the theory of Homogenization include not only to show the existence of the homogenized limit $A^{*}$ but also establish optimal bounds on it independent of microgeometric details along with application to optimal design problem (ODP). This has been completely accomplished in the case of two-phase composites \cite{A,GB,LC,ML1,T}. We intend to develop a similar program for the new object $B^{\#}$. Our main assumption in this task will be that $A^\epsilon$ and $B^\epsilon$ have two phases with prescribed volume proportions : \begin{equation*} A^{\epsilon}(x) = a^{\epsilon}(x)I\ \mbox{ where, }\ a^{\epsilon}(x)= a_1\chi_{{\omega}_{A^{\epsilon}}}(x) +a_2 (1-\chi_{{\omega}_{A^{\epsilon}}}(x))\ \mbox{ a.e. 
in }\ x\in\Omega.\end{equation*} $\chi_{{\omega}_{A^{\epsilon}}}(x)$ defines the microstructures in the medium, with $\chi_{{\omega}_{A^{\epsilon}}}(x)\rightharpoonup \theta_A(x)$ in $L^{\infty}$ weak* topology, $\theta_A(x)$ satisfying $0\leq \theta_A(x)\leq 1$ a.e. $x\in\Omega$ defines the local proportion for the component $a_1 I$ in $\Omega$ (or, $(1-\theta_A(x))$ for the component $a_2 I$). \begin{equation*} B^{\epsilon}(x)= b^{\epsilon}(x)I\ \mbox{ where }\ b^{\epsilon}(x)= b_1\chi_{{\omega}_{B^{\epsilon}}}(x) + b_2 (1-\chi_{{\omega}_{B^{\epsilon}}}(x))\ \mbox{ a.e. in }\ x\in\Omega.\end{equation*} and we assume that $\chi_{{\omega}_{B^{\epsilon}}}(x)\rightharpoonup \theta_B(x)$ in $L^{\infty}$ weak* topology, $\theta_B(x)$ satisfying $0\leq \theta_B(x)\leq 1$ a.e. $x\in\Omega$ defines the local proportion for the component $b_1 I$ in $\Omega$ (or, $(1-\theta_B(x))$ for the component $b_2 I$). Before describing our results, let us cite a few situations where interactions of similar kind arise and the above notion and our results may be of interest. From the works of Busse, Howard, Doering and Constantin cited in \cite{BUS,DCR}, we see that there is a general interest in averaging as many observables as possible in complex/oscillating systems and deriving bounds on the emerging macro parameters independent of the details of the complexities. Some of these observables have the triple product structure as in \eqref{bs14}. In the present context, the model complex system is defined by $A^\epsilon$ via the conditions \eqref{ad13}. Thanks to the theory of $H$-convergence, we could study the behaviour of the canonical energy density \eqref{ad12} which corresponds to the self-interacting case. The $H$-limit $A^{*}$ describes its behaviour. Estimates on $A^{*}$ constitute one of the celebrated results in the theory (see the references \cite{A,GB,LC,ML1,T}).
Within the linear framework, other basic quantities of interest are quadratic densities of the form \eqref{bs14} for the same system \eqref{ad13}. Since $(A^{*},B^{\#})$ are the macro quantities in their weak limits, it is natural to seek optimal bounds on $(A^{*},B^{\#})$ independent of microstructural details. Such bounds are stated in Section \ref{ad18} and this article is devoted to their proof and applications. Secondly, let us cite the example of an Optimal Control Problem (OCP) which is a linear-quadratic regulator problem in which both the state equation and the cost functional involve finely oscillating coefficients $A^\epsilon$ and $B^\epsilon$ respectively \cite{KP,KV}. In such a case, there is an interaction between the cost functional and the state equation through their microstructures. The limit homogenized problem is again an OCP of similar type with $(A^{*},B^{\#})$ replacing $(A^\epsilon,B^\epsilon)$ respectively. This shows that $B^{\#}$ is an inevitable macro quantity appearing in such fundamental optimization processes. Next, let us mention the question of distinguishing two microstructures admitting the same homogenized matrix $A^{*}$. (Such questions could be of interest in Inverse Problems). Since $B^{\#}$ incorporates some features of the microstructures underlying $A^\epsilon$ through the interaction of $A^\epsilon$ and $B^\epsilon$, the idea is to exploit it in the process of distinction. Thus, let us consider two sequences $A^\epsilon$ and $A^\epsilon_1$ admitting the same $H$-limit $A^{*}$. Further, we assume both $A^\epsilon$ and $A^\epsilon_1$ have the same two phases $(a_1,a_2)$ with the same local volume proportions $(\theta_A(x), 1-\theta_A(x))$. Their microgeometries need not however be the same. The question of interest is the following: Can one distinguish the microgeometries of $A^\epsilon$ and $A^\epsilon_1$ by means of the macro quantity $B^{\#}$ for some suitable choice of $B^\epsilon$?
Let $B^\epsilon = B$ be independent of $\epsilon$, that is, $B^\epsilon$ admits no microstructure. Suppose $B\xrightarrow{A^\epsilon} B^{\#}$ and $B\xrightarrow{A^\epsilon_1} B^{\#}_1$ relative to $A^\epsilon$ and $A^\epsilon_1$ respectively. Then it can be shown that $B^{\#} = B^{\#}_1$. Indeed, in this case, $B^{\#}$ can be expressed as a (complicated) function of ${A^{*},B, \theta_A,a_1,a_2}$ (see Remark \ref{ad19}). Thus distinction is not possible in this case. The next natural step is to take $B^\epsilon$ with two-phase microstructure as a diagnostic tool to distinguish between the two microgeometries of $A^\epsilon$ and $A^\epsilon_1$. In this case, we can indeed choose a microstructure $B^\epsilon$ such that $B^{\#}$ and $B^{\#}_1$ are different (see Remark \ref{Sd12}). As the fourth situation, let us mention one novel application to Calculus of Variations which is carried out in this paper (see Section \ref{qw5}). Our bounds in Section \ref{ad18} will be used in full force. Application of $H$-Convergence to Calculus of Variations is classical and relaxed solutions are described in terms of $H$-limits $A^{*}$ \cite{A,CHA,MT1,GB,LC,ML1}. The objective functional in this class of problems are defined on single characteristic function $\chi_{\omega_A}$ and depends only on the state but not on its gradient. The usefulness of the $H$-convergence in such problems is well-known; it provides a method to obtain relaxed version of the problem \cite{A,CHA,MT1}. Indeed, homogenization limits and optimal bounds on them describe all relaxed microstructures along with the constraints on them. Minimizers are found among them and further one can write down the optimality conditions at minimizers. For various methods of relaxation, one may refer to \cite{KS1,KS2,KS3}. Our objective functional which is minimized involves the gradient of the state with variable coefficients. 
More precisely, it is \begin{equation*}\int_{\Omega}B(x)\nabla u\cdot \nabla u\ dx\ \ \ (cf.\eqref{ub11}). \end{equation*} The case $B(x) = I$ is treated in the literature. Its relaxation will involve the functional $\int_\Omega I^{\#}\nabla u\cdot \nabla u$ and optimal bounds on it. See \cite{GRB,LV}; one of our objectives in this work is to derive stronger results which yield optimal point-wise bounds on such energy densities (cf.\eqref{bs1},\eqref{Sd11}) and we get thereby the relaxed objective functional (see Section \ref{qw5}). Earlier work was done without formally introducing $I^{\#}$; introduction of $I^{\#}$ simplifies somewhat the relaxation process. We now consider the case $B(x)\neq I$; more precisely, we assume $B(x)$ has $2$-phases given by $B(x)= b_1\chi_{\omega_B}(x) +b_2(1-\chi_{\omega_B}(x))$. Now the above functional depends on the pair of characteristic functions $(\chi_{\omega_A},\chi_{\omega_B})$ and we minimize over them. Such problems are called Optimal Oscillation-Dissipation Problems because oscillations are present in the gradient fields of the state and we try to minimize/dissipate their energy although in a non-uniform way via $B(x)$. Interacting microstructures naturally appear in the minimization process. The following interpretation may be given to this type of problems: when $B(x) = I$, that is when $\chi_{\omega_B} \equiv 1$, one seeks a generalized microstructure on which the energy of the oscillation of gradient of the state is suppressed/dissipated to the extent possible within the constraints. Since $B(x)=I$, this dissipation occurs uniformly in the space. Anticipating non-uniformity in the energy dissipation in heterogeneous media, it is more natural to admit a variable matrix $B(x)$: roughly, larger value of $B(x)$ means smaller energy of the oscillations in the gradient.
Furthermore, by minimizing with respect to $(\chi_{\omega_A},\chi_{\omega_B})$, we allow the system to choose its own ``conductivity matrix'' $A^{*}$ and an associated inverse ``dissipative matrix''$B^{\#}$. Basic difficulty in such Optimal Oscillation-Dissipation Problems is as follows : To minimize the objective functional, ideally speaking, we would like to have minimal value of $B$ in regions of large gradient of the state. The difficulty is that such regions can be large and are not known a priori. Moreover, the minimal value material in $B$ can have small volume. Such issues make the problem somewhat nontrivial. As usual, one does not expect ``classical solution'' to this problem and we need to relax it. The new notion of relative limit $B^{\#}$ and the bounds on it stated in Section \ref{ad18} are needed to formulate the relaxed version. Minimizers for this problem would be pairs $(A^{*},B^{\#})$ in which $A^{*}$ captures oscillatory effects and $B^{\#}$ describes dissipation effects. Both these effects co-exist optimally in the problem. Quite remarkably, minimizers are found among $N$-rank laminates across which the core-matrix values get switched. This seems to be a new phenomenon when compared with the fact that minimizers for the classical ODP usually do not have such interfaces. It may be mentioned that there are other problems of the type min-max for which our results apply. See Remark \ref{bs6}. As the last situation, we mention the following extension of the classical \textit{G-closure} problem for conductivities : finding all possible conductivities at a given location in the physical domain when two conductors are mixed in a given volume proportion $(\theta_A,1-\theta_A)$. The set is classically denoted as $\mathcal{G}_{\theta_A}$. 
Its extension incorporating interaction between $A^\epsilon$ and $B^\epsilon$ is as follows : Given $\{a_1,a_2,\theta_A\}$, $\{b_1,b_2,\theta_B\}$, find optimal bounds on $(A^{*},B^{\#})$ independent of microgeometric details of $A^\epsilon$ and $B^\epsilon$. They can depend on $A^{*}$, but otherwise independent of microgeometric details of $A^\epsilon$. When $B^\epsilon = A^\epsilon$, we know that $B^{\#} = A^{*}$ and so the later problem obviously extends the first one. In this work, we solve the extended problem, namely, we estimate $(A^{*},B^{\#})$ independent of microstructures in terms of $\{a_1,a_2, \theta_A\}$, $\{b_1,b_2,\theta_B\}$. As an easy example, we can choose $B^\epsilon$ to be any function of $A^\epsilon$ without altering its microstructure. In this context, we cite \cite{AM} in which a different problem involving $A^\epsilon,B^\epsilon$ with the same underlying microstructure is considered without introducing $B^{\#}$. Obviously, more challenging case is to deal with the change of microstructures; namely that $A^\epsilon$ and $B^\epsilon$ have different microstructures. While this case is treated completely here, the more difficult problem of deriving optimal bounds on $B^{\#}$ in terms of $(a_1,a_2,\theta_A),(b_1,b_2,\theta_B)$, independent of microstructures and without involving $A^{*}$ is left open. Next, we highlight some of main points of our work. First point is concerned with the proof of optimal bounds on $(A^{*},B^{\#})$ assuming that $B^\epsilon$ and $A^\epsilon$ have two-phases. This is an interesting mathematical challenge because it involves the study of interaction of two microstructures $A^\epsilon$ and $B^\epsilon$. We need to choose a suitable method capable of handling interacting microstructures. 
Of various methods available to obtain optimal bounds on $A^{*}$, we found that the method of using the combination of translated inequality and Compensated Compactness Theory along with $H$-measure \cite{T,T1} can be extended to include interaction between microstructures and establish the required bounds on $(A^{*},B^{\#})$. Two key elements of the method are important : one is the choice of a suitable macro field \eqref{abc1} and the associated oscillating gradient field satisfying the basic compactness condition \eqref{ad13}. This will be used to test the appropriate translated inequality. The self-interacting case suggests the use of the same oscillating field for both $A^{*}$ and $(A^{*},B^{\#})$. The second element is the introduction of a suitable oscillating system with differential constraints to bound tightly the so-called $H$-measure term. Due to interaction of microstructures, this step is complicated and we need additional compactness \eqref{eiz} to carry it out; mere \eqref{ad13} is not enough. The fundamental problem is that generally compactness suppresses oscillation. Therefore the question arises whether the oscillating field satisfying all the previous requirements has also the additional compactness. This is a delicate point. While the oscillation around \eqref{abc1} exists along with the compactness \eqref{ad13} for all $A^{*}$, the additional compactness holds only for some special $A^{*}$. In fact our results show that for $A^{*}$ in the interior of the phase region, such additional compactness does not hold. Fortunately, for $A^{*}$ on the boundary of the phase region, it does hold and it is a consequence of optimality nature of such $A^{*}$. Thus both additional compactness and the usual oscillations co-exist for $A^{*}$ belonging to boundary. Using this property of the system,we are able to deduce optimal lower bound on $(A^{*},B^{\#})$ if $A^{*}$ lies on boundary of the region. 
Exploiting the phase space structure of $A^{*}$, same estimates on $(A^{*},B^{\#})$ are later extended if $A^{*}$ lies in the interior by other means. \begin{comment} Of various methods of proof available to obtain optimal bounds on effective quantities, we found the method using the combination of translated inequality and Compensated Compactness theory along with $H$-measure \cite{T1} very flexible to treat the interaction between the two microstructures and establish the required bounds. However,it is not fully adequate for our needs. Additional arguments are required. As in the study of any interaction, we need compactness property in some form or the other. The question is to know what type of compactness is needed and how to devise a method to use it. We address these issues and that forms the core of our work in Section \ref{ts}. The required compactness for our purposes is stated and proved in Theorem \ref{ub8} and Theorem \ref{ad20}. They are needed to define what we call an inflated oscillatory system (see \eqref{eik}) designed specifically to accommodate interaction. Roughly speaking, we need to work with special oscillating gradient fields in the translated inequality, which correspond to optimal structures of $A^{*}$ because they possess some nice compactness properties described in Theorem \ref{ub8} and needed to treat the interaction. The method gives a careful treatment to the so-called $H$-measure term, especially when there are interacting microstructures. Once the case of optimal $A^{*}$ is finished, other cases (non-optimal structures of $A^{*}$) are handled subsequently by other arguments. \end{comment} The second point is about the bounds themselves. Estimates on $(A^{*},B^{\#})$ keeping $\{a_1,a_2,\theta_A\}$, $\{b_1,b_2,\theta_B\}$ fixed are quantified in the form of inequalities in Section \ref{ad18}. 
Naturally, the analysis divides the physical domain ${|\!\!\!O}mega$ into four sub-domains denoted as ${|\!\!\!O}mega_{(Li,Uj)}$ $i,j=1,2$ (see \eqref{FL1}) with interfaces separating them depending on local volume proportions $\{\theta_A,\theta_B\}$. Figures showing these regions are included in Section \ref{ad18}. These estimates seem complicated. We arrived at them via a two-pronged strategy : on one hand, computing $B^{\#}$ for canonical microstructures of $A^\epsilon$ and on the other, trying to produce a rigorous proof. In our procedure of proving optimal estimates, we go though several intermediate easy cases before taking up the general case pointing out the difficulties. The estimates obtained in the four cases are labeled as $L1$, $L2$, $U1$, $U2$. They define four optimal regions denoted as $(Li,Uj)$ in the phase space of macro parameters $(A^{*},B^{\#})$, whose union is denoted as $\mathcal{G}_{(\theta_A,\theta_B)}$. This constitutes an important structural change in the product space $(A^{*},B^{\#})$ when compared with the single known region $\mathcal{G}_{\theta_A}$ in the $A^{*}$-space. It is instructive to imagine $\mathcal{G}_{(\theta_A,\theta_B)}$ consisting of pairs $(A^{*},B^{\#})$ in which $B^{\#}$ is in a fibre sitting over each $A^{*}\in\mathcal{G}_{\theta_A}$. While each fibre is convex (see Remark \ref{qw1}), it is not clear what kind of other convexity properties each region in $\mathcal{G}_{(\theta_A,\theta_B)}$ enjoys. The link between the regions $\mathcal{G}_{(\theta_A,\theta_B)}$ and $\mathcal{G}_{\theta_A}$ is interesting. In the self-interacting case, namely if $A^\epsilon = B^\epsilon$, the bounds L1 and L2 coincide with the well-known lower and upper bounds on $A^{*}$ respectively, thus we recover $\mathcal{G}_{\theta_A}$ from them. On the other hand, the bounds U1, U2 on $A^{*}$ do not seem to have any special significance in the classical phase space. (For details, see Remark \ref{eiu} and Figure 2). 
Finally, while relaxed solutions for Optimal Design Problems (ODP) are found in $\mathcal{G}_{\theta_A}$, we need the regions in $(A^{*},B^{\#})$ -space to capture relaxed solutions for Optimal Oscillation-Dissipation Problems (OODP). It is worthwhile to point out that relaxed solutions are found among $N$-rank laminates with an interface across which the core-matrix values are interchanged. This appears to be a new feature. As the third point, let us recall that explicit computation of $A^{*}$ on various canonical microstructures such as simple and $N$-rank laminates, Hashin-Shtrikman coated balls are found in the literature \cite{T}. These are useful in proving the optimality of bounds on $A^{*}$. New computations for $B^{\#}$ associated to the above $A^{*}$ are made in our work in Section \ref{ub12} and in Section \ref{ts}. They reveal the structure of $B^{\#}$ which is used in establishing that various bounds in Section \ref{ad18} are indeed saturated. They also help us find relaxed solutions of OODP. As an example, let us mention laminates. Assuming that $A^{\epsilon}$ is governed with $p-$sequential laminate microstructures, and that $\theta_A\leq \theta_B$, we construct the relative limits $B^{\#}_p$ and $B^{\#}_{p,p}$ associated to the $p$-laminate $A^{*}_p$ with the same lamination directions $\{e_i\}_{1\leq i\leq p}$ and with the same proportions $\{m_i\}_{1\leq i\leq p}$ as in $A^{*}_p$ but with $\omega_{A^\epsilon}\subseteq \omega_{B^\epsilon}$ (see \eqref{bs16}). To carry this out, we first establish a general relation (eg. \eqref{ad3}) between $A^{*}$ and $B^{\#}$ using $H$-measure techniques and we exploit it subsequently to compute the relative limits $B^{\#}_p$ and $B^{\#}_{p,p}$ corresponding to $p$-laminate $A^{*}_p$. The final point stated in Section \ref{ad18} and proved in Section \ref{qw4} is about optimality of the regions $(Li,Uj)$ in the sub-domain $int({|\!\!\!O}mega_{(Li,Uj)})$, $i,j=1,2$. 
This means that given $(A^{*},B^{\#})$ lying in a region $(Li,Uj)$, they can be realized as $H$-limit and a relative limit for suitable sequences $(A^\epsilon$, $B^\epsilon)$ with two phases and with their local volume proportions satisfying appropriate inequalities. This task has been accomplished in the case of $A^{*}$ in the literature (see \cite{A} for instance). Because of the presence of two matrices $(A^{*},B^{\#})$, non-commutativity is an extra difficulty which is not present in the self-interacting case. This is tackled with what we call Optimality-Commutativity Lemma (Lemma \ref{zz14}) which exploits the specific structure of our bounds. Apart from this, we need one more new element and that is fibre-wise convexity of the region (Li,Uj) in the phase space (see Remark \ref{qw1}). Modulo these new elements, the proof of optimality follows an established procedure. It has three parts. In the first, we work with macroscopically homogeneous cases. In the second part, we treat the general case by using piecewise constant approximation of function. We exploit here the fibre-wise convexity of the regions (Li,Uj). Third part proves optimality of all four regions taken together in ${|\!\!\!O}mega$. Somewhat strangely, another consequence of Optimality-Commutativity Lemma is that the $H$-limit $A^{*}$ and the relative limit $B^{\#}$ lying on the fibre over $A^{*}$ commute with each other in ${|\!\!\!O}mega$, i.e. $A^{*}(x)B^{\#}(x)=B^{\#}(x)A^{*}(x)$. This commutativity property is not a priori clear to start with ; our arguments need the optimal bounds to show it. An easy consequence of commutativity is that the optimal bounds stated in Section \ref{ad18}, can be formulated in terms of eigenvalues of $A^{*}$ and $B^{\#}$ (see \eqref{eg6},\eqref{eg7},\eqref{eg10},\eqref{eg5}). It is well-known \cite[Page No. 
690]{ML} that the \textit{G-closure problem} for elasticity is open due to the fact that laminated microstructures are insufficient to generate all possible homogenized materials and that this difficulty does not exist for $H$-limits $A^{*}$ of the conductivity problem with two phases. In this context, let us state that the \textit{G-closure} type difficulty does not exist for relative limits $B^{\#}$ either. Let us now describe briefly the plan of this paper. After proving the existence of $B^{\#}$ in the next section, we also present its straightforward properties. We take up the one-dimensional case in some detail in Section \ref{hsf}. Optimal estimates involving the pair $(A^{*},B^{\#})$, which constitute the main results of this work, are stated in Section \ref{ad18}. There are four cases, which naturally arise in the analysis. Proof of these bounds is given in Section \ref{ts}. In the same section, we treat separately the easy case when $B^\epsilon = B$ is independent of $\epsilon$. Here, we distinguish two types of arguments: one to bound the energy density associated with $B^{\#}$, namely $B^{\#}\nabla u\cdot\nabla u$, and another to estimate the matrix $B^{\#}$ itself via its quadratic form $B^{\#}\eta\cdot\eta$. Computation of new macro coefficients $B^{\#}$ on classical microstructures associated with $A^\epsilon$ can be found in Section \ref{ub12} and Section \ref{ts}. There is a separate discussion in Section \ref{qw4} about the optimality of the regions $(Li,Uj)$, $i,j=1,2$, defined by the bounds L1, L2, U1, U2. The final Section \ref{qw5} is devoted to two applications to problems of the Calculus of Variations.\\ \\ For other comments on the contents of this paper, the reader is referred to various sections below. 
\tableofcontents \section{Existence of $B^{\#}$ and its properties}\label{Sd13} \setcounter{equation}{0} \subsection{Existence of $B^{\#}$}\label{bs8} \begin{theorem}\label{ot1} Given $B^\epsilon\in\mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$, there exist a subsequence and $B^{\#}\in\mathcal{M}(b_1,\widetilde{b_2};{|\!\!\!O}mega)$ with $\widetilde{b_2}=b_2\frac{a_2}{a_1}$ such that \begin{equation}\label{dc1} B^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon \rightharpoonup B^{\#}\nabla u\cdot\nabla u \ \mbox{ in }\mathcal{D}^\prime({|\!\!\!O}mega).\end{equation} \end{theorem} \begin{proof} \textbf{Step(1): Definition of $B^{\#}$:} Let $\{e_k\}_k,k=1,2,..,N$ be the standard basis vectors in $\mathbb{R}^N$. We define the oscillatory test functions $\chi_{k}^{\epsilon}$, $\zeta_{k}^{\epsilon}$ and $\psi_{k}^{\epsilon}$ in $H^1({|\!\!\!O}mega)$ to define $A^{*}$, $B^{*}$, $B^{\#}$ as follows. Let $A^\epsilon$ $H$- converges to $A^{*}$, then upto a subsequence still denoted by $A^\epsilon$, there exists a sequence $\{\chi_k^\epsilon\}\in H^1({|\!\!\!O}mega)$ such that \begin{equation*}\begin{aligned} \chi_{k}^{\epsilon} &\rightharpoonup 0 \mbox{ weakly in }H^1({|\!\!\!O}mega),\\ A^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k ) &\rightharpoonup A^{*}{e_k} \mbox{ weakly in }L^2({|\!\!\!O}mega) \mbox{ with }\\ -div (A^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k )) &= -div(A^{*}{e_k})\ \mbox{ in }{|\!\!\!O}mega, \ \ k=1,2,..,N. 
\end{aligned}\end{equation*} We consider the matrix $X^{\epsilon}$ defined by its columns $(\nabla\chi_{k}^{\epsilon})$ is called the corrector matrix for $A^{\epsilon}$, with the following property : \begin{equation}\label{ot9}\nabla u^{\epsilon} - (X^{\epsilon}+I)\nabla u \rightarrow 0 \quad\mbox{ in }L_{loc}^1({|\!\!\!O}mega).\end{equation} The existence of such sequence $\{\chi_{k}^{\epsilon}\}$ is well known in homogenization theory, for more details one may look at \cite{A,T}.\\ \\ Similarly, let $B^\epsilon$ $H$-converges to $B^{*}$, then upto a subsequence still denoted by $B^\epsilon$, we define the corrector matrix $Y^{\epsilon}$ defined by its columns $(\nabla\zeta_{k}^{\epsilon})$ satisfying \begin{equation*}\begin{aligned} \zeta_{k}^{\epsilon} &\rightharpoonup 0 \mbox{ weakly in }H^1({|\!\!\!O}mega),\\ B^{\epsilon}(\nabla\zeta_{k}^{\epsilon} + e_k ) &\rightharpoonup B^{*}e_k \mbox{ weakly in }L^2({|\!\!\!O}mega) \mbox{ with }\\ -div(B^{\epsilon}(\nabla\zeta_{k}^{\epsilon} + e_k )) &= -div(B^{*}e_k)\ \mbox{ in }{|\!\!\!O}mega, \ \ k=1,2,..,N. \end{aligned}\end{equation*} And finally we define the test functions $\psi_{k}^{\epsilon}$ bounded uniformly with respect to $\epsilon$ in $H^1({|\!\!\!O}mega)$, satisfying \begin{equation*} div(A^{\epsilon} \nabla \psi_{k}^{\epsilon} - B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k)) =0\ \mbox{ in }{|\!\!\!O}mega, \ \ k=1,2,..,N.\end{equation*} Such test functions $\{\psi_{k}^{\epsilon}\}$ have been introduced in \cite{KP,KV} subject to an optimal control problem. Then upto a subsequence we consider the limit as \begin{equation*}\begin{aligned} \psi_{k}^{\epsilon} &\rightharpoonup \psi_{k} \mbox{ weakly in }H^1({|\!\!\!O}mega),\\ A^{\epsilon}\nabla \psi_{k}^{\epsilon} - B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k) &\rightharpoonup\ \varsigma_k \mbox{\ (say) weakly in }L^2({|\!\!\!O}mega)\mbox{ with }\\ div(\varsigma_k) &=0\ \mbox{ in }{|\!\!\!O}mega, \ \ k=1,2,..,N. 
\end{aligned}\end{equation*} Next, we define the limiting matrix $B^{\#}$ : For each $k=1,2,..,N$ \begin{equation}\label{Sd2} B^{\#}e_k := A^{*}\nabla \psi_{k} - \varsigma_k = A^{*}\nabla \psi_{k}- lim \{ A^{\epsilon}\nabla \psi_{k}^{\epsilon} - B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k)\}; \end{equation} and as a perturbation of $H$-limit $B^{*}$ we write \begin{equation*} B^{\#}e_k = B^{*}e_k + A^{*}\nabla \psi_k - lim \{ A^{\epsilon} \nabla \psi_k^\epsilon - B^{\epsilon}(\nabla\chi_k^{\epsilon} - \nabla\zeta_k^{\epsilon})\}. \end{equation*} \ \ \ \ (The above limits are to be understood as $L^2({|\!\!\!O}mega)$ weak limit).\\ \\ \textbf{Step(2):} We introduce $p^\epsilon\in H^1({|\!\!\!O}mega)$ such that \begin{equation}\begin{aligned}\label{FG1} \nabla p^\epsilon \rightharpoonup \nabla p \mbox{ weakly in } L^2({|\!\!\!O}mega),&\\ div(A^{\epsilon}\nabla p^{\epsilon} - B^{\epsilon}\nabla u^{\epsilon}) =\ 0 \mbox{ in }{|\!\!\!O}mega.& \end{aligned}\end{equation} We introduce the new flux $z^{\epsilon} = A^{\epsilon}\nabla p^{\epsilon} - B^{\epsilon}\nabla u^{\epsilon}$, and say $ z^{\epsilon}\rightharpoonup z$ weakly in $L^2({|\!\!\!O}mega)$.\\ \\ \textbf{Step(3):} We apply the well-known div-curl lemma \cite{T} several times to simply have the following convergences : \begin{equation*} (A^{\epsilon}\nabla \psi_{k}^{\epsilon}- B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + {e_k}))\cdot (\nabla\chi_{k}^{\epsilon} + {e_k})\rightharpoonup (A^{*}\nabla \psi_{k}- B^{\#}e_k)\cdot e_k \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega)\end{equation*} and \begin{equation*} A^{\epsilon}(\nabla\chi_{k}^{\epsilon} + {e_k})\cdot \nabla\psi_{k}^{\epsilon}\rightharpoonup A^{*}e_k\cdot\nabla\psi_{k} \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega). 
\end{equation*} Since $A^\epsilon = (A^\epsilon)^t$ and $A^{*}=(A^{*})^t$, thus combining the above two convergences we obtain \begin{equation}\label{FL12} B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + {e_k})\cdot (\nabla\chi_{k}^{\epsilon} + {e_k})\rightharpoonup B^{\#}e_k\cdot e_k \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega), \ k=1,2..,N. \end{equation} On the other hand, thanks to div-curl lemma we also have \begin{equation*} (A^\epsilon\nabla p^\epsilon- B^\epsilon\nabla u^\epsilon)\cdot (\nabla\chi_{k}^{\epsilon} + {e_k})\rightharpoonup z\cdot e_k \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega) \end{equation*} and \begin{equation*} A^\epsilon (\nabla\chi_{k}^{\epsilon} + {e_k})\cdot\nabla p^\epsilon \rightharpoonup A^{*}e_k\cdot\nabla p \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega). \end{equation*} Thus one gets, \begin{equation}\label{eir} B^\epsilon\nabla u^\epsilon\cdot (\nabla\chi_{k}^{\epsilon} + {e_k})\rightharpoonup A^{*}\nabla p\cdot e_k- z\cdot e_k \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega). \end{equation} Similarly, having \begin{equation*} (A^{\epsilon}\nabla \psi_{k}^{\epsilon}- B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + {e_k}))\cdot \nabla u^{\epsilon}\rightharpoonup (A^{*}\nabla \psi_{k}- B^{\#}e_k)\cdot\nabla u \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega) \end{equation*} and \begin{equation*} A^\epsilon\nabla u^\epsilon\cdot\nabla \psi_{k}^{\epsilon} \rightharpoonup A^{*}\nabla u\cdot\nabla \psi_k \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega), \end{equation*} one obtains, \begin{equation}\label{FG13} B^\epsilon (\nabla\chi_{k}^{\epsilon} + {e_k})\cdot \nabla u^\epsilon\rightharpoonup B^{\#}e_k\cdot\nabla u \ \mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega). \end{equation} Now by simply combining \eqref{eir} and \eqref{FG13}, we determine the expression of $z$ : \begin{equation*} z\cdot e_k = (A^{*}\nabla p -B^{\#}\nabla u)\cdot e_k, \ \ k=1,2,..N.\end{equation*} Thus, $ z = A^{*}\nabla p -B^{\#}\nabla u $. 
Since $div\ z^\epsilon =0$ and $z^\epsilon \rightharpoonup z\ \mbox{ in }L^2({|\!\!\!O}mega) \mbox{ weak }$, so $div\ z=0$.\\ \noindent Therefore, we conclude that \begin{equation*} B^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon =\ A^\epsilon\nabla u^\epsilon\cdot\nabla p^\epsilon - z^\epsilon\cdot\nabla u^\epsilon \rightharpoonup A^{*}\nabla u\cdot\nabla p - z\cdot\nabla u = B^{\#}\nabla u\cdot\nabla u\ \mbox{ in } \mathcal{D}^\prime({|\!\!\!O}mega).\end{equation*} Hence, \eqref{dc1} follows. We prove later that $B^{\#}\in \mathcal{M}(b_1,\widetilde{b_2};{|\!\!\!O}mega)$. (See \eqref{Sd3}). \end{proof} \noindent The above result justifies the Definition \ref{sid} stated in Introduction. \begin{remark}\label{ub19} If $u^\epsilon$ satisfies Dirichlet boundary condition i.e. $u^\epsilon\in H^1_0({|\!\!\!O}mega)$, then it is natural to impose same Dirichlet boundary condition on $p^\epsilon$ also. Multiplying the equation $-div(A^\epsilon\nabla u^\epsilon)= f^\epsilon$ in ${|\!\!\!O}mega$ for $u^\epsilon$ (where $f^\epsilon$ strongly converges to $f$ in $H^{-1}({|\!\!\!O}mega)$), by $p^\epsilon$ and the equation $div(A^\epsilon\nabla p^\epsilon-B^\epsilon\nabla u^\epsilon)=0$ in ${|\!\!\!O}mega$ for $p^\epsilon$ by $u^\epsilon$, and passing to the limit after subtraction, we obtain the desired result, namely \begin{equation*} \int_{|\!\!\!O}mega B^\epsilon \nabla u^\epsilon\cdot\nabla u^\epsilon\ dx \rightarrow \int_{|\!\!\!O}mega B^{\#}\nabla u\cdot\nabla u\ dx. \end{equation*} \qed\end{remark} \begin{remark}\label{abc3} The quadratic quantities \eqref{bs14} are bounded in $L^1({|\!\!\!O}mega)$ and so their weak limit points are a priori Radon measures. However, due to the special nature of the sequence $\nabla u^\epsilon$, (\eqref{ad13} is satisfied), they are in fact $L^1({|\!\!\!O}mega)$ functions according to Definition \ref{sid} and Theorem \ref{ot1}. 
\qed\end{remark} \begin{remark}\label{ad16} Let us consider the flux sequence $\{B^\epsilon\nabla u^\epsilon\}_{\epsilon>0}$, which is bounded uniformly in $L^2(\Omega)$. Then using \eqref{ot9}, up to a subsequence we find \begin{equation*} B^\epsilon\nabla u^\epsilon \rightharpoonup B^{\#}_{FL}\nabla u \ \mbox{ in }L^2(\Omega) \end{equation*} where \begin{equation*} B^\epsilon(\nabla\chi^\epsilon_k + e_k)\rightharpoonup B^{\#}_{FL}e_k \mbox{ in }\mathcal{D}^\prime(\Omega), \ \ k=1,2,..,N.\end{equation*} However, in general the limiting macro quantities $B^{\#}_{FL}$ and $B^{\#}$, appearing in the flux convergence and the energy convergence respectively, need not be the same, i.e. $B^{\#}_{FL} \neq B^{\#}$ (cf. Remark \ref{ot10}). \qed\end{remark} \begin{remark}\label{zz17} The above arguments show the following characteristic property of $(A^{*},B^{\#})$ : if $v^\epsilon\in H^1(\Omega)$ is such that $\nabla v^\epsilon \rightharpoonup \nabla v$ in $L^2(\Omega)$ and $div(A^\epsilon\nabla v^\epsilon)$ is convergent in $H^{-1}(\Omega)$, then \begin{equation*} A^\epsilon\nabla v^\epsilon \rightharpoonup A^{*}\nabla v \mbox{ in } L^2(\Omega)\ \ \mbox{and }\ \ B^\epsilon\nabla v^\epsilon\cdot\nabla v^\epsilon \rightharpoonup B^{\#}\nabla v\cdot\nabla v\mbox{ in }\mathcal{D}^\prime(\Omega). \end{equation*} Moreover, if $u^\epsilon$ is another test sequence like $v^\epsilon$, then \begin{equation*} B^\epsilon\nabla u^\epsilon\cdot\nabla v^\epsilon \rightharpoonup B^{\#}\nabla u\cdot\nabla v\mbox{ in }\mathcal{D}^\prime(\Omega). \end{equation*} This follows simply due to \begin{equation*}B^\epsilon\nabla u^\epsilon\cdot\nabla v^\epsilon =\ A^\epsilon\nabla v^\epsilon\cdot\nabla p^\epsilon - z^\epsilon\cdot\nabla v^\epsilon \rightharpoonup A^{*}\nabla v\cdot\nabla p - z\cdot\nabla v = B^{\#}\nabla u\cdot\nabla v\ \mbox{ in } \mathcal{D}^\prime(\Omega). 
\end{equation*} \qed\end{remark} \subsection{Properties of $B^{\#}$} Let us first define $B^{\#}$ element wise i.e. to define $(B^{\#})_{lk}=B^{\#}e_l\cdot e_k.$ We consider the sequences $(A^{\epsilon}\nabla\psi_{k}^{\epsilon} - B^{\epsilon}(\nabla\chi_{k}^{\epsilon} + e_k))$ and $(\nabla\chi_{l}^{\epsilon} +{e_l})$ where $e_k,e_l\in\mathbb{R}^N$ are the canonical basis vectors, then by applying the div-curl lemma as before, we have the following elements wise convergence \begin{equation}\label{dc2} B^{\epsilon}(\nabla\chi_k^{\epsilon}+e_k)\cdot(\nabla\chi_l^{\epsilon}+ e_l) \rightharpoonup B^{\#}e_k\cdot e_l\quad\mbox{in }\mathcal{D}^{\prime}({|\!\!\!O}mega) \end{equation} \begin{remark} Notice that if $A^{\epsilon}$ is independent of $\epsilon$ i.e. $A^{\epsilon} = A$ then we have $X^{\epsilon}=0$, so $B^{\#} = \overline{B}$, where, $\overline{B}$ is the $L^{\infty}({|\!\!\!O}mega)$ weak* limit of $B^{\epsilon}$. \qed \end{remark} \noindent As an application of the above distributional convergence \eqref{dc2} one has the following : \begin{enumerate} \item[(i)] Since $\{B^{\epsilon}\}_{\epsilon>0}$ is symmetric, $B^{\#}$ is also a symmetric matrix. \item[(ii)] Let $B^{\epsilon} \in \mathcal{M}(b_1,b_2;\ {|\!\!\!O}mega)$. Then the ellipticity constant of $B^{\#}$ remains same as for $B^\epsilon$ i.e. $B^{\#}\geq b_1I$. However, the upper bounds for $B^\epsilon$ and $B^{\#}$ need not be the same. We have $B^{\#}\leq\widetilde{b_2}I$ with $\widetilde{b_2}=b_2\frac{a_2}{a_1}$. In this context, let us recall that the homogenized matrix $B^{*}$ admits bounds : $b_1I\leq B^{*}\leq b_2I$. 
\end{enumerate} Let $\lambda=(\lambda_1,\lambda_2,..,\lambda_N)\in\mathbb{R}^N$ be an arbitrary vector, we define the corresponding oscillatory test function $\chi^\epsilon_\lambda= \sum_{k=1}^N \lambda_k\chi^\epsilon_k\in H^1({|\!\!\!O}mega)$ and $\zeta^\epsilon_\lambda= \sum_{k=1}^N \lambda_k\zeta^\epsilon_k\in H^1({|\!\!\!O}mega)$ to have \begin{equation}\label{ot11}B^{\#}\lambda\cdot\lambda =\ limit\ B^{\epsilon}(\nabla\chi_{\lambda}^{\epsilon}+ \lambda)\cdot(\nabla\chi_{\lambda}^{\epsilon}+\lambda);\end{equation} and as a perturbation of the $H$-limit $B^{*}$, \begin{equation}\label{os18} B^{\#}\lambda\cdot {\lambda}=\ B^{*}\lambda\cdot {\lambda} + limit\ B^{\epsilon}(\nabla\chi_{\lambda}^{\epsilon} - \nabla\zeta_{\lambda}^{\epsilon})\cdot(\nabla\chi_{\lambda}^{\epsilon} - \nabla\zeta_{\lambda}^{\epsilon}). \\ \end{equation} \ \ \ (The above `limits' are to be understood in the sense of distributions.) \\ \begin{corollary}\label{Sd14} \begin{equation}\label{Sd4} B^{\#} \geq B^{*} \end{equation} where the equality holds if and only if $\nabla(\chi_{\lambda}^{\epsilon} - \zeta_{\lambda}^{\epsilon})\rightarrow 0 $ in $L^2({|\!\!\!O}mega)$ for each $\lambda\in\mathbb{R}^N.$ \qed\end{corollary} \noindent In the following lemma as an application of the distributional converge \eqref{ot11}, we provide the general bounds on $B^{\#}$. \begin{lemma}[\textbf{General Bounds}]\label{hsk} Let $A^{\epsilon}\in \mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ with $0 <a_1\leq a_2 <\infty $ and $B^{\epsilon}\in\mathcal{M}(b_1,b_2;\ {|\!\!\!O}mega)$ with $0 <b_1\leq b_2 <\infty $, $H$-converges to $A^{*}\in\mathcal{M}(a_1,a_2;\ {|\!\!\!O}mega)$ and $B^{*}\in\mathcal{M}(b_1,b_2;\ {|\!\!\!O}mega)$ respectively. 
Then we have the following bounds \begin{equation}\label{Sd3} b_1I\leq \underline{B} \leq B^{*} \leq B^{\#} \leq \frac{b_2}{a_1}A^{*} \leq \frac{b_2}{a_1}\overline{A}\leq b_2\frac{a_2}{a_1}I\end{equation} where $(\underline{B})^{-1}$ is the $L^{\infty}$ weak* limit of the matrix sequence $(B^{\epsilon})^{-1}$ and $\overline{A}$ is the $L^{\infty}$ weak* limit of the matrix sequence $A^{\epsilon}$. \end{lemma} \begin{proof} In general we have the following trivial bounds with respect to the $L^{\infty}(\Omega)$ weak* limit of the sequence and its inverse \begin{equation*}\underline{A} \leq A^{*} \leq \overline{A} \quad\mbox{ and }\quad \underline{B} \leq B^{*}\leq \overline{B}\end{equation*} where $A^{\epsilon}$, $(A^{\epsilon})^{-1}$, $B^{\epsilon}$, $(B^{\epsilon})^{-1}$ converge to $\overline{A}$, $(\underline{A})^{-1}$, $\overline{B}$, $(\underline{B})^{-1}$ in the $L^{\infty}(\Omega)$ weak* sense respectively.\\ So by using \eqref{Sd4} we have the lower bound for $B^{\#}$ \begin{equation*} b_1I\leq \underline{B} \leq B^{*} \leq B^{\#}.\end{equation*} Next we seek the upper bound for $B^{\#}$. In general, $B^{\#}\leq \overline{B}$ is not true (cf. Remark \ref{Sd8}). In fact, in the one-dimensional problem (Example \ref{hsf}) for a two-phase material ($a^{\epsilon}(x)=a_1\chi_{a_\epsilon}(x)+a_2(1-\chi_{a_\epsilon})$, and $b^{\epsilon}(x)=b_1\chi_{b_\epsilon}(x)+ b_2(1-\chi_{b_\epsilon})$), by taking $(\frac{a_2}{a_1} -1)$ small enough or $(\frac{b_2}{b_1} -1)$ large enough, it is even possible to get $b^{\#}\geq b_2 \ ( > \overline{b} )$. This is a new phenomenon. However, we derive upper bounds on it in terms of $A^{*}$ (or $\overline{A}$). 
Let us consider the following inequality \begin{equation*}B^{\epsilon}(\nabla\chi_{\lambda}^{\epsilon} +\lambda)\cdot(\nabla\chi_{\lambda}^{\epsilon} + \lambda) \leq\ b_2(\nabla\chi_{\lambda}^{\epsilon} + \lambda)\cdot(\nabla\chi_{\lambda}^{\epsilon} + \lambda) \leq\ \frac{b_2}{a_1} A^{\epsilon}(\nabla\chi_{\lambda}^{\epsilon} +{\lambda})\cdot(\nabla\chi_{\lambda}^{\epsilon} +\lambda).\end{equation*} So by using the distributional convergence we have \begin{equation*}B^{\#}{\lambda}\cdot {\lambda} \leq\ \frac{b_2}{a_1} A^{*}{\lambda}\cdot {\lambda} \leq\ \frac{b_2}{a_1}\overline{A}\ \lambda\cdot {\lambda}\leq \frac{b_2}{a_1}a_2 \lambda\cdot {\lambda}\end{equation*} which gives the desired upper bound on $B^{\#}$. \end{proof} \begin{remark} A bound which is not sharp was found earlier in \cite{KR}. Notice that if $B^{\epsilon} = \frac{b_2}{a_1}A^{\epsilon}$, then clearly $B^{\#} =\frac{b_2}{a_1}A^{*}$ and the fourth inequality becomes equality in \eqref{Sd3}, and Corollary \ref{Sd14} provides the condition for the third inequality to become equality in \eqref{Sd3}. \qed\end{remark} \noindent \begin{remark}[Localization]\label{ub13} If $A^\epsilon\in\mathcal{M}(a_1,a_2;\Omega)$ $H$-converges to $A^{*}$ and $\omega$ is an open subset of $\Omega$, then the sequence $A^\epsilon|_{\omega}$ of restrictions of $A^\epsilon$ to $\omega$ $H$-converges to $A^{*}|_{\omega}$ (see \cite[Lemma 10.5]{T}). Similarly, we remark that if $B^\epsilon\in\mathcal{M}(b_1,b_2;\Omega)$ converges to $B^{\#}$ relative to $A^\epsilon$ in $\Omega$, then $B^\epsilon|_\omega$ converges to $B^{\#}|_\omega$ relative to $A^\epsilon|_\omega$. \qed\end{remark} \begin{remark}\label{sii} If $A^\epsilon\in\mathcal{M}(a_1,a_2;\Omega)$ strongly converges to $A^{*}$ in $L^p(\Omega)$ for $1\leq p<\infty$ then $A^\epsilon$ $H$-converges to $A^{*}$ (see \cite[Lemma 1.2.22]{A}). 
Similarly, we remark that, if $A^\epsilon\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ and $B^\epsilon\in\mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$ strongly converge to $A^{*}$ and $B^{\#}$ respectively in $L^p({|\!\!\!O}mega)$ for $1\leq p<\infty$ then $B^\epsilon$ converges to $B^{\#}$ relative to $A^\epsilon$. \qed\end{remark} \begin{lemma}\label{pol5} If $B^\epsilon_i \xrightarrow{A^\epsilon} B^{\#}_i$ for $i=1,..,k$ then $\sum_{i=1}^k c_i B^\epsilon_i\xrightarrow{A^\epsilon} \sum_{i=1}^k c_i B^{\#}_i$, where $c_i>0$ are constants and $\sum_{i=1}^k c_i =1$. \end{lemma} \begin{proof} The proof simply follows the definition of the relative convergence \eqref{dc1}. \end{proof} We end this section by establishing that the macro quantities $(A^{*},B^{\#})$ can be estimated in terms of underlying microstructure. Such results will be needed in our study of $(A^{*},B^{\#})$ and in establishing optimal bounds on $(A^{*},B^{\#})$. (See Section \ref{ad18}). The estimate on $A^{*}$ are classical \cite{A} and we extend it for the relative limit $B^{\#}$. \begin{lemma}\label{zz15} Let $A^{\epsilon,i}\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$, $B^{\epsilon,i}\in\mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$ and \begin{center} $A^{\epsilon,i} = \{a_1\chi_{\omega_{A^{\epsilon,i}}} + a_2(1-\chi_{\omega_{A^{\epsilon,i}}})\}I \xrightarrow{H} A^{*,i}$ and $B^{\epsilon,i} = \{b_1\chi_{\omega_{B^{\epsilon,i}}} + b_2(1-\chi_{\omega_{B^{\epsilon,i}}})\}I \xrightarrow{A^{\epsilon,i}} B^{\#,i}$ \end{center} for $i=1,2$. We assume that $A^{*,i}$ and $B^{\#,i}$ are the constant matrices. 
Then there exist positive constants $C>0$ and $\delta_A>0,\delta_B>0$ (independent of microstructures) such that, \begin{equation}\label{FL16} ||(A^{*,1}-A^{*,2})||\ \leq\ C\ \underset{\epsilon\rightarrow 0}{limsup}\left( \int_{|\!\!\!O}mega |(\chi_{\omega_{A^{\epsilon,1}}}(x) - \chi_{\omega_{A^{\epsilon,2}}}(x))|\ dx\right)^{\delta_A} ; \end{equation} \begin{align}\label{FL14} ||(B^{\#,1}-B^{\#,2})||\ \leq\ & C\ \underset{\epsilon\rightarrow 0}{limsup}\ \{\left( \int_{|\!\!\!O}mega |(\chi_{\omega_{A^{\epsilon,1}}}(x) - \chi_{\omega_{A^{\epsilon,2}}}(x))|\ dx\right)^{\delta_A}\notag\\ &\qquad\qquad\qquad\qquad+ \left( \int_{|\!\!\!O}mega |(\chi_{\omega_{B^{\epsilon,1}}}(x) - \chi_{\omega_{B^{\epsilon,2}}}(x))|\ dx \right)^{\delta_B}\}. \end{align} \end{lemma} \begin{proof} As $A^{*,i}$, $i=1,2$ are constant homogenized matrices we can take the following oscillatory test functions (See \cite[Page no. 6]{KP}) $\{w^{\epsilon,i}_k\}_{k=1}^N\in (H^1({|\!\!\!O}mega))^N$ satisfying : \begin{equation}\label{zz16} -div A^{\epsilon,i}\nabla w^{\epsilon,i}_k = 0 \mbox{ in } {|\!\!\!O}mega, \quad w^{\epsilon,i}_k = x_k \ \mbox{ on }\partial{|\!\!\!O}mega, \ i=1,2 \end{equation} so that, \begin{equation*} \nabla w^{\epsilon,i}_k \rightharpoonup e_k \mbox{ in } L^2({|\!\!\!O}mega)\ \mbox{ and }\ A^{\epsilon,i}\nabla w^{\epsilon,i}_k \rightharpoonup A^{*,i}e_k \mbox{ in }L^2({|\!\!\!O}mega),\ i=1,2. \end{equation*} Using $w^{\epsilon,1}_k- w^{\epsilon,2}_k =0$ on $\partial{|\!\!\!O}mega$ and integrating by parts in \eqref{zz16} we have \begin{equation*} \int_{|\!\!\!O}mega A^{\epsilon,1}\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)\cdot\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)\ dx = \int_{|\!\!\!O}mega (A^{\epsilon,2}-A^{\epsilon,1})\nabla w^{\epsilon,2}_k\cdot\nabla (w^{\epsilon,1}_k -w^{\epsilon,2}_k)\ dx. 
\end{equation*} Then by using coercivity of $A^{\epsilon,1}$ we simply get \begin{equation}\label{zz19} ||\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)||_{L^2({|\!\!\!O}mega)} \leq C ||(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})\nabla w^{\epsilon,2}_k||_{L^2({|\!\!\!O}mega)}. \end{equation} By using the Meyers theorem (cf. \cite[Theorem 1.3.41]{A}), there exists an exponent $p>2$ such that $||\nabla w^{\epsilon,i}_k||_{L^p({|\!\!\!O}mega)}\leq C$, $i=1,2$ (independent of $\epsilon$), and further using Young's inequality in \eqref{zz19} we deduce \begin{equation*} ||\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)||_{L^2({|\!\!\!O}mega)} \leq C ||(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})||_{L^{\frac{2p}{p-2}}({|\!\!\!O}mega)}. \end{equation*} \begin{align*} |(A^{*,1}-A^{*,2})e_k| &\leq\ \underset{\epsilon\rightarrow 0}{limsup}\ |\{\int_{|\!\!\!O}mega A^{\epsilon,1}(\nabla w^{\epsilon,1}_k -\nabla w^{\epsilon,2}_k)\cdot e_k\ dx + \int_{|\!\!\!O}mega (A^{\epsilon,1}-A^{\epsilon,2})\nabla w^{\epsilon,2}_k\cdot e_k\ dx\}| \\ &\leq C\ \underset{\epsilon\rightarrow 0}{limsup} \left(||\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)||_{L^2({|\!\!\!O}mega)} + ||(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})||_{L^{\frac{2p}{p-2}}({|\!\!\!O}mega)}\right)\\ &\leq C\ \underset{\epsilon\rightarrow 0}{limsup} ||(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})||_{L^{\frac{2p}{p-2}}({|\!\!\!O}mega)}. \end{align*} On the other hand, by the definition of relative convergence, we have \begin{equation*} B^{\epsilon,i}\nabla w^{\epsilon,i}_k\cdot\nabla w^{\epsilon,i}_k \rightharpoonup B^{\#,i}e_k\cdot e_k \mbox{ in }\mathcal{D}^\prime({|\!\!\!O}mega), \ i=1,2. \end{equation*} Moreover, we claim : \begin{equation}\label{zz18} \int_{|\!\!\!O}mega B^{\epsilon,i}\nabla w^{\epsilon,i}_k\cdot\nabla w^{\epsilon,i}_k\ dx \rightarrow \int_{|\!\!\!O}mega B^{\#,i}e_k\cdot e_k\ dx, \ i=1,2. 
\end{equation} Introducing the adjoint state $p^{\epsilon,i}_k\in H^1_0({|\!\!\!O}mega)$ via the relation : \begin{equation*} div (A^{\epsilon,i}\nabla p^{\epsilon,i}_k -B^{\epsilon,i}\nabla w^{\epsilon,i}_k) =0 \mbox{ in }{|\!\!\!O}mega ; \ \ i=1,2. \end{equation*} Multiply the above equation of $p^{\epsilon,i}_k$ by $w^{\epsilon,i}_k$ and the equation of $w^{\epsilon,i}_k$ by $p^{\epsilon,i}_k$, we obtain \begin{equation*} \int_{|\!\!\!O}mega B^\epsilon\nabla w^{\epsilon,i}_k\cdot\nabla w^{\epsilon,i}_k\ dx = -\int_{\partial{|\!\!\!O}mega}(z^{\epsilon,i}_k\cdot \nu)\ x_k \ d\sigma \mbox{ with }z^{\epsilon,i}_k = (A^{\epsilon,i}\nabla p^{\epsilon,i}_k -B^{\epsilon,i}\nabla w^{\epsilon,i}_k). \end{equation*} Taking limit $\epsilon \rightarrow 0$ and using that $z^{\epsilon,i}_k \rightharpoonup z^{i}_k = A^{*,i}\nabla p^{i}_k -B^{\#,i}e_k$ in $L^2({|\!\!\!O}mega)$ weak, we get \begin{equation*} \int_{|\!\!\!O}mega B^\epsilon\nabla w^{\epsilon,i}_k\cdot\nabla w^{\epsilon,i}_k\ dx \rightarrow -\int_{\partial{|\!\!\!O}mega}(z^i_k\cdot \nu)\ x_k\ d\sigma \end{equation*} By repeating the above steps, we can compute the above limit and establish our claim \eqref{zz18}. 
\\ \\ Then it follows that, \begin{align*} |(B^{\#,1}-B^{\#,2})e_k\cdot e_k| &\leq\ \underset{\epsilon\rightarrow 0}{limsup}\ |\{\int_{|\!\!\!O}mega B^{\epsilon,1}(\nabla w^{\epsilon,1}_k\cdot\nabla w^{\epsilon,1}_k -\nabla w^{\epsilon,2}_k\cdot\nabla w^{\epsilon,2}_k)\ dx \\ &\qquad\qquad\qquad\qquad + \int_{|\!\!\!O}mega (B^{\epsilon,1}-B^{\epsilon,2})\nabla w^{\epsilon,2}_k\cdot \nabla w^{\epsilon,2}_k\ dx\}| \\ &\leq C\ \underset{\epsilon\rightarrow 0}{limsup} \left(||\nabla(w^{\epsilon,1}_k -w^{\epsilon,2}_k)||_{L^2({|\!\!\!O}mega)} + ||(\chi_{\omega_{B^{\epsilon,1}}}-\chi_{\omega_{B^{\epsilon,2}}})||_{L^{\frac{p}{p-2}}({|\!\!\!O}mega)}\right)\\ &\leq C\ \underset{\epsilon\rightarrow 0}{limsup} \left(||(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})||_{L^{\frac{2p}{p-2}}({|\!\!\!O}mega)} + ||(\chi_{\omega_{B^{\epsilon,1}}}-\chi_{\omega_{B^{\epsilon,2}}})||_{L^{\frac{p}{p-2}}({|\!\!\!O}mega)}\right). \end{align*} Thus \eqref{FL16}, \eqref{FL14} follows with $\delta_A = \frac{p-2}{2p}$ and $\delta_B = \frac{p-2}{p}$. \end{proof} \noindent Finally, we add one covariance property for the relative convergence. \begin{lemma}[Covariance property]\label{pol6} Let $A^\epsilon \in \mathcal{M}(a_1,a_1;{|\!\!\!O}mega)$ $H$-converges to $A^{*}$, and $B^\epsilon\in\mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$ converges to $B^{\#}$ relative to $A^\epsilon$, and $\phi$ is a diffeomorphism ${|\!\!\!O}mega$ onto $\phi({|\!\!\!O}mega)$. Let us define : \begin{equation*} \widetilde{A}^\epsilon(\phi(x)) \stackrel{def}{=} \frac{1}{\mbox{det }(\nabla \phi(x))}\nabla \phi(x) A^\epsilon(x)\nabla\phi^{T}(x),\ \widetilde{B}^\epsilon(\phi(x)) \stackrel{def}{=} \frac{1}{\mbox{det }(\nabla \phi(x))}\nabla \phi(x) B^\epsilon(x)\nabla\phi^{T}(x). 
\end{equation*} Then we have \begin{align*} \widetilde{A}^\epsilon(\phi(x)) &\xrightarrow{H} \frac{1}{\mbox{det }(\nabla \phi(x))}\nabla \phi(x) A^{*}(x)\nabla\phi^{T}(x) = \widetilde{A}^{*}(\phi(x)) \mbox{ in }\phi(\Omega)\\ \mbox{and }\ \widetilde{B}^\epsilon(\phi(x)) &\xrightarrow{\widetilde{A}^\epsilon(\phi(x))} \frac{1}{\mbox{det }(\nabla \phi(x))}\nabla \phi(x) B^{\#}(x)\nabla\phi^{T}(x) = \widetilde{B}^{\#}(\phi(x)) \mbox{ in }\phi(\Omega). \end{align*} \end{lemma} \begin{proof} The proof is analogous to the proof of the covariance property of $H$-convergence given in \cite[Lemma 21.1]{T}. \end{proof} \section{One-dimensional Case}\label{hsf} \setcounter{equation}{0} Let us present the one-dimensional case where we compute the macro limits $A^{*}$ and $B^{\#}$ explicitly. We point out that the relation \eqref{FL19} was earlier obtained in \cite{KP}. Let $0 < a_1 \leq a^{\epsilon} \leq a_2 \mbox{ and } 0 < b_1 \leq b^{\epsilon} \leq b_2$ in some bounded open interval $I\subset \mathbb{R}$. We have $u^{\epsilon},p^{\epsilon}\in H^1_0(I)$ solving the following system of equations with $f\in H^{-1}(I)$ : \begin{equation*}\begin{aligned} &-\frac{d}{dx}(a^{\epsilon}\frac{du^{\epsilon}}{dx}) = f \mbox{ in }I,\\ & \frac{d}{dx}(a^{\epsilon}\frac{dp^{\epsilon}}{dx} - b^{\epsilon}\frac{du^{\epsilon}}{dx}) = 0 \mbox{ in }I. \end{aligned}\end{equation*} Then from the one-dimensional homogenization, we know that \begin{equation*}u^{\epsilon}\rightharpoonup u \mbox{ weakly in }H^1_0(I)\mbox{ and the flux }\sigma^{\epsilon} = a^{\epsilon}\frac{du^{\epsilon}}{dx}\rightarrow \sigma=a^{*}\frac{du}{dx} \mbox{ strongly in }L^2(I),\end{equation*} where $(a^{*})^{-1}= L^{\infty}(I)\mbox{ weak* limit of }(a^{\epsilon})^{-1}= \underline{a}^{-1}$ (say). 
\\ Similarly, from the adjoint equation, we have \begin{align*}p^{\epsilon} \rightharpoonup p\mbox{ in }H^1_0(I)\mbox{ and the flux }z^{\epsilon} = (a^{\epsilon}\frac{dp^{\epsilon}}{dx} - b^{\epsilon}\frac{du^{\epsilon}}{dx}) = c^{\epsilon}\mbox{ (constant) converges strongly}&\quad\\ \mbox{ to $z=c$ (constant) in }L^2(I)&.\end{align*} We combine these two strongly convergent sequences $\sigma^{\epsilon}$, $z^{\epsilon}$ in the following way : \begin{equation}\label{qw3} \frac{dp^{\epsilon}}{dx} - \frac{b^{\epsilon}}{(a^{\epsilon})^2}\sigma^{\epsilon} = \frac{c^{\epsilon}}{a^{\epsilon}}.\end{equation} Passing to the limit as $\epsilon \rightarrow 0$ in the above equation we have, \begin{equation*} \frac{dp}{dx} - (lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2})\sigma = \frac{c}{\underline{a}}, \quad\mbox{ where }(lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2}) = \mbox{$L^{\infty}(I)$ weak* limit of }\frac{b^{\epsilon}}{(a^{\epsilon})^2}. \end{equation*} Then by using it in \eqref{qw3}, we have \begin{equation*}\begin{aligned} &z = \underline{a}\frac{dp}{dx} - \{(\underline{a})^2(lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2})\}\frac{du}{dx} = c\\ \mbox{or, }\ \ &\frac{d}{dx}(\underline{a}\frac{dp}{dx} - \{(\underline{a})^2(lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2})\}\frac{du}{dx}) = 0 \end{aligned}\end{equation*} Therefore, \begin{equation}\label{FL19} b^{\#} = (\underline{a})^2\hspace{1pt}(lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2}). \end{equation} Even if $a^\epsilon \xrightarrow{H} a^{*} =\underline{a}$ and $b^\epsilon \rightharpoonup \overline{b}$ in $L^{\infty}(I)$ weak* for the entire sequence, $\frac{b^\epsilon}{(a^\epsilon)^2}$ may oscillate as microstructures vary and so its limit need not be unique. In other words, the behaviour is different from $a^{*}=\underline{a}$. 
\begin{remark}\label{ot10} One also notices that, the flux \begin{equation*} b^\epsilon\frac{du^{\epsilon}}{dx} =\frac{b^\epsilon}{a^\epsilon}\sigma^\epsilon \rightharpoonup (lim^{*}\frac{b^{\epsilon}}{a^{\epsilon}})\hspace{1.5pt}\sigma = (lim^{*}\frac{b^{\epsilon}}{a^{\epsilon}})\hspace{2pt}\underline{a}\frac{du}{dx}, \mbox{ weakly in }L^2(I),\end{equation*} and the energy \begin{equation*} b^\epsilon\frac{du^{\epsilon}}{dx}\frac{du^{\epsilon}}{dx} \rightharpoonup (lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2})\hspace{2pt}(\underline{a})^2\frac{du}{dx}\frac{du}{dx} \mbox{ weakly in }\mathcal{D}^\prime(I).\end{equation*} The limiting macro quantities that appear in flux and energy convergence respectively need not be same. For instance, let $a^\epsilon,b^\epsilon$ be two-phase medium having the same microstructure i.e. \begin{equation*} a^\epsilon = a_1 \chi^\epsilon + a_2(1-\chi^\epsilon),\ (0<a_1<a_2<\infty)\ \mbox{ and } b^\epsilon = b_1\chi^\epsilon + b_2(1-\chi^\epsilon), \ (0<b_1\leq b_2<\infty).\end{equation*} Then one can show \begin{equation*} b^{\#}_{FL} = (lim^{*}\frac{b^{\epsilon}}{a^{\epsilon}})\hspace{2pt}\underline{a} \neq (lim^{*}\frac{b^{\epsilon}}{(a^{\epsilon})^2})\hspace{2pt}(\underline{a})^2 = b^{\#}. \end{equation*} \qed\end{remark} \begin{remark}\label{Sd12} Let us consider two-phase media, $a^\epsilon_1$ and $a^\epsilon_2$ given by two different microstructures, and possessing the same homogenized limit $\underline{a}$. Then there exists $b^\epsilon$ such that $a^\epsilon_j \xrightarrow{H} \underline{a}$, $b^\epsilon\xrightarrow{a^\epsilon_j} b^{\#}_j $ for $j=1,2$ with $b^{\#}_1 \neq b^{\#}_2$. However, if $b^\epsilon$ is independent of $\epsilon$, then $b^{\#}$ is constant. \qed\end{remark} \subsection{One-dimensional Bounds}\label{Sd6} Here we find bounds on $b^{\#}$ where both $a^\epsilon(x)$ and $b^\epsilon(x)$ are given by two phase medium. 
Let $I$ be an interval in $\mathbb{R}$; we consider \begin{equation*}\begin{aligned} a^{\epsilon}(x) &= a_1\chi_{{\omega}_{A^{\epsilon}}}(x) + a_2(1-\chi_{{\omega}_{A^{\epsilon}}}(x))\mbox{ with }(a_1 < a_2 ),\ \ x\in I\\ \mbox{and }\quad b^{\epsilon}(x) &= b_1\chi_{{\omega}_{B^{\epsilon}}}(x) + b_2(1-\chi_{{\omega}_{B^{\epsilon}}}(x))\mbox{ with }(b_1\leq b_2),\ \ x\in I. \end{aligned}\end{equation*} Let us assume that, \begin{equation*} \chi_{{\omega}_{A^{\epsilon}}}(x)\rightharpoonup \theta_A(x) \mbox{ and } \chi_{{\omega}_{B^{\epsilon}}}(x) \rightharpoonup \theta_B(x) \quad\mbox{ in }L^{\infty}(I)\mbox{ weak*. } \end{equation*} Then following \eqref{FL19} we find \begin{align} b^{\#}(x) &=(\underline{a})^{2}\underset{\epsilon\rightarrow 0}{\lim}\ \frac{b^{\epsilon}}{(a^{\epsilon})^2}(x)\notag \\ &=(\underline{a})^{2} \underset{\epsilon\rightarrow 0}{\lim}\ \{ \frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\chi_{{\omega}_{B^{\epsilon}}}(x) + (\frac{b_2}{a_1^2}-\frac{b_2}{a_2^2})\chi_{{\omega}_{A^{\epsilon}}}(x) - {(b_2-b_1)}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\chi_{{\omega}_{A^{\epsilon}}\cap\,{\omega}_{B^{\epsilon}}}(x) \}\notag\\ &=(\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A - {(b_2-b_1)}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_{AB}\}\label{FL18} \end{align} where, $\theta_{AB}$ = $L^{\infty}(I)$ weak* limit of $\chi_{{\omega}_{A^{\epsilon}}}(x)\cdot\chi_{{\omega}_{B^{\epsilon}}}(x)\ (= \chi_{{\omega}_{A^{\epsilon}}\cap\,{\omega}_{B^{\epsilon}}}(x))$ and it satisfies the following bounds : \begin{equation}\begin{aligned}\label{FG2} \mbox{ Upper Bound : }\ \ & \theta_{AB}\ \leq\ \min\ \{\theta_A,\theta_B\}.\\ \mbox{ Lower Bound : }\ \ & \theta_{AB}\ \geq\ 0, \ \mbox{ always, }\\ \mbox{and }\ \ \ \ & \theta_{AB}\ \geq\ \theta_A + \theta_B -1, \ \mbox{ whenever }\theta_A + \theta_B \geq 1. 
\end{aligned}\end{equation} Using this, we find maximum and the minimum value of $b^{\#}$ (cf.\eqref{FL18}) for given $(a_1,a_2,\theta_A)$ and $(b_1,b_2,\theta_B)$ fixed. Notice that, the final term in the expression \eqref{FL18} has the negative sign. So in order to get the lower (or upper) bound of the left hand side we need to use upper (or lower) bound of $\theta_{AB}$. Thus we obtain the following lower bound \begin{equation}\begin{aligned}\label{FL20} \mbox{when $\theta_A \leq \theta_B$, }\ b^{\#} \geq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_1}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A \}= l^{\#}_1(x) \mbox{ (say)} \\ \mbox{when $\theta_B \leq \theta_A $, }\ b^{\#} \geq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_1^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\}= l^{\#}_2(x) \mbox{ (say).} \end{aligned}\end{equation} Similarly, we find the upper bounds \begin{equation}\begin{aligned}\label{FL17} \mbox{when $\theta_A + \theta_B \leq 1,$ }\ b^{\#} \leq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\} = u^{\#}_1(x) \mbox{ (say) }\\ \mbox{when $\theta_A + \theta_B \geq 1,$ }\ b^{\#} \leq\ &\ (\underline{a})^{2}\{\frac{(b_2-b_1)}{a_1^2} + \frac{b_1}{a_2^2} + \frac{(b_1-b_2)}{a_1^2}\theta_B + {b_1}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\}= u^{\#}_2(x) \end{aligned}\end{equation} Moreover, a simple computation shows that \begin{equation*} max\ \{\ l^{\#}_1, l^{\#}_2\ \}\ \leq \ min\ \{\ u^{\#}_1,u^{\#}_2\ \}. \end{equation*} \textbf{One-dimensional Bound :} \begin{equation}\label{FL13} min\ \{\ l^{\#}_1, l^{\#}_2\ \} \leq b^{\#} \leq max\ \{\ u^{\#}_1, u^{\#}_2\ \}. 
\end{equation} \textbf{Optimality of the Bounds : } We define \begin{align*} u_{\#}=\begin{cases} u^{\#}_1 \quad\mbox{if }\theta_A +\theta_B \leq 1 \\ u^{\#}_2 \quad\mbox{if }\theta_A +\theta_B \geq 1 \end{cases} ,\quad l_{\#}=\begin{cases} l^{\#}_1 \quad\mbox{if }\theta_A \leq \theta_B \\ l^{\#}_2 \quad\mbox{if }\theta_B \leq \theta_A \end{cases}. \end{align*} Then the following theorem provides the optimality of the region defined by the bounds \eqref{FL13}. \begin{theorem}\label{Sd10} By varying microstructures for $\{a^\epsilon, b^\epsilon \}$, $b^{\#}(x)$ attains all values in the interval $[l_{\#}(x),u_{\#}(x)]$. \end{theorem} \begin{proof} The equality of the above bound (lower and upper) can be achieved easily. For upper bound equality $u_{\#}$, one considers the ${\omega}_{A^{\epsilon}}$ and ${\omega}_{B^{\epsilon}}$ in such a way that they intersect (${\omega}_{A^{\epsilon}} \cap {\omega}_{B^{\epsilon}}$) in a least way, i.e. whenever we have $\theta_A +\theta_B \leq 1$, we take ${\omega}_{A^{\epsilon}} \cap {\omega}_{B^{\epsilon}} =\emptyset$ and for $\theta_A +\theta_B \geq 1$, we consider ${\omega}_{A^{\epsilon}}$, ${\omega}_{B^{\epsilon}}$ in such a way that $L^{\infty}$ weak* limit of $\chi_{{\omega}_{A^{\epsilon}}\cap\,{\omega}_{B^{\epsilon}}}(x)$ becomes $(\theta_A +\theta_B-1)$. Similarly for the lower bound equality $l_{\#}$, one takes ${\omega}_{B^{\epsilon}} \subseteq {\omega}_{A^{\epsilon}}$ for $\theta_A\geq \theta_B$. When $\theta_B\geq \theta_A$ we take ${\omega}_{A^{\epsilon}} \subseteq {\omega}_{B^{\epsilon}}$. Another important point to notice is that, for any given value $v\in (l_{\#},u_{\#})$, there are corresponding microstructures $\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x)$ such that simply by fixing the value of the weak* limit of $\chi_{{\omega}_{A^{\epsilon}}\cap\,{\omega}_{B^{\epsilon}}}(x)$ to be $\theta_{AB}$, we can achieve the intermediate value $v$. The following lemma from \cite{T} allows us to do that. 
For this purpose, it is enough to express $v$ in terms of $\theta_{AB}$ using the relation \eqref{FL18}. \end{proof} \begin{lemma}[Lyapunov] If $\theta \in L^{\infty}(\Omega)$ satisfies $0 \leq \theta \leq 1$ a.e. in $\Omega$ then there exists a sequence of characteristic functions $\chi_{\epsilon}$ satisfying $\chi_{\epsilon}(x)\rightharpoonup \theta(x)$ in $L^{\infty}(\Omega)$ weak*. \qed \end{lemma} We conclude this section by making some general comments on the $N$-dimensional case. In general the homogenized matrices $A^{*}$, $B^{\#}$ are difficult to calculate. Description of the set of all possible macro quantities is analogous to the famous \textit{G-closure problem} \cite{A,AM} in homogenization theory. It largely depends on the microstructure or microgeometry, which describes the way in which the components are mixed at the microscale. Our goal is to obtain bounds on the set of all possible limit matrices from below and above which are independent of microstructure. We also want the bounds to be optimal in the sense of Theorem \ref{Sd10}. \section{Statement of Main Results}\label{ad18} \setcounter{equation}{0} In this section we announce the results concerning the bounds on $(A^{*},B^{\#})$. We assume $A^{\epsilon}$ and $B^{\epsilon}$ are governed by a two-phase medium. More precisely let $A^{\epsilon}\in \mathcal{M}(a_1,a_2;\Omega)$ with $0 < a_1\leq a_2 <\infty$ and $B^{\epsilon}\in\mathcal{M}(b_1,b_2;\Omega)$ with $0< b_1\leq b_2 < \infty$ are governed by a two-phase medium : \begin{equation}\label{ta}\begin{aligned} A^{\epsilon}(x) = a^{\epsilon}(x)I\ \mbox{ where, }\ a^{\epsilon}(x) &= a_1\chi_{{\omega}_{A^{\epsilon}}}(x) +a_2 (1-\chi_{{\omega}_{A^{\epsilon}}}(x))\ \mbox{ a.e. in }\ x\in\Omega \\ \chi_{{\omega}_{A^{\epsilon}}}(x) &\rightharpoonup \theta_A(x) \mbox{ in }L^{\infty}(\Omega)\mbox{ weak* topology }. 
\end{aligned}\end{equation} \begin{equation}\label{tb}\begin{aligned} B^{\epsilon}(x)= b^{\epsilon}(x)I\ \mbox{ where }\ b^{\epsilon}(x) &= b_1\chi_{{\omega}_{B^{\epsilon}}}(x) + b_2 (1-\chi_{{\omega}_{B^{\epsilon}}}(x))\ \mbox{ a.e. in }\ x\in{|\!\!\!O}mega\\ \chi_{{\omega}_{B^{\epsilon}}}(x) &\rightharpoonup \theta_B(x)\mbox{ in } L^{\infty}({|\!\!\!O}mega) \mbox{ weak* topology }. \end{aligned} \end{equation} In this section, we state four bounds labeled as L1, L2, U1, U2 involving $(A^{*},B^{\#})$, $\{a_1,a_2,\theta_A\}$, $\{b_1,b_2,\theta_B\}$. They are shown to be valid in sub-domains $int({|\!\!\!O}mega_{L1})$, $int({|\!\!\!O}mega_{L2})$, $int({|\!\!\!O}mega_{U1})$, $int({|\!\!\!O}mega_{U2})$ which are as follows : \begin{align*} &{|\!\!\!O}mega_{L1}:= \{x\in{|\!\!\!O}mega : \theta_A(x) \leq \theta_B(x)\},\ \ \ \ \ \ \ {|\!\!\!O}mega_{L2}:= \{x\in{|\!\!\!O}mega : \theta_B(x) < \theta_A(x)\},\\ &{|\!\!\!O}mega_{U1}:= \{x\in{|\!\!\!O}mega : \theta_A(x) + \theta_B(x)\leq 1\},\ \ {|\!\!\!O}mega_{U2}:= \{x\in{|\!\!\!O}mega : \theta_B(x) + \theta_A(x)> 1\}. \end{align*} The bounds define four regions in the phase space denoted by $(Li,Uj)$, $i,j=1,2$. Physical sub-domain on which the region $(Li,Uj)$ is optimal is $int({|\!\!\!O}mega_{(Li,Uj)})$, where $ {|\!\!\!O}mega_{(Li,Uj)} = {|\!\!\!O}mega_{Li} \cap {|\!\!\!O}mega_{Uj} \mbox{ for }i,j=1,2$. Compare this situation with the classical case involving $A^{*}$ in which there is only one region in the phase space and it is optimal in the entire physical domain ${|\!\!\!O}mega$. Their proofs presented in Section \ref{ts} use a combination of translated inequality, $H$-measure techniques and Compensated Compactness. We start by recalling the optimal bounds on the homogenized matrices $A^{*}$ for two-phase medium. 
\paragraph{Optimal Bounds on $A^{*}(x)$ :} Let $\mathcal{G}_{\theta_A(x)}$ (known as the \textit{G-closure set}) be the set of all possible effective tensors $A^{*}(x)$ obtained by the homogenization of two phases $a_1,a_2$ associated with the volume fraction $\theta_A(x)$ and $(1-\theta_A(x))$ respectively, which has the following pointwise characterization in terms of the trace inequalities (see \cite[Proposition 10]{MT1}) : The set $\mathcal{G}_{\theta_A(x)}$ is the set of all symmetric matrices with eigenvalues $\lambda_1(x),..,\lambda_N(x)$ satisfying pointwise \begin{equation}\begin{aligned}\label{FL11} \underline{a}(x) \leq \lambda_i(x)&\leq \overline{a}(x),\quad \forall 1\leq i\leq N\\ \mbox{(Lower Trace Bound) : }\quad \ \sum_{i=1}^N \frac{1}{\lambda_i(x)-a_1} &\leq \frac{1}{\underline{a}(x)-a_1} + \frac{N-1}{\overline{a}(x)-a_1}\\ \mbox{(Upper Trace Bound) : }\quad \ \sum_{i=1}^N \frac{1}{a_2 -\lambda_i(x)} &\leq \frac{1}{a_2-\underline{a}(x)} + \frac{N-1}{a_2-\overline{a}(x)}, \end{aligned}\end{equation} where $\underline{a}(x), \overline{a}(x)$ are the harmonic and arithmetic means of $a_1,a_2$ respectively, defined as $\underline{a}(x) = (\frac{\theta_A(x)}{a_1} +\frac{1-\theta_A(x)}{a_2})^{-1}$ and $\overline{a}(x)=a_1\theta_A(x)+a_2(1-\theta_A(x))$. \\ \\ $\mathcal{G}_{\theta_A(x)}$ is a convex region for fixed $x$ (see \cite[Remark 2.2.15]{A}). Let the boundaries of $\mathcal{G}_{\theta_A(x)}$ be $\partial\mathcal{G}^{L}_{\theta_A(x)}$ and $\partial\mathcal{G}^{U}_{\theta_A(x)}$ i.e. ($\partial\mathcal{G}_{\theta_A(x)} = \partial\mathcal{G}^{L}_{\theta_A(x)} \cup \partial\mathcal{G}^{U}_{\theta_A(x)}$). The set $\partial\mathcal{G}^{L}_{\theta_A(x)}$ (respectively, $\partial\mathcal{G}^{U}_{\theta_A(x)}$) represents the lower bound equality (respectively, the upper bound equality) of \eqref{FL11}. 
\qed \\ \\ We also introduce $\mathcal{K}_{\theta_A}\subset \mathbb{R}^N$ consisting of $(\lambda_1,..,\lambda_N)$ satisfying the above three inequalities \eqref{FL11}. Having the above characterization of the set $\mathcal{G}_{\theta_A(x)}$, we will present our results concerning the trace bounds involving $(A^{*},B^{\#})$ in arbitrary dimension. It is divided into four cases and their optimality will be taken up in Section \ref{qw4} (see \eqref{qw2}). \subsection{Optimal Trace Bounds : $A^{\epsilon}$ is governed by a two phase medium and $B^{\epsilon}$ is independent of $\epsilon$ }\label{FL7} We consider $A^{\epsilon}\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ is given by \eqref{ta} and $B^{\epsilon}(x)=\ b(x)I\in\mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$ for some $b\in L^{\infty}({|\!\!\!O}mega)$. That is, $B^\epsilon$ does not have microstructure. Then we have following bounds in terms of the trace of the matrices $A^{*}(x)$ and $B^{\#}(x)$ hold almost everywhere in $x\in{|\!\!\!O}mega$ : \paragraph{Lower Trace Bound L :} \begin{equation}\label{tw} tr\ \{b(x)(a_2I-A^{*}(x))(a_2B^{\#}(x)-b(x)A^{*}(x))^{-1}(a_2I-A^{*}(x))\} \leq\ N\theta_A(x)(a_2-a_1). \end{equation} \noindent \textbf{Upper Trace Bound U :} \begin{equation}\label{tq} tr\ \{ b(x)(A^{*}(x) - a_1I)(b(x)A^{*}(x)-a_1 B^{\#}(x))^{-1}(A^{*}(x) - a_1I)\} \leq\ N(1-\theta_A(x))(a_2-a_1). \end{equation} In the sequel, when we talk about lower/upper trace bound, we always mean the above bounds supplemented with the bound \eqref{Sd3}. The above bounds are saturated/optimal in the sense of Theorem \ref{qw6} below. 
We will not write down the proof explicitly because the more complicated case of $B^\epsilon$ having microstructures will be discussed in Section \ref{qw4}.\\ \\ We further discover (see Remark \ref{eg3}) that for $A^\epsilon(x)$ satisfying \eqref{ta}, the corresponding $H$-limit $A^\epsilon(x) \xrightarrow{H} A^{*}(x)$ in $\Omega$ and the relative limit $b(x)I\xrightarrow{A^\epsilon(x)} B^{\#}(x)$ in $\Omega$, commute with each other, i.e. \begin{equation} A^{*}(x)B^{\#}(x) = B^{\#}(x)A^{*}(x),\quad x\in\Omega\ \mbox{ a.e.} \end{equation} That means that they have a common eigenvector basis, say $\{u_1(x),\ldots,u_N(x)\}$, $x\in\Omega$. If $\lambda_i(x)$ and $\mu_i(x)$ are the eigenvalues of $A^{*}(x)$ and $B^{\#}(x)$ corresponding to the common eigenvector $u_i(x)$ i.e. \begin{equation} A^{*}(x)u_i(x) = \lambda_i(x) u_i(x) \mbox{ and } B^{\#}(x)u_i(x)=\mu_i(x) u_i(x) ,\ \ i=1,\ldots,N, \ x\in\Omega \mbox{ a.e.}, \end{equation} then the above bounds \eqref{tw} and \eqref{tq} are also written as : \begin{align} &\textbf{Lower Trace Bound L :}\ \sum_{i=1}^N \frac{b(x)(a_2-\lambda_i(x))^2}{(a_2\lambda_i(x)-b(x)\mu_i(x))} \leq N\theta_A(x)(a_2-a_1).\label{eg6}\\ &\textbf{Upper Trace Bound U :}\ \sum_{i=1}^N \frac{b(x)(\lambda_i(x) - a_1)^2}{(b(x)\lambda_i(x)-a_1\mu_i(x))} \leq N(1-\theta_A(x))(a_2-a_1). \label{eg7} \end{align} \subsection{Optimal Trace Bounds : $A^{\epsilon}$ and $B^{\epsilon}$ are both governed by a two-phase medium}\label{Sd9} Here we assume $A^{\epsilon}$ and $B^{\epsilon}$ are given by \eqref{ta} and \eqref{tb} respectively. Then we have the following optimal trace bounds that hold on various sub-domains of $\Omega$ specified in each case. \paragraph{Lower Trace Bound L1 :} Let $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$. 
Then by the structure of the phase space of $A^{*}$, we know that there exists a unique $\theta(x)$ with $\theta(x)\leq \theta_A(x)$ almost everywhere $x\in{|\!\!\!O}mega$ such that $A^{*}(x)\in\partial\mathcal{G}_{\theta(x)}^L$ (see Figure 4) : \begin{equation}\label{eib} tr\ (A^{*}(x)-a_1I)^{-1} = tr\ (\overline{A}_{\theta}(x)-a_1I)^{-1} + \frac{\theta(x)}{(1-\theta(x))a_1} \end{equation} where $\overline{A}_{\theta}(x) =(a_1\theta(x) +a_2(1-\theta(x))I$.\\ \\ Then the corresponding $(A^{*},B^{\#})$ with $A^{*}\in\mathcal{G}_{\theta_A}$ satisfies \begin{align}\label{tt} tr\ (B^{\#}(x)-b_1I)(\overline{A}_{\theta}(x)-a_1I)^2(A^{*}(x)-a_1I)^{-2}\geq\ & N(b_2-b_1)(1-\theta_B(x))\notag\\ &\quad+\frac{b_1(a_2-a_1)^2}{a_1^2}\theta(x)(1-\theta(x)). \end{align} where $\theta(x)$ is given by \eqref{eib} in terms of $A^{*}(x).$\\ \\ By eliminating $\theta$ from \eqref{tt}, the above lower bound is equivalent to the following : \begin{align}\label{ub6} tr\ (B^{\#}(x)-b_1I)(A^{*}(x)-a_1I)^{-2}\geq\ & \frac{N(b_2-b_1)(1-\theta_B(x))\left( a_1tr\ (A^{*}(x)-a_1I)^{-1} +1\right)^2}{(a_2 +a_1(N-1))^2}\notag\\ &\quad +\frac{b_1}{a_1}\frac{\left((a_2-a_1)\hspace{2pt} tr\ (A^{*}(x)-a_1I)^{-1} - N\right)}{(a_2 +a_1(N-1))}. \end{align} The above lower bound is valid and optimal in the sense of Theorem \ref{qw6} in the sub-domain $int({|\!\!\!O}mega_{L1})$. \paragraph{Lower Trace Bound L2 :} Let $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$. 
Then we know that there exists a unique $\theta(x)$ with $\theta(x)\geq \theta_A(x)$ almost everywhere $x\in{|\!\!\!O}mega$ such that $A^{*}\in\partial\mathcal{G}_{\theta(x)}^U$ (see Fig 2) : \begin{equation}\label{eic} tr\ ({A^{*}(x)}^{-1}-a_2^{-1}I)^{-1} = tr\ ({\underline{A}_{\theta}}^{-1}(x) - a_2^{-1}I)^{-1} + (N-1)\frac{(1-\theta(x))a_2}{\theta(x)} \end{equation} where $\underline{A}_{\theta}^{-1}(x) = (\frac{\theta(x)}{a_1}+\frac{(1-\theta(x))}{a_2})I.$\\ \\ Then the corresponding $(A^{*},B^{\#})$ with $A^{*}\in\mathcal{G}_{\theta_A}$ satisfies :\\ \\ \textbf{Case (a):\ when $\frac{b_2}{a_2^2}\leq \frac{b_1}{a_1^2}$} \begin{align}\label{to} tr\ \{({A^{*}}^{-1}(x)B^{\#}(x)&{A^{*}}^{-1}(x)- \frac{b_2}{a_2^2} I)(\underline{A}_{\theta}^{-1}(x) - a_2^{-1}I)^2({A^{*}}^{-1}(x)- a_2^{-1}I)^{-2}\} \notag\\ &\qquad\geq\ N(l(x)-\frac{b_2}{a_2^2})+\frac{b_2(a_2-a_1)^2}{(a_1a_2)^2}\theta(x)(1-\theta(x))(N-1) . \end{align} \textbf{Case (b):\ when $\frac{b_2}{a_2^2}\geq \frac{b_1}{a_1^2}$} \begin{align}\label{tn} &tr\ \{({A^{*}}^{-1}(x)B^{\#}(x){A^{*}}^{-1}(x)- \frac{b_1}{a_1^2} I)(\underline{A}_{\theta}^{-1}(x) - a_2^{-1}I)^2({A^{*}}^{-1}(x)- a_2^{-1}I)^{-2}\}\notag\\ &\geq N(l(x)-\frac{b_1}{a_1^2}) +\{ \frac{b_1(a_2-a_1)^2}{a_1^4}\theta(x)+2(\frac{b_2}{a_2^2}-\frac{b_1}{a_1^2})\frac{(a_2-a_1)}{a_1}\}(1-\theta(x))(N-1) \end{align} where, \begin{equation*} l(x)= \ \frac{b_1}{a_1^2}\theta_B(x) + \frac{b_2}{a_1^2}(\theta(x) - \theta_B(x)) + \frac{b_2}{a_2^2}(1-\theta(x))\end{equation*} and $\theta(x)$ is given by \eqref{eic} in terms of $A^{*}$.\\ \\ The above lower bound is valid and optimal in the sense of Theorem \ref{qw6} in the sub-domain $int({|\!\!\!O}mega_{L2})$. \paragraph{Upper Trace Bound U1 :} Let $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$. Then we know that there exists a unique $\theta(x)$ with $\theta(x)\leq \theta_A(x)$ almost everywhere $x\in{|\!\!\!O}mega$ such that $A^{*}\in\partial\mathcal{G}_{\theta(x)}^L$, satisfying \eqref{eib}. 
Then the corresponding $(A^{*},B^{\#})$ with $A^{*}\in\mathcal{G}_{\theta_A}$ satisfies \begin{align}\label{hsm} tr\ \{(\frac{b_2}{a_1}A^{*}(x)-B^{\#}(x))(\overline{A}_{\theta}(x)-a_1I)^2(A^{*}(x)- a_1 I)^{-2}\} \geq\ &N(b_2-b_1)\theta_B(x) \notag\\ & + N\frac{b_2}{a_1}(a_2-a_1)(1-\theta(x)). \end{align} The above upper bound is valid and optimal in the sense of Theorem \ref{qw6} in the sub-domain $int({|\!\!\!O}mega_{U1})$. \paragraph{Upper Trace Bound U2 :} Let $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$. Then we know that there exists a unique $\theta(x)$ with $\theta(x)\geq \theta_A(x)$ almost everywhere $x\in{|\!\!\!O}mega$ such that $A^{*}\in\partial\mathcal{G}_{\theta(x)}^U$, satisfying \eqref{eic}. Then the corresponding $(A^{*},B^{\#})$ with $A^{*}\in\mathcal{G}_{\theta_A}$ satisfies \begin{align}\label{tm} &tr\ \{(\frac{b_2a_2}{a_1^2}{A^{*}}^{-1}(x)-{A^{*}}^{-1}(x)B^{\#}(x){A^{*}}^{-1}(x))(\underline{A}_{\theta}(x)^{-1} - a_2^{-1}I)^2({A^{*}(x)}^{-1} - a_2^{-1}I)^{-2}\}\notag\\ &\geq\ N(\frac{b_2}{a_1^2}-\theta^{*}(x))+ N\frac{b_2(a_2-a_1)}{a_1^3}(1-\theta(x))- 2\frac{(b_2-b_1)(a_2-a_1)}{a_1^3}(1-\theta(x))(N-1) \end{align} where, \begin{equation*}\theta^{*}(x)= \frac{b_2}{a_1^2} + \frac{(b_1-b_2)}{a_1^2}\theta_B(x) + (\frac{b_1}{a_2^2}-\frac{b_1}{a_1^2})(1-\theta(x))\end{equation*} The above upper bound is valid and optimal in the sense of Theorem \ref{qw6} in the sub-domain $int({|\!\!\!O}mega_{U2})$.\\ \\ Note that, all the above lower/upper trace bounds are supplemented with the bound \eqref{Sd3}. \begin{remark} As we have done in the case of \eqref{ub6}, we can express other bounds also by eliminating $\theta(x)$ in terms of $(a_1,a_2,A^{*},b_1,b_2,\theta_A,\theta_B)$. 
\qed\end{remark} \begin{remark} We further discover (see Remark \ref{eg3}) that for any two sequences $A^\epsilon(x)$ and $B^\epsilon(x)$ satisfying \eqref{ta} and \eqref{tb} respectively, the corresponding $H$-limit $A^\epsilon(x) \xrightarrow{H} A^{*}(x)$ in ${|\!\!\!O}mega$ and the relative limit $B^\epsilon(x)\xrightarrow{A^\epsilon(x)} B^{\#}(x)$ in ${|\!\!\!O}mega$, commute with each other locally in ${|\!\!\!O}mega$, i.e. \begin{equation} A^{*}(x)B^{\#}(x) = B^{\#}(x)A^{*}(x),\quad x \mbox{ a.e. in }{|\!\!\!O}mega. \end{equation} That means that they have a common set of eigenvector basis, say $\{u_1(x),\ldots,u_N(x)\}$, $x\in{|\!\!\!O}mega$. If $\lambda_i(x)$ and the $\mu_i(x)$ are the eigenvalues of $A^{*}(x)$ and $B^{\#}(x)$ respectively corresponding to the common eigenvector $u_i(x)$, i.e. \begin{equation} A^{*}(x)u_i(x) = \lambda_i(x) u_i(x) \mbox{ and } B^{\#}(x)u_i(x)=\mu_i(x) u_i(x) ,\ \ i=1,\ldots,N, \ x \mbox{ a.e. in }{|\!\!\!O}mega, \end{equation} then the above $L1,L2,U1,U2$ bounds can be expressed in terms of eigenvalues $\{\lambda_i\}_{1\leq i\leq N}$ and $\{\mu_i\}_{1\leq i \leq N}$. For example, the L1 bound \eqref{ub6} and the U1 bound \eqref{hsm} can be written as : \begin{align} &\textbf{Lower Trace Bound L1 :}\notag\\ &\sum_{i=1}^N \frac{(\mu_i(x)-b_1)}{(\lambda_i(x)-a_1)^{2}}\ \geq\ \frac{N(b_2-b_1)(1-\theta_B(x))\left( a_1\sum_{i=1}^N (\lambda_i(x)-a_1)^{-1} +1\right)^2}{(a_2 +a_1(N-1))^2}\notag\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad +\frac{b_1}{a_1}\frac{\left((a_2-a_1)\sum_{i=1}^N (\lambda_i(x)-a_1)^{-1} - N\right)}{(a_2 +a_1(N-1))}. 
\label{eg10}\\ &\textbf{Upper Trace Bound U1 :}\notag\\ &\sum_{i=1}^N \frac{(\frac{b_2}{a_1}\lambda_i(x)-\mu_i(x))}{(\lambda_i(x)- a_1)^{2}}\ \geq\ \frac{ N(b_2-b_1)\theta_B(x)\left( a_1\sum_{i=1}^N (\lambda_i(x)-a_1)^{-1} +1\right)^2}{(a_2 +a_1(N-1))^2} \notag\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\quad + N\frac{b_2}{a_1}\frac{\left( a_1\sum_{i=1}^N (\lambda_i(x)-a_1)^{-1} +1\right)}{(a_2 +a_1(N-1))}.\label{eg5} \end{align} \end{remark} \begin{figure} \caption{\textit{$N=2$ : $(L1,U1)$ bounds for $B^{\#}$.}} \end{figure} \begin{remark}\label{eiu} Let us consider the self-interacting case, i.e. $B^\epsilon = A^\epsilon$; then we have $B^{\#}=A^{*}$ and the corresponding inequalities in lower bound L1 (cf.\eqref{ub6}) and lower bound $L2(a)$ coincide with the classical optimal bounds for $A^{*}$ given in \eqref{FL11}. On the other hand, $U1,U2$ define two regions in the \textit{G-closure set} of $A^{*}$ whenever $\theta_A\leq \frac{1}{2}$ and $\theta_A> \frac{1}{2}$ respectively. These regions do not seem to have any special significance in the classical phase diagram for $A^{*}$. For example the trace bound U1 says that the following inequality holds for all $A^{*}\in\underset{\theta_A\leq\frac{1}{2}}{\cup}\mathcal{G}_{\theta_A}$: \begin{equation*} tr\ (A^{*}-a_1I)^{-1} + a_1tr\ (A^{*}-a_1I)^{-2}\geq \frac{(a_1tr\ (A^{*}-a_1I)^{-1} +1)}{(a_2 +a_1(N-1))}+ a_1\frac{(a_1tr\ (A^{*}-a_1I)^{-1} +1)^2}{(a_2 +a_1(N-1))^2}. \end{equation*} \qed\end{remark} \begin{remark} If $\theta_A \rightarrow 0 $ i.e. $A^\epsilon$ becomes homogeneous/independent of $\epsilon$ then the bounds L1 and U1 imply $tr\ B^{\#} = tr\ \overline{B}$. Actually, the matrix inequalities \eqref{bs10},\eqref{OP4} from which L1, U1 are deduced by taking trace, give $B^{\#}=\overline{B}$. We can see this result directly by our arguments in Section 2. Similar conclusions can be reached by taking $\theta_A\rightarrow 1$ in L2, U2. 
\qed\end{remark} \begin{figure} \caption{\textit{$N=2$ : ${|\!\!\!O} \end{figure} \begin{remark}[Fibre-wise Convexity]\label{qw1} We define the union of the four regions in the phase space corresponding to four quadrants in the physical domain shown in Figure 2 : \begin{align}\label{kab} \mathcal{K}_{(\theta_A,\theta_B)} =\{ (A^{*},B^{\#})\mbox{ constant matrices such that }A^{*}\in\mathcal{G}_{\theta_A} \mbox{ and } (A^{*},B^{\#}) \mbox{ is in the }\quad &\notag\\ \mbox{regions }(Li,Uj), \mbox{ for some }i,j=1,2 \mbox{ with constant proportions }(\theta_A,\theta_B)\}& \end{align} and for $A^{*}\in\mathcal{G}_{\theta_A}$ we set \begin{align}\label{kfab} \mathcal{K}^{f}_{(\theta_A,\theta_B)}(A^{*}) =\{ B^{\#};\ (A^{*},B^{\#})\in \mathcal{K}_{(\theta_A,\theta_B)}\}. \end{align} Note $\mathcal{K}^{f}_{(\theta_A,\theta_B)}(A^{*})$ is nothing but the fibre over $A^{*}$. It can be easily verified that the fibre is a closed convex set for fixed $A^{*}$ using the linearity of the bounds with respect to $B^{\#}$. It's not clear what sort of other convexity properties $\mathcal{K}_{(\theta_A,\theta_B)}$ possesses. Fortunately, we will be needing in the sequel only the convexity of $\mathcal{K}^{f}_{(\theta_A,\theta_B)}(A^{*})$. \end{remark} \begin{figure} \caption{\textit{Horizontal (resp.vertical) axis represents the phase space of $A^{*} \end{figure} \noindent Let $\mathcal{G}_{(\theta_A(x),\theta_B(x))}$ be the set of all possible pairs of effective tensor fields $(A^{*}(x),B^{\#}(x))$ with $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$ obtained by the homogenization of two phases $\{a_1,a_2\}$ with volume fractions $\{\theta_A(x),(1-\theta_A(x))\}$ associated with $A^{*}(x)$, and $B^{\#}(x)$ the corresponding relative limit obtained with two phases $\{b_1,b_2\}$ with volume fractions $\{\theta_B(x),(1-\theta_B(x))\}$ respectively. 
Then it is established in Section \ref{sil} that if $(A^{*}(x),B^{\#}(x))\in \mathcal{G}_{(\theta_A(x),\theta_B(x))}$ then it lies in one of the regions $(Li,Uj)$ ($i,j=1,2$) pointwise, depending upon the values of $\theta_A(x)$ and $\theta_B(x)$, i.e. $\mathcal{G}_{(\theta_A(x),\theta_B(x))}\subseteq \mathcal{K}_{(\theta_A(x),\theta_B(x))}$, $x\in{|\!\!\!O}mega$ almost everywhere. The reverse inclusion is also true, which is known as optimality of the bounds. \begin{theorem}[Optimality]\label{qw6} \noindent \begin{enumerate} \item Optimality of L1\ :\ Let $A^{*}(x)$, $B^{\#}(x)$ be symmetric positive definite matrices and $L^\infty$ functions $\theta_A(x)$, $\theta_B(x)$ be defined in domain ${|\!\!\!O}mega_1$, an open subset of ${|\!\!\!O}mega$. We assume that $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$ and the pair $(A^{*}(x),B^{\#}(x))$ satisfies equality of the bound L1 with \eqref{Sd3} for $x$ a.e. in ${|\!\!\!O}mega_1$. In addition, it is assumed that $0\leq \theta_A(x)\leq \theta_B(x) \leq 1$ for $x$ a.e in ${|\!\!\!O}mega_1$. Then there exist sequences $A^\epsilon(x)$, $B^\epsilon(x)$, defined in ${|\!\!\!O}mega_1$ and measurable subsets $\omega_{A^\epsilon}$, $\omega_{B^\epsilon}$ of ${|\!\!\!O}mega_1$ satisfying \eqref{ta}, \eqref{tb} with $\omega_{A^\epsilon} \subset \omega_{B^\epsilon}$ and that $A^\epsilon\xrightarrow{H}A^{*}$ and $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$ in the domain ${|\!\!\!O}mega_1$. \item Statements analogous to the above one hold with regard to the optimality of other bounds L2, U1, U2. \item Optimality of the region $(L1,U1)$\ :\ Let $A^{*}(x)$, $B^{\#}(x)$ be symmetric positive definite matrices and $L^\infty$ functions $\theta_A(x)$, $\theta_B(x)$ be defined in some domain ${|\!\!\!O}mega_{1,1}$, an open subset of ${|\!\!\!O}mega$. We assume that $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$ and the pair $(A^{*}(x),B^{\#}(x))$ satisfies the bounds L1 and U1 with \eqref{Sd3} for $x$ a.e. in ${|\!\!\!O}mega_{1,1}$. 
In addition, it is assumed that $0\leq \theta_A(x)\leq \theta_B(x) \leq 1$ and $0\leq \theta_A(x)+\theta_B(x)\leq 1$ for $x$ a.e in ${|\!\!\!O}mega_{1,1}$. Then there exist sequences $A^\epsilon(x)$, $B^\epsilon(x)= \widetilde{\beta_1}(x)B^\epsilon_1(x) +\widetilde{\beta_2}(x)B^\epsilon_2(x)$ with $\widetilde{\beta_1},\widetilde{\beta_2}\geq 0$ and $\widetilde{\beta_1}+\widetilde{\beta_2}=1$, defined in ${|\!\!\!O}mega_{1,1}$ and measurable subsets $\omega_{A^\epsilon}$, $\omega_{B^\epsilon_1}$, $\omega_{B^\epsilon_2}$ of ${|\!\!\!O}mega_1$ satisfying \eqref{ta}, \eqref{tb} with $\omega_{A^\epsilon} \subset \omega_{B^\epsilon_1}$ and $\omega_{A^\epsilon} \subset \omega^c_{B^\epsilon_2}$, and that $A^\epsilon\xrightarrow{H}A^{*}$ and $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$ in the domain ${|\!\!\!O}mega_{1,1}$. \item Statements analogous to the above one hold with regard to the optimality of the other regions $(L1,U2)$, $(L2,U1)$, $(L2,U2)$. \item Optimality of all regions taken together\ :\ Let $A^{*}(x)$, $B^{\#}(x)$ be symmetric positive definite matrices and $L^\infty$ functions $\theta_A(x)$, $\theta_B(x)$ be defined in domain ${|\!\!\!O}mega_1$ an open subset of ${|\!\!\!O}mega$ with $0\leq \theta_A(x),\theta_B(x)\leq 1$. We assume that $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$ and the pair $(A^{*}(x),B^{\#}(x))\in \mathcal{K}_{(\theta_A(x),\theta_B(x))}$ $x$ a.e. with \eqref{Sd3} for $x$ a.e. in ${|\!\!\!O}mega_1$. Then there exist sequences $A^\epsilon(x)$, $B^\epsilon(x)$, defined in ${|\!\!\!O}mega_1$ and measurable subsets $\omega_{A^\epsilon}$, $\omega_{B^\epsilon}$ of ${|\!\!\!O}mega_1$ satisfying \eqref{ta}, \eqref{tb} and that $A^\epsilon\xrightarrow{H}A^{*}$ and $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$ in the domain ${|\!\!\!O}mega_1$. \end{enumerate} \end{theorem} \noindent The proof will be presented in Section \ref{qw4}. 
\qed \begin{remark}\label{ED4} In the context of point (5) of Theorem \ref{qw6}, we can impose the volume proportions $\delta_A$ and $\delta_B$ of the $a_1$-material and the $b_1$-material in $\Omega$, i.e. \begin{equation}\label{ED2} \frac{|\omega_{A^\epsilon}|}{|\Omega|}=\delta_A\mbox{ and }\frac{|\omega_{B^\epsilon}|}{|\Omega|} =\delta_B,\ \forall \epsilon. \end{equation} Consequently, the $L^\infty(\Omega)$ weak* limits of $\chi_{\omega_{A^\epsilon}}$ and $\chi_{\omega_{B^\epsilon}}$ are $\theta_A$ and $\theta_B$ respectively and they satisfy \begin{equation}\label{ED3} \frac{1}{|\Omega|}\int_\Omega \theta_A = \delta_A\mbox{ and }\frac{1}{|\Omega|}\int_\Omega\theta_B =\delta_B. \end{equation} Conversely, given $\{\theta_A,\theta_B,\delta_A,\delta_B\}$ satisfying \eqref{ED3}, we can choose $A^\epsilon, B^\epsilon$ as in point (5) of Theorem \ref{qw6} such that, in addition, they satisfy the condition \eqref{ED2}. \begin{proof} The proof lies in the fact that, given $\delta_A>0$ and a sequence of measurable sets $\{\omega_{A^\epsilon}\}_\epsilon$, $\omega_{A^\epsilon}\subset \Omega$, such that $\frac{|\omega_{A^\epsilon}|}{|\Omega|}\rightarrow \delta_A$ as $\epsilon \rightarrow 0$, there exists a sequence of measurable sets $\widetilde{\omega}_{A^\epsilon}\subset \Omega$ such that $\frac{|\widetilde{\omega}_{A^\epsilon}|}{|\Omega|}=\delta_A $, $\forall \epsilon$ and $|\widetilde{\omega}_{A^\epsilon}\smallsetminus \omega_{A^\epsilon} | \rightarrow 0 $ as $\epsilon \rightarrow 0$.
\\ We construct $\widetilde{\omega}_{A^\epsilon}$ in the following way: we define, for each $\epsilon$, $\mbox{if }\delta_A - \frac{|\omega_{A^\epsilon}|}{|\Omega|} >0, \mbox{ then }$ \begin{align*} \widetilde{\omega}_{A^\epsilon} = \omega_{A^\epsilon} \cup T_1^\epsilon \mbox{ where } T_1^\epsilon \mbox{ is measurable and }T^\epsilon_1 \subset (\Omega\smallsetminus \omega_{A^\epsilon}), \mbox{ with }\frac{|T_1^\epsilon|}{|\Omega|} = \delta_A-\frac{|\omega_{A^\epsilon}|}{|\Omega|}>0, \end{align*} $\mbox{and if }\frac{|\omega_{A^\epsilon}|}{|\Omega|}-\delta_A>0, \mbox{ then }$ \begin{align*} \widetilde{\omega}_{A^\epsilon} = \omega_{A^\epsilon}\smallsetminus T_2^\epsilon \mbox{ where } T_2^\epsilon \mbox{ is measurable and }T^\epsilon_2 \subset \omega_{A^\epsilon} \mbox{ with }\frac{|T_2^\epsilon|}{|\Omega|} = \frac{|\omega_{A^\epsilon}|}{|\Omega|}-\delta_A>0. \end{align*} Thus we have $\frac{|\widetilde{\omega}_{A^\epsilon}|}{|\Omega|}=\delta_A $, $\forall \epsilon$ and $|\widetilde{\omega}_{A^\epsilon}\smallsetminus \omega_{A^\epsilon} | \rightarrow 0 $ as $\epsilon \rightarrow 0$. A similar construction can be done for the sequence $\frac{|\omega_{B^\epsilon}|}{|\Omega|}\rightarrow\delta_B$, to have $\frac{|\widetilde{\omega}_{B^\epsilon}|}{|\Omega|}=\delta_B $, $\forall \epsilon$ and $|\widetilde{\omega}_{B^\epsilon}\smallsetminus \omega_{B^\epsilon} | \rightarrow 0 $ as $\epsilon \rightarrow 0$.\\ \\ Now, for given $A^{*}\in \mathcal{G}_{\theta_A}$ and $B^{\#}\in \mathcal{K}^f_{(\theta_A,\theta_B)}(A^{*})$ with $\theta_A,\theta_B$ satisfying \eqref{ED3}, following (5) of Theorem \ref{qw6} there exist $A^\epsilon$ and $B^\epsilon$ satisfying \eqref{ta} and \eqref{tb} respectively such that $A^\epsilon\xrightarrow{H}A^{*}$ and $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$ in $\Omega$.
Now we consider the sequences \[ \widetilde{A}^\epsilon = \{a_1\chi_{\widetilde{\omega}_{A^\epsilon}}+a_2(1-\chi_{\widetilde{\omega}_{A^\epsilon}})\}I \mbox{ and }\widetilde{B}^\epsilon = \{b_1\chi_{\widetilde{\omega}_{B^\epsilon}}+b_2(1-\chi_{\widetilde{\omega}_{B^\epsilon}})\}I \] where the respective microstructures satisfy \eqref{ED2}. Moreover, from our construction we have \[ ||\widetilde{A}^\epsilon - A^\epsilon||_{L^1(\Omega)} \rightarrow 0 \mbox{ and } ||\widetilde{B}^\epsilon - B^\epsilon||_{L^1(\Omega)} \rightarrow 0 \mbox{ as }\epsilon\rightarrow 0. \] Then, using Remark \ref{sii}, we have $\widetilde{A}^\epsilon\xrightarrow{H}A^{*}$ and $\widetilde{B}^\epsilon\xrightarrow{\widetilde{A}^\epsilon}B^{\#}$ in $\Omega$.\\ Alternatively, one can also exploit the fact that the construction in Section \ref{qw4} in fact produces $B^\epsilon$ converging to $B^{\#}$ in $L^1(\Omega)$. This is stronger than the relative convergence $B^\epsilon\xrightarrow{A^\epsilon}B^{\#}$ in $\Omega$. \end{proof} \end{remark} \noindent The above Remark \ref{ED4}, together with Theorem \ref{qw6} on optimality, plays a crucial role in the applications to problems of the Calculus of Variations discussed in Section \ref{qw5}. \section{Optimal Microstructures}\label{ub12} \setcounter{equation}{0} Before establishing the above bounds L1, L2, U1, U2 we present the analysis of $A^{*}$ and $B^{\#}$ for two important classes of microstructures known as laminates and Hashin-Shtrikman constructions. Recall that these microstructures are optimal for providing optimality of the bounds in the classical case, i.e. for the $A^{*}$ bound. We will see that they are useful for the study of $B^{\#}$ as well. \subsection{Laminated Microstructures : Simple Laminates}\label{bs12} We begin with the laminates.
The laminate microstructures are defined when the geometry of the problem varies only in a single direction that is the sequence of matrices $A^{\epsilon}$ depends on a single space variable, say $x_1$, $A^{\epsilon}(x) = A^{\epsilon}(x_1)$ and the homogenized composite is called laminate. If the component phases are stacked in slices orthogonal to the $e_{1}$ direction, in that case it is a generalization of the one-dimensional settings. In particular the $H$-convergence can be reduced to the usual weak convergence of some combinations of entries of the matrix $A^{\epsilon}$. In effect, this yields another type of explicit formula for the homogenized matrix as in the one-dimensional case. Let us recall this result : let $A^{\epsilon} \in \mathcal{M}(a_1,a_2,{|\!\!\!O}mega)$ satisfy the assumption $A^{\epsilon}(x) = A^{\epsilon}(x_1)$. Then $A^{\epsilon}$ $H$-converges to a homogenized matrix $A^{*}$ iff the following convergences hold in $L^{\infty}({|\!\!\!O}mega)$ weak* : (See \cite{A}) \begin{equation*} \begin{aligned} &\frac{1}{A^{\epsilon}_{11}} \rightharpoonup \frac{1}{A^{*}_{11}},\quad \frac{A^{\epsilon}_{1j}}{A^{\epsilon}_{11}} \rightharpoonup \frac{A^{*}_{1j}}{A^{*}_{11}} \mbox{ for }2\leq j \leq N,\quad \frac{A^{\epsilon}_{i1}}{A^{\epsilon}_{11}} \rightharpoonup \frac{A^{*}_{i1}}{A^{*}_{11}} \mbox{ for }2\leq i \leq N,\\ &(A^{\epsilon}_{ij}- \frac{A^{\epsilon}_{1j} A^{\epsilon}_{i1}}{A^{\epsilon}_{11}}) \rightharpoonup (A^{*}_{ij}- \frac{A^{*}_{1j} A^{*}_{i1}}{A^{*}_{11}}) \mbox{ for }i\neq j\mbox{, }2\leq i \leq N\mbox{, }1\leq j\leq N. \end{aligned} \end{equation*} where $(A^{\epsilon}_{ij})_{1\leq i,j\leq N}$ and $(A^{*}_{ij})_{1\leq i,j\leq N}$ denote the entries of $A^{\epsilon}$ and $A^{*}$, respectively.\\ The oscillating test functions matrix $X^{\epsilon}$ in \eqref{dc2} can also be explicitly written in the case of laminated structures. 
Indeed, it is easy to check that \begin{equation*} \begin{aligned} &X^{\epsilon}_{11} = \frac{A^{*}_{11}}{A^{\epsilon}_{11}},\quad X^{\epsilon}_{1j} = \frac{A^{*}_{1j} - A^{\epsilon}_{1j}}{A^{\epsilon}_{11}}\mbox{ for }2\leq j \leq N\\ & X^{\epsilon}_{ii} = 1 \mbox{ for }2\leq i \leq N,\quad X^{\epsilon}_{ij} = 0 \mbox{ for }i\neq j\mbox{, }2\leq i \leq N\mbox{, }1\leq j\leq N. \end{aligned} \end{equation*} Then using the convergence result \eqref{dc2}, we obtain $B^{\#}$ matrix explicitly : \begin{equation*} \begin{aligned} &B^{\#}_{11} =\ (A^{*}_{11})^2 \underset{\epsilon \rightarrow 0}{lim}\ \frac{B^{\epsilon}_{11}}{(A^{\epsilon}_{11})^2},\quad B^{\#}_{1j} =\ \underset{\epsilon \rightarrow 0}{lim}\ {B^{\epsilon}_{1j}}(\frac{A^{*}_{1j} - A^{\epsilon}_{1j}}{A^{\epsilon}_{11}} )^2 \mbox{ for }2\leq j \leq N,\\ &B^{\#}_{ii} =\ \overline{B} \mbox{ for }2\leq i \leq N,\quad B^{\#}_{ij} =\ 0 \mbox{ for }i\neq j\mbox{, }2\leq i \leq N\mbox{, }1\leq j\leq N. \end{aligned} \end{equation*} (The above limits have been taken in $L^{\infty}$ weak* sense.)\\ \noindent Now from the above result we can deduce a number of special cases of particular interest. \paragraph{Laminated microstructures for isotropic case, i.e. $A^\epsilon=a^\epsilon I$ and $B^\epsilon=b^\epsilon I$ are scalar matrices :} Let us assume that the matrix $A^{\epsilon}(x)$ is isotropic and depends only on $x_1$, i.e. $ A^{\epsilon}(x) = a^{\epsilon}(x_1)I$ and $B^{\epsilon}(x)=b^{\epsilon}(x)I$ is also isotropic but unlike $A^{\epsilon}$ it may depend on other variables also $x=(x_1,..,x_N)$. 
Then, we see \begin{equation}\begin{aligned}\label{FG17} &A^{*} = diag\{\hspace{1pt}\underline{a}(x_1),\overline{a}(x_1),...,\overline{a}(x_1)\}\\ &B^{\#} = diag\{\hspace{1pt}(\underline{a}(x_1))^2\ \underset{\epsilon\rightarrow 0}{lim}\frac{b^{\epsilon}(x)}{(a^{\epsilon}(x_1))^2},\ \overline{b}(x),..,\overline{b}(x)\} \end{aligned}\end{equation} where, $(\underline{a})^{-1}$ is the $L^{\infty}({|\!\!\!O}mega)$ weak* limit of $ (a^{\epsilon})^{-1}$ and $\overline{a}$ and $\overline{b}$ is the $L^{\infty}({|\!\!\!O}mega)$ weak* limit of $a^{\epsilon}$ and $b^{\epsilon}$ respectively.\\ \noindent If $b^{\epsilon}(x)$ is independent of $\epsilon$ say $b^{\epsilon}(x)=b(x)$, then corresponding $B^{\#}$ will be \begin{equation}B^{\#} = diag\{\hspace{1.5pt}b(x)(\underline{a}(x_1))^2\ \underset{\epsilon\rightarrow 0}{lim}(a^{\epsilon})^{-2}(x_1),\ b(x),..,b(x)\}.\end{equation} Following the formula \eqref{FG17}, we would like to consider few more cases as follows. \paragraph{(a): Laminated microstructures for $a^{\epsilon}$ and $b^{\epsilon}$ both in the same direction $e_1$:} We consider $ a^{\epsilon}(x) = a^{\epsilon}(x_1)$ and $b^{\epsilon}(x) = b^{\epsilon}(x_1)$ then from \eqref{FG17} \begin{equation*}B^{\#}_{11}=\ (\underline{a}(x_1))^2\ \underset{\epsilon\rightarrow 0}{lim}\frac{b^{\epsilon}}{(a^{\epsilon})^2}(x_1)\end{equation*} $-$ exactly the formula what we have derived in $1$-dim case, and $B^{\#}_{ii} = \overline{b}(x_1)$ for $2\leq i\leq N.$\\ \textbf{(b): Laminated microstructures for $a^{\epsilon}$ and $b^{\epsilon}$ in the mutually transverse direction :} By that we mean if $ a^{\epsilon}(x) = a^{\epsilon}(x_1)$ then we are considering $b^{\epsilon}(x) = b^{\epsilon}(x_2,..,x_N)$, i.e.independent of $x_1$ variable. Then \begin{equation*} B^{\#}_{11} = (\underline{a}(x_1))^2\ \overline{b}(x_2,..,x_N)\ \underset{\epsilon\rightarrow 0}{lim}\frac{1}{(a^{\epsilon})^2}(x_1) \ \ \mbox{and } \ B^{\#}_{ii} = \overline{b}(x_2,.,x_N)\ \mbox{ for }\ 2\leq i\leq N. 
\end{equation*} Now, using the fact \begin{equation*}\underset{\epsilon\rightarrow 0}{lim}\ \frac{1}{(a^{\epsilon})^2}(x_1) \geq \frac{1}{(\underline{a}(x_1))^2},\end{equation*} we get \begin{equation*} B^{\#} \geq \overline{b}I.\end{equation*} \begin{remark}\label{Sd8} The above property is different from that of the usual homogenized limit $B^{*}$, which is always bounded above by $\overline{b}I$. It indicates that the upper bound of $B^{\#}$ is bigger than the upper bound of $B^{*}$. \qed\end{remark} \noindent\textbf{(c): $a^\epsilon$ and $b^\epsilon$ are governed by a two-phase medium :} Let us consider \begin{equation}\begin{aligned}\label{bs17} a^{\epsilon}(x)&= a^\epsilon(x_1) = a_1\chi_{{\omega}_{A^{\epsilon}}}(x) + a_2(1-\chi_{{\omega}_{A^{\epsilon}}}(x))\mbox{ with }(a_1 < a_2 )\\ \mbox{and }\ b^{\epsilon}(x) &= b_1\chi_{{\omega}_{B^{\epsilon}}}(x) + b_2(1-\chi_{{\omega}_{B^{\epsilon}}}(x))\mbox{ with }(b_1 < b_2) \end{aligned}\end{equation} where \begin{equation*} \chi_{{\omega}_{A^{\epsilon}}}(x)\rightharpoonup \theta_A(x_1) \mbox{ and } \chi_{{\omega}_{B^{\epsilon}}}(x) \rightharpoonup \theta_B(x) \quad\mbox{ in }L^{\infty}(\Omega)\mbox{ weak*. } \end{equation*} Notice that, just like $\theta_A$ and $\theta_B$, we need new information on the microstructures, i.e. $\theta_{AB}$ = $L^{\infty}$ weak* limit of $\chi_{{\omega}_{A^{\epsilon}}}(x)\chi_{{\omega}_{B^{\epsilon}}}(x)$, and it satisfies the bounds \eqref{FG2}. Then using \eqref{FG17} we compute the simple laminates $B^{\#}=diag\{B^{\#}_{kk}\}_{1\leq k\leq N}$. Following the one-dimensional case computations (cf.
Section \ref{Sd6}), we have \begin{equation}\label{Sd7}\begin{aligned} B^{\#}_{11} &= (\underline{a}(x_1))^2 \underset{\epsilon\rightarrow 0}{lim} \frac{b^{\epsilon}(x)}{(a^{\epsilon}(x_1))^2}\\ &=(\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + (\frac{b_2}{a_1^2} -\frac{b_2}{a_2^2})\theta_A - {(b_2-b_1)}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_{AB}\}\\ B^{\#}_{kk} &= b_1\theta_B + b_2(1-\theta_B)\ \ \mbox{for }k=2,..,N. \end{aligned}\end{equation} Next by using the bounds \eqref{FG2} over \eqref{Sd7}, we get the following lower bounds as : \begin{equation}\begin{aligned}\label{lb10} \mbox{when $\theta_A \leq \theta_B$, }\ B^{\#}_{11}\ \geq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_1}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A \}= L^{\#}_1 \mbox{ (say)} \\ \mbox{when $\theta_B \leq \theta_A $, }\ B^{\#}_{11}\ \geq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_1^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\}= L^{\#}_2 \mbox{ (say)}. \end{aligned}\end{equation} Similarly, the upper bounds as : \begin{equation}\begin{aligned}\label{tp} \mbox{when $\theta_A + \theta_B \leq 1,$ }\ B^{\#}_{11}\ \leq\ &\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\} = U^{\#}_1 \mbox{ (say) }\\ \mbox{when $\theta_A + \theta_B \geq 1,$ }\ B^{\#}_{11}\ \leq\ &\ (\underline{a})^{2}\{\frac{(b_2-b_1)}{a_1^2} + \frac{b_1}{a_2^2} + \frac{(b_1-b_2)}{a_1^2}\theta_B + {b_1}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A\}= U^{\#}_2 \mbox{ (say). 
} \end{aligned}\end{equation} Moreover, a simple computations shows that \begin{equation*} max\ \{ L^{\#}_1, L^{\#}_2\}\ \leq \ min\ \{U^{\#}_1, U^{\#}_2\}.\end{equation*} Thus in the class of simple laminations we have obtained the bounds for $B^{\#}$ as, \begin{align}\label{FL10} & min\ \{L^{\#}_1, L^{\#}_2\} \leq B^{\#}_{11}\ \leq max\ \{U^{\#}_1, U^{\#}_2\}\\ \mbox{ and }& B^{\#}_{kk} = b_1\theta_B + b_2(1-\theta_B) \mbox{ for }k=2,..,N.\notag \end{align} Following the Theorem \ref{Sd10} established in one-dimensional case, the above inequality \eqref{FL10} provides an optimal bound for simple laminates. \begin{remark} If $B^{\epsilon}=b(x)I$ i.e. independent of $\epsilon$, then we obtain : \begin{equation}\label{bs11} B^{\#}=\ diag\{b(x)(\underline{a}(x_1))^2(\frac{\theta_A(x_1)}{a_1^2}+\frac{1-\theta_A(x_1)}{a_2^2}),\ b(x),..,b(x)\}.\end{equation} Similarly if \begin{equation*}B^{\epsilon}=b^\epsilon(x_2,..,x_N)I=\ \{b_1\chi_{\omega_{B^\epsilon}}(x_2,..,x_N)+ b_2(1-\chi_{\omega_{B^\epsilon}}(x_2,..,x_N))\}I\ \ \mbox{ (independent of $x_1$), }\end{equation*} then as \begin{equation*}\chi_{\omega_{B^\epsilon}}(x_2,..,x_N)\chi_{\omega_{A^\epsilon}}(x_1) \rightharpoonup \theta_B(x_2,..,x_N)\theta_A(x_1) \ \ L^{\infty}({|\!\!\!O}mega)\mbox{ weak* }; \end{equation*} We obtain : \begin{equation}\begin{aligned}\label{lb6} B^{\#}_{11}&=\ (\underline{a})^{2}\{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_2}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A - {(b_2-b_1)}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_B\theta_A\}\\ \mbox{and }\ B^{\#}_{kk}&=\ b_1\theta_B + b_2(1-\theta_B),\ \ k=2,..,N. \end{aligned} \end{equation} Note that, $min \{L^{\#}_1,L^{\#}_2\}<B^{\#}_{11} < max \{U^{\#}_1,U^{\#}_2\}$. Thus, the inequality \eqref{FL10} are strict in this case. \qed \end{remark} \subsection{Sequential Laminates}\label{Sd19} Let us mention a subclass of laminated homogenized tensor which is known as sequential laminates. (See \cite[Section 2.2]{A}). 
\begin{example}[Rank-$p$ Sequential Laminates] Let $\{e_i\}_{1\leq i \leq p}$ be a collection of unit vectors in $\mathbb{R}^{N}$ and $\{\theta_i\}_{1\leq i \leq p}$ the proportions at each stage of the lamination process, i.e. first we laminate $a_1$ and $a_2$ in $e_1$ direction with the proportion $\theta_1$ and $(1-\theta_1)$ respectively to get $A^{*}_1$ then we laminate $A^{*}_1$ and $a_2$ in $e_2$ direction with the proportion $\theta_2$ and $(1-\theta_2)$ respectively to get $A^{*}_2$ and this is repeated $p$ times.\\ \\ (a): We then have the following formulas from \cite{A} for a rank-p sequential laminate with matrix $a_2 I$ and core $a_1 I$. \begin{equation*}(\prod_{j=1}^{p} \theta_j)( A^{*}_p - a_2 I)^{-1} = (a_1 - a_2)^{-1}I + \sum_{i=1}^{p}\left( (1-\theta_i)(\prod_{j=1}^{i-1} \theta_j)\right)\frac{(e_i\otimes e_i)}{a_2}.\end{equation*} (b): For rank-p sequential laminate with matrix $a_1 I$ and core $a_2 I$, we have \begin{equation*}(\prod_{j=1}^{p} (1-\theta_j))( A^{*}_{p} - a_1 I)^{-1} = (a_2 - a_1)^{-1}I + \sum_{i=1}^{p}\left( \theta_i(\prod_{j=1}^{i-1}(1-\theta_j)\right)\frac{(e_i\otimes e_i)}{a_1}.\end{equation*} \qed\end{example} \noindent The following lemma from \cite{A} is important for proving the saturation / optimality of the bound of $A^{*}$. \begin{lemma}\cite{A} Let $\{e_i\}_{1\leq i \leq p}$ be a collection of unit vectors. Let $\theta_A \in (0,1)$. Now for any collection of non-negative real numbers $\{m_i\}_{1\leq i \leq p}$ satisfying $\sum_{i=1}^{p} m_i =1 $, there exists a rank-$p$ sequential laminate $A^{*}_p$ with matrix $a_2 I$ and core $a_1 I$ in proportion $(1-\theta_A)$ and $\theta_A$ respectively and with lamination directions $\{e_i\}_{1\leq i \leq p}$ such that \begin{equation}\label{OP3} \theta_A( A^{*}_{p} - a_2 I)^{-1} = (a_1 - a_2)^{-1}I + (1-\theta_A)\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{a_2 (e_i\cdot e_i)}. 
\end{equation} \noindent An analogous result holds when the roles of $a_2$ and $a_1$ (in proportions $(1-\theta_A)$ and $\theta_A$ respectively) in the lemma above are switched. The formula above is replaced by \begin{equation}\label{OP2} (1-\theta_A)(A^{*}_p - a_1 I)^{-1} = (a_2 - a_1)^{-1}I + \theta_A\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{a_1( e_i\cdot e_i)}. \end{equation} \qed\end{lemma} In order to define the sequential laminates for $B^{\#}$, we assume that $A^{\epsilon}$ is governed with $p-$sequential laminate microstructures, (say $A^\epsilon_p$) and by considering any $B^{\epsilon}(x)$ independent of $\epsilon$ one defines the limit matrix say $B^{\#}_p$. We call it a quasi-sequential laminate $B^{\#}_p$ which corresponds to the sequential laminate $A^{*}_p$. Now we consider $B^\epsilon$ with two-phases and with a microgeometry with layer corresponding to each layer of $p-$sequential laminated microstructures of $A^\epsilon_p$ there is an associated layered microstructure for $B^\epsilon$ also (say $B^\epsilon_p$). Then we define the corresponding limit matrix as $B^\epsilon_p\xrightarrow{A^\epsilon_p} B^{\#}_{p,p}$, by saying $(p,p)-$ sequential laminates.\\ One possible way to get these $B^{\#}_p$ would be finding the corrector matrix $X^{\epsilon}$ corresponding to $A^\epsilon_p\xrightarrow{H} A^{*}_p$ (i.e.$(X^{\epsilon})^t A^{\epsilon}_p X^{\epsilon} \rightharpoonup A^{*}_p $ in $\mathcal{D}^{\prime}({|\!\!\!O}mega)$) and then apply the convergence result \eqref{dc2} to get the the limit matrix $B^{\#}_p$. It is known that for $p\geq2$, getting corrector matrix $X^{\epsilon}$ is not easy. Briane \cite{B} gave an iteration procedure to obtain $X^{\epsilon}$. Getting the expression for $B^{\#}_p, B^{\#}_{p,p}$ is even more complicated. Fortunately, one gets an explicit expression for $B^{\#}_{p,p}$ through $H$-measure techniques which can be carried out in four different cases which are optimal for our bounds L1, L2, U1, U2. 
We postponed the construction of $B^{\#}_p, B^{\#}_{p,p}$ to Section \ref{ts} because we need $H$-measure, a tool which we have not yet introduced. Here we merely give expression for these two matrices. \begin{example} (a) Let us consider $B^{\epsilon}$ is independent of $\epsilon$ say $B^{\epsilon}= b(x)I$ with $b\in L^{\infty}({|\!\!\!O}mega)$ and bounded below by a positive constant. In this case we define the $p$-sequential laminates $B^{\#}_p$, with matrix $a_1 I$ and core $a_2 I$ for $A^{*}_p$ (cf.\eqref{OP2}) as follows : \begin{equation*}b(\overline{A}-A^{*}_p)(B^{\#}_p-bI)^{-1}(\overline{A}-A^{*}_p) =\ \theta_A(1-\theta_A)(a_2 -a_1)^2 (\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}) ;\ \mbox{ with }\sum_{i=1}^p m_i =1.\end{equation*} (b) By considering $B^{\epsilon}=b^{\epsilon}I$ defined in \eqref{bs17}, we define the $(p,p)-$sequential laminates $B^{\#}_{p,p}$, whenever $\omega_{A^{\epsilon}},\omega_{B^{\epsilon}}$ be the $p$-sequential laminate microstructures with $\omega_{A^{\epsilon}}\subseteq \omega_{B^{\epsilon}}$ in the same directions $\{e_i\}_{1\leq i\leq p}$ and with matrix $a_1 I$ and core $a_2 I$ for $A^{*}_p$ (cf. \eqref{OP2}) as follows : \begin{align}\label{bs16} &\{\frac{(\overline{B}-b_1I)}{(\overline{A}-a_1I)}(A^{*}_p-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*}_p)\}(B^{\#}_{p,p}-b_1I)^{-1}\{\frac{(\overline{B}-b_1I)}{(\overline{A}-a_1I)}(A^{*}_p-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*}_p)\}\notag\\ &=\ (\overline{B}-b_1 I)+\frac{b_1(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A)(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i});\ \mbox{ with }\sum_{i=1}^p m_i =1. \end{align} \qed\end{example} \subsection{Hashin-Shtrikman Constructions}\label{hsl} In this section, sequences are indexed by $n$ (not by $\epsilon$) and $n\rightarrow\infty$. Before we move into finding bounds on $B^{\#}$, let us mention another important class of microstructures which is known as Hashin-Shritkman microstructures. 
In the beginning of the theory of homogenization it played a very crucial role in providing the bounds on the two-phase isotropic medium, even before the development of $H$-convergence and so on. We follow \cite[chapter 25]{T} to define the Hashin-Shtrikman microstructures. \begin{definition}\label{lb14} Let $\omega \subset \mathbb{R}^N$ be a bounded open set with Lipschitz boundary. Let $A_{\omega}(x)=[a^{\omega}_{kl}(x)]_{1\leq k,l\leq N} \in \mathcal{M}(a_1,a_2;\ \omega)$ be such that, after extending $A_{\omega}$ by $A_{\omega}(y) = M$ for $y \in \mathbb{R}^N\smallsetminus \omega$ where $M \in L_{+}(\mathbb{R}^N ; \mathbb{R}^N)$ (i.e. $M = [m_{kl}]_{1\leq k,l \leq N}$ is a constant positive definite $N\times N$ matrix), for each $\lambda \in \mathbb{R}^N$ there exists a $w_{\lambda}\in H^{1}_{loc}(\mathbb{R}^N)$ satisfying \begin{equation}\label{hsw} - div (A_{\omega}(y)\nabla w_{\lambda}(y)) = 0 \quad\mbox{in }\mathbb{R}^N,\quad w_{\lambda}(y) = \lambda\cdot y \quad\mbox{in }\mathbb{R}^N \smallsetminus \omega. \end{equation} Then $A_{\omega}$ is said to be \textit{equivalent} to $M$. \end{definition} Then one uses a sequence of Vitali coverings of $\Omega$ by reduced copies of $\omega,$ \begin{equation}\label{hso} meas\big(\Omega \smallsetminus \underset{p\in K}{\cup}(\epsilon_{p,n}\omega + y^{p,n})\big) = 0, \mbox{ with } \kappa_n = \underset{p\in K}{sup}\hspace{2pt} \epsilon_{p,n}\rightarrow 0\end{equation} for a finite or countable $K$. These define the microstructures in $A^{n}$. One defines, for almost every $x\in \Omega$, \begin{equation}\label{tl} A^{n}_{\omega}(x) = A_{\omega}(\frac{x - y^{p,n}}{\epsilon_{p,n}}) \mbox{ in } \epsilon_{p,n}\omega + y^{p,n},\quad p\in K\end{equation} which makes sense since for each $n$ the sets $\epsilon_{p,n}\omega + y^{p,n},\ p\in K$ are disjoint.
The above construction \eqref{tl} represents the so-called Hashin-Shtrikman microstructures.\\ \\ The $H$-limit of the entire sequence $A^{n}_{\omega}(x)$ exist and in particular we have (see \cite{T}) \begin{equation*}A^{n}_{\omega} \xrightarrow{H} M .\end{equation*} It can be seen as follows: One defines $v^{n} \in H^1({|\!\!\!O}mega)$ by \begin{equation*} v^{n}(x) = \epsilon_{p,n}w_{\lambda}(\frac{x-y^{p,n}}{\epsilon_{p,n}})+ \lambda\cdot y^{p,n},\quad\mbox{ in }\epsilon_{p,n}\omega + y^{p,n} \end{equation*} which satisfies \begin{equation}\begin{aligned}\label{hsc} & v^{n}(x) \rightharpoonup \lambda\cdot x \mbox{ weakly in }H^1({|\!\!\!O}mega;\mathbb{R}^N)\\ \mbox{and}\ \ & A^{n}_{\omega}\nabla v^{n}\rightharpoonup M\lambda \mbox{ weakly in } L^2({|\!\!\!O}mega;\mathbb{R}^N). \end{aligned}\end{equation} We have the following integral representation of the homogenized matrix $M$ \begin{equation*} Me_k\cdot e_l = \frac{1}{|\omega|}\int_{\omega} A_{\omega}(y)\nabla w_{e_k}(y)\cdot e_l\ dy = \frac{1}{|\omega|}\int_{\omega} A_{\omega}(y)\nabla w_{e_k}\cdot\nabla w_{e_l}\ dy\end{equation*} where $w_{e_k}, w_{e_l}$ are the solution of \eqref{hsw} for $\lambda= e_k$ and $\lambda=e_l$ respectively. \begin{example}[Spherical Inclusions in two-phase medium]\label{tk} If $\omega=B(0,1)$ a ball of radius one, and \begin{equation}\begin{aligned}\label{sia} A_{\omega}(y)= a_B(r)I &=\ a_1 I \quad\mbox{if } |y| \leq R\\ &=\ a_2 I \quad\mbox{if } R < |y| \leq 1 \end{aligned} \end{equation} with the volume proportion $\theta_A = R^N$ for $a_1I.$ In the literature, $\{a_1,a_2\}$ are called core-coating values respectively.\\ \\ Then $A_{\omega}$ is equivalent to $m I$ or $A^{n}_{\omega} \xrightarrow{H} m I$, where $A^{n}_{\omega}$ is defined as \eqref{tl} and $m$ satisfies \cite{HS} \begin{equation}\label{sim} \frac{m - a_2}{m + (N-1)a_2} = \theta_A \frac{a_1 - a_2}{a_1 + (N-1)a_2}. 
\end{equation} \qed\end{example} \begin{example}[Elliptical Inclusions in two-phase medium]\label{tj} For $m_1,..,m_N\in \mathbb{R}$, and $\rho + m_j >0 $ for $j=1,..,N$, the family of confocal ellipsoids $S_\rho$ of equation \begin{equation*} \sum_{j=1}^N \frac{y^2_j}{\rho + m_j} = 1,\end{equation*} defines implicitly a real function $\rho$, outside a possibly degenerate ellipsoid in a subspace of dimension $< N$.\\ Now if we consider $\omega=E_{\rho_2+m_1,..,\rho_2+m_N}= \{ y\ | \ \sum_{j=1}^N \frac{y^2_j}{\rho_2 + m_j} \leq 1\},$ with $\rho_2 + \underset{j}{min}\ m_j >0 $ and \begin{align*} A_\omega(y)= a_E(\rho)I &=\ a_1 I \quad\mbox{if } \rho \leq \rho_1\\ &=\ a_2 I \quad\mbox{if } \rho_1 < \rho \leq \rho_2 \end{align*} then $A_\omega$ is equivalent to a constant diagonal matrix $\Gamma= [\gamma_{jj}]_{1\leq j\leq N}$ satisfying \begin{equation*}\sum_{j=1}^N \frac{1}{a_2 - \gamma_{jj}} =\ \frac{(1-\theta_A)a_1 + (N+\theta_A-1)a_2}{\theta_A a_2(a_2- a_1)}\ \mbox{ where }\theta_A = \underset{j}{\varPi}\ \sqrt{\frac{\rho_1 + m_j}{\rho_2 + m_j}}.\end{equation*} \qed \end{example} Let us assume that $B^n_{\omega}$ is consistent with $A^n_{\omega}$ in the sense that both are defined w.r.t. the same Vitali covering of $\Omega$ (cf. \eqref{hso}). 
We consider a $B_{\omega}=[b^{\omega}_{kl}]\in \mathcal{M}(b_1,b_2;\ \omega)$ and define a sequence $B^n_{\omega} \in \mathcal{M}(b_1,b_2;\ \Omega)$ by \begin{equation*} B^{n}_{\omega}(x) = B_{\omega}(\frac{x - y^{p,n}}{\epsilon_{p,n}}) \quad\mbox{ in } \epsilon_{p,n}\omega + y^{p,n}.\end{equation*} Then we want to find the corresponding $B^{\#}$ for these Hashin-Shtrikman microstructures.\\ Since we have the corrector results \eqref{hsc}, by using the convergence result \eqref{dc2} we define $B^{\#}$ as follows : For each $\lambda\in \mathbb{R}^N$ \begin{equation*} B^n_{\omega}\nabla v^{n}\cdot \nabla v^{n} \rightharpoonup B^{\#}\lambda\cdot\lambda \quad\mbox{in }\mathcal{D}^{\prime}(\Omega). \end{equation*} Moreover, by taking any $\varphi\in \mathcal{D}(\Omega)$ \begin{align*} \frac{1}{|\Omega|}\int_{\Omega} B^{n}_{\omega}\nabla v^{n}\cdot \nabla v^{n}&\varphi(x)dx\\ =\ &\frac{1}{|\Omega|}\sum_{p\in K} \int_{\epsilon_{p,n}\omega +y^{p,n}} B_{\omega}(\frac{x - y^{p,n}}{\epsilon_{p,n}})\nabla w_{e_k}(\frac{x-y^{p,n}}{\epsilon_{p,n}})\cdot\nabla w_{e_l}(\frac{x-y^{p,n}}{\epsilon_{p,n}})\ \varphi(x)dx\\ =\ &\frac{1}{|\Omega|}\sum_{p\in K} \epsilon_{p,n}^N \int_{\omega}B_{\omega}(y)\nabla w_{e_k}(y)\cdot\nabla w_{e_l}(y)\ \varphi(\epsilon_{p,n}y+ y^{p,n}) dy\\ \rightarrow\ & \left(\frac{1}{|\omega|}\int_{\omega}B_{\omega}(y)\nabla w_{e_k}(y)\cdot\nabla w_{e_l}(y) dy\right)\cdot\left(\frac{1}{|\Omega|}\int_{\Omega}\varphi(x) dx\right). \end{align*} Thus we have the following integral representation of $B^{\#}$ : \begin{equation}\label{hse} B^{\#}e_k\cdot e_l = \frac{1}{|\omega|}\int_{\omega} B_{\omega}(y)\nabla w_{e_k}(y)\cdot \nabla w_{e_l}(y)\ dy . \end{equation} \begin{remark} Note that $B^{\#}$ is a constant matrix just as $A^{*}$ is. However, it is not clear whether $B^{\#}$ can be obtained using the idea of equivalence in the sense of Definition \ref{lb14}.
\qed\end{remark} Now we consider the previous Example \ref{tk} of spherical inclusion where we know $w_{e_l}$ explicitly and will find out $B^{\#}$ explicitly in various cases. We seek the solution of \eqref{hsw} for the spherical inclusion i.e.\ when $A_\omega(y)=a_{B(0,1)}(y)I$ is given by \eqref{sia}, in the form of \begin{equation}\label{sis} w_{e_l}(y) =\ y_lf(r),\ y\in B(0,1);\end{equation} where $f(r)$ is of the form \begin{equation}\begin{aligned}\label{sif} f(r) &=\ \widetilde{b_1} \ \mbox{ if }r< R,\\ &=\ \widetilde{b_2} + \frac{\widetilde{c}}{r^N} \ \mbox{ if }R < r < 1\\ &=\ 1 \ \mbox{ if }1 < r. \end{aligned}\end{equation} In order to keep the solution $w_{e_l}(y)$ and the flux $a(r)(f(r)+rf^{\prime}(r))$ continuous across the inner boundary $(r=R)$ and the outer boundary $(r=1)$ we have the following conditions to satisfy \begin{equation}\begin{aligned}\label{ad5} \widetilde{b_1} =\ \widetilde{b_2} + \frac{\widetilde{c}}{R^N}, &\ \mbox{ and }\ a_1\widetilde{b_1} =\ a_2(\widetilde{b_2} + \frac{(1-N)\widetilde{c}}{R^N} )\\ \widetilde{b_2} + \widetilde{c} =\ 1, &\ \mbox{ and }\ a_2(\widetilde{b_2} + (1-N)\widetilde{c}) =\ m. \\ \end{aligned}\end{equation} Then solving $(\widetilde{b_1},\widetilde{b_2},\widetilde{c})$ in terms of $(a_1, a_2, \theta_A)$ from the first three equations of \eqref{ad5}, we have \begin{equation}\label{sic} \widetilde{b_1} = \ \frac{Na_2}{(1-\theta_A)a_1 + (N+\theta_A -1)a_2},\quad \widetilde{b_2} = \ \frac{(1-\widetilde{b_1}\theta_A)}{(1-\theta_A)} \quad\mbox{and}\quad \widetilde{c} = \frac{(\widetilde{b_1} -1)\theta_A}{(1-\theta_A)} \end{equation} and finally putting it into the fourth equation of \eqref{ad5}, `$m$' can be written as in \eqref{sim}.\\ \\ Based on this, next we derive the expression of $B^{\#}$ for various cases. \paragraph{(1): $B_{B(0,1)}(x) = b I$ for some constant $b >0$ : } In this case $B^{n}_{B(0,1)}$ becomes independent of $n$ and equal to $bI$.
$A_\omega= a_{B(0,1)}I$ defined as in \eqref{sia}, i.e. considering $a_1$ as a core and $a_2$ as a coating. Then from \eqref{hse} it follows that $B^{\#} = b^{\#}I$ with \begin{equation*} b^{\#} = b\int_{B(0,1)} \nabla w_{e_l}(x)\cdot \nabla w_{e_l}(x) dx. \end{equation*} Now as we see, \begin{align*} m &=\ \int_{B(0,1)} a_{B(0,1)}(x)\nabla w_{e_l}(x)\cdot\nabla w_{e_l}(x) dx \\ &=\ a_2 \int_{B(0,1)} \nabla w_{e_l}(x)\cdot\nabla w_{e_l}(x) dx + (a_1 -a_2) \int_{B(0,R)} \nabla w_{e_l}(x)\cdot\nabla w_{e_l}(x) dx \end{align*} or, by using \eqref{sif} one gets \begin{equation}\label{siw} \int_{B(0,1)} \nabla w_{e_l}(x)\cdot\nabla w_{e_l}(x) dx =\ \frac{m - (a_1 -a_2){\widetilde{b_1}}^2 \theta_A}{a_2}. \end{equation} Plugging this, in the expression of $b^{\#}$ and using \eqref{sim} and \eqref{sic}, finally one gets \begin{equation}\label{hsb} b^{\#} =\ b\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}\ ]. \end{equation} Similarly, changing the role of $a_1$ and $a_2$ (make $a_2$ as core and coated with $a_1$) while keeping the volume fraction $\theta_A$ fixed for $a_1$, and say $m_{*}$ be the new homogenized coefficient for $A^{n}$ : \begin{equation}\label{FL9} \frac{m_{*} - a_1}{m_{*} + (N-1)a_1} = (1-\theta_A) \frac{a_2 - a_1}{a_2 + (N-1)a_1}.\end{equation} In this case with core $a_2 I$ and matrix $a_1 I$, the expression for $B^{\#}= b^{\#}I$ becomes \begin{equation}\label{hsd} b^{\#} =\ b\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{(\theta_Aa_2 + (N-\theta_A)a_1)^2}\ ]. \end{equation} It is well known that both of these formula \eqref{sim} and \eqref{FL9} provide the saturation / optimality of the upper bound and lower bound respectively for the two-phase $(a_1, a_2, \theta_A)$ homogenization. In a same spirit we will show the formula \eqref{hsb} and \eqref{hsd} provide the saturation/optimality of the lower bound and upper bound for $B^{\#}$ respectively when $B^{\epsilon}=bI$ independent of $\epsilon$. 
\paragraph{(2): $B_{B(0,1)}$ is governed with two-phase medium :} Let us consider, for any measurable set $\omega_B\subset B(0,1)$ \begin{equation}\begin{aligned}\label{sib} B_{B(0,1)}(y)= b_B(y)I &=\ b_1 I \quad\mbox{if } y\in\omega_B \\ &=\ b_2 I \quad\mbox{if }y\in B(0,1)\smallsetminus \omega_B \end{aligned} \end{equation} with the volume proportion $\theta_B = |\omega_B|$ for $b_1I.$\\ \\ Next we consider few straight forward cases only with $\omega_A =B(0,R)$ with $0<R<1$ (spherical inclusion, cf.\eqref{sia}), which are indeed very useful for showing the saturation / optimality of the bounds $(A^{*},B^{\#})$ announced in Section \ref{Sd9}. \paragraph{(2a): Core $a_1 I$ with coating $a_2 I$ for $A_{B(0,1)}$ and core $b_1 I$ with coating $b_2 I$ for $B_{B(0,1)}$, with $\omega_B\subseteq \omega_A$ :} Using the integral representation \eqref{hse} for $B^{\#} = b^{\#}I$ we get \begin{align*} b^{\#} &=\ \int_{B(0,1)} \{b_1\chi_{\omega_B}(x) + b_2(1-\chi_{\omega_B}(x))\}\ \nabla w_{e_l}(x)\cdot \nabla w_{e_l}(x) dx \\ &=\ b_2\int_{B(0,1)} \nabla w_{e_l}(x)\cdot \nabla w_{e_l}(x) dx + (b_1-b_2)\int_{\omega_B}\nabla w_{e_l}(x)\cdot\nabla w_{e_l}(x) dx\\ &=\ \frac{b_2}{a_2}\{m- (a_1-a_2){\widetilde{b_1}}^2\theta_A\} + (b_1 - b_2) {\widetilde{b_1}}^2\theta_B,\ \ \mbox{ as $\omega_B\subseteq\omega_A$; with using }\eqref{sif} \mbox{ and } \eqref{siw}. \end{align*} Finally by using the expression of $m$ (cf. \eqref{sim}) and $\widetilde{b_1}$ (cf. \eqref{sic}) we get, \begin{equation}\label{FG6} b^{\#} =\ b_2\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}\ ] - \frac{(b_2-b_1)(Na_2)^2\theta_B}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}. \end{equation} First we notice that for $N=1$ the above formula becomes identical with the lower bound $l^{\#}_2$ for $\theta_B\leq\theta_A$ case in \eqref{FL20}. 
It goes same for higher dimension also.\\ Similar formula can be derived by changing the core and coating for $A_{B(0,1)}$ or $B_{B(0,1)}$ or both, while keeping the volume fractions $\theta_A,\theta_B$ same as before.\\ \\ \textbf{(2b): Core $a_2 I$ with coating $a_1 I$ for $A_{B(0,1)}$ and core $b_2 I$ with coating $b_1 I$ for $B_{B(0,1)}$, with $\omega_A \subseteq \omega_B$ :} Compared to the previous case $(2a)$, here we are changing the role of $a_1$ and $a_2$ while keeping the volume fraction $\theta_A$ fixed for $a_1$, also the role of $b_1$ ,$b_2 $ by keeping the volume fraction $\theta_B$ fixed for $b_1$, with $\omega_A \subseteq\omega_B$ (or, $\omega_B^c \subseteq \omega_A^c$). Then doing the above mentioned changes in the above formulation \eqref{FG6} we simply get, \begin{equation}\label{ED1} b^{\#} =\ b_1\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{(\theta_A a_2 + (N-\theta_A)a_1)^2}\ ] - \frac{(b_1-b_2)(Na_1)^2(1-\theta_B)}{(\theta_A a_2 + (N-\theta_A)a_1)^2}. \end{equation} It can be seen that for $N=1$ the above formula becomes identical with the lower bound $l^1_{\#}$ for $\theta_A\leq\theta_B$ case in \eqref{FL20}. It goes same for higher dimension too. \begin{remark} As an important comparison between two cases $(2a)$ and $(2b)$, we see both are giving saturation / optimality of the lower bound of $B^{\#}$ whenever $\theta_B \leq \theta_A$ and $\theta_A \leq \theta_B$ respectively by switching the roles of $a_1$, $a_2$ and $b_1$, $b_2$ both. \qed\end{remark} \noindent\textbf{(2c): Core $a_2 I$ with coating $a_1 I$ for $A_{B(0,1)}$ and core $b_1 I$ with coating $b_2 I$ for $B_{B(0,1)}$, with $\omega_A \subseteq \omega_B^c$ :} Compared to the previous case $(2b)$, here we are only changing the role of $b_1$ and $b_2$ while keeping the volume fraction $\theta_B$ fixed for $b_1$, with $\omega_A \subseteq \omega_B^c$. 
Then making this change in the above formulation \eqref{ED1} we simply get, \begin{equation}\label{FG7} b^{\#} =\ b_2\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{(\theta_A a_2 + (N-\theta_A)a_1)^2}\ ] - \frac{(b_2-b_1)(Na_1)^2\theta_B}{(\theta_A a_2 + (N-\theta_A)a_1)^2}. \end{equation} It provides the saturation / optimality of the upper bound of $B^{\#}$ for $\theta_B + \theta_A \leq 1$ case. For $N=1$ the above formula becomes identical with the upper bound $u^{\#}_1$ in \eqref{FL17}. \\ \\ \textbf{(2d): Core $a_1 I$ with coating $a_2 I$ for $A_{B(0,1)}$ and core $b_2 I$ with coating $b_1 I$ for $B_{B(0,1)}$, with $\omega_A^c \subseteq \omega_B$ :} Here we show one more variation. Compared to the previous case $(2c)$, here we are changing the role of $b_1$, $b_2$ and $a_1$, $a_2$ both under the condition $\omega_A^c \subseteq \omega_B$. Then making these changes in the above formulation \eqref{FG7} we get, \begin{equation}\label{bs19} b^{\#} =\ b_1\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{((1-\theta_A) a_1 + (N +\theta_A-1)a_2)^2}\ ] - \frac{(b_1-b_2)(Na_2)^2(1-\theta_B)}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}. \end{equation} It provides the saturation / optimality of the upper bound of $B^{\#}$ for $\theta_B + \theta_A \geq 1$ case. For $N=1$ the above formula becomes identical with the upper bound $u^{\#}_2$ in \eqref{FL17}. \begin{remark} Note that, in the above two cases $(2c)$ and $(2d)$, the core and coating combination has switched as a pair for both $A_{B(0,1)}$ and $B_{B(0,1)}$, in order to give the saturation / optimality of the upper bound of $B^{\#}$. \qed\end{remark} \begin{remark} One can also consider the above core and coating combinations with different inclusion conditions but for those construction the corresponding $B^{\#}$ will not stand as an optimal one. It remains inside in between optimal lower bound and optimal upper bound. 
\qed\end{remark} \begin{remark}\label{ad14} We also notice that, for a given fixed laminate microstructures or Hashin-Shtrikman constructions of $A^{\epsilon}$ providing the saturation / optimality for $A^{*}$ bound there are plenty of microstructures for $B^{\epsilon}$ are available with satisfying the required inclusion conditions such that the resulting $(A^{*},B^{\#})$ is unique and provides the saturation / optimality of its bound (see Section \ref{qw4}). \qed\end{remark} \noindent Now we move into the final part to establish the optimal bounds on $(A^{*},B^{\#})$ announced in Section \ref{Sd9}. \section{Proof of Optimal Bounds :}\label{ts} \setcounter{equation}{0} Here we are going to establish the optimal trace bounds announced in Section \ref{ad18}. We shall begin with recalling few key components of our main tool namely the $H$-measure techniques and its application to compensated compactness theory. \subsection{$H$-measure}\label{hsn} The notion of $H$-measure has been introduced by Gerard and Tartar. It is a defect measure which quantifies the lack of compactness of weakly converging sequences in $L^2(\mathbb{R}^N)$ in the phase space. More specifically, it indicates where in the physical space, and at which frequency in the Fourier Space are the obstructions to strong convergence \cite{AM,PG2,T1,T2}. Let $v_\epsilon$ be a sequence of functions defined in $\mathbb{R}^N$ with values in $\mathbb{R}^P$. The components of the vector valued function $v_{\epsilon}$ are denoted by $({v_{\epsilon}}_i)_{1\leq i\leq P}$. We assume that $v_\epsilon $ converges weakly to $0$ in $L^2(\mathbb{R}^N)^P$. 
Then there exists a subsequence (still denoted by $\epsilon $) and a family of complex-valued Radon measures $(\mu_{ij}(x,\xi))_{1\leq i,j\leq P}$ on $\mathbb{R}^N \times \mathbb{S}^{N-1} $ such that, for test functions $ \varphi_1(x),\varphi_2(x)$ in $C_0(\mathbb{R}^N)$ (the space of continuous functions vanishing at infinity), and $\psi(\xi)$ in $C(\mathbb{S}^{N-1})$ (the space of continuous functions on the unit sphere $\mathbb{S}^{N-1}$ in $\mathbb{R}^N$), it satisfies \begin{equation}\label{dc6} \int_{\mathbb{R}^N} \int_{\mathbb{S}^{N-1}} \varphi_1(x)\overline{\varphi_2(x)}\psi(\frac{\xi}{|\xi|})(\mu_{ij}(dx,d\xi)) =\ \underset{\epsilon\rightarrow 0}{\textrm{lim}}\ \int_{\mathbb{R}^N} \widehat{F}(\varphi_1(x){v_{\epsilon}}_i)(\xi)\overline{\widehat{F}(\varphi_2(x){v_{\epsilon}}_j)(\xi)}\psi(\frac{\xi}{|\xi|})\ d\xi \end{equation} where $\widehat{F}$ is the usual Fourier transform operator defined in $L^2(\mathbb{R}^N)$ by \begin{equation*}(\widehat{F}\varphi)(\xi) = \int_{\mathbb{R}^N} \varphi(x) e^{-2i\pi(x\cdot \xi)}\ dx.\end{equation*} \noindent The matrix of measures $\mu = (\mu_{ij})$ is called the $H$-measure of the subsequence $v_{\epsilon}$. It takes its values in the set of Hermitian and non-negative matrices \begin{equation*}\mu_{ij} = \overline{\mu}_{ji},\quad \sum_{i,j =1}^P \lambda_i \overline{\lambda_j}\hspace{2pt}\mu_{ij}\geq 0 \ \ \ \forall \lambda \in \mathbb{C}^P.\end{equation*} In \eqref{dc6} the role of the test functions $\varphi_1$ and $\varphi_2$ is to localize in space and that of $\psi$ is to localize in the directions of oscillations. When we take $\psi =1$, we recover the usual defect measure in the physical space, i.e. \begin{center} $\int_{\mathbb{S}^{N-1}}d\mu_{ij}(x,d\xi)$ is just the weak* limit measure of the sequence $v_{\epsilon}^i\overline{v_{\epsilon}^j}$, \end{center} which is bounded in $L^1(\mathbb{R}^N)$. Thus, the $H$-measure gives a more precise representation of the lack of compactness, by taking into account oscillation directions.
An important property of the $H$-measure is its so-called localization principle which is indeed a generalization of the compensated compactness theory of Murat and Tartar. \begin{theorem}[Localization principle]\label{dc9} Let $v_{\epsilon}$ be a sequence which converges weakly to $0$ in $L^2(\mathbb{R}^N)^P$ and defines an $H$-measure $\mu $. If $v_{\epsilon}$ is such that for $ 1\leq m\leq m_0,$ \begin{equation}\label{dc7} \sum_{j=1}^{P} \sum_{k=1}^{N} \frac{\partial}{\partial x_k} ( A_{jk}^{m}(x){v_{\epsilon}}_j) \longrightarrow 0 \quad\mbox{in } H^{-1}_{\textrm{loc}} (\Omega)\mbox{ strongly } \end{equation} where the coefficients $ A_{jk}^m $ are continuous in an open set $\Omega $ of $\mathbb{R}^N$ then the $H$-measure satisfies \begin{equation}\label{dc8} \sum_{j=1}^{P} \sum_{k=1}^{N} A_{jk}^{m}(x) \xi_k \mu_{ji}=\ 0 \quad\mbox{in } \Omega\times \mathbb{S}^{N-1} \ \ \forall i,m \quad\mbox{satisfying } 1\leq i\leq P ,\ \ 1\leq m \leq m_0. \end{equation} \qed\end{theorem} \noindent As a consequence of the localization principle we state another important result which is associated with quadratic forms. Let us consider a standard pseudo-differential operator $q$, defined through its symbol $(q_{ij}(x,\xi))_{1\leq i,j\leq P} \in C^{\infty}(\mathbb{R}^N \times \mathbb{R}^N)$ by \begin{equation*} (qv)_i(x)= \sum_{j=1}^P \widehat{F}^{-1}\{q_{ij}(x,.)\widehat{F}v_j(.)\}(x) \end{equation*} for any smooth and compactly supported function $v$. Here in particular we are concerned with the so-called poly-homogeneous pseudo-differential operator of order $0$, i.e.\ whose principal symbol is homogeneous of degree $0$ in $\xi$ and with compact support in $x$. We borrow the following remark from \cite{AM}. \begin{remark} Being homogeneous of degree $0$, the symbol $(q_{ij}(x,\xi))_{1\leq i,j\leq P}$ is not smooth at $\xi=0$.
This is not a problem since any regularization by a smooth cut-off at the origin gives rise to the same pseudo- differential operator up to the addition of a smoothing operator. \qed\end{remark} \noindent Then we have the following theorem. \begin{theorem}[Compensated Compactness]\label{dc10} (a) Let $v_{\epsilon}$ be a sequence which converges weakly to $0$ in $L^2(\mathbb{R}^N)^P$. Then there exist a subsequence and a $H$-measure $\mu$ such that, for any poly-homogeneous pseudo-differential operator $q$ of order $0$ with principal symbol $(q_{ij}(x,\xi))_{1\leq i,j\leq P} $, we have \begin{equation*} \underset{\epsilon \rightarrow 0}{\textrm{lim}} \int_{\mathbb{R}^N}q(v_{\epsilon})\cdot\overline{v_{\epsilon}} dx = \int_{\mathbb{R}^N}\int_{\mathbb{S}^{N-1}}\sum_{i,j=1}^P q_{ij}(x,\xi)\mu_{ij}(dx,d\xi). \end{equation*} (b) Suppose the sequence $v_{\epsilon}$ also satisfies the differential constraints given by \eqref{dc7}. So, the corresponding $H$-measure $\mu$ is satisfying \eqref{dc8}.\\ Introduce the wave cone \begin{equation*} \Lambda = \{\ (x,\xi, V)\in \mathbb{R}^N \times \mathbb{S}^{N-1} \times \mathbb{C}^P\mbox{ s.t. }\sum_{j=1}^{P} \sum_{k=1}^{N} A_{jk}^{m}(x) \xi_k V_{j}=0, 1\leq m\leq m_0\ \}. \end{equation*} Now \begin{equation*} \mbox{if }\ \sum_{i,j=1}^P q_{ij}(x,\xi)V_i\overline{V_j} \geq 0 \quad\mbox{for any } (x,\xi, V)\in \Lambda, \mbox{ then } \sum_{i,j=1}^P q_{ij}\mu _{ij}\geq 0. \end{equation*} \qed\end{theorem} \noindent As a matter of consequence of the previous theorem we state the result of compensated compactness with variable coefficients which will be the main technical ingredient of this section. Let $v_{\epsilon}$ be a sequence converging weakly to $v$ in $L^2(\mathbb{R}^N)^P$. 
Assume that, for $1\leq m \leq m_0,$ $v_{\epsilon}$ satisfies \begin{equation*} \sum_{j=1}^P\sum_{k=1}^N\frac{\partial}{\partial x_k}(A_{jk}^{m}(x){v_{\epsilon}}_j)\longrightarrow \sum_{j=1}^P\sum_{k=1}^N\frac{\partial}{\partial x_k}(A_{jk}^{m}(x)v_j) \mbox{ in }H^{-1}_{loc}({|\!\!\!O}mega)\mbox{ strongly. } \end{equation*} where the coefficients $A_{jk}^m$ are continuous in an open set ${|\!\!\!O}mega$ of $\mathbb{R}^N$. Let $\Lambda$ be the Characteristic set defined by \begin{equation*} \Lambda =\{\ (x,\xi,V)\in \mathbb{R}^N \times \mathbb{S}^{N-1} \times \mathbb{C}^P\mbox{ such that }\sum_{j=1}^P\sum_{k=1}^N A_{jk}^{m}\xi_{k}V_j=0; 1\leq m \leq m_0 \ \}. \end{equation*} Let $q$ be a poly-homogeneous pseudo-differential operator of order $0$ with hermitian principal symbol $ (q_{ij}(x,\xi))_{1\leq i,j \leq p}$ such that \begin{equation*} \sum_{i,j=1}^{P} (q_{ij}(x,\xi))\overline{V_i}V_j \geq 0\mbox{ for any }(x,\xi,V)\in \Lambda. \end{equation*} Then for any non-negative smooth $\varphi$ with compact support in ${|\!\!\!O}mega$ \begin{equation*} \underset{\epsilon \rightarrow 0}{\textrm{lim\hspace{1.5pt}inf}}\ \int_{\mathbb{R}^N} \varphi(x)q(v_{\epsilon})\cdot\overline{v_{\epsilon}}\ dx \geq \int_{\mathbb{R}^N} \varphi(x)q(v)\cdot\overline{v}\ dx. \end{equation*} \qed \subsection{Bounds : $B^{\epsilon}$ is independent of $\epsilon$ }\label{os9} Let us begin with the case when the sequence $B^{\epsilon}$ is independent of $\epsilon$. We assume $B^{\epsilon}(x) = b(x)I\in \mathcal{M}(b_1,b_2;{|\!\!\!O}mega)$. To start with we recall from \eqref{dc1}, \begin{equation}\label{FG3} b(x)\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} \rightharpoonup B^{\#}(x)\nabla u\cdot\nabla u \quad\mbox{ in }\mathcal{D}^{\prime}({|\!\!\!O}mega).\end{equation} Let us remark that $B^{\#} = b(x)I^{\#}(x)$. 
We have the constraints on $\nabla u^\epsilon $ : \begin{equation}\label{abc2} \nabla u^\epsilon \rightharpoonup\nabla u \mbox{ weakly in } L^2({|\!\!\!O}mega),\ \ -div(A^\epsilon\nabla u^\epsilon) \mbox{ is }H^{-1}({|\!\!\!O}mega)\mbox{ convergent,} \end{equation} and by homogenization theory, we have the following limit : \begin{equation}\label{ot15}A^{\epsilon}\nabla u^{\epsilon} \rightharpoonup A^{*}\nabla u \mbox{ in }L^2({|\!\!\!O}mega)\mbox{ weak.}\end{equation} We derive here the optimal lower bound \eqref{tw} and optimal upper bound \eqref{tq} on $(A^{*},B^{\#})$ respectively. \paragraph{Lower Bound :} Let us introduce the constant vector $\eta\in\mathbb{R}^N$ and start with the so-called translated inequality for $A^\epsilon\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ : \begin{equation*} (a_2I-A^{\epsilon})(\nabla u^{\epsilon} + \eta)\cdot(\nabla u^{\epsilon} + \eta)\ \geq\ 0 \quad\mbox{a.e. in }{|\!\!\!O}mega.\end{equation*} Then multiplying by $b(x)$ on both sides and expanding it, we get \begin{equation*} a_2\hspace{2pt} b\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}-bA^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} + 2b(a_2\nabla u^\epsilon\cdot\eta -A^{\epsilon}\nabla u^{\epsilon}\cdot\eta) + b(a_2I-A^{\epsilon})\eta\cdot\eta\ \geq\ 0. \end{equation*} By passing to the limit as $\epsilon\rightarrow 0$ in the above inequality, we simply get \begin{equation}\label{ot13} a_2B^{\#}\nabla u\cdot\nabla u - bA^{*}\nabla u\cdot\nabla u + 2b(a_2I-A^{*})\nabla u\cdot\eta + b(a_2I-\overline{A})\eta\cdot\eta\ \geq\ 0. \end{equation} To get lower bound on $B^{\#}$, we minimize w.r.t. $\nabla u$. It is recalled that any fixed vector in $\mathbb{R}^N$ can be realized as $\nabla u$ with $u^\epsilon$ satisfying the usual constraints \eqref{ad13}. Hence we have \begin{equation}\label{ot12} b\ (a_2I-A^{*})(a_2B^{\#}-bA^{*})^{-1}(a_2I-A^{*})\eta\cdot\eta \leq\ \theta_A(a_2-a_1)\eta\cdot\eta. \end{equation} Note that, $(a_2B^{\#}-bA^{*})$ is positive definite matrix (cf. 
\eqref{Sd3}). Now, by taking trace on both sides, \eqref{tw} follows. \qed \paragraph{Upper Bound :} Here we consider another translated inequality as \begin{equation}\label{bs7}(A^{\epsilon} - a_1I)(\nabla u^{\epsilon} + \eta)\cdot(\nabla u^{\epsilon} + \eta)\ \geq\ 0 \quad\mbox{a.e. in }{|\!\!\!O}mega,\ \mbox{ with constant vector }\eta \in \mathbb{R}^N. \end{equation} Then by multiplying $b(x)$ on both sides and expanding it, we get \begin{equation*}bA^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} - a_1 b\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} + 2b(A^{\epsilon}-a_1I)\nabla u^{\epsilon}\cdot\eta + b(A^{\epsilon}-a_1 I)\eta\cdot\eta\ \geq\ 0. \end{equation*} Now by passing to the limit in the above inequality, we simply get \begin{equation}\label{ot17} bA^{*}\nabla u\cdot\nabla u - a_1 B^{\#}\nabla u\cdot\nabla u + 2b(A^{*}-a_1I)\nabla u\cdot\eta + b(\overline{A}-a_1 I)\eta\cdot\eta\ \geq\ 0. \end{equation} Finally minimizing with respect to $\nabla u$, we get \begin{equation}\label{bs2} b\ (A^{*} - a_1I)(bA^{*}-a_1 B^{\#})^{-1}(A^{*} - a_1I)\eta\cdot\eta \leq\ (1-\theta_A)(a_2-a_1)\eta\cdot\eta. \end{equation} Note that, $(bA^{*}-a_1B^{\#})$ is positive definite matrix (cf. \eqref{Sd3}). Now, by taking trace on both sides, \eqref{tq} follows. \qed \begin{remark} Let us note the reverse : for deriving lower bound (upper bound) on $(A^{*},B^{\#})$, we used the translated inequality usually meant for deriving upper bound (lower bound) for $A^{*}$. \qed\end{remark} \begin{remark}[Pointwise bounds on energy density $B^{\#}\nabla u\cdot\nabla u$]\label{hsg} Having found the bounds \eqref{ot12},\eqref{bs2} on the matrix $B^{\#}$ itself, we now find the bounds on energy density $B^{\#}\nabla u\cdot\nabla u$, which is useful in solving optimal design problems, as shown in Section \ref{qw5}. 
We consider the inequalities obtained in \eqref{ot13} and \eqref{ot17} and minimize them with respect to $\eta\in\mathbb{R}^N$ to obtain the lower bound and upper bound respectively as follows : \begin{align} (i):\mbox{(Lower bound)}\ \ \ B^{\#}\nabla u\cdot\nabla u \geq \frac{b}{a_2}\{A^{*} + (a_2I-\overline{A})^{-1}(a_2I-A^{*})^2\}\nabla u\cdot \nabla u. \label{bs1}\\ (ii):\mbox{(Upper bound)}\ \ \ B^{\#}\nabla u\cdot\nabla u \leq \frac{b}{a_1}\{A^{*} + (\overline{A}-a_1I)^{-1}(A^{*}-a_1I)^2\}\nabla u\cdot \nabla u. \label{Sd11} \end{align} The corresponding $A^{*}$ and $B^{\#}$ (constructed in Section \ref{ub12}) achieve equality in the above bounds. This shows the saturation/optimality property of these special structures. \qed\end{remark} \noindent \textbf{Saturation/Optimality of the Bounds :} A simple calculation shows that, the simple laminates that we have defined in \eqref{bs11} provides the equality of both bounds \eqref{ot12},\eqref{bs2} (and hence equality in \eqref{tw},\eqref{tq}) for all lamination directions $e_1,e_2,..,e_N$. They are the points of intersections the lower trace bound \eqref{tw} and the upper trace bound \eqref{tq} as in the case of $\mathcal{G}_{\theta_A}$ (see Figure 4).\\ Similarly, the equality of the lower bound \eqref{ot12} of $(A^{*},B^{\#})$ is achieved by the composite based on Hashin-Shtrikman construction with core $a_1I$ and coating $a_2I$ given in \eqref{hsb} for $B^{\#}$ and \eqref{sim} for $A^{*}$. Similarly, the equality in the upper bound \eqref{bs2} of $(A^{*},B^{\#})$ is achieved by the Hashin-Shtrikman construction with core $a_2I$ and coating $a_1I$ described in \eqref{hsd} for $B^{\#}$ and \eqref{FL9} for $A^{*}$. Notice that, the Hashin-Shtrikman microstructure which gave the upper bound (or, lower bound ) equality for $A^{*}$ gives the lower bound (or, upper bound) equality for $(A^{*},B^{\#})$. 
\qed \noindent Having mentioned optimal properties of simple laminates and Hashin-Shtrikman constructions, we introduce and discuss the saturation / optimality of rank $N$ laminates. \paragraph{Construction of Sequential Laminates :} Here we construct the $p$-rank or sequential laminate formula for $B^{\#}$ (denoted by $B^{\#}_p$) using $H$-measure techniques. We do it in three steps. The crucial step is to obtain a relation between $B^{\#}$, $A^{*}$ and the $H$-measure term (see \eqref{ad3} below). Subsequently we exploit it to define the laminate $B^{\#}_p$. We now provide details. \paragraph{Step 1 :} We start with recalling the optimal lower bound on $A^{*}$. Let a sequence of characteristic functions $\chi_{{\omega}_{A^{\epsilon}}}(x) \rightharpoonup \theta_A $ in $L^{\infty}(\Omega)$ weak*, with \begin{equation*}A^{\epsilon}=\ a^{\epsilon}I=\ (a_1\chi_{{\omega}_{A^{\epsilon}}}(x)+a_2(1-\chi_{{\omega}_{A^{\epsilon}}}(x)))I \mbox{ $H$-converging to } A^{*}\mbox{ in }\Omega,\quad (a_1 < a_2).\end{equation*} Then we know that the optimal lower bound on $A^{*}$ is as follows \cite{T} : \begin{equation}\label{lb3} tr\ (A^{*}-a_1 I)^{-1} \leq \frac{N}{(1-\theta_A)(a_2 -a_1)} + \frac{\theta_A}{(1-\theta_A)a_1}. \end{equation} The $N$-sequential laminate \eqref{OP2} (with $p=N$) with matrix $a_1 I$ and core $a_2 I$ achieves the lower bound \eqref{lb3} with equality. Conversely, any point achieving equality in the lower bound \eqref{lb3} can be achieved through $N$-sequential laminates \eqref{OP2}, see \cite{A}.\\ \\ Let us recall the translated inequality \eqref{bs7}, which implies \begin{equation}\label{lb1} A^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} + (A^{\epsilon}-a_1 I)\eta \cdot \eta - 2a_1 \nabla u^{\epsilon}\cdot \eta\ \geq\ a_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}- 2A^{\epsilon}\nabla u^{\epsilon}\cdot \eta. \end{equation} One derives the optimal lower bound \eqref{lb3} by passing to the limit in this inequality.
\\ \\ Using \eqref{abc2} and \eqref{ot15}, passing to the limit on the left hand side of \eqref{lb1} is rather easy. We focus our attention on the right hand side of \eqref{lb1}. By multiplying it by $b(x)$, we get \begin{equation}\label{ad1} b(x)a_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} - 2b(x)A^{\epsilon}\nabla u^{\epsilon}\cdot \eta \end{equation} and we pass to the limit in the above equation \eqref{ad1} in two different ways.\\ \\ Firstly, by using \eqref{FG3} and \eqref{ot15} we get \begin{equation}\label{ad2} a_1 B^{\#}\nabla u\cdot\nabla u - 2bA^{*}\nabla u\cdot \eta . \end{equation} Secondly, we use the notion of $H$-measure in order to pass to the limit in \eqref{ad1} or in the right hand side of \eqref{lb1}.\\ \\ To this end, introducing a coupled variable $V_{\epsilon}=(\nabla u^{\epsilon}, A^{\epsilon}\eta)$, we write the right hand side of \eqref{lb1} in a quadratic form $q(V_{\epsilon})\cdot V_{\epsilon} $. Here $q$ is the following linear map : \begin{equation*} q:\mathbb{R}^N \times \mathbb{R}^N \longmapsto \mathbb{R}^N \times \mathbb{R}^N\ \mbox{ defined by }\ q(\nabla u^\epsilon,A^\epsilon\eta)=\ (a_1\nabla u^\epsilon-A^\epsilon\eta, -\nabla u^\epsilon ). \end{equation*} Introducing $V_0 =(\nabla u,\overline{A}\eta)$, we have \begin{equation*} q(V_{\epsilon})\cdot V_{\epsilon}=2q(V_{\epsilon})\cdot V_0 -q(V_0)\cdot V_0 + q(V_{\epsilon}- V_0)\cdot(V_{\epsilon}- V_0).\end{equation*} Let $\varPi_V$ denote the $H$-measure of $(V_{\epsilon}- V_0)$. Its $x$-projection is a Radon measure known as defect measure. However, due to Remark \ref{abc3}, it is in fact a $L^1({|\!\!\!O}mega)$ function. This will be useful below. 
We pass to the limit in \eqref{ad1} by virtue of Theorem \ref{dc10} to get \begin{equation}\label{lb2} b(x) a_1\nabla u\cdot\nabla u- 2b(x)\overline{A}\nabla u\cdot \eta + b(x) X, \end{equation} where $X$ is the $H$-measure correction term defined by \begin{equation*} X = \underset{\epsilon \rightarrow 0}{\textrm{lim}}\ q(V_{\epsilon}- V_0)\cdot(V_{\epsilon}- V_0) = \int_{\mathbb{S}^{N-1}} tr\ (q\varPi_V (x,d\xi)) \end{equation*} or equivalently, \begin{equation}\label{eid} X = \langle\langle \varPi_V, Q(U,A\eta)\rangle\rangle \quad\mbox{with } Q(U,A\eta)=q(U,A\eta)\cdot(U,A\eta). \end{equation} Here we are using the angular brackets to denote the directional average, a notation introduced in \cite{T}.\\ \\ Equating the limit \eqref{lb2} with \eqref{ad2}, we get \begin{equation*} a_1 B^{\#}\nabla u\cdot\nabla u - 2b\hspace{1.5pt}A^{*}\nabla u\cdot \eta = b\left( a_1\nabla u\cdot\nabla u-2\overline{A}\nabla u\cdot \eta + X \right). \end{equation*} Now minimizing with respect to $\nabla u$, we obtain a useful relation linking $B^{\#}$ with $A^{*}$ with $H$-measure term : \begin{equation}\label{ad3} b(\overline{A}-A^{*})(B^{\#}-bI)^{-1}(\overline{A}-A^{*})\eta\cdot\eta= a_1X. \end{equation} On the other hand by passing to the limit in \eqref{lb1}, it yields \begin{equation}\label{ot14} A^{*}\nabla u\cdot\nabla u + (\overline{A}-a_1 I)\eta \cdot \eta - 2a_1\nabla u\cdot \eta\ \geq\ a_1\nabla u\cdot\nabla u -2\overline{A}\nabla u\cdot \eta + X . \end{equation} \textbf{Step 2 :} Next, the idea is now to use Theorem \ref{dc10} in order to get a lower bound on $X$. However, the quadratic form $q$, which is defined by \begin{equation*} q(V)\cdot V= a_1 |U|^2 - 2U\cdot A\eta \mbox{ with } V=(U,A\eta) \in \mathbb{R}^N \times \mathbb{R}^N \end{equation*} is not coercive with respect to the variable $A\eta$. So there is no hope to prove that $q$ is non-negative on wave cone, as required by Theorem \ref{dc10}. Here, we apply this result to a slightly different quadratic form. 
The following arguments were inspired from \cite{AM}.\\ As the gradient $\nabla u^{\epsilon}$ satisfies $curl(\nabla u^{\epsilon}) =0 $, in the spirit of Theorem \ref{dc9}, we introduce the oscillation variety \begin{equation}\label{os16} \vartheta=\ \{\ (\xi,U,A\eta) \in \mathbb{S}^{N-1} \times \mathbb{R}^N\times \mathbb{R}^N;\ \xi_{i}U_j -\xi_{j}U_i = 0\ \forall i,j \} \end{equation} and its projection (wave cone) \begin{align*} \Lambda &=\ \{(U,A\eta) \in \mathbb{R}^N\times \mathbb{R}^N;\ \exists\ \xi\in \mathbb{S}^{N-1} \mbox{ such that }(\xi,U,A\eta) \in \vartheta \}\\ &=\ \{(U,A\eta) \in \mathbb{R}^N\times \mathbb{R}^N;\ U \parallel \xi,\mbox{ for some }\xi\in \mathbb{S}^{N-1}\}. \end{align*} We define $\Lambda_{\xi}\subset \Lambda$, $\xi\in \mathbb{R}^N\smallsetminus \{0\}$ as \begin{equation}\label{sir} \Lambda_{\xi}=\ \{(U,A\eta) \in \mathbb{R}^N\times \mathbb{R}^N;\ U= t\hspace{1.5pt}\xi,\ t\in \mathbb{R} \}=\ \mathbb{R}\xi;\quad\mbox{So, } \underset{\xi\neq 0}{\cup} \Lambda_{\xi}=\Lambda.\end{equation} We introduce a new linear form $q^{\prime}_{\xi}$, whose associated quadratic form defined as \begin{equation}\label{sit} Q^\prime_\xi(U,A\eta):= q^{\prime}_{\xi}(U,A\eta)\cdot(U,A\eta)=\ q(U,A\eta)\cdot(U,A\eta)-\underset{U \in \Lambda_{\xi}}{min}\ q(U,A\eta)\cdot(U,A\eta). \end{equation} The above quadratic form is non-negative on the wave cone $\Lambda_\xi$. Thus applying Theorem \ref{dc10} we get \begin{equation}\label{eiy} trace\ (q^{\prime}_{\xi}\varPi_V) \geq 0 \end{equation} which implies that \begin{equation}\label{eie} X =\ \langle\langle\varPi_V, Q(U,A\eta)\rangle\rangle \geq \langle\langle\varPi_V,\underset{U \in \Lambda_{\xi}}{min}\ Q(U,A\eta)\rangle\rangle. 
\end{equation} Introducing $\varPi_A $, the $H$-measure of $ (A^{\epsilon} - \overline{A})\eta$, and noting that $ \underset{U \in \Lambda}{min}\ Q(U,A\eta) $ depends only on $A\eta$, we obtain : \begin{equation*} X \geq\ \langle\langle\varPi_V,\underset{U \in \Lambda_{\xi}}{min}\ Q(U,A\eta)\rangle\rangle = \langle\langle\varPi_{A},\underset{U \in \Lambda_{\xi}}{min}\ Q(U,A\eta)\rangle\rangle .\end{equation*} It remains to compute the right hand side of the above inequality. We simply have \begin{equation}\label{sij} \underset{U \in \Lambda_\xi}{min}\ Q(U,A\eta)=\ -\frac{(A\eta\cdot \xi)^2}{a_1 |\xi|^2} \end{equation} with the minimizers $U_{min}$ in $\Lambda_\xi$ : \begin{equation}\label{sio} U_{min} :=\ \frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\hspace{1.5pt}\xi.\end{equation} Since $(A^{\epsilon}-\overline{A})\eta =(a_1 -a_2)(\chi_{\epsilon} -\theta_A)\eta $, the $H$-measure $\varPi_A$ reduces to \begin{equation*} (\varPi_A)_{ij} = (a_2 -a_1)^2(\nu_A)\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where $\nu_A$ is the $H$-measure of the sequence $(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)$ with \begin{equation}\label{hsz} \nu_A(x,d\xi)\geq 0 \ \mbox{ and }\ \int_{\mathbb{S}^{N-1}} \nu_A (x,d\xi) = \theta_A(x)(1-\theta_A(x)).\end{equation} We finally obtain \begin{equation}\label{os10} \langle\langle\varPi_A, \frac{(A\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle =\ (a_2 -a_1)^2\int_{\mathbb{S}^{N-1}} \frac{(\eta \cdot \xi)^2}{|\xi|^2}\nu_A (x,d\xi). 
\end{equation} We introduce a matrix $M_A$ defined by \begin{equation}\label{os19} M_A = \frac{1}{(1-\theta_A)\theta_A}\int_{\mathbb{S}^{N-1}} \xi \otimes \xi\ \nu_A (x,d\xi) \end{equation} which is non-negative and has unit trace.\\ \\ Therefore \begin{equation}\label{FL5} X\ \geq\ - \frac{\theta_A(1-\theta_A)(a_2 -a_1)^2}{a_1} M_A\eta\cdot \eta\ =: X_{min}. \end{equation} Clearly, $X$ achieves its minimum $X_{min}$ with $U=U_{min}$ defined in \eqref{sio}.\\ \\ Thus from \eqref{ot14} we obtain \begin{equation}\label{lb4} (A^{*}-a_1 I)\nabla u\cdot\nabla u + (\overline{A}-a_1 I)\eta \cdot \eta + 2(\overline{A}-a_1 I)\nabla u\cdot \eta\ \geq\ \frac{-\theta_A(1-\theta_A)(a_2 -a_1)^2}{a_1} M_A\eta\cdot \eta . \end{equation} The above inequality is pointwise, which allows us to localize the problem of estimation. To do so, let us take the point $x=x_0$, where $A^{*}(x_0)$ is defined as a constant homogenized matrix with the proportion $\theta_A(x_0)$. The construction of classical oscillatory test functions shows that $\nabla u$ can be an arbitrary vector in $\mathbb{R}^N$ : \begin{equation}\label{zz11} (i)\ \ \nabla u^\epsilon \rightharpoonup \zeta\in\mathbb{R}^N \mbox{ arbitrary and }(ii)\ \ div(A^\epsilon\nabla u^\epsilon) \mbox{ converges in }H^{-1}(\Omega) \mbox{ strongly.} \end{equation} We minimize \eqref{lb4} with respect to $\nabla u=\zeta$ with its minimizer \begin{equation}\label{abc1} \zeta = - (\overline{A}(x_0)-a_1I)(A^{*}(x_0)- a_1 I)^{-1}\eta ; \end{equation} so \eqref{lb4} yields the following estimate at $x_0$ : \begin{equation}\label{lb5} (A^{*}-a_1 I)^{-1}\eta\cdot \eta\ \leq\ \frac{|\eta|^2}{(1-\theta_A)(a_2 -a_1)} + \frac{\theta_A}{(1-\theta_A)a_1}M_A\eta\cdot\eta. \end{equation} Since $x_0$ is arbitrary, the matrix bound \eqref{lb5} is a pointwise bound for almost every $x$. Since $\eta $ is an arbitrary vector, by taking the trace we get the well known optimal lower bound \eqref{lb3} for $A^{*}$. 
We will use the same minimizer $\zeta = - (\overline{A}-a_1I)(A^{*}- a_1 I)^{-1}\eta$ later for our purposes. \paragraph{Step 3 :} Note that the expression \eqref{OP2} for the $p$-sequential laminates $A^{*}_p$ with matrix $a_1I$ and core $a_2I$ can be written as \begin{equation}\label{UD10} (1-\theta_A)( A^{*}_{p} - a_1 I)^{-1} = (a_2 - a_1)^{-1}I + \frac{\theta_A}{a_1} \int_{\mathbb{S}^{N-1}} \frac{e\otimes e}{e\cdot e}d\nu_A(e), \end{equation} for the probability measure $\nu_A$ on the unit sphere $\mathbb{S}^{N-1}$ given by $\nu_A = \sum_{j=1}^p m_j\delta_{e_j}$ with $\sum_{j=1}^p m_j=1$. See \cite{T}. Now going back to \eqref{os19},\eqref{FL5}, one sees that the above integrals over $\mathbb{S}^{N-1}$ appear naturally through the $H$-measure techniques and the equality takes place with $X=X_{min}$. \\ \\ This naturally enables us to define the corresponding $p$-sequential laminate for $B^{\#}$ as follows : We recall \eqref{ad3} with $A^{*}=A^{*}_p$ (cf.\eqref{UD10}) and $X=X_{min}$ (cf.\eqref{FL5}) with $M_A=\sum_{i=1}^p m_i\frac{e_i\otimes e_i}{e_i\cdot e_i}$ where $\{e_i\}$ are the canonical basis vectors and $\sum_{i=1}^p m_i =1$. Then the resulting $B^{\#}$ defines the $p$-sequential laminate, say $B^{\#}_p$, with matrix $a_1 I$ and core $a_2 I$ satisfying \begin{equation}\label{FG4} b(B^{\#}_p-bI)^{-1}(\overline{A}-A^{*}_p)^2 =\ \theta_A(1-\theta_A)(a_2 -a_1)^2\ \left(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i\cdot e_i}\right),\ \ \mbox{ with }\ \sum_{i=1}^p m_i =1. \end{equation} Note that $B^{\#}_p$ is diagonal, since $A^{*}_p$ is diagonal. \\ \\ Similarly, by interchanging the roles of $a_1$ and $a_2$, one defines the $p$-sequential laminate $B^{\#}_p$ with matrix $a_2 I$ and core $a_1 I$ as \begin{equation}\label{bs13} b(B^{\#}_p-bI)^{-1}(\overline{A}-A^{*}_p)^2 =\ \theta_A(1-\theta_A)(a_2 -a_1)^2\ \left(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i\cdot e_i}\right),\ \ \mbox{ with }\ \sum_{i=1}^p m_i =1. 
\end{equation} where $A^{*}_p$ is the $p$-sequential laminates \eqref{OP3} with matrix $a_2 I$ and core $a_1 I$. \qed \begin{remark}[Uniqueness of $B^{\#}$]\label{ad19} As we see from the relation \eqref{ad3}, if $A^{*}$ and $X$ are fixed then $B^{\#}$ is also fixed. Now suppose for two different microstructures if we have same homogenized limit $A^{*}\in \partial\mathcal{G}_{\theta_A}$ then associated matrix $M_A$ is also the same (because equality holds in \eqref{lb5}). Hence both the microstructures possess the same $X=X_{min}$ (cf.\eqref{FL5}). Therefore, both microstructures lead to the same macro relative limit $B^{\#}$ with $A^{*}\in\partial\mathcal{G}_{\theta_A}$. If $A^{*}\in int\ \mathcal{G}_{\theta_A}$, it can be realized as $A^{*}\in\partial\mathcal{G}_{\widetilde{\theta_A}}$ for some $\widetilde{\theta_A}$ uniquely determined by $\theta_A,A^{*}$ (see Figure 4). Then using the previous arguments in Step $2$ once again, replacing $\theta_A$ by $\widetilde{\theta_A}$ everywhere we conclude that $B^{\#}$ is uniquely determined. \qed\end{remark} \begin{remark}[Optimality]\label{ot18} \noindent $(1)\ :\ $ The $N$-sequential laminates $(A^{*}_N,B^{\#}_N)$ given in \eqref{OP3},\eqref{bs13} and \eqref{OP2},\eqref{FG4} achieve the equality in the lower bound \eqref{tw} and the upper bound \eqref{tq} respectively. Conversely, it is known that any point on $\partial\mathcal{G}_{\theta_A}^L$ or $\partial\mathcal{G}_{\theta_A}^U$ can be achieved by $N$-sequential laminates $A^{*}_N$. Further, the relative limit $B^{\#}_N$ constructed above along with $A^{*}_N$ achieves the equality in the upper bound \eqref{tq} or the lower bound \eqref{tw} respectively. \\ \noindent $(2)\ :\ $ Given $A^{*}$, $B^{\#}$ satisfying both inequalities \eqref{tq},\eqref{tw}, there exists $A^\epsilon$ satisfying \eqref{ta} and $b(x)$ with $b_1\leq b(x)\leq b_2$ such that $A^\epsilon \xrightarrow{H} A^{*}$ and $b\xrightarrow{A^\epsilon} B^{\#}$. We will not prove such assertions here. 
We will deal with more complicated cases of two-phase $\{A^\epsilon, B^\epsilon\}$ with $B^\epsilon$ depending on $\epsilon$. See Theorem \ref{qw6} and its proof in Section \ref{qw4}. \qed\end{remark} \subsection{Bounds : $B^{\epsilon}(x)$ is governed by a two-phase medium.}\label{sil} Now we move to the case when $B^{\epsilon}(x)$ is governed by a two-phase medium. We consider the sequences $\chi_{{\omega}_{A^{\epsilon}}}(x) \rightharpoonup \theta_A $ and $\chi_{{\omega}_{B^{\epsilon}}}(x) \rightharpoonup \theta_B $ in $L^{\infty}(\Omega)$ weak*, with \begin{equation*}A^{\epsilon}=\ a^{\epsilon}I=\ (a_1\chi_{{\omega}_{A^{\epsilon}}}(x)+a_2(1-\chi_{{\omega}_{A^{\epsilon}}}(x)))I \mbox{ $H$-converges to } A^{*}\mbox{ in }\Omega, \end{equation*} and \begin{equation*} B^{\epsilon} =\ b^{\epsilon}I=\ (b_1\chi_{{\omega}_{B^{\epsilon}}}(x) + b_2(1-\chi_{{\omega}_{B^{\epsilon}}}(x)))I \ \mbox{converges to }B^{\#} \mbox{ relative to }A^\epsilon \mbox{ in }\Omega. \end{equation*} We assume $(0<a_1<a_2<\infty)$ and $(0<b_1<b_2<\infty)$.\\ \\ Let us begin by considering two particular cases, namely when $\omega_{B^\epsilon}=\omega_{A^\epsilon}$ and $\omega_{B^\epsilon}=\omega^c_{A^\epsilon}$ respectively. Let us first derive the lower bound when $\omega_{B^\epsilon}=\omega_{A^\epsilon}$. \paragraph{1. Lower Bound : when $\omega_{B^\epsilon}=\omega_{A^\epsilon}$ :} We introduce the constant vector $\eta$ in $\mathbb{R}^N$ and consider the simple translated inequality for $B^\epsilon$ with oscillation field $\nabla u^\epsilon$ associated to $A^\epsilon$ : \begin{equation}\label{FL8} (B^{\epsilon}-b_1 I)(\nabla u^{\epsilon} +\eta)\cdot(\nabla u^{\epsilon} +\eta)\geq 0 \quad\mbox{a.e. in }\Omega. 
\end{equation} which is rewritten as \begin{equation}\label{Sd18} B^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} + (B^{\epsilon}- b_1 I)\eta \cdot \eta + 2(B^\epsilon-b_1 I) \nabla u^{\epsilon}\cdot \eta\ \geq\ b_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}. \end{equation} We impose the constraints on $\nabla u^\epsilon $ : \begin{equation}\label{six} \nabla u^\epsilon \rightharpoonup\nabla u \mbox{ weakly in } L^2({|\!\!\!O}mega),\ \ -div(A^\epsilon\nabla u^\epsilon) \mbox{ is }H^{-1}({|\!\!\!O}mega)\mbox{ convergent. } \end{equation} Thanks to the distributional convergence \eqref{dc1}, we know \begin{equation*}B^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}\ \rightharpoonup\ B^{\#}(x)\nabla u\cdot\nabla u\ \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega).\end{equation*} Passing to the limit in the left hand side of \eqref{Sd18} is rather easy. It is enough to use the relation between the two fluxes and we have : \begin{equation}\label{sik} (B^\epsilon-b_1I)\nabla u^\epsilon = \frac{(b_2-b_1)}{(a_2-a_1)}(A^\epsilon-a_1I)\nabla u^\epsilon\ \rightharpoonup\ \frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)\nabla u\ \mbox{ in } L^2({|\!\!\!O}mega).\end{equation} On the other hand, in order to pass to the limit in the right hand side of \eqref{Sd18} we use the $H$-measure techniques. Recalling that the limit of right hand side of \eqref{lb1} obtained in the previous Section \ref{os9} as \begin{equation}\label{siy} a_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}- 2A^{\epsilon}\nabla u^{\epsilon}\cdot \eta \rightharpoonup a_1\nabla u\cdot\nabla u- 2\overline{A}\nabla u\cdot \eta + X\ \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega)\end{equation} where, $X$ is the $H$-measure corrector term. 
Hence, \begin{equation*} b_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} \rightharpoonup b_1\nabla u\cdot\nabla u + 2\frac{b_1}{a_1}(A^{*}-\overline{A})\nabla u\cdot \eta + \frac{b_1}{a_1}X \ \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega).\end{equation*} Therefore passing to the limit in \eqref{Sd18} we simply get, \begin{equation*}B^{\#}\nabla u\cdot\nabla u + (\overline{B}-b_1 I)\eta \cdot \eta + 2\frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)\nabla u\cdot \eta\ \geq\ b_1\nabla u\cdot\nabla u + 2\frac{b_1}{a_1}(A^{*}-\overline{A})\nabla u\cdot \eta + \frac{b_1}{a_1}X.\end{equation*} Next, by using the lower bound on $X$ (cf.\eqref{FL5}) we obtain \begin{align*} (B^{\#}-b_1I)\nabla u\cdot\nabla u + (\overline{B}-b_1 I)\eta \cdot \eta + 2\{\frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*})\}\nabla u\cdot \eta\ \qquad\qquad &\\ +\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A)M_A\eta\cdot\eta\ \geq\ 0. & \end{align*} Finally, minimizing over $\nabla u$ we obtain the lower bound as \begin{align} &\{\frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*})\}(B^{\#}-b_1I)^{-1}\{\frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*})\}\eta\cdot\eta\notag\\ &\quad\qquad\qquad\qquad\qquad\qquad\quad\leq\ (\overline{B}-b_1 I)\eta \cdot \eta +\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A)M_A\eta\cdot\eta. \label{siq} \end{align} Since $\eta $ is an arbitrary vector in $\mathbb{R}^N$, by taking trace on both sides of \eqref{siq} and using the fact that the matrix $M_A$ has unit trace, one obtains the following lower trace bound for $(A^{*},B^{\#})$ whenever the two corresponding microstructures $\omega_{A^\epsilon}=\omega_{B^\epsilon}$. \begin{align} tr\ \{[\frac{(b_2-b_1)}{(a_2-a_1)}(A^{*}-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*})]^2(B^{\#}-b_1I)^{-1}\}\leq\ &N(b_2-b_1)(1-\theta_A)\notag \\ &+\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A). \label{bs20} \end{align} \textbf{2. 
Lower Bound : when $\omega_{B^\epsilon}=\omega^c_{A^\epsilon}$ : } In this case we have the following flux convergence : \begin{equation*}(B^\epsilon-b_1I)\nabla u^\epsilon = \frac{(b_2-b_1)}{(a_2-a_1)}(a_2I-A^\epsilon)\nabla u^\epsilon\ \rightharpoonup\ \frac{(b_2-b_1)}{(a_2-a_1)}(a_2I-A^{*})\nabla u\ \mbox{ in } L^2({|\!\!\!O}mega). \end{equation*} Starting with the above information,one follows the procedure of the previous case to obtain the following inequality : \begin{align} &\{\frac{(b_2-b_1)}{(a_2-a_1)}(a_2I-A^{*})+\frac{b_1}{a_1}(\overline{A}-A^{*})\}(B^{\#}-b_1I)^{-1}\{\frac{(b_2-b_1)}{(a_2-a_1)}(a_2I-A^{*})+\frac{b_1}{a_1}(\overline{A}-A^{*})\}\eta\cdot\eta\notag\\ &\quad\qquad\qquad\qquad\qquad\qquad\quad\leq\ (\overline{B}-b_1 I)\eta \cdot \eta +\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A)M_A\eta\cdot\eta. \label{eia} \end{align} Then by taking trace on both sides of \eqref{eia}, we simply obtain \begin{align}\label{FL15} tr\ \{[\frac{(b_2-b_1)}{(a_2-a_1)}(a_2I-A^{*})+\frac{b_1}{a_1}(\overline{A}-A^{*})]^2(B^{\#}-b_1I)^{-1}\}\leq\ &N(b_2-b_1)(1-\theta_B)\notag\\ &+\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A). \end{align} \textbf{Saturation/Optimality of the above lower bounds : } Simple calculation shows that the equality of the above lower bound \eqref{bs20} of $(A^{*},B^{\#})$ is achieved by the simple laminates, Hashin-Shtrikman construction given in the Section \ref{bs12} and Section \ref{hsl} respectively. However, in the case $\omega_{B^\epsilon}=\omega_{A^\epsilon}^c$, the equality of the above lower bound \eqref{FL15} is achieved by the simple laminates, Hashin-Shtrikman construction when $\theta_A \leq \frac{1}{2}$ (i.e. when $\theta_B =(1-\theta_A) \geq \theta_A$). Later we will see in order to find the optimal bound when $\theta_A >\frac{1}{2}$, one needs to start with the dual version of \eqref{FL8} given below in \eqref{tc}. With regard to $N$-sequential laminates, the construction of $A^{*}_N$ is classical. 
The construction of the corresponding relative limit $B^{\#}_{N,N}$ can be based on a relation linking $A^{*}_N$ and $B^{\#}_{N,N}$ which is analogous to \eqref{FG4}. This relation is given in \eqref{hsa} for the case $\omega_{A^\epsilon} = \omega_{B^\epsilon}$. A similar thing can be done for $\omega_{B^\epsilon}=\omega_{A^\epsilon}^c$. These structures $A^{*}_N,B^{\#}_{N,N}$ provide the saturation / optimality of the above bounds. A useful observation is that $A^{*}$ belongs to the boundary of $\mathcal{G}_{\theta_A}$ in all three cases above. \qed \paragraph{General Case :} Although our ultimate goal is to find bounds over the arbitrary microstructures $\omega_{A^\epsilon},\omega_{B^\epsilon}$, it is a difficult task to perform. The source of difficulty lies in the fact that we do not have a convergence result for the flux $B^\epsilon\nabla u^\epsilon$ as in \eqref{sik} for arbitrary microstructures $\omega_{A^\epsilon},\omega_{B^\epsilon}$. In the products involving $B^{\epsilon}$ and $\nabla u^{\epsilon}$, the oscillations in $\nabla u^{\epsilon}$ are controlled by $A^{\epsilon}$ through the state equation governed by its microstructures. In order to obtain the optimal bounds, one has to take this fact into account. In the previous cases, such convergences (cf.\eqref{sik}) are used crucially in order to obtain optimal bounds. However, from these particular cases we make a useful observation that the optimal structures of $A^{*}$ (i.e. $A^{*}\in\partial\mathcal{G}_{\theta_A}=\partial\mathcal{G}^L_{\theta_A}\cup \partial\mathcal{G}^U_{\theta_A}$) provide the saturation / optimality of the $(A^{*},B^{\#})$ bounds. Motivated by this, in order to treat the general case we adopt the following strategy : First we will find the bounds on $B^{\#}$ with arbitrary $\omega_{B^\epsilon}$, whereas $\omega_{A^\epsilon}$ is the corresponding optimal microstructure for the homogenized tensor, i.e. $A^{*}\in\partial\mathcal{G}_{\theta_A}$. 
Then in the second step, we will treat the case $A^{*}\in int(\mathcal{G}_{\theta_A})$. \paragraph{Lower Bound :} Let us establish the lower bound L1 on $(A^{*},B^{\#})$ for arbitrary microstructures $\omega_{A^\epsilon},\omega_{B^\epsilon}$. We break it into two steps as mentioned above : first we treat $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$ (and $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$ can be dealt with analogously); next we consider $A^{*}\in int(\mathcal{G}_{\theta_A})$. \paragraph{Step 1a : (Compactness property of $(\nabla u^\epsilon,A^\epsilon\eta)$)} In this step, assuming $A^{*}\in \partial\mathcal{G}^L_{\theta_A}$, we state and prove a compactness property of $(\nabla u^\epsilon,A^\epsilon\eta)$. We formulate it (cf.\eqref{eiz}) in such a way that it can be used to study the inflated system \eqref{eik} to derive the required lower trace bound L1. Recall the field $\nabla u^\epsilon$ possesses natural constraints \eqref{six}, i.e. \begin{equation*} \nabla u^\epsilon \rightharpoonup\nabla u \mbox{ weakly in } L^2({|\!\!\!O}mega),\ \ -div(A^\epsilon\nabla u^\epsilon) \mbox{ is }H^{-1}({|\!\!\!O}mega)\mbox{ convergent. }\end{equation*} Now, we impose one more constraint on the fields $\nabla u^\epsilon$ by restricting them inside the class of fields corresponding to microstructures providing the saturation/optimality of the lower bound \eqref{lb5}. In order to define such class of optimal fields, we go back to the right hand side of \eqref{lb1} as well as its limit : \begin{equation}\label{lb20} a_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}- 2A^{\epsilon}\nabla u^{\epsilon}\cdot \eta \rightharpoonup a_1\nabla u\cdot\nabla u- 2\overline{A}\nabla u\cdot \eta + X\ \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega)\end{equation} where $X$ is the $H$-measure correction term defined as in \eqref{eid} : \begin{equation*} X = \langle\langle \varPi_V, Q(U,A\eta)\rangle\rangle \quad\mbox{with } Q(U,A\eta)= a_1|U|^2-2AU\cdot\eta. 
\end{equation*} Following \eqref{eie},\eqref{FL5} we had the lower bound on $X$ as, \begin{equation*} X \geq\ \langle\langle\varPi_V,\underset{U \in \Lambda_{\xi}}{min}\ Q(U,A\eta)\rangle\rangle = - \frac{\theta_A(1-\theta_A)(a_2 -a_1)^2}{a_1} M_A\eta\cdot\eta\ = X_{min}\end{equation*} It has been also noted that the equality of this above bound provides the saturation / optimality of the lower bound of $A^{*}$. In the following result, we will investigate the compactness property of $\nabla u^\epsilon$ satisfying \eqref{lb20} with $X=X_{min}$. \begin{theorem}[Compactness]\label{ub8} Let us consider the following constrained oscillatory system : \begin{equation}\label{eiw} V_\epsilon=\ (\nabla u^\epsilon,A^\epsilon\eta) \rightharpoonup (\nabla u,\overline{A}\eta)=V_0\ \mbox{ in }L^2({|\!\!\!O}mega)^{2N}\mbox{ weak }, \eta\in\mathbb{R}^N\smallsetminus\{0\},\end{equation} \begin{equation}\begin{cases}\label{eiz} a_1\Delta U_\epsilon - \nabla( div(A^\epsilon\eta) ) \in H^{-2}_{loc}({|\!\!\!O}mega)\mbox{ convergent, } \\ \ U_\epsilon = \nabla u^\epsilon. \end{cases}\end{equation} Then \begin{equation}\label{eil} a_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}- 2A^{\epsilon}\nabla u^{\epsilon}\cdot \eta \rightharpoonup a_1\nabla u\cdot\nabla u- 2\overline{A}\nabla u\cdot \eta + X_{min} \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega) \end{equation} where, $ X_{min}$ is defined above (cf.\eqref{FL5}). \\ \\ Conversely, if the sequence satisfying \eqref{eiw} possesses the property \eqref{eil}, then \eqref{eiz} must hold. \end{theorem} \begin{proof} The oscillation variety $\vartheta_1$ of the above differential system \eqref{eiw},\eqref{eiz} is \begin{equation*} \vartheta_1=\ \{\ (\xi,U,A\eta) \in \mathbb{S}^{N-1}\times\mathbb{R}^N\times\mathbb{R}^N\ : \ U= \frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi\ \}. 
\end{equation*} Note, both $\vartheta$ (see \eqref{os16}) and $\vartheta_1$ have some common constraints, namely $\xi_{i}U_j -\xi_{j}U_i = 0\ \forall i,j.$ The corresponding wave cone $\Lambda_1$ is as follows : \begin{equation*} \Lambda_1 =\ \{ (U,A\eta)\in\mathbb{R}^N\times\mathbb{R}^N;\ \exists\ \xi\in \mathbb{S}^{N-1} \mbox{ such that } U= \frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi \}. \end{equation*} Next we define $\Lambda_{1,\xi}\subset \Lambda_1$, $\xi\in \mathbb{S}^{N-1}$, \begin{equation*} \Lambda_{1,\xi}=\ \{(U,A\eta) \in \mathbb{R}^N\times \mathbb{R}^N;\ U= \frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi \};\ \mbox{ So, } \underset{\xi\in\mathbb{S}^{N-1}}{\cup}\Lambda_{1,\xi}=\Lambda_1 \end{equation*} Following that, we introduce a new linear map $q^{\prime\prime}_{1,\xi}:\mathbb{R}^N \times \mathbb{R}^{N} \longmapsto \mathbb{R}^N \times \mathbb{R}^{N}$, whose associated quadratic form is given by \begin{equation*} Q_{1,\xi}^{\prime\prime}(U,A\eta) :=\ q_{1,\xi}^{\prime\prime}(U,A\eta)\cdot(U,A\eta) =\ Q(U,A\eta)- \underset{U\in \Lambda_{1,\xi}}{min}Q(U,A\eta) \end{equation*} where, $Q(U,A\eta)=a_1|U|^2-2U\cdot A\eta$.\\ Since the above quadratic form $Q_{1,\xi}^{\prime\prime}(U,A\eta)$ is zero on the wave cone $\Lambda_{1,\xi}$, we get by applying Theorem \ref{dc10}, \begin{equation*} \langle\langle \varPi_V,Q_{1,\xi}^{\prime\prime}\rangle\rangle = trace\ (q_{1,\xi}^{\prime\prime}\varPi_V) = 0 \end{equation*} where $\varPi_V$ is the $H$-measure of the sequence $(V_\epsilon-V_0)$. Thus we get using \eqref{sij} \begin{align*} X=\ \langle\langle\varPi_{V}, Q(U,A\eta)\rangle\rangle\ =\ \langle\langle\varPi_{V},\underset{U\in \Lambda_{1,\xi}}{min}Q(U,A\eta) \rangle\rangle &=\ \langle\langle\varPi_A, Q(\frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi,A\eta) \rangle\rangle \notag\\ &=\ \langle\langle\varPi_A, -\frac{(A\eta\cdot\xi)^2}{a_1|\xi|^2} \rangle\rangle = X_{min}. 
\end{align*} Hence, under the oscillatory system \eqref{eiz}, the limit \eqref{eil} holds by Theorem \ref{dc10}.\\ \\ \textbf{Converse part : } The proof is inspired from \cite[Chapter 28]{T}. Let us recall \eqref{eiy}, for $V_\epsilon \rightharpoonup V_0$ in $L^2({|\!\!\!O}mega)$ weak, we had shown that : \begin{equation}\label{lb19} trace\ (q^{\prime}_{\xi}\varPi_V) \geq 0, \end{equation} where the matrix $q^\prime_\xi$ introduced in \eqref{sit} is given by \begin{equation}\label{eit} q^\prime_\xi \in \mathcal{L}_{x,\xi}(\mathbb{R}^{2N};\mathbb{R}^{2N});\ \ q^\prime_\xi = \begin{bmatrix} a_1I_{N\times N} & -I_{N\times N}\\ -I_{N\times N} & a_1^{-1}B \end{bmatrix}_{2N\times 2N} \end{equation} where, $\mathcal{L}_{x,\xi}(\mathbb{R}^{N_1};\mathbb{R}^{N_2})$ denotes the space of $N_1\times N_2$ matrix whose coefficients may depend on $(x,\xi)\in \mathbb{R}^N\times \mathbb{S}^{N-1}$. \begin{equation*} B\in \mathcal{L}_{x,\xi}(\mathbb{R}^{N};\mathbb{R}^{N});\ B= \begin{bmatrix} \xi_1^2 & \xi_1\xi_2 & .. & \xi_1\xi_N \\ \xi_1\xi_2 & \xi_2^2 & .. & \xi_2\xi_N \\ .. & .. & .. & .. \\ \xi_1\xi_N & \xi_2\xi_N & .. & \xi_N^2 \end{bmatrix}_{N\times N}. \end{equation*} Notice that, $B^2=B$ as $\xi\in \mathbb{S}^{N-1}$.\\ \\ Here $\varPi_V\in \mathcal{M}({|\!\!\!O}mega\times\mathbb{S}^{N-1};\mathbb{R}^{2N\times 2N})$ ($\varPi_V =\varPi_V^{*}$, Hermitian) is associated $H$-measure of the sequence $(V_\epsilon-V_0)$. Since $V^\epsilon=(\nabla u^\epsilon, A^\epsilon\eta)$, where $A^\epsilon = a^\epsilon I$ satisfies the constraints \begin{equation*} curl\ (\nabla u^\epsilon) = 0, \mbox{ and } -div\left( (A^\epsilon\eta)_j\nabla u^\epsilon\right) \mbox{ converges strongly in } H^{-1}({|\!\!\!O}mega), \mbox{ for each }j=1,..,N, \end{equation*} we have by the localization principle (cf. 
Theorem \ref{dc9}), $H$-measure $\varPi_V$ satisfies the following relations :\\ \\ (a): The sub-matrix $\{(\varPi_V)_{jk}\}_{1\leq j,k\leq N} $ of $\varPi_V$ satisfies : \begin{equation}\label{sin} \xi_k(\varPi_V)_{jl} - \xi_j(\varPi_V)_{kl} = 0 \mbox{ for }j,k,l=1,..,N. \end{equation} (b): The sub-matrix $\{(\varPi_V)_{jk}\}_{1\leq k\leq N, N+1\leq j\leq 2N}$ of $\varPi_V$ satisfies \begin{equation}\label{lb16} \sum_{k=1}^N\xi_k(\varPi_V)_{jk} = 0 \mbox{ for }j=N+1,..,2N. \end{equation} Now \eqref{eil} holds only if the equality holds in \eqref{lb19}, i.e. \begin{equation}\label{eix} trace\ (q^{\prime}_{\xi}\varPi_V) = 0.\end{equation} Now we will show \eqref{eix} implies \eqref{eiz}.\\ \\ In order to do that, we begin with localizing the sequence $V_\epsilon-V_0$ by $\phi(V_\epsilon-V_0)$, $\phi\in C_c^1({|\!\!\!O}mega)$. We will be using $\frac{\partial}{\partial x_j}=(-\Delta)^{\frac{1}{2}}R_j$ and $\frac{{\partial}^2}{\partial x_j\partial x_k}=(-\Delta)R_jR_k$ and where, $R_j$ is the Riesz operator (see \cite{STN}). Let us define the sequence \begin{equation*} \omega^\epsilon_k = a_1\sum_{j=1}^N R_jR_j \phi(U_\epsilon-U)_k - R_k\sum_{j=1}^N R_j \phi(A^\epsilon\eta-\overline{A}\eta)_j ; \ \ k=1,..,N. \end{equation*} We know \begin{equation*} \omega_\epsilon =\{\omega^\epsilon_k\}_{1\leq k\leq N}\ \rightharpoonup 0 \mbox{ in }L^2(\mathbb{R}^N)^N \mbox{ weak. 
} \end{equation*} To prove \eqref{eiz}, it is enough to show that the sequence \begin{equation}\label{lb15} \omega_\epsilon =\{\omega^\epsilon_k\}_{1\leq k\leq N}\ \rightarrow 0 \ \mbox{ in }L^2(\mathbb{R}^N)^N\mbox{ strong.} \end{equation} Let $\varPi\in \mathcal{M}(\Omega\times\mathbb{S}^{N-1};\mathbb{R}^{N\times N})$ be the $H$-measure associated with $\{\omega_k^\epsilon\}_{1\leq k\leq N}$; then we compute the trace of $\varPi$ : \begin{equation*} trace\ \varPi = -\sum_{k=1}^N a_1^2|\phi|^2(\varPi_V)_{kk} -2\sum_{k=1}^N\sum_{l=1}^{N} \xi_k|\phi|^2(\varPi_V)_{k,l+N} -\sum_{k=1}^N\sum_{l=1}^N\xi_k\xi_l|\phi|^2(\varPi_V)_{k+N,l+N}. \end{equation*} The middle term of the right hand side vanishes because of \eqref{lb16}. Another simple computation using \eqref{eit} shows that the right hand side is equal to $a_1|\phi|^2 trace\ (q^\prime_\xi\varPi_V)$ and hence $trace\ \varPi =0$ because of \eqref{eix}. So \eqref{lb15} follows. Consequently, \eqref{eiz} follows. \end{proof} \noindent \textbf{Step 1b : ($H$-measure term) :} Here onwards we will find the lower bounds by choosing the field $\nabla u^\epsilon$ satisfying \eqref{six} and \eqref{eiz} or, equivalently, $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$, and using them in the translated inequality \eqref{FL8} : \begin{equation*}(B^{\epsilon}-b_1 I)(\nabla u^{\epsilon} +\eta)\cdot(\nabla u^{\epsilon} +\eta)\geq 0 \quad\mbox{a.e. in }\Omega.\end{equation*} We expand the translated inequality to write \begin{equation}\label{lb8}B^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} + (B^{\epsilon}- b_1 I)\eta \cdot \eta -2b_1\nabla u^{\epsilon}\cdot\eta\ \geq\ b_1\nabla u^{\epsilon}\cdot\nabla u^{\epsilon} -2B^\epsilon\nabla u^{\epsilon}\cdot \eta.\end{equation} Passing to the limit on the left hand side is well known. On the other hand, in order to pass to the limit in the right hand side, we use the $H$-measure techniques. 
Introducing a coupled variable $W_{\epsilon}=(\nabla u^{\epsilon}, B^{\epsilon}\eta)$, we write the right hand side of \eqref{lb8} in a quadratic form $q_1(W_{\epsilon})\cdot W_{\epsilon} $. Here $q_1$ is a linear map : \begin{equation*} q_1:\mathbb{R}^N \times \mathbb{R}^{N} \longmapsto \mathbb{R}^N \times \mathbb{R}^{N}\ \mbox{ defined by }\ q_1(\nabla u^\epsilon,B^\epsilon\eta)=\ (b_1\nabla u^\epsilon-B^\epsilon\eta, -\nabla u^\epsilon ). \end{equation*} Introducing $W_0 =(\nabla u,\overline{B}\eta)$, as before we have \begin{equation*} q_1(W_{\epsilon})\cdot W_{\epsilon}\ =\ 2q_1(W_{\epsilon})\cdot W_0 -q_1(W_0)\cdot W_0 + q_1(W_{\epsilon}- W_0)\cdot(W_{\epsilon}- W_0).\end{equation*} Denoting by $\varPi_W$ the $H$-measure of $(W_{\epsilon}- W_0)$, we thus pass to the limit in \eqref{lb8} by virtue of Theorem \ref{dc10} : \begin{equation}\label{lb9} B^{\#}\nabla u\cdot\nabla u + (\overline{B}-b_1 I)\eta \cdot \eta - 2b_1 \nabla u\cdot \eta\ \geq\ b_1\nabla u\cdot\nabla u- 2\overline{B}\nabla u\cdot \eta + Y \end{equation} where $Y$ is the $H$-measure correction term defined by \begin{equation*} Y = \underset{\epsilon \rightarrow 0}{\textrm{lim}}\ q_1(W_{\epsilon}- W_0)\cdot(W_{\epsilon}- W_0) = \int_{\mathbb{S}^{N-1}} trace\ (q_1\varPi_W (x,d\xi)) \end{equation*} or equivalently, denoting the average with respect to directions $\xi\in\mathbb{S}^{N-1}$ by double angular brackets, with $W = (U, B\eta) \in \mathbb{R}^N \times \mathbb{R}^N$ we can write \begin{equation}\label{dc11} Y=\langle\langle \varPi_W, Q_1(U,B\eta)\rangle\rangle, \mbox{ with }Q_1(U,B\eta) := b_1U\cdot U-2BU\cdot\eta. \end{equation} The aim is to find a tight lower bound for the $H$-measure term $Y$. (Earlier we did this job for $X$; see \eqref{FL5}.) As before, any such lower bound must result from Theorem \ref{dc10} applied to an appropriate oscillatory system. 
This idea is carried out in the next step.\\ \noindent\textbf{Step 1c : (Lower bound) :} To take care of the interaction between microstructures $A^\epsilon$,$B^\epsilon$, we need to work with the following inflated oscillatory system with constraints : \begin{equation}\label{eik} \begin{cases} W_\epsilon^\prime = (\nabla u^\epsilon, B^\epsilon\eta, A^\epsilon\eta ) \rightharpoonup W_0^\prime= ( \nabla u, \overline{B}\eta, \overline{A}\eta )\ \mbox{ weakly in } L^2({|\!\!\!O}mega)^{3N},\\ a_1\Delta U_\epsilon - \nabla( div(A^\epsilon\eta) ) \in H^{-2}_{loc}({|\!\!\!O}mega)\mbox{ convergent, }\\ U_\epsilon = \nabla u^\epsilon. \end{cases} \end{equation} We proceed in a fashion analogous to Step $2$ of Section \ref{os9}. We introduce a new linear form $q^{\prime}_{1,\xi}$, whose associated quadratic form is defined as \begin{equation}\label{eis} Q^\prime_{1,\xi}(U,B\eta,A\eta)= q^{\prime}_{1,\xi}(U,B\eta,A\eta)\cdot(U,B\eta,A\eta) := Q_1(U,B\eta)-\underset{U \in \Lambda_{1,\xi}}{min}\ Q_1(U,B\eta), \end{equation} where $\Lambda_{1,\xi}$ is the corresponding wave cone for the inflated system \eqref{eik} : \begin{equation*}\Lambda_{1,\xi}=\ \{(U,A\eta) \in \mathbb{R}^N\times \mathbb{R}^N;\ U= \frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi \}.\end{equation*} So \eqref{eis} becomes, \begin{equation*} Q^\prime_{1,\xi}(U,B\eta,A\eta)= Q_1(U,B\eta)- Q_1(\frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi ,B\eta). \end{equation*} Note that, $Q^{\prime}_{1,\xi}$ is zero on the wave cone $\Lambda_{1,\xi}$. 
Thus applying Theorem \ref{dc10} we get \begin{equation*} trace\ (q^{\prime}_{1,\xi}\varPi_{W^\prime}) = 0 \end{equation*} which implies that, $Y$ (cf.\eqref{dc11}) \begin{align} Y =\langle\langle\varPi_{W^\prime}, Q_1(\frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi,B\eta)\rangle\rangle &= \langle\langle\varPi_{W^\prime},\left( b_1\frac{(A\eta\cdot\xi)^2}{a_1^2|\xi|^2}-2\frac{(A\eta\cdot\xi)(B\eta\cdot\xi)}{a_1|\xi|^2}\right)\rangle\rangle\notag\\ &= \langle\langle \varPi_{W^\prime}, b_1\left( \frac{(A\eta\cdot\xi)}{a_1|\xi|} - \frac{(B\eta\cdot\xi)}{b_1|\xi|}\right)^2\rangle\rangle -\langle\langle\varPi_{W^\prime},\frac{(B\eta\cdot\xi)^2}{b_1|\xi|^2}\rangle\rangle\notag\\ &= \frac{1}{a_1^2b_1}\langle\langle\varPi_{AB}, \frac{((b_1A-a_1B)\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle -\frac{1}{b_1}\langle\langle \varPi_B, \frac{(B\eta\cdot\xi)^2}{|\xi|^2}\rangle\rangle\notag \\ &=: R_1 + R_2 \mbox{ (say),}\label{FL6} \end{align} where $\varPi_{AB}\in \mathcal{M}({|\!\!\!O}mega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N}) $ is $H$-measure of the sequence $\{(b_1A^\epsilon-a_1B^\epsilon) -(b_1\overline{A}-a_1\overline{B})\}\eta$ and $\varPi_B\in \mathcal{M}({|\!\!\!O}mega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N})$ is $H$-measure of the sequence $(B^\epsilon-\overline{B})\eta$.\\ \\ Since, $\{(b_1A^\epsilon-a_1B^\epsilon) -(b_1\overline{A}-a_1\overline{B})\}\eta = \{b_1(a_1-a_2)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A(x))-a_1(b_1-b_2)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B(x))\}\eta$, the $H$-measure $\varPi_{AB}$ reduces to \begin{equation*} (\varPi_{AB})_{ij} = (\nu_{AB})\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where, $\nu_{AB}$ is $H$-measure of the scalar sequence $\{b_1(a_1-a_2)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A(x))-a_1(b_1-b_2)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B(x))\}$ with \begin{equation*} \nu_{AB}(x,d\xi)\geq 0\ \mbox{ and }\ \int_{\mathbb{S}^{N-1}}\nu_{AB}(x,d\xi) = L_{AB} \end{equation*} with \begin{align*} L_{AB} & := 
L^\infty(\Omega)\mbox{ weak* limit of }\{b_1(a_1-a_2)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)-a_1(b_1-b_2)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B)\}^2\notag\\ &= b_1^2(a_2-a_1)^2\theta_A(1-\theta_A) +a_1^2(b_2-b_1)^2\theta_B(1-\theta_B)-2b_1a_1(b_2-b_1)(a_2-a_1)(\theta_{AB}-\theta_A\theta_B)\notag \end{align*} where $\theta_{AB}$ is the $L^{\infty}(\Omega)$ weak* limit of $(\chi_{{\omega}_{A^{\epsilon}}}(x)\chi_{{\omega}_{B^{\epsilon}}}(x))$. Moreover, using \eqref{FG2} we have \begin{align}\label{dc18} L_{AB} \geq\ & \ b_1^2(a_2-a_1)^2\theta_A(1-\theta_A) +a_1^2(b_2-b_1)^2\theta_B(1-\theta_B)\notag\\ &\quad\qquad\qquad\qquad\qquad\qquad\qquad\qquad-2b_1a_1(b_2-b_1)(a_2-a_1)(\mbox{min}\{\theta_A,\theta_B\}-\theta_A\theta_B)\notag\\ & =: L^0_{AB} \mbox{ (say).} \end{align} Thus \begin{align*} R_1 = \frac{1}{a_1^2b_1}\langle\langle\varPi_{AB}, \frac{((b_1A-a_1B)\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle &= \frac{1}{a_1^2b_1} \int_{\mathbb{S}^{N-1}} \frac{(\eta \cdot \xi)^2}{|\xi|^2}\nu_{AB} (d\xi) = \frac{1}{a_1^2b_1}L_{AB}\ M_{AB}\eta\cdot\eta \end{align*} where $M_{AB}$ is a non-negative matrix with unit trace defined by \begin{equation}\label{siv} M_{AB} = \frac{1}{L_{AB}}\int_{\mathbb{S}^{N-1}} \xi \otimes \xi\ \nu_{AB} (d\xi). \end{equation} Hence, using \eqref{dc18} and \eqref{siv} we have \begin{align}\label{dc14} R_1\ \geq\ &\{ \frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A) +\frac{1}{b_1}(b_2-b_1)^2\theta_B(1-\theta_B)\notag\\ &\qquad\qquad\qquad\qquad-\frac{2}{a_1}(b_2-b_1)(a_2-a_1)(\mbox{min}\{\theta_A,\theta_B\}-\theta_A\theta_B)\}M_{AB}\eta\cdot\eta. \end{align} We compute $R_2$ next.
Since $(B^\epsilon-\overline{B})\eta=(b_2-b_1)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B(x))\eta$, the $H$-measure $\varPi_B$ reduces to \begin{equation*} (\varPi_B)_{ij} = (b_2-b_1)^2(\nu_B)\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where $\nu_B$ is the $H$-measure of the scalar sequence $(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B)$ satisfying \begin{equation*} \nu_{B}(x,d\xi)\geq 0\ \mbox{ and }\int_{\mathbb{S}^{N-1}}\nu_{B}(x,d\xi) =\theta_B(1-\theta_B). \end{equation*} Thus \begin{align}\label{dc16} R_2 = -\frac{1}{b_1}\langle\langle\varPi_B, \frac{(B\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle &= -\frac{(b_2-b_1)^2}{b_1} \int_{\mathbb{S}^{N-1}} \frac{(\eta \cdot \xi)^2}{|\xi|^2}\nu_B (d\xi)\notag\\ &=- \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_1}M_B\eta\cdot\eta \end{align} where $M_B$ is the non-negative matrix with unit trace defined as \begin{equation}\label{dc19} M_B = \frac{1}{(1-\theta_B)\theta_B}\int_{\mathbb{S}^{N-1}} \xi\otimes\xi\ \nu_B(x,d\xi). \end{equation} Therefore from \eqref{FL6} and \eqref{dc14},\eqref{dc16} we have \begin{align}\label{dc13} Y &= - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_1}M_B\eta\cdot\eta + \frac{1}{a_1^2b_1}{L_{AB}}\ M_{AB}\eta\cdot\eta \notag\\ &\geq - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_1}M_B\eta\cdot\eta +\{\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A) +\frac{1}{b_1}(b_2-b_1)^2\theta_B(1-\theta_B)\notag\\ &\qquad\qquad\qquad\qquad\qquad\qquad -\frac{2(b_2-b_1)(a_2-a_1)}{a_1}(\mbox{min}\{\theta_A,\theta_B\}-\theta_A\theta_B)\}M_{AB}\eta\cdot\eta\notag\\ &\ \ \ \ =: Y_{min} \end{align} with \begin{equation}\label{dc17} trace\ Y\ \geq\ \frac{b_1}{a_1^2}(a_2 - a_1)^2\theta_A(1-\theta_A) - \frac{2}{a_1}(b_2-b_1)(a_2-a_1)(\mbox{min}\{\theta_A,\theta_B\}-\theta_A\theta_B).
\end{equation} Rewriting \eqref{lb9}, we get \begin{equation}\label{ub15}(B^{\#}- b_1 I)\nabla u\cdot\nabla u + (\overline{B}-b_1 I)\eta \cdot \eta + 2 (\overline{B}-b_1I)\nabla u\cdot \eta \geq\ Y\end{equation} \textbf{Choice of $\nabla u$ : } Let us take the point $x=x_0$, where $A^{*}(x_0)$, $B^{\#}(x_0)$ are defined as constant matrices with the proportion $\theta_A(x_0)$ and $\theta_B(x_0)$. We consider the oscillatory test function as in \eqref{zz11} namely, \begin{equation*} (i)\ \ \nabla u^\epsilon \rightharpoonup \zeta\in\mathbb{R}^N \mbox{ arbitrary and }(ii)\ \ div(A^\epsilon\nabla u^\epsilon) \mbox{ converges }H^{-1}({|\!\!\!O}mega) \mbox{ strong.} \end{equation*} In the present context, we have additional restriction \eqref{eik}. According to Theorem \ref{ub8} this restriction is satisfied by $u^\epsilon$ because $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$. We choose $\nabla u=\zeta$ to be the minimizer of left hand side of \eqref{lb4} as : \begin{equation}\label{ub4} \zeta = -(\overline{A}(x_0)-a_1I)(A^{*}(x_0)- a_1 I)^{-1}\eta. \end{equation} \noindent\textbf{Matrix lower bound :} Thus from \eqref{ub15} by using \eqref{ub4} and \eqref{dc13} we obtain the following estimate at $x_0$ : \begin{align}\label{bs10} &(B^{\#}- b_1 I)(\overline{A}-a_1I)^2(A^{*}- a_1 I)^{-2}\eta\cdot\eta - 2 (\overline{B}-b_1I)(\overline{A}-a_1I)(A^{*}- a_1 I)^{-1}\eta\cdot\eta + (\overline{B}-b_1 I)\notag\\ &\eta\cdot\eta \geq - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_1}M_B\eta\cdot\eta + \{\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A) +\frac{1}{b_1}(b_2-b_1)^2\theta_B(1-\theta_B)\notag\\ &\qquad\qquad\qquad\qquad\qquad\qquad\quad -\frac{2(b_2-b_1)(a_2-a_1)}{a_1}(\mbox{min}\{\theta_A,\theta_B\}-\theta_A\theta_B)\}M_{AB}\eta\cdot\eta \end{align} where, $A^{*}$ satisfies \begin{equation}\label{eif} (A^{*}-a_1I)^{-1}\eta\cdot\eta =\ (\overline{A}-a_1I)^{-1}\eta\cdot\eta + \frac{\theta_A}{(1-\theta_A)a_1}M_A\eta\cdot\eta. 
\end{equation} Since $x_0$ is arbitrary, the matrix bound \eqref{bs10} is a pointwise bound for almost every $x$. \noindent\textbf{Trace bound L1 : $\theta_A\leq\theta_B$ almost everywhere in $x$ :} Having obtained the matrix inequality \eqref{bs10}, we now prove trace bound L1. We simplify the above bound \eqref{bs10}, using \eqref{eif}, \eqref{dc17} to obtain : \begin{equation}\label{eig} tr\ (B^{\#}-b_1I)(\overline{A}-a_1I)^2(A^{*}-a_1I)^{-2}\geq\ tr\ (\overline{B}-b_1I) +\frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A). \end{equation} Hence the trace bound \eqref{tt} follows whenever $A^{*}\in\partial\mathcal{G}^L_{\theta_A}$. \qed\\ \\ Next, we give examples of microstructures which possess the property $\omega_{A^\epsilon}\subseteq\omega_{B^\epsilon}$ and such that equality holds in \eqref{bs10} and hence in \eqref{eig}. \begin{example}[Saturation/Optimality]\label{ot5} The equality of this lower bound is achieved by the simple laminated materials $L^{\#}_1$ (cf. \eqref{lb10}), at the composites based on the Hashin-Shtrikman construction given in \eqref{ED1} and at sequential laminates of $N$-rank. \begin{proof} The simple laminate, say in $e_1$ direction with $\theta_A\leq \theta_B$ is given by \begin{equation*}B^{\#} = diag\ (L^{\#}_1, \overline{b},..,\overline{b}\ )\ \mbox{ (cf.\eqref{lb10}) with }\ A^{*}= diag\ (\underline{a},\overline{a},..,\overline{a}).\end{equation*} It achieves equality in \eqref{bs10} with $M_B = M_{AB}= diag\ ( 1,0,..,0)$. Indeed, \begin{align*} (L^{\#}_1- b_1 )\frac{(\overline{a}-a_1)^2}{(\underline{a}- a_1 )^2}&=\ (L^{\#}_1- b_1 )\frac{(\theta_A(a_2 -a_1) +a_1)^2}{a_1^2}\\ &=\ \{\frac{b_2}{a_2^2} + \frac{(b_1-b_2)}{a_2^2}\theta_B + {b_1}(\frac{1}{a_1^2} -\frac{1}{a_2^2})\theta_A \} a_2^2 - \frac{b_1}{a_1^2}(\theta_A(a_2 -a_1) +a_1)^2 \\ &=\ (b_2-b_1)(1-\theta_B) + \frac{b_1}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A). \end{align*} Similarly, the Hashin-Shtrikman construction given in \eqref{ED1} i.e.
with core $a_2 I$ and coating $a_1 I$ for $A_{B(0,1)}$ and core $b_2 I$ with coating $b_1 I$ for $B_{B(0,1)}$, with $\theta_A \leq \theta_B$ \begin{align*} a^{*}&=\ a_1 + Na_1\frac{(1-\theta_A)(a_2-a_1)}{(N-\theta_A)a_1 +\theta_Aa_2}\\ b^{\#} &=\ b_1\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{(\theta_A a_2 + (N-\theta_A)a_1)^2}\ ] - \frac{(b_1-b_2)(Na_1)^2(1-\theta_B)}{(\theta_A a_2 + (N-\theta_A)a_1)^2}. \end{align*} achieves the equality in \eqref{bs10} with $M_B = M_{AB}= diag\ (\frac{1}{N},\frac{1}{N},..,\frac{1}{N})$. \qed \paragraph{Construction of Sequential Laminates with ${\omega}_{A^{\epsilon}}\subseteq {\omega}_{B^{\epsilon}} $ :} First we are going to write down relations characterizing the $(p,p)$-sequential laminates $B^{\#}_{p,p}$ whenever $\omega_{A^{\epsilon}_p},\omega_{B^{\epsilon}_p}$ correspond to the $p$-sequential laminate microstructures with $\omega_{A^{\epsilon}_p}\subseteq \omega_{B^{\epsilon}_p}$ in the same directions $\{e_i\}_{1\leq i\leq p}$. Following the arguments presented in Section \ref{os9}, by taking $A^{*}=A^{*}_p$ with matrix $a_1 I$ and core $a_2 I$ defined in \eqref{OP2} and considering the inequality \eqref{bs10}, it is natural to define $B^{\#}_{p,p}$ via the relation (namely equality of \eqref{bs10}) \begin{align*} &(B^{\#}_{p,p}- b_1 I)(\overline{A}-a_1I)^2(A^{*}_p - a_1 I)^{-2} + (\overline{B}-b_1 I)- 2 (\overline{B}-b_1I)(\overline{A}-a_1I)(A^{*}_p- a_1 I)^{-1}\notag\\ &=\{\frac{b_1(a_2 - a_1)^2}{a_1^2}\theta_A(1-\theta_A)- \frac{2(b_2-b_1)(a_2-a_1)}{a_1}(1-\theta_B)\theta_A\}(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}) \mbox{ with }\sum_{i=1}^{p} m_i =1. \end{align*} or using \eqref{OP2}, we may write \begin{equation}\label{sie} (B^{\#}_{p,p}- b_1 I)(\overline{A}-a_1I)^2(A^{*}_p - a_1 I)^{-2} = (\overline{B}-b_1 I)+\frac{b_1(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A)(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}). 
\end{equation} This can be also written as, (in an inverse form): \begin{align}\label{hsa} &\{\frac{(\overline{B}-b_1I)}{(\overline{A}-a_1I)}(A^{*}_p-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*}_p)\}(B^{\#}_{p,p}-b_1I)^{-1}\{\frac{(\overline{B}-b_1I)}{(\overline{A}-a_1I)}(A^{*}_p-a_1I)+\frac{b_1}{a_1}(\overline{A}-A^{*}_p)\}\notag\\ &=\ (\overline{B}-b_1 I)+\frac{b_1(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A)(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}). \end{align} We need to justify the above definition because it's not a priori clear that $B^{\#}_{p,p}$ defined in the above manner is indeed the relative limit of $B^\epsilon_p$. To this end, we begin with considering the sequence $A^\epsilon_p$ containing $p$-sequential laminate microstructures with matrix $a_1I$ and core $a_2I$ such that $A^\epsilon_p\xrightarrow{H}A^{*}_p\in \partial\mathcal{G}_{\theta_A}^L$. Then by considering the oscillatory test sequence $\nabla u^\epsilon$ satisfying \eqref{zz11} i.e. \begin{align*} &(i)\ \nabla u^\epsilon \rightharpoonup \nabla u= -(\overline{A}-a_1I)(A^{*}_p- a_1 I)^{-1}\eta \mbox{ in }L^2({|\!\!\!O}mega) \mbox{ weak }\\ \mbox{ and }\ \ &(ii)\ div(A^\epsilon_p\nabla u^\epsilon) \mbox{ converges }H^{-1}({|\!\!\!O}mega) \mbox{ strong}; \end{align*} and by using \eqref{FL5}, \eqref{UD10} we have \begin{align}\label{pol17} (A^\epsilon_p-&a_1I)(\nabla u^\epsilon +\eta)\cdot(\nabla u^\epsilon +\eta)\notag\\ &\rightharpoonup (A^{*}_p-a_1I)\nabla u\cdot\nabla u +2(\overline{A}-a_1I)\nabla u\cdot\eta +(\overline{A}-a_1I)\eta\cdot\eta - X_{min}= 0 \ \ \mbox{in }\mathcal{D}^\prime({|\!\!\!O}mega) \end{align} where we recall, \begin{equation*} X_{min}= - \frac{\theta_A(1-\theta_A)(a_2-a_1)^2}{a_1}(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i})\ \mbox{ with }\sum_{i=1}^{p} m_i =1. 
\end{equation*} Next, by using $(A^\epsilon_p -a_1I) = (a_2-a_1)(1-\chi_{\omega_{A^{\epsilon}_p}})I$, it follows from \eqref{pol17} that \begin{equation}\label{pol11} \underset{\epsilon\rightarrow 0}{lim}\ |(1-\chi_{\omega_{A^{\epsilon}_p}})(\nabla u^\epsilon +\eta)|^2 =0. \end{equation} Now, under the hypothesis $\omega_{A^{\epsilon}_p}\subseteq\omega_{B^{\epsilon}_p}$, from \eqref{pol11} it also follows that, \begin{equation*} 0\ \leq\ \underset{\epsilon\rightarrow 0}{lim}\ |(1-\chi_{\omega_{B^{\epsilon}_p}})(\nabla u^\epsilon +\eta)|^2\ \leq\ \underset{\epsilon\rightarrow 0}{lim}\ |(1-\chi_{\omega_{A^{\epsilon}_p}})(\nabla u^\epsilon +\eta)|^2. \end{equation*} Therefore, \begin{equation}\label{pol7} \underset{\epsilon\rightarrow 0}{lim}\ |(1-\chi_{\omega_{B^{\epsilon}_p}})(\nabla u^\epsilon +\eta)|^2 = 0. \end{equation} Consequently, by using $(B^\epsilon_p -b_1I) =(b_2-b_1)(1-\chi_{\omega_{B^\epsilon_p}})I$, from \eqref{pol7} we have \begin{equation}\label{pol18} (B^\epsilon_p-b_1I)(\nabla u^\epsilon +\eta)\cdot(\nabla u^\epsilon +\eta)\rightharpoonup 0 \mbox{ in }\mathcal{D}^\prime(\Omega). \end{equation} On the other hand, computations made in \textbf{Step 1c} show directly that for microstructures $(A^\epsilon_p$, $ B^\epsilon_p)$, we have $Y=Y_{min}$ with $M_A =M_{AB} = (\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i})$ with $\sum_{i=1}^{p} m_i =1$ such that, \begin{equation*} Y_{min} =\{\frac{b_1}{a_1^2}(a_2 - a_1)^2\theta_A(1-\theta_A) - \frac{2}{a_1}(b_2-b_1)(a_2-a_1)\theta_A(1-\theta_B)\} (\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}) \mbox{ with }\sum_{i=1}^{p} m_i =1. \end{equation*} Consequently, following \textbf{Step 1b}, we have (cf.
\eqref{lb9}) \begin{align}\label{pol19} (B^\epsilon_p&-b_1I)(\nabla u^\epsilon +\eta)\cdot(\nabla u^\epsilon +\eta)\notag\\ &\rightharpoonup (B^{\#}_{p,p}-b_1I)\nabla u\cdot\nabla u +(\overline{B}-b_1I)\eta\cdot\eta + 2(\overline{B}-b_1I)\nabla u\cdot\eta - Y_{min}\ \mbox{ in }\ \mathcal{D}^\prime({|\!\!\!O}mega). \end{align} Next by comparing \eqref{pol18} and \eqref{pol19}, and finally using $\nabla u = -(\overline{A}-a_1I)(A^{*}_p- a_1 I)^{-1}\eta$ we obtain that $B^{\#}_{p,p}$ satisfies the relation \eqref{sie}.\\ With $p=N$, the above defined $(N,N)$- sequential laminate $B^{\#}_{N,N}$ give the saturation / optimality of the lower trace bound \eqref{tt}. This follows from the very defining relation of $B^{\#}_{N,N}$ (cf.\eqref{sie}). \end{proof} \end{example} \begin{remark} The difference between the present case with the one treated in Section \ref{os9} lies in the fact that we have now two phases $(b_1,b_2)$ with local volume $(\theta_B,1-\theta_B)$. In Section \ref{os9}, we had a single phase and so $\theta_B=1$ in \eqref{hsa}. The effect of this difference is seen in the expression \eqref{FG4} and \eqref{hsa}, where the $p$-sequential laminate structure $A^{*}_p$ remains the same in both cases. \qed\end{remark} \begin{remark} In Section \ref{qw4}, we prove that the above lower bound \eqref{eig} is optimal in the sub-domain $\theta_A \leq \theta_B$. In general, it need not be optimal if $\theta_B < \theta_A$. \qed \end{remark} \noindent\textbf{Trace Bound L2 : $\theta_B(x) < \theta_A(x)$ almost everywhere $x$ : } In this case we will be finding lower bounds on $(A^{*},B^{\#})$ over arbitrary $\omega_{B^\epsilon}$ and those optimal microstructures $\omega_{A^\epsilon}$ for $A^{*}$ which provides equality in the optimal upper bound \eqref{FL11}. This lower bound is also optimal under the same condition $\theta_B(x) < \theta_A(x)$ almost everywhere $x\in{|\!\!\!O}mega$. 
\paragraph{Step 1d : ($H$-measure term)} Instead of the translated inequality \eqref{FL8}, we consider the following one for the heat flux, which coincides with the known dual inequality for $A^\epsilon$ in the self-interacting case. As in the case of L1, our plan is to pass to the limit in this inequality using $H$-limit, relative limit and $H$-measure. The resulting inequality \eqref{os12} will involve a new $H$-measure term. Let us now provide details. Introducing the constant vector $\eta$ in $\mathbb{R}^N$ and suitable constant $c$ to be chosen later and sequence $\sigma^\epsilon$ such that \begin{equation}\label{tc} ((A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1} - c I)(\sigma^{\epsilon} + \eta)\cdot(\sigma^{\epsilon} + \eta)\ \geq\ 0 \quad\mbox{a.e. in }{|\!\!\!O}mega.\end{equation} where, \begin{equation*}\sigma^{\epsilon} = A^{\epsilon}\nabla u^{\epsilon} \rightharpoonup \sigma\ (\ = A^{*}\nabla u )\mbox{ in }L^2({|\!\!\!O}mega)\mbox{ and }-div\ (\sigma^{\epsilon}) \in H^{-1}({|\!\!\!O}mega)\mbox{ convergent. }\end{equation*} Now by expanding the above inequality we get, \begin{equation}\label{os11} (A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot\sigma^{\epsilon} + ((A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1} -c I)\eta\cdot \eta -2c\sigma^{\epsilon}\cdot \eta\ \geq\ c\ \sigma^{\epsilon}\cdot\sigma^{\epsilon} - 2(A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot\eta. 
\end{equation} The choice of the constant $c$ will depend upon $a_1,a_2,b_1,b_2$ and we will fix it later.\\ \\ It is easy to pass to the limit in the first term of the left hand side of \eqref{os11}, as \begin{equation*} (A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot\sigma^{\epsilon}= B^\epsilon\nabla u^\epsilon\cdot\nabla u^\epsilon \rightharpoonup B^{\#}\nabla u\cdot \nabla u =\ {A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot \sigma\ \mbox{ in }\mathcal{D}^\prime({|\!\!\!O}mega).\end{equation*} Regarding the second term, we write \begin{align}\label{zz5} (A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1} =& \{\frac{b_1}{a_1^2}\chi_{{\omega}_{A^{\epsilon}}}(x)B + \frac{b_1}{a_2^2}(\chi_{{\omega}_{B^{\epsilon}}}(x) - \chi_{{\omega}_{A^{\epsilon}}}(x)B) + \frac{b_2}{a_1^2}(\chi_{{\omega}_{A^{\epsilon}}}(x) - \chi_{{\omega}_{A^{\epsilon}}}(x)B)\notag\\ &\qquad+ \frac{b_2}{a_2^2}(1-\chi_{{\omega}_{A^{\epsilon}}}(x) - \chi_{{\omega}_{B^{\epsilon}}}(x) + \chi_{{\omega}_{A^{\epsilon}}}(x)B)\}I \notag\\ &\rightharpoonup \widetilde{L} \mbox{ (say),\ in }L^\infty({|\!\!\!O}mega)\ \mbox{weak*.} \end{align} Using the fact, that $\theta_{AB}\leq min\{\theta_A, \theta_B\}=\theta_B$ (in this case), where $\theta_{AB}$ is the $L^\infty({|\!\!\!O}mega)$ weak* limit of the sequence $\chi_{{\omega}_{A^{\epsilon}}}(x)B$, we find \begin{equation}\label{bs4} \widetilde{L}\ \geq\ \{\frac{b_2}{a_2^2} + (\frac{b_2}{a_1^2}-\frac{b_2}{a_2^2})\theta_A(x) - \frac{(b_2-b_1)}{a_1^2}\theta_B(x)\}I =: L =l(x)\hspace{1.4pt} I \mbox{ (say) }. \end{equation} It shows that, the $L^\infty({|\!\!\!O}mega)$ weak* limit of $\{(A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}\}\geq L$ along any convergent subsequence and it is equal to L for the choice $\omega_{B^\epsilon}\subset \omega_{A^\epsilon}$ (in which case $\theta_{AB}=\theta_B$). 
\\ \\ The optimal choice of the translated amount $c$ is \begin{equation}\label{os15} c = min\ \{ \frac{b_1}{a_1^2}, \frac{b_2}{a_2^2} \},\end{equation} because then we will have $(\widetilde{L}-cI)\geq 0$ as shown by the following inequalities : \begin{align*} l=\ \frac{b_2}{a_2^2} + (\frac{b_2}{a_1^2}-\frac{b_2}{a_2^2})\theta_A - \frac{(b_2-b_1)}{a_1^2}\theta_B &\geq\ \frac{b_2}{a_2^2} + (\frac{b_1}{a_1^2}-\frac{b_2}{a_2^2})\theta_B\ \geq\ \frac{b_2}{a_2^2}, \mbox{ whenever } \frac{b_1}{a_1^2}\geq\frac{b_2}{a_2^2}\\ &\geq\ \frac{b_1}{a_1^2} + (\frac{b_2}{a_2^2}-\frac{b_1}{a_1^2})(1-\theta_B) \geq \frac{b_1}{a_1^2}, \mbox{ whenever } \frac{b_2}{a_2^2}\geq\frac{b_1}{a_1^2}. \end{align*} Thus, it is straight forward to pass to the limit in left hand side of \eqref{os11} to get \begin{equation*}{A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot \sigma + (\widetilde{L} - c I)\eta\cdot \eta -2c\sigma\cdot \eta\geq\ \mbox{ limit of R.H.S. of }\eqref{os11} \end{equation*} where $\widetilde{L}$ and $c$ are defined above. \\ \\ We use the notion of $H$-measure in order to pass to the limit in the right hand side of \eqref{os11}. Introducing a coupled variable $W^{\prime\prime}_{\epsilon}=(\sigma^{\epsilon}, (A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}\eta)$, we write the right hand side of \eqref{os11} in a quadratic form $q_2(W^{\prime\prime}_{\epsilon})\cdot W^{\prime\prime}_{\epsilon} $. Here $q_2$ is a linear map \begin{equation*} q_2:\mathbb{R}^N \times \mathbb{R}^N \mapsto \mathbb{R}^N \times \mathbb{R}^N \mbox{ defined by } q_2(\sigma^\epsilon, (A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}\eta)= (c\sigma^\epsilon-(A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}\eta, -\sigma^\epsilon). 
\end{equation*} Introducing $W^{\prime\prime}_0 =(\sigma, \widetilde{L}\eta )$, we have \begin{equation*}q_2(W^{\prime\prime}_{\epsilon})\cdot W^{\prime\prime}_{\epsilon}=\ 2q_2(W^{\prime\prime}_{\epsilon})\cdot W^{\prime\prime}_0 -q_2(W^{\prime\prime}_0)\cdot W^{\prime\prime}_0 + q_2(W^{\prime\prime}_{\epsilon}- W^{\prime\prime}_0)\cdot(W^{\prime\prime}_{\epsilon}- W^{\prime\prime}_0).\end{equation*} Denoting by $\varPi_{W^{\prime\prime}}$ $H$-measure of $(W^{\prime\prime}_{\epsilon}- W^{\prime\prime}_0)$, we pass to the limit in \eqref{os11} by virtue of Theorem \ref{dc10} : \begin{equation}\label{os12} {A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot\sigma + (\widetilde{L} - c I)\eta\cdot \eta - 2c\ \sigma\cdot \eta\ \geq\ c\ \sigma\cdot\sigma - 2\widetilde{L}\sigma\cdot \eta + Y^\prime \end{equation} where $Y^\prime$ is the $H$-measure correction term defined by \begin{equation*}Y^\prime =\ \underset{\epsilon \rightarrow 0}{\textrm{lim}}\ q_2(W^{\prime\prime}_{\epsilon}- W^{\prime\prime}_0)\cdot(W^{\prime\prime}_{\epsilon}- W^{\prime\prime}_0) = \int_{\mathbb{S}^{N-1}} trace\ (q_2\varPi_{W^{\prime\prime}} (x,d\xi))\end{equation*} or equivalently, denoting the average w.r.t. directions $\xi\in\mathbb{S}^{N-1}$ by double angular bracket, we write with $W^{\prime\prime} = (\varSigma, A^{-1}BA^{-1}\eta) \in \mathbb{R}^N \times \mathbb{R}^N$ \begin{equation}\label{OP10} Y^\prime =\ \langle\langle \varPi_{W^{\prime\prime}}, Q_2(\varSigma,A^{-1}BA^{-1}\eta) \rangle\rangle\ \ \mbox{with }\ Q_2(\varSigma,A^{-1}BA^{-1}\eta)=\ c|\varSigma|^2 - 2A^{-1}BA^{-1}\varSigma\cdot \eta. \end{equation} Now our objective is to find the lower bound on $Y^\prime$ in order to get the lower bound on $B^{\#}$ or equivalently on ${A^{*}}^{-1}B^{\#}{A^{*}}^{-1}$ whenever $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$. 
\paragraph{Step 1e :} \paragraph{Special case :} $A^{\epsilon}= tB^{\epsilon}$ for some scalar $t>0$.\\ \\ In this case $c= (ta_2)^{-1}$ and so \eqref{os11} simply becomes \begin{equation}\label{FG8} (A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot\sigma^{\epsilon} + ( (A^{\epsilon})^{-1} - a_2^{-1}I )\eta\cdot \eta - 2a_2^{-1}\sigma^{\epsilon}\cdot \eta \geq\ a_2^{-1}\hspace{1.5pt}\sigma^{\epsilon} \cdot \sigma^{\epsilon} - 2(A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot \eta. \end{equation} Since $A^{*}=tB^{\#}$, the task of proving L2 simply reduces to finding the optimal lower bound on ${A^{*}}^{-1}$, which is known from classical results : \begin{equation}\label{os20} tr\ ({A^{*}}^{-1}-a_2^{-1} I)^{-1} \leq\ tr\ (\underline{A} ^{-1} - a_2^{-1}I)^{-1} + \frac{(1-\theta_A)a_2}{\theta_A}(N-1). \end{equation} Before considering the general case, it is appropriate to present some elements of the proof of \eqref{os20}.\\ \\ We begin with \eqref{FG8}. We pass to the limit in the left hand side of \eqref{FG8} by using homogenization theory and on the right hand side by using $H$-measures.
Let us introduce the linear map $q_3$ whose associated quadratic form is as follows : \begin{equation*} q_3(V^\prime)\cdot V^\prime =\ a_2^{-1}\varSigma\cdot \varSigma - 2 A^{-1}\varSigma\cdot\eta,\ \mbox{ with }V^\prime =(\varSigma, A^{-1}\eta).\end{equation*} The limit of \eqref{FG8} can be written as \begin{equation}\label{FG11} {A^{*}}^{-1}\sigma\cdot\sigma + ( \underline{A}^{-1} - a_2^{-1} )\eta\cdot \eta - 2a_2^{-1}\sigma\cdot \eta\ \geq\ a_2^{-1}\sigma\cdot\sigma - 2\underline{A}^{-1}\sigma\cdot \eta + X^\prime \end{equation} where $\underline{A}^{-1}$ is the $L^{\infty}$ weak * limit of $(A^{\epsilon})^{-1}$ and $X^\prime$ is a new $H$-measure correction term defined by \begin{equation*} X^\prime =\ \int_{\mathbb{S}^{N-1}} trace\ (q_3\varPi_{V^\prime}(x,d\xi)) =\ \langle\langle \varPi_{V^\prime} , Q_3(V^{\prime}) \rangle\rangle,\ \ \mbox{where } Q_3(V^{\prime})= q_3(V^{\prime})\cdot V^{\prime} \end{equation*} and $\varPi_{V^\prime}$ is the $H$-measure of $V^\prime_\epsilon -V^\prime = (\sigma^{\epsilon} - \sigma ,\ ((A^{\epsilon})^{-1} - \underline{A} ^{-1})\eta ).$\\ \\ Since $div\ \sigma^{\epsilon}$ is convergent in $H^{-1}({|\!\!\!O}mega)$, we consider the oscillation variety which is as follows : \begin{equation*}\vartheta^\prime =\ \{(\xi, \varSigma, A^{-1}\eta) \in \mathbb{S}^{N-1} \times \mathbb{R}^{N}\times\mathbb{R}^{N};\ \sum_{i=1}^N \xi_i \varSigma_i = 0\} \end{equation*} with its projection (wave cone) \begin{equation*}\Lambda^\prime = \{( \varSigma, A^{-1}\eta) \in \mathbb{R}^{N}\times\mathbb{R}^{N};\ \exists\ \xi\in\mathbb{S}^{N-1}\mbox{ such that } (\xi, \varSigma,A) \in \vartheta^\prime\}.\end{equation*} We define $\Lambda^\prime_{\xi}\subset \Lambda^\prime$, $\xi\in\mathbb{R}^N\smallsetminus\{0\}$ as \begin{equation*}\Lambda^\prime_\xi = \{\ ( \varSigma,A^{-1}\eta) \in \mathbb{R}^{N}\times\mathbb{R}^{N};\ \sum_{i=1}^N \xi_i\varSigma_i=0 \};\ \mbox{So, }\ \underset{\xi\neq 0}{\cup}{\Lambda^\prime_\xi}=\Lambda^\prime.\end{equation*} Based on 
this, one defines a new linear map $q_{3,\xi}^{\prime}$, whose associated quadratic form is \begin{equation*}q_{3,\xi}^\prime(\varSigma,A^{-1}\eta)\cdot(\varSigma, A^{-1}\eta ) =\ q_3(\varSigma,A^{-1}\eta)\cdot(\varSigma,A^{-1}\eta) - \underset{\varSigma \in \Lambda^\prime_{\xi} }{min}\ q_3(\varSigma,A^{-1}\eta)\cdot(\varSigma,A^{-1}\eta).\end{equation*} As $q_{3,\xi}^\prime$ is non-negative on $\Lambda_\xi^\prime$, so by applying Theorem \ref{dc10}, we get \begin{equation*}trace\ (q_{3,\xi}^\prime\varPi_{V^\prime}) \geq 0 .\end{equation*} Finally introducing $\varPi^\prime_A, $ $H$-measure of $((A^{\epsilon})^{-1} - (\underline{A})^{-1})\eta$, we get \begin{equation}\label{FG9} X^\prime\ \geq\ \langle\langle \varPi_{V^\prime},\underset{\varSigma \in \Lambda^\prime_\xi}{min}\ q_3(\varSigma,A^{-1}\eta)\cdot(\varSigma,A^{-1}\eta)\rangle\rangle =\ \langle\langle \varPi^\prime_A, \underset{\varSigma \in \Lambda^\prime_\xi}{min}\ q_3(\varSigma,A^{-1}\eta)\cdot(\varSigma,A^{-1}\eta)\rangle\rangle. \end{equation} Now by introducing Lagrange multiplier corresponding to the constraint in $\Lambda^\prime$, it is straight forward to compute \begin{equation}\label{FG10} \underset{\varSigma \in \Lambda^\prime_\xi}{min}\ q_3(\varSigma,A^{-1}\eta)\cdot(\varSigma,A^{-1}\eta) =\ -a_2(I-\frac{\xi\otimes \xi}{|\xi|^2}) A^{-1}\eta\cdot A^{-1}\eta \end{equation} with the minimizer $\varSigma_{min}$ in $\Lambda^\prime$ : \begin{equation}\label{eih}\varSigma_{min} =\ a_2(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta.\end{equation} On the other hand, since $((A^{\epsilon})^{-1} - \underline{A}^{-1})\eta = (a_1 ^{-1} - a_2 ^{-1} )(\chi_{{\omega}_{A^{\epsilon}}}(x) - \theta_A)\eta $ , the $H$-measure $\varPi^\prime_A$ becomes \begin{equation}\label{os2}(\varPi^\prime_A)_{ij} = \frac{(a_2 - a_1)^{2}}{(a_1 a_2)^{2}}(\nu_A)\eta_i\eta_j \ \ \forall i,j =1,..,N\end{equation} where $\nu_A$ is $H$-measure of $(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)$ satisfying \eqref{hsz}. 
Then with the help of the matrix $M_A$ defined in \eqref{os19}, we obtain from \eqref{FG9} together with \eqref{FG10} and \eqref{os2} \begin{equation}\label{os5} X^\prime \geq\ -\langle\langle\varPi^\prime_A, a_2(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta\cdot A^{-1}\eta \rangle\rangle = -\theta_A(1-\theta_A)a_2\frac{(a_2 - a_1 )^{2}}{(a_1a_2)^{2}}(I - M_A)\eta\cdot \eta :=X^\prime_{min}\end{equation} Thus, \begin{equation}\begin{aligned}\label{ub2} a_2^{-1}\sigma^\epsilon\cdot\sigma^\epsilon - 2(A^\epsilon)^{-1}\sigma^\epsilon\cdot \eta \rightharpoonup& \ a_2^{-1}\sigma\cdot\sigma - 2\underline{A}^{-1}\sigma\cdot \eta +X^\prime\ \mbox{ weakly in }\mathcal{D}^\prime({|\!\!\!O}mega)\\ &\geq\ a_2^{-1}\sigma\cdot\sigma - 2\underline{A}^{-1}\sigma\cdot \eta -\theta_A(1-\theta_A)a_2\frac{(a_2 - a_1 )^{2}}{(a_1a_2)^{2}}(I - M_A)\eta\cdot \eta. \end{aligned}\end{equation} Using the above lower bound \eqref{ub2} in \eqref{FG11} one obtains \begin{equation*} {A^{*}}^{-1}\sigma\cdot\sigma + ( \underline{A}^{-1} - a_2^{-1} )\eta\cdot \eta - 2a_2^{-1}\sigma\cdot \eta\ \geq\ a_2^{-1}\sigma\cdot\sigma - 2\underline{A}^{-1}\sigma\cdot \eta -\theta_A(1-\theta_A)a_2\frac{(a_2 - a_1 )^{2}}{(a_1a_2)^{2}}(I - M_A)\eta\cdot \eta.\end{equation*} And then minimizing with respect to $\sigma$ with the minimizer $\sigma = -(\underline{A} ^{-1} - a_2^{-1}I)({A^{*}} ^{-1}- a_2^{-1}I)^{-1}\eta$ we get \begin{equation}\label{sig} ({A^{*}} ^{-1}- a_2^{-1}I)^{-1}\eta\cdot \eta \leq\ (\underline{A} ^{-1} - a_2^{-1}I)^{-1}\eta\cdot \eta +\frac{(1-\theta_A)}{\theta_A}a_2(I- M_A)\eta\cdot \eta. \end{equation} Finally, by taking the trace, \eqref{sig} yields the optimal lower bound \eqref{os20} on ${A^{*}}^{-1}$. \qed \paragraph{General Case :} Having seen the special case, let us now treat the general case of $(A^\epsilon,B^\epsilon)$. We go back to the equation \eqref{OP10}, in which we need to estimate the H-measure term $Y^{\prime}$ from below optimally. 
As for L1, we will be needing compactness property for this purpose. Following result which can be proved in a fashion similar to Theorem \ref{ub8} states this property precisely : \begin{theorem}[Compactness]\label{ad20} Let us consider the following constrained oscillatory system : \begin{align} &V^\prime_\epsilon=\ (\sigma^\epsilon,(A^\epsilon)^{-1}\eta) \rightharpoonup (\sigma,\underline{A}^{-1}\eta)=V^\prime_0\ \mbox{ in }L^2({|\!\!\!O}mega)^{2N}\mbox{ weak }, \eta\in\mathbb{R}^N\smallsetminus\{0\}, \label{ub9}\\ &\Delta( \sigma^\epsilon - a_2(A^\epsilon)^{-1}\eta) - a_2\nabla(div((A^\epsilon)^{-1}\eta) ) \in H^{-2}_{loc}({|\!\!\!O}mega)\mbox{ convergent. }\label{ub10} \end{align} Then \begin{equation}\label{bs18} a_2^{-1}\sigma^{\epsilon}\cdot\sigma^{\epsilon}- 2(A^{\epsilon})^{-1}\sigma^{\epsilon}\cdot\eta \rightharpoonup a_2^{-1} \sigma\cdot\sigma- 2\underline{A}^{-1}\sigma\cdot \eta + X^{\prime}_{min} \mbox{ in }\ \mathcal{D}^{\prime}({|\!\!\!O}mega), \end{equation} where $ X^\prime_{min}$ is defined in \eqref{os5}. \\ \\ Conversely, if the sequence with \eqref{ub9} satisfies \eqref{bs18} then \eqref{ub10} must hold. \qed\end{theorem} \paragraph{Step 1f : (Lower bound)} Taking this into account, in the translated inequality \eqref{os11} we restrict $\sigma^\epsilon$ to satisfy the following oscillatory system to work with \begin{equation}\label{os3} \begin{cases} W_\epsilon^{\prime\prime\prime} = (\sigma^\epsilon, (A^\epsilon)^{-1}B^\epsilon (A^\epsilon)^{-1}\eta, (A^\epsilon)^{-1}\eta ) \rightharpoonup \ W_0^{\prime\prime\prime} = ( \sigma , \widetilde{L}\eta, \underline{A}^{-1}\eta )\ \mbox{ weakly in }L^2({|\!\!\!O}mega)^{3N},\\ \Delta( \sigma^\epsilon - a_2(A^\epsilon)^{-1}\eta) - a_2\nabla(div((A^\epsilon)^{-1}\eta) ) \in H^{-2}_{loc}({|\!\!\!O}mega)\mbox{ convergent. 
} \end{cases} \end{equation} Proceeding in the same way as we did in Step 1c, here we compute $Y^\prime$ (cf.\eqref{OP10}) with $\varSigma=\varSigma_{min}$ given in \eqref{eih}, as follows : \begin{align} Y^\prime &= \langle\langle \varPi_{W^{\prime\prime\prime}}, Q_2( a_2(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta,A^{-1}BA^{-1}\eta)\rangle\rangle\notag\\ &= \langle\langle \varPi_{W^{\prime\prime\prime}}, c a_2^2(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta\cdot A^{-1}\eta -2a_2A^{-1}BA^{-1}(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta\cdot\eta \rangle\rangle\notag\\ &= \langle\langle \varPi_{W^{\prime\prime\prime}}, c (I-\frac{\xi\otimes \xi}{|\xi|^2})(\frac{1}{c}A^{-1}BA^{-1}\eta-a_2A^{-1}\eta)\cdot(\frac{1}{c}A^{-1}BA^{-1}\eta-a_2A^{-1}\eta)\rangle\rangle \notag\\ &\qquad-\frac{1}{c}\langle\langle \varPi_{W^{\prime\prime\prime}},(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}BA^{-1}\eta\cdot A^{-1}BA^{-1}\eta \rangle\rangle \notag \\ &= \langle\langle \varPi^\prime_{AB}, c (I-\frac{\xi\otimes \xi}{|\xi|^2})(\frac{1}{c}A^{-1}BA^{-1}-a_2A^{-1})\eta\cdot(\frac{1}{c}A^{-1}BA^{-1}-a_2A^{-1})\eta\rangle\rangle \notag\\ &\qquad -\frac{1}{c}\langle\langle \varPi^{\prime\prime}_{AB},(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}BA^{-1}\eta\cdot A^{-1}BA^{-1}\eta \rangle\rangle \notag \\ &=: R^\prime_1 + R^\prime_2 \mbox{ (say)}\label{os6} \end{align} (where in the second line we have used $|(I-\frac{\xi\otimes \xi}{|\xi|^2})v|^2=\langle(I-\frac{\xi\otimes \xi}{|\xi|^2})v,v\rangle, v\in\mathbb{R}^N$).\\ \noindent $\varPi^\prime_{AB}\in \mathcal{M}(\Omega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N}) $ is the $H$-measure of the sequence $\{\frac{1}{c}((A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}-\widetilde{L})-a_2((A^\epsilon)^{-1}-(\underline{A})^{-1})\}\eta$, and $\varPi^{\prime\prime}_{AB}\in \mathcal{M}(\Omega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N})$ is the $H$-measure of the sequence $\{(A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}-\widetilde{L}\}\eta$; where
$\widetilde{L}$ is the $L^\infty({|\!\!\!O}mega)$ weak* limit of $(A^\epsilon)^{-1}B^\epsilon (A^\epsilon)^{-1}$ (cf.\eqref{zz5}).\\ \\ Now both of these above sequences are scalar, as \begin{align*} ((A^\epsilon)^{-1}B^\epsilon (A^\epsilon)^{-1} -\widetilde{L}) &=: f^\epsilon I \mbox{ (say);} \ \quad (f^\epsilon \rightharpoonup 0 \mbox{ in }L^\infty({|\!\!\!O}mega)\mbox{ weak*})\\ \mbox{ and }\ ((A^\epsilon)^{-1}-(\underline{A})^{-1}) &= (\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)I\ \ ( \rightharpoonup 0 \mbox{ in }L^\infty({|\!\!\!O}mega)\mbox{ weak*}). \end{align*} Then $H$-measure $\varPi^\prime_{AB}$ reduces to \begin{equation*} (\varPi^\prime_{AB})_{ij} = (\nu^\prime_{AB})\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where $\nu^\prime_{AB}$ is $H$-measure of the scalar sequence $\{\frac{1}{c}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}$ with \begin{align}\label{zz6} &\nu^\prime_{AB}(x,\xi)\geq 0\ \mbox{ and }\notag\\ &\int_{\mathbb{S}^{N-1}}\nu^\prime_{AB}(x,d\xi) = L^\infty({|\!\!\!O}mega)\mbox{ weak* limit of }\{\frac{1}{c}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}^2 := L^\prime_{AB} \mbox{ (say)}. \end{align} Thus \begin{align}\label{dc15} R^\prime_1 &= \langle\langle \varPi^\prime_{AB}, c (I-\frac{\xi\otimes \xi}{|\xi|^2})(a_2A^{-1}-\frac{1}{c}A^{-1}BA^{-1})\eta\cdot(a_2A^{-1}-\frac{1}{c}A^{-1}BA^{-1})\eta\rangle\rangle\notag\\ &= cL^\prime_{AB}(I-M^\prime_{AB})\eta\cdot\eta \end{align} where $M^\prime_{AB}$ is a non-negative matrix with unit trace defined by \begin{equation}\label{zz9} M^\prime_{AB} = \frac{1}{L^\prime_{AB}}\int_{\mathbb{S}^{N-1}} \xi \otimes \xi\ \nu^\prime_{AB} (d\xi). \end{equation} We compute $R_2^\prime$ next. 
We write \begin{equation*} (\varPi^{\prime\prime}_{AB})_{ij} = (\nu^{\prime\prime}_{AB})\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where $\nu^{\prime\prime}_{AB}$ is the $H$-measure of the scalar sequence $f^\epsilon$ satisfying \begin{equation}\label{zz7} \nu^{\prime\prime}_{AB}(x,\xi)\geq 0 \mbox{ and }\int_{\mathbb{S}^{N-1}}\nu^{\prime\prime}_{AB}(x,d\xi) = L^\infty(\Omega)\mbox{ weak* limit of }(f^\epsilon)^2 =:L^{\prime\prime}_{AB} \mbox{ (say)}. \end{equation} Thus \begin{align}\label{dc20} R_2^\prime &= -\frac{1}{c}\langle\langle \varPi^{\prime\prime}_{AB},(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}BA^{-1}\eta\cdot A^{-1}BA^{-1}\eta \rangle\rangle\notag\\ &= -\frac{1}{c}L^{\prime\prime}_{AB}(I-M^{\prime\prime}_{AB})\eta\cdot\eta \end{align} where $M^{\prime\prime}_{AB}$ is the non-negative matrix with unit trace defined as \begin{equation}\label{zz8} M^{\prime\prime}_{AB} = \frac{1}{L^{\prime\prime}_{AB}}\int_{\mathbb{S}^{N-1}} \xi\otimes\xi\ \nu^{\prime\prime}_{AB}(x,d\xi). \end{equation} Therefore from \eqref{os6} and \eqref{dc15},\eqref{dc20} we have \begin{equation}\label{FG12} Y^\prime = cL^\prime_{AB}(I-M^\prime_{AB})\eta\cdot\eta- \frac{1}{c}L^{\prime\prime}_{AB}(I-M^{\prime\prime}_{AB})\eta\cdot\eta \end{equation} with \begin{equation}\label{zz2} trace\ Y^\prime = \{cL^\prime_{AB} -\frac{1}{c}L^{\prime\prime}_{AB}\}(N-1). \end{equation} Recall that \begin{align}\label{zz1} \{cL^\prime_{AB} -\frac{1}{c}L^{\prime\prime}_{AB}\}=&\ c\{\mbox{$L^\infty(\Omega)$ weak* limit of }\{\frac{1}{c}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}^2\}\notag\\ &\qquad-\frac{1}{c}\{\mbox{$L^\infty(\Omega)$ weak* limit of }(f^\epsilon)^2\}. \end{align} We want to find a lower bound for the above quantity \eqref{zz1}. As usual, the above quantity involves the parameter $\theta_{AB}(x)$, the $L^{\infty}(\Omega)$ weak* limit of $\chi_{{\omega}_{A^{\epsilon}}}(x)\chi_{{\omega}_{B^{\epsilon}}}(x)$. 
Now keeping the estimate \eqref{bs4} in mind, we minimize $L^\prime_{AB}$ and according to that the other quantity $L^{\prime\prime}_{AB}$ also gets determined. For that, it is enough to make the choice $\omega_{B^\epsilon}\subset \omega_{A^\epsilon}$ which is possible in this present case ($\theta_B<\theta_A$), and we bound $trace\ Y^\prime$ from below as following : \begin{align}\label{zz3} trace\ Y^\prime\ \geq\ & L^\infty({|\!\!\!O}mega) \mbox{ weak* limit of } \{ca_2^2(\frac{1}{a_1}-\frac{1}{a_2})^2(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)^2-2a_2(\frac{1}{a_1}-\frac{1}{a_2})(\frac{b_1}{a_1^2}-\frac{b_2}{a_1^2})\notag\\ & (\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)-2a_2(\frac{1}{a_1}-\frac{1}{a_2})(\frac{b_2}{a_1^2}-\frac{b_2}{a_2^2})(\chi_{{\omega}_{A^{\epsilon}}}(x) - \theta_A)^2\}(N-1)\notag\\ &=\{ca_2^2(\frac{1}{a_1}-\frac{1}{a_2})^2\theta_A(1-\theta_A)-2a_2(\frac{1}{a_1}-\frac{1}{a_2})\frac{(b_1-b_2)}{a_1^2}\theta_B(1-\theta_A)\notag\\ &\qquad\quad\quad\quad\qquad\qquad\qquad\qquad\qquad-2a_2(\frac{1}{a_1}-\frac{1}{a_2})(\frac{b_2}{a_1^2}-\frac{b_2}{a_2^2})\theta_A(1-\theta_A)\}(N-1). \end{align} Rewriting \eqref{os12}, we get \begin{equation}\label{sip} ({A^{*}}^{-1}B^{\#}{A^{*}}^{-1}-cI)\sigma\cdot\sigma + (\widetilde{L} - c I)\eta\cdot \eta - 2(\widetilde{L}-cI)\ \sigma\cdot \eta\ \geq\ Y^\prime \end{equation} \textbf{Choice of $\sigma$ :} Now in order to obtain the final expression of the lower bound by eliminating $\sigma$ from \eqref{sip}, we choose $\sigma(x)$ (locally which can take any value in $\mathbb{R}^N$) as the minimizer of \eqref{sig} i.e. 
\begin{equation}\label{ub5} \sigma = - (\underline{A}^{-1} - a_2^{-1}I)({A^{*}}^{-1}- a_2^{-1}I)^{-1}\eta\end{equation} \textbf{Matrix lower bound :} Then \eqref{sip} yields as \begin{align} &({A^{*}}^{-1}B^{\#}{A^{*}}^{-1}- c I)(\underline{A} ^{-1} - a_2^{-1}I)^2({A^{*}} ^{-1}- a_2^{-1}I)^{-2}\eta\cdot \eta + (\widetilde{L}-c I)\eta\cdot \eta\notag\\ &\quad\quad\qquad\qquad\qquad\qquad- 2(\widetilde{L}-c I)(\underline{A} ^{-1} - a_2^{-1}I)({A^{*}} ^{-1}- a_2^{-1}I)^{-1}\eta\cdot \eta\ \geq\ Y^\prime.\label{os14} \end{align} Now by using $\widetilde{L}\geq L$ (cf.\eqref{bs4}) together with $A^{*}\eta\cdot\eta\geq \underline{A}\eta\cdot\eta$ we can replace $\widetilde{L}$ by L in \eqref{os14} to obtain : \begin{align} &({A^{*}}^{-1}B^{\#}{A^{*}}^{-1}- c I)(\underline{A} ^{-1} - a_2^{-1}I)^2({A^{*}} ^{-1}- a_2^{-1}I)^{-2}\eta\cdot \eta + (L-c I)\eta\cdot \eta\notag\\ &\quad\quad\qquad\qquad\qquad\qquad- 2(L-c I)(\underline{A} ^{-1} - a_2^{-1}I)({A^{*}} ^{-1}- a_2^{-1}I)^{-1}\eta\cdot \eta\ \geq\ Y^\prime \label{bs9} \end{align} where, $A^{*}$ satisfies \begin{equation}\label{eij} ({A^{*}}^{-1}-a_2^{-1} I)^{-1}\eta\cdot\eta =\ (\underline{A} ^{-1} - a_2^{-1}I)^{-1}\eta\cdot\eta + \frac{(1-\theta_A)a_2}{\theta_A}(I-M_A)\eta\cdot\eta. \end{equation} We simplify the bound \eqref{bs9} with using \eqref{eij} and by taking trace using \eqref{zz3} one obtains : \paragraph{Trace bound L2(a)\ when $c=\frac{b_2}{a_2^2}$ :} \begin{align}\label{ub1} tr\ \{({A^{*}}^{-1}B^{\#}{A^{*}}^{-1}- \frac{b_2}{a_2^2} I)(\underline{A} ^{-1} - a_2^{-1}I)^2({A^{*}} ^{-1}- a_2^{-1}I)^{-2}\} \geq\ &\frac{b_2}{a_2^2}\frac{(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A)(N-1) \notag\\ &\qquad\quad + N(l-\frac{b_2}{a_2^2}). 
\end{align} \textbf{Trace bound L2(b)\ when $c=\frac{b_1}{a_1^2}$ :} \begin{align}\label{ub3} &tr\ \{({A^{*}}^{-1}B^{\#}{A^{*}}^{-1}- \frac{b_1}{a_1^2} I)(\underline{A} ^{-1} - a_2^{-1}I)^2({A^{*}} ^{-1}- a_2^{-1}I)^{-2}\}\notag\\ &\geq\ \frac{b_1}{a_1^2}\frac{(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A)(N-1)+2(\frac{b_2}{a_2^2}-\frac{b_1}{a_1^2})\frac{(a_2-a_1)}{a_1}(1-\theta_A)(N-1) + N(l-\frac{b_1}{a_1^2}). \end{align} Hence, the trace bounds \eqref{to},\eqref{tn} follow whenever $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$. \qed \\ Next, we give examples of microstructures which possess the property $\omega_{B^\epsilon}\subset \omega_{A^\epsilon}$ and achieves the equality in \eqref{bs9} as mentioned below. \begin{example}[Saturation/Optimality]\label{Sd20} The equality of this lower bound is achieved by the laminated materials $L^{\#}_2$ (cf. \eqref{lb10}), at composites based on Hashin-Shtrikman construction given in \eqref{FG6} and at sequential laminates of $N$-rank to be constructed below. \begin{proof} We consider the simple laminate say in $e_1$ direction with $\theta_B \leq \theta_A$, then \begin{equation*}B^{\#} = diag\ (L^{\#}_2, \overline{b},..,\overline{b})\ \mbox{(cf. \eqref{lb10})\ and }\ A^{*}=diag\ (\underline{a},\overline{a},..,\overline{a})\end{equation*} achieves the equality in \eqref{bs9} with $M^\prime_{AB} =M^{\prime\prime}_{AB}= diag\ ( 1,0,..,0)$. \\ \\ Similarly, the Hashin-Shtrikman construction given in \eqref{FG6} i.e. with core $a_1 I$ and coating $a_2 I$ for $A_{B(0,1)}$ and core $b_1 I$ with coating $b_2 I$ for $B_{B(0,1)}$, with $\theta_B \leq \theta_A$ \begin{align*} a^{*}&=\ a_2 + N a_2 \frac{\theta_A(a_1-a_2)}{(1-\theta_A)a_1 + (N +\theta_A -1)a_2}\\ b^{\#} &=\ b_2\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}\ ] - \frac{(b_2-b_1)(Na_2)^2\theta_B}{((1-\theta_A)a_1 + (N+\theta_A-1)a_2)^2}. 
\end{align*} achieves the equality in \eqref{bs9} with $M^\prime_{AB} =M^{\prime\prime}_{AB}= diag\ (\frac{1}{N},\frac{1}{N},..,\frac{1}{N}\ )$. \paragraph{Sequential Laminates when ${\omega}_{B^{\epsilon}}\subset {\omega}_{A^{\epsilon}} $ :} Here we construct the $(p,p)$-sequential laminates $B^{\#}_{p,p}$ whenever ${\omega}_{B^{\epsilon}}\subset {\omega}_{A^{\epsilon}}$ and with having the same number of $p$ layers in the same directions $\{e_i\}_{1\leq i\leq p}$ as of the sequential Laminates $A^{*}_p$ with matrix $a_1I$ and core $a_2I$ defined in \eqref{OP3}. Similar to the Example \ref{ot5}, by considering the equality in \eqref{bs9} with the above hypothesis, we define $B^{\#}_{p,p}$ through the following expression : \begin{align}\label{ot16} &({A^{*}_p}^{-1}B^{\#}_{p,p}{A^{*}_p}^{-1}- c I)(\underline{A} ^{-1} - a_2^{-1}I)^2({A^{*}_p} ^{-1}- a_2^{-1}I)^{-2}\notag \\ &= (L-cI) + \{c\frac{(a_2-a_1)^2}{a_1^2}\theta_A(1-\theta_A) + 2(\frac{b_2}{a_2^2} -c)\frac{(a_2 - a_1)}{a_1}(1-\theta_A)\}(I-\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}). \end{align} where $m_i\geq 0$ and $\sum_{i=1}^{p} m_i=1$. This can be also written as, (in an inverse form): \paragraph{Case L2(a): \ when $c=\frac{b_2}{a_2^2}$ :} \begin{align*} \{ \frac{(L-\frac{b_2}{a_2^2}I)}{(\underline{A} ^{-1} - a_2^{-1}I)}({A^{*}_p}^{-1} &- a_2^{-1}I) +\frac{b_2}{a_2}(\underline{A}^{-1}-{A^{*}_p}^{-1})\}^2({A^{*}_p}^{-1}B^{\#}_{p,p}{A^{*}_p}^{-1}- \frac{b_2}{a_2^2}I)^{-1}\notag\\ &=\ (L-\frac{b_2}{a_2^2}I) + \frac{b_2}{a_2^2}\theta_A(1-\theta_A)\frac{(a_2-a_1)^2}{a_1^2}(I-\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}). 
\end{align*} \textbf{Case L2(b): \ when $c=\frac{b_1}{a_1^2}$ :} \begin{align*} &\{ \frac{(L-\frac{b_1}{a_1^2}I)}{(\underline{A} ^{-1} - a_2^{-1}I)}({A^{*}_p}^{-1} - a_2^{-1}I) +\frac{b_1}{a_1^2}a_2(\underline{A}^{-1}-{A^{*}_p}^{-1}) + 2(\frac{b_2}{a_2^2}-\frac{b_1}{a_1^2})\frac{(\underline{A}^{-1} - {A^{*}_p}^{-1})}{(\underline{A}^{-1}- a_2^{-1}I)}\}^2\notag\\ &\ .({A^{*}_p}^{-1}B^{\#}_{p,p}{A^{*}_p}^{-1}- \frac{b_1}{a_1^2}I)^{-1}\ = (L-\frac{b_1}{a_1^2}I) + \frac{b_1}{a_1^2}\theta_A(1-\theta_A)\frac{(a_2-a_1)^2}{a_1^2}(I-\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i})\notag\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad+ 2(\frac{b_2}{a_2^2}-\frac{b_1}{a_1^2})(1-\theta_A)\frac{(a_2-a_1)}{a_1}(I-\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}). \end{align*} The above defined $(N,N)$- sequential laminates give the saturation / optimality of the lower trace bound \eqref{to},\eqref{tn} respectively. This property is incorporated in the very definition of $B^{\#}_{N,N}$ (see \eqref{ot16}). \end{proof} \end{example} \paragraph{Step 2 : $A^{*}\in\mathcal{G}_{\theta_A}$ arbitrary :} Here we will establish the trace bound L1 and trace bound L2 obtained in Step $1$, are also satisfied by any pair $(A^{*},B^{\#})$ corresponding to the arbitrary microstructures $\omega_{A^\epsilon},\omega_{B^\epsilon}$; i.e. need not be satisfying the oscillatory systems \eqref{eik},\eqref{os3}. We recall that the set $\mathcal{G}_{\theta_A(x)}$ consists of all $A^{*}(x)$ governed with two phase medium $(a_1,a_2)$ with its volume proportion $(\theta_A(x),1-\theta_A(x))$, and characterized by the inequalities given in \eqref{FL11}. We also recall that $\mathcal{K}_{\theta_A(x)}$ consists of all eigenvalues of $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$. In the sequel, we will interchangeably use $\mathcal{G}_{\theta_A(x)}$ and $\mathcal{K}_{\theta_A(x)}$ to mean the same set. $\mathcal{G}_{\theta_A(x)}$ is a convex region for fixed $x$. 
The boundaries of $\mathcal{G}_{\theta_A(x)}$ are given by $\partial\mathcal{G}^{L}_{\theta_A(x)}$ and $\partial\mathcal{G}^{U}_{\theta_A(x)}$. The $\partial\mathcal{G}^{L}_{\theta_A(x)}$ represents the set of all symmetric matrices with eigenvalues $\lambda_1(x),..,\lambda_N(x)$ satisfying $\underline{a}(x) \leq \lambda_i(x)\leq \overline{a}(x) \ \ \forall 1\leq i\leq N$ and the lower bound equality \begin{equation*}\sum_{i=1}^N \frac{1}{\lambda_i(x)-a_1} = \frac{1}{\underline{a}(x)-a_1} + \frac{N-1}{\overline{a}(x)-a_1}\end{equation*} and similarly, the $\partial\mathcal{G}^{U}_{\theta_A(x)}$ represents the set which satisfies the upper bound equality \begin{equation*}\sum_{i=1}^N \frac{1}{a_2 -\lambda_i(x)} = \frac{1}{a_2-\underline{a}(x)} + \frac{N-1}{a_2-\overline{a}(x)}.\end{equation*} We have $\mathcal{G}_{\{\theta_A=0\}} = \{a_2\}$ and $\mathcal{G}_{\{\theta_A=1\}} = \{a_1\}$. The family $\{\mathcal{G}_{\theta_A(x)}\}_{\theta_A(x)\in[0,1]}$ gives a continuum of convex sets for almost everywhere $x$, whose union is also a convex set \cite{CHA,MT1}. 
Moreover, for any $\theta_A(x)\in (0,1)$, there exists an interval $[\widetilde{\theta_A}(x),\widetilde{\widetilde{\theta_A}}(x)]$ for $x$ almost everywhere such that \begin{equation*}\left( \mathcal{G}_{\theta(x)} \cap \mathcal{G}_{\theta_A(x)} \right) = \emptyset \mbox{ for } \theta(x)\notin [\widetilde{\theta_A}(x),\widetilde{\widetilde{\theta_A}}(x)] \mbox{ and } \underset{\theta(x)\in[\widetilde{\theta_A}(x),\widetilde{\widetilde{\theta_A}}(x)]}{\cup}\left( \mathcal{G}_{\theta(x)} \cap \mathcal{G}_{\theta_A(x)} \right) = \mathcal{G}_{\theta_A(x)}, x \mbox{ a.e.}\end{equation*} The set can also be expressed through the continuum of boundaries $\partial\mathcal{G}_{\theta(x)}$ as follows: \begin{equation}\label{eii} \mathcal{G}_{\theta_A(x)}\subset \underset{\theta\in[\widetilde{\theta_A},\theta_A]}{\cup}\partial\mathcal{G}^{L}_{\theta(x)}\ \mbox{ and }\ \mathcal{G}_{\theta_A(x)}\subset\underset{\theta\in[\theta_A,\widetilde{\widetilde{\theta_A}}]}{\cup}\partial\mathcal{G}^{U}_{\theta(x)}, \ x \mbox{ a.e. } \end{equation} We will use the above notation to obtain our desired results. \begin{figure} \centering \caption{\textit{$N=2$: the set $\mathcal{G}_{\theta_A}$ and the continuum of boundaries $\partial\mathcal{G}^{L}_{\theta}$, $\partial\mathcal{G}^{U}_{\theta}$.}} \end{figure} \noindent Let us consider the interior points of $\mathcal{G}_{\theta_A(x)}$, or equivalently the continuum of the boundaries $\{\partial\mathcal{G}^{L}_\theta(x)\}_{ \theta(x)\in[\widetilde{\theta_A}(x),\theta_A(x)]}$, $x$ a.e.; i.e.\ for a given $A^{*}(x)\in \mathcal{G}_{\theta_A(x)}$, there exists a unique $\theta(x)$ with $\theta(x) < \theta_A(x)$, $x$ a.e. such that $A^{*}(x)\in \partial\mathcal{G}^{L}_{\theta(x)}$. So it satisfies the trace of \eqref{eif} with $\theta_A$ replaced by $\theta$, i.e. \begin{equation}\label{eio} tr\ (A^{*}(x)-a_1I)^{-1} = \frac{N}{(a_2-a_1)(1-\theta(x))} + \frac{\theta(x)}{(1-\theta(x))a_1}, \ \ A^{*}\in\mathcal{G}_{\theta_A(x)}\cap \partial\mathcal{G}^{L}_{\theta(x)}. 
\end{equation} \noindent Now in Step $1$ we have obtained the optimal `lower trace bound L1' on the pair $(A^{*},B^{\#})$ with $A^{*}\in\partial\mathcal{G}_{\theta(x)}^L$, which is nothing but considering the lower bound expression \eqref{eig} by replacing $\theta_A(x)$ by $\theta(x)$, i.e. \begin{equation}\label{eip} tr\ (B^{\#}-b_1I)(\overline{A}_\theta-a_1I)^2(A^{*}-a_1I)^{-2}\geq\ tr\ (\overline{B}-b_1I) +\frac{b_1}{a_1^2}(a_2-a_1)^2\theta(1-\theta). \end{equation} where $\overline{A}_\theta = \{a_1\theta + a_2(1-\theta)\}I$.\\ \\ Thus for any $A^{*}\in\mathcal{G}_{\theta_A}$, the pair $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$ satisfies the following optimal lower trace bound whenever $\theta\leq\theta_A\leq\theta_B$ \begin{equation}\label{eim} tr\ (B^{\#}-b_1I)(A^{*}-a_1I)^{-2}\geq \frac{N(b_2-b_1)(1-\theta_B)}{(a_2-a_1)^2(1-\theta(x))^2} +\frac{b_1}{a_1^2}\frac{\theta(x)}{(1-\theta(x))} \end{equation} Next one can find $\theta$ explicitly from \eqref{eio} in terms of $trace\ (A^{*}-a_1I)^{-1}$ where $A^{*}\in\mathcal{G}_{\theta_A}\cap \partial\mathcal{G}^{L}_{\theta}$ and substitute it in \eqref{eip} to eliminate $\theta$. We obtain : \begin{equation}\label{FG15} \theta = \frac{ a_1\{(a_2-a_1)\hspace{2pt} tr\ (A^{*}-a_1I)^{-1} - N\}}{(a_2-a_1)\{a_1tr\ (A^{*}-a_1I)^{-1} +1\}}\ \mbox{ and }\ (1-\theta) = \frac{a_2 + a_1(N-1)}{(a_2-a_1)\{a_1tr\ (A^{*}-a_1I)^{-1} +1\}}. \end{equation} Now putting $\theta$ and $(1-\theta)$ in \eqref{eip}, we get for all $A^{*}\in \mathcal{G}_{\theta_A}$ and $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$ : \begin{align}\label{eiq} tr\ (B^{\#}-b_1I)(A^{*}-a_1I)^{-2}\geq\ & N(b_2-b_1)(1-\theta_B)\frac{(a_1tr\ (A^{*}-a_1I)^{-1} +1)^2}{(a_2 +a_1(N-1))^2}\notag\\ &\qquad\qquad\qquad+\frac{b_1}{a_1}\frac{((a_2-a_1)\hspace{2pt} tr\ (A^{*}-a_1I)^{-1} - N)}{(a_2 +a_1(N-1))}. \end{align} \noindent The above inequality coincides with \eqref{ub6} which is now proved. 
Similarly, by using the inclusion $ \mathcal{G}_{\theta_A}\subset\underset{\theta\in[\theta_A,\widetilde{\widetilde{\theta_A}}]}{\cup}\partial\mathcal{G}^{U}_{\theta},$ one performs the same analysis with the lower trace bound L2 given in \eqref{ub1},\eqref{ub3} to generalize it for all $A^{*}\in\mathcal{G}_{\theta_A}$ and establishes \eqref{eic},\eqref{to} and \eqref{tn} which provide the optimal lower bound whenever $\theta_B <\theta_A$. \noindent This completes the proof of establishing the lower trace bound L1, L2 announced in Section \ref{Sd9} and their saturation / optimality. \qed \begin{remark}[Pointwise bounds on energy densities]\label{siz} This remark is analogous to Remark \ref{hsg} and here we find the bounds on energy densities $B^{\#}\nabla u\cdot\nabla u$ and ${A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot\sigma$ corresponding to `trace bound L1' and `trace bound L2' respectively, which are useful in solving optimal oscillation-dissipation problem, as shown in Section \ref{qw5}. First we find the bound on $B^{\#}\nabla u\cdot\nabla u$ for $A^{*}\in\partial\mathcal{G}^{L}_{\theta_A}$ and $B^{\#}$ being associated to $A^{*}$. We consider the inequality obtained in \eqref{ub15} in Step $1c$ and using \eqref{ub4} for $A^{*}\in\partial\mathcal{G}^{L}_{\theta_A}$, we eliminate $\eta$ to obtain the following inequality : \begin{align}\label{tv} B^{\#}\nabla u\cdot\nabla u \geq \{ b_1I + 2 &(\overline{B}-b_1I)(\overline{A}-a_1I)^{-1}(A^{*}-a_1I)\notag\\ &\qquad+(Y- (\overline{B}-b_1 I))(\overline{A}-a_1I)^{-2}(A^{*}-a_1I)^2\}\nabla u\cdot\nabla u \end{align} where $Y$ is given in \eqref{dc13}.\\ (Previously, in Step $1c$, we used \eqref{ub4} to eliminate $\nabla u$ to obtain the matrix inequality \eqref{bs10}.) 
\\ \noindent Then following the arguments presented in Step $2$ we generalize the above inequality \eqref{tv} for all $A^{*}\in\mathcal{G}_{\theta_A}$ and the pair $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$ by using the inclusion $\mathcal{G}_{\theta_A}\subset \underset{\theta\in[\widetilde{\theta_A},\theta_A]}{\cup}\partial\mathcal{G}^{L}_{\theta}$: \begin{align}\label{FG19} B^{\#}\nabla u\cdot\nabla u \geq \{ b_1I + 2 &(\overline{B}-b_1I)(\overline{A}_{\theta}-a_1I)^{-1}(A^{*}-a_1I)\notag\\ &\qquad+(Y_{\theta}- (\overline{B}-b_1 I))(\overline{A}_{\theta}-a_1I)^{-2}(A^{*}-a_1I)^2\}\nabla u\cdot\nabla u \end{align} where $\theta_A$ in \eqref{tv} is replaced by $\theta$ which is explicitly given in \eqref{FG15}. The above bound is optimal whenever $\theta_A(x)\leq\theta_B(x)$, $x$ a.e. As it is shown above, for each $A^{*}\in\mathcal{G}_{\theta_A}$ there exist a $B^{\#}$ such that the corresponding $(A^{*},B^{\#})$ achieves the equality in the above bound \eqref{FG19}. \qed\end{remark} \begin{remark}\label{siz2} Similarly, we consider the inequalities obtained in \eqref{sip} in Step $1f$ and using \eqref{ub5} for $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$, we eliminate $\eta$ to obtain : \begin{align}\label{ty} {A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot\sigma \geq \{cI + &2(L-cI)(\underline{A}^{-1} - a_2^{-1}I)^{-1}({A^{*}}^{-1}- a_2^{-1}I) +(Y^\prime- (L-cI))\notag\\ &\qquad\qquad\qquad\qquad (\underline{A}^{-1} - a_2^{-1}I)^{-2}({A^{*}}^{-1}- a_2^{-1}I)^{2}\}\sigma\cdot\sigma \end{align} where $c, L, Y^\prime$ are given in \eqref{os15},\eqref{bs4} and in \eqref{FG12} respectively.\\ \noindent Then following the arguments presented in Step $2$ we generalize the above inequality \eqref{ty} for all $A^{*}\in\mathcal{G}_{\theta_A}$ and the pair $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$ by using the inclusion $\mathcal{G}_{\theta_A}\subset\underset{\theta\in[\theta_A,\widetilde{\widetilde{\theta_A}}]}{\cup}\partial\mathcal{G}^{U}_{\theta}$: 
\begin{align}\label{siu} {A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma\cdot\sigma \geq \{cI + &2(L-cI)(\underline{A}_\theta^{-1} - a_2^{-1}I)^{-1}({A^{*}}^{-1}- a_2^{-1}I) +(Y_\theta^\prime- (L_\theta-cI))\notag\\ &\qquad\qquad\qquad\qquad (\underline{A}_\theta^{-1} - a_2^{-1}I)^{-2}({A^{*}}^{-1}- a_2^{-1}I)^{2}\}\sigma\cdot\sigma \end{align} where $\theta_A$ in \eqref{ty} is replaced by $\theta$ which is uniquely determined by \eqref{FG15}. Moreover, the above bound is optimal whenever $\theta_B(x) < \theta_A(x)$, $x$ a.e. \qed\end{remark} \begin{remark}\label{zz12} By \eqref{zz11} we can take in \eqref{tv} $\nabla u=\zeta$ an arbitrary vector in $\mathbb{R}^N$. We apply \eqref{tv} with $A^{*}=A^{*}_N$ (cf.\eqref{OP2}) and $B^{\#}=B^{\#}_{N,N}$ (cf. Example \ref{ot5}), in which case we get equality in \eqref{tv}. On the other hand, we apply \eqref{tv} with $A^{*}=A^{*}_N$ and $B^{\#}$ any relative limit corresponding to $A^{*}$. We obtain \begin{equation*} B^{\#}\zeta\cdot\zeta\ \geq\ B^{\#}_{N,N}\zeta\cdot\zeta. \end{equation*} \qed\end{remark} \paragraph{Upper Bound : } In the final part of this section, we derive the upper bounds U1, U2 on $(A^{*},B^{\#})$ introduced in Section \ref{ad18}. In deriving the upper bounds, we follow the same strategy as for the lower bounds L1, L2. In Step $1$ we will find bounds when $A^{*}\in\partial\mathcal{G}_{\theta_A}$, and then in Step $2$ we will generalize them to arbitrary $A^{*}\in \mathcal{G}_{\theta_A}$. \paragraph{Step 1a : ($H$-Measure term) } Let us take $\eta\in\mathbb{R}^N$ arbitrary and consider the simple translated inequality \begin{equation*}(b_2I - B^{\epsilon})(\nabla u^{\epsilon} +\eta)\cdot(\nabla u^{\epsilon} +\eta)\geq 0 \quad\mbox{a.e. 
in }{|\!\!\!O}mega \end{equation*} or equivalently, \begin{equation}\label{os13} b_2\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}-2B^\epsilon\nabla u^\epsilon\cdot\eta \geq\ B^{\epsilon}\nabla u^{\epsilon}\cdot\nabla u^\epsilon -(b_2I-B^\epsilon)\eta\cdot\eta-2b_2\nabla u^\epsilon\cdot\eta \ \ \mbox{a.e. in }{|\!\!\!O}mega. \end{equation} Let us pass to the limit in the above inequality. Passing to the limit in the right hand side is straight forward. The limit of left hand side of \eqref{os13} is \begin{equation*} lim\ \{b_2\nabla u^\epsilon\cdot\nabla u^\epsilon- 2B^\epsilon\nabla u^\epsilon\cdot\eta\} = b_2\nabla u\cdot\nabla u -2\overline{B}\nabla u\cdot\eta + Z \end{equation*} where $Z$ is a $H$-measure correction term. Combining these two we get \begin{equation}\label{OP1} (b_2I - B^{\#})\nabla u\cdot\nabla u +2(b_2I-\overline{B})\nabla u\cdot\eta +(b_2I-\overline{B})\eta\cdot\eta + Z\geq\ 0. \end{equation} Now, $Z$ is given by \begin{equation*} Z =\ \underset{\epsilon\rightarrow 0}{lim}\ q_4(\nabla u^\epsilon-\nabla u,(B^\epsilon-\overline{B})\eta)\cdot (\nabla u^\epsilon-\nabla u,(B^\epsilon-\overline{B})\eta)=\ \langle\langle\varPi_W, Q_4(U,B\eta)\rangle\rangle\end{equation*} with, \begin{equation*}Q_4(W) =\ q_4(W)\cdot W=\ b_2|U|^2-2BU\cdot\eta,\ \ W=(U,B\eta)\in \mathbb{R}^N\times\mathbb{R}^N\end{equation*} and $\varPi_{W}$ is the $H$-measure of the sequence $W_\epsilon-W_0= (\nabla u^\epsilon,B^\epsilon\eta)-(\nabla u,\overline{B}\eta)$. \paragraph{Step 1b : (Upper bound) :} Here onwards we will find the upper bounds by choosing the field $\nabla u^\epsilon$ satisfying \eqref{six} and \eqref{eiz} or equivalently $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$. 
Under the oscillatory system \eqref{eik} for $W_\epsilon^\prime$ where $W^\prime_\epsilon= (\nabla u^\epsilon,B^\epsilon\eta, A^\epsilon\eta) \rightharpoonup (\nabla u,\overline{B}\eta,\overline{A}\eta)= W^\prime_0$ weakly in $L^2({|\!\!\!O}mega)^{3N}$ and $\varPi_{W^\prime}$ is the $H$-measure of the sequence $W^\prime_\epsilon-W^\prime_0$. Then following the very similar arguments presented in Step $1c$ for computing $Y$ in order to deduce the `lower trace bound L1', here we compute $Z$. \begin{align}\label{os7} Z= \langle\langle\varPi_{W}, Q_4(U,B\eta)\rangle\rangle &= \langle\langle\varPi_{W^{\prime}}, Q_4(\frac{(A\eta\cdot\xi)}{a_1|\xi|^2}\xi,B\eta) \rangle\rangle \notag\\ &= \langle\langle \varPi_{W^\prime}, b_2\left( \frac{(A\eta\cdot\xi)}{a_1|\xi|} - \frac{(B\eta\cdot\xi)}{b_2|\xi|}\right)^2\rangle\rangle -\langle\langle\varPi_{W^\prime},\frac{(B\eta\cdot\xi)^2}{b_2|\xi|^2}\rangle\rangle\notag\\ &= \frac{1}{a_1^2b_2}\langle\langle\widetilde{\varPi}_{AB}, \frac{((b_2A-a_1B)\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle -\frac{1}{b_2}\langle\langle \varPi_B, \frac{(B\eta\cdot\xi)^2}{|\xi|^2}\rangle\rangle\notag \\ &= \widetilde{R_1} - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_2}M_B\eta\cdot\eta \end{align} where $\widetilde{\varPi}_{AB}\in \mathcal{M}({|\!\!\!O}mega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N}) $ is $H$-measure of the sequence $\{(b_2A^\epsilon-a_1B^\epsilon) -(b_2\overline{A}-a_1\overline{B})\}\eta$ and $M_B$ is the non-negative matrix with unit trace defined in \eqref{dc19}.\\ The $H$-measure $\widetilde{\varPi}_{AB}$ reduces to \begin{equation*} (\widetilde{\varPi}_{AB})_{ij} = (\widetilde{\nu}_{AB})\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where, $\widetilde{\nu}_{AB}$ is $H$-measure of the scalar sequence $\{b_2(a_1-a_2)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A(x))-a_1(b_1-b_2)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B(x))\}$ with \begin{equation*} \widetilde{\nu}_{AB}(x,d\xi)\geq 0\ \mbox{ and }\ 
\int_{\mathbb{S}^{N-1}}\widetilde{\nu}_{AB}(x,d\xi) = U_{AB} \end{equation*} with \begin{align*} U_{AB} & := L^\infty({|\!\!\!O}mega)\mbox{ weak* limit of }\{b_2(a_1-a_2)(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)-a_1(b_1-b_2)(\chi_{{\omega}_{B^{\epsilon}}}(x)-\theta_B)\}^2\notag\\ &= b_2^2(a_2-a_1)^2\theta_A(1-\theta_A) +a_1^2(b_2-b_1)^2\theta_B(1-\theta_B)-2b_2a_1(b_2-b_1)(a_2-a_1)(\theta_{AB}-\theta_A\theta_B)\notag\\ &\leq\ b_2^2(a_2-a_1)^2\theta_A(1-\theta_A) +a_1^2(b_2-b_1)^2\theta_B(1-\theta_B) +2b_2a_1(b_2-b_1)(a_2-a_1)\theta_A\theta_B\notag\\ &\ \ \ \ =: U^0_{AB} \mbox{ (say)}. \end{align*} The above inequality $U_{AB}\leq U^{0}_{AB}$ follows by simply using $\theta_{AB}\geq 0$ and it becomes equal when $\theta_{AB}=0\ $ (e.g. $\omega_{A^\epsilon}\cap \omega_{B^\epsilon} =\emptyset$). Thus \begin{align*} \widetilde{R_1} = \frac{1}{a_1^2b_2}\langle\langle\widetilde{\varPi}_{AB}, \frac{((b_2A-a_1B)\eta\cdot\xi)^2}{|\xi|^2} \rangle\rangle &= \frac{1}{a_1^2b_2} \int_{\mathbb{S}^{N-1}} \frac{(\eta \cdot \xi)^2}{|\xi|^2}\widetilde{\nu}_{AB} (d\xi)\notag\\ &= \frac{1}{a_1^2b_2}U_{AB}\ \widetilde{M}_{AB}\eta\cdot\eta \leq \frac{1}{a_1^2b_2}U^{0}_{AB}\ \widetilde{M}_{AB}\eta\cdot\eta \end{align*} where $\widetilde{M}_{AB}$ is a non-negative matrix with unit trace defined by \begin{equation*} \widetilde{M}_{AB} = \frac{1}{U_{AB}}\int_{\mathbb{S}^{N-1}} \xi \otimes \xi\ \widetilde{\nu}_{AB} (d\xi). \end{equation*} Therefore from \eqref{os7} we have \begin{align}\label{zz10} Z \leq\ &\{\frac{b_2}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A) +\frac{(b_2-b_1)^2}{b_2}\theta_B(1-\theta_B)+\frac{2}{a_1}(b_2-b_1)(a_2-a_1)\theta_A\theta_B\}\widetilde{M}_{AB}\eta\cdot\eta\notag\\ &\quad - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_2}M_B\eta\cdot\eta. \end{align} with \begin{equation}\label{FG14} trace\ Z\ \leq\ \frac{b_2}{a_1^2}(a_2 - a_1)^2\theta_A(1-\theta_A) + \frac{2}{a_1}(b_2-b_1)(a_2-a_1)\theta_A\theta_B. 
\end{equation} \textbf{Matrix upper bound :} Next we consider \eqref{OP1} with the upper bound \eqref{zz10} on $Z$ and choosing $\nabla u = - (\overline{A}-a_1I)(A^{*}- a_1 I)^{-1}\eta$ (cf.\eqref{ub4}) to obtain : \begin{align} &(b_2I-B^{\#})(\overline{A}-a_1I)^2(A^{*}- a_1 I)^{-2}\eta\cdot\eta - 2 (b_2I-\overline{B})(\overline{A}-a_1I)(A^{*}- a_1 I)^{-1}\eta\cdot \eta \notag\\ &\qquad\quad+(b_2I-\overline{B})\eta\cdot\eta - \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_2}M_B\eta\cdot\eta+\{\frac{b_2}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A)\notag\\ &\qquad\qquad\qquad+\frac{(b_2-b_1)^2}{b_2}\theta_B(1-\theta_B)+\frac{2}{a_1}(b_2-b_1)(a_2-a_1)\theta_A\theta_B\}\widetilde{M}_{AB}\eta\cdot\eta \geq 0 \label{OP4} \end{align} \textbf{Trace bound U1 : : $\theta_A+\theta_B\leq 1$ almost everywhere in $x$ :} We simplify the above bound with using \eqref{eif} as $A^{*}\in\partial\mathcal{G}^L_{\theta_A}$ and then by taking trace using \eqref{FG14} to obtain : \begin{equation}\label{FL2} tr\ (b_2I-B^{\#})(\overline{A}-a_1I)^2(A^{*}-a_1I)^{-2}\geq\ tr\ (b_2I-\overline{B}) -\frac{b_2}{a_1^2}(a_2-a_1)^2\theta_A(1-\theta_A). \end{equation} However $(b_2I-B^{\#})$ need not be positive definite (cf. \eqref{Sd3}), so we slightly modify \eqref{FL2} by using $A^{*}\in \partial\mathcal{G}^L_{\theta_A}$ to obtain : \begin{equation}\label{lb18} tr\ (\frac{b_2}{a_1}A^{*}-B^{\#})(\overline{A}-a_1I)^2(A^{*}(x)- a_1 I)^{-2} \geq\ tr\ (b_2I-\overline{B})+ tr\ \frac{b_2}{a_1}(\overline{A}-a_1I). \end{equation} Thus \eqref{hsm} follows. Note that, $(\frac{b_2}{a_1}A^{*}-B^{\#})$ is positive definite matrix (cf. \eqref{Sd3}), that's why the above inequality turns out to be an upper bound. \qed \\ Next we give examples of microstructures which possess the property $\omega_{A^\epsilon}\cap\omega_{B^\epsilon}=\emptyset$ and such that equality holds in \eqref{OP4} as shown below. 
\begin{example}[Saturation/Optimality]\label{FG5} The equality of this upper bound is achieved by the laminated materials $U^{\#}_1$ (cf. \eqref{tp}), at composites based on the Hashin-Shtrikman construction given in \eqref{FG7} and at sequential laminates of higher rank. \begin{proof} The composite based on the Hashin-Shtrikman construction given in \eqref{FG7}, i.e. with core $a_2 I$ and coating $a_1 I$ for $A_{B(0,1)}$ and core $b_1 I$ with coating $b_2 I$ for $B_{B(0,1)}$, with $\theta_A + \theta_B\leq 1$ \begin{align*} a^{*}&=\ a_1 + N a_1 \frac{(1-\theta_A)(a_2-a_1)}{(N-\theta_A)a_1 + \theta_A a_2}\\ b^{\#} &=\ b_2\ [\ 1 + \frac{N\theta_A(1-\theta_A)(a_2-a_1)^2}{(\theta_Aa_2 + (N-\theta_A)a_1)^2}\ ] - \frac{(b_2-b_1)(Na_1)^2\theta_B}{(\theta_Aa_2 + (N-\theta_A)a_1)^2}. \end{align*} achieves the equality in \eqref{OP4} with $M_B=\widetilde{M}_{AB} = diag\ (\frac{1}{N},\frac{1}{N},..,\frac{1}{N}\ )$.\\ \\ Similarly, the simple laminate, say in the $e_1$ direction, with $\theta_A + \theta_B\leq 1$ \begin{equation*}B^{\#} = diag\ (U^\#_1, \overline{b},..,\overline{b})\ \mbox{ (cf.\eqref{tp}) and }\ A^{*}=diag\ (\underline{a},\overline{a},..,\overline{a})\end{equation*} achieves the equality in \eqref{OP4} with $M_B = \widetilde{M}_{AB}= diag\ ( 1,0,..,0)$. \paragraph{Sequential Laminates when ${\omega}_{A^{\epsilon}}\cap {\omega}_{B^{\epsilon}}=\emptyset $ :} Here we define the $(p,p)$-sequential laminates $B^{\#}_{p,p}$ whenever ${\omega}_{A^{\epsilon}}\cap {\omega}_{B^{\epsilon}}=\emptyset$, having the same number $p$ of layers in the same directions $\{e_i\}_{1\leq i\leq p}$. 
Following the arguments presented before, by taking $A^{*}=A^{*}_p$ with matrix $a_1 I$ and core $a_2 I$ defined in \eqref{OP2} and considering the equality in \eqref{OP4} we have \begin{equation*} (b_2I-B^{\#}_{p,p})(\overline{A}-a_1I)^{2}(A^{*}_p - a_1 I)^{-2} = (b_2I-\overline{B})-\frac{b_2(a_2 - a_1)^2}{a_1^2}\theta_A(1-\theta_A)(\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}); \end{equation*} or, \begin{align*} \{\frac{(b_2I-\overline{B})}{(\overline{A}-a_1I)}(A^{*}_p-a_1I)+\frac{b_2}{a_1}(A^{*}_p-a_1I)\}^2(\frac{b_2}{a_1}A^{*}_p-B^{\#}_{p,p})^{-1} = \frac{b_2}{a_1}(\overline{A}-a_1I) + (b_2I-\overline{B}). \end{align*} The above defined $(N,N)$- sequential laminates give the saturation / optimality of the upper trace bound \eqref{hsm}. \end{proof} \end{example} \begin{remark} The above upper bound \eqref{lb18} need not be an optimal bound whenever $\theta_A + \theta_B > 1$. \qed\end{remark} \noindent \textbf{Trace bound U2 : $\theta_A(x) +\theta_B(x) > 1$ almost everywhere in $x$: } In this case, we establish the optimal upper bound by proceeding in a similar way as we did in the case of proving lower trace bound L2. \paragraph{Step 1c : ($H$-Measure term) :} We begin by considering the translated inequality : \begin{equation*}(\frac{b_2}{a_1^2}I-(A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1})(\sigma^{\epsilon} + \eta)\cdot(\sigma^{\epsilon} + \eta)\ \geq\ 0 \quad\mbox{a.e. in }{|\!\!\!O}mega,\ \ \ \eta\in \mathbb{R}^N.\end{equation*} Note that here the optimal translated amount is $\frac{b_2}{a_1^2}$. 
By passing to the limit in the of the above inequality we write \begin{equation}\label{OP5} (\frac{b_2}{a_1^2}I - {A^{*}}^{-1}B^{\#}{A^{*}}^{-1})\sigma \cdot\sigma + 2(\frac{b_2}{a_1^2}I-\widetilde{L})\sigma \cdot\eta +(b_2I-\widetilde{L}) + Z^\prime \geq\ 0 \end{equation} where, $\widetilde{L}$ is the $L^{\infty}$ weak* limit of $(A^{\epsilon})^{-1}B^{\epsilon}(A^{\epsilon})^{-1}$ with having the following upper bound in this case $\theta_A +\theta_B>1$ as : \begin{equation}\label{OP6} \widetilde{L}\ \leq\ \Theta^{*} := \{\frac{b_2}{a_1^2}+\frac{(b_1-b_2)}{a_1^2}\theta_B + b_1(\frac{1}{a_2^2}-\frac{1}{a_1^2})(1-\theta_A)\}I. \end{equation} Note that, by making the choice $\omega_{A^\epsilon}, \omega_{B^\epsilon}$ such that $\omega_{B^\epsilon}^c\subset \omega_{A^\epsilon}$ (which is possible in the present case), we obtain equality in the above inequality.\\ \\ In \eqref{OP5}, $Z^\prime$ is $H$-measure correction term defined as : \begin{equation*} lim\ \{\frac{b_2}{a_1^2}\sigma^\epsilon\cdot\sigma^\epsilon- 2(A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}\sigma^\epsilon\cdot\eta\} = \frac{b_2}{a_1^2}\sigma\cdot\sigma -2\Theta^{*}\sigma\cdot\eta + Z^\prime \end{equation*} with \begin{equation*} Z^\prime= \underset{\epsilon\rightarrow 0}{lim}\ q_5(\sigma^\epsilon-\sigma,((A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}-\widetilde{L})\eta)\cdot (\sigma^\epsilon-\sigma,((A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}-\widetilde{L})\eta)=\langle\langle\varPi_{W^{\prime\prime}}, Q_5(W^{\prime\prime})\rangle\rangle \end{equation*} where, \begin{equation*}Q_5(W^{\prime\prime}) =\ q_5(W^{\prime\prime})\cdot W^{\prime\prime}=\ \frac{b_2}{a_1^2}|\varSigma|^2-2A^{-1}BA^{-1}\varSigma\cdot\eta,\ \ W^{\prime\prime}= (\varSigma,A^{-1}BA^{-1})\in \mathbb{R}^N\times\mathbb{R}^N\end{equation*} and $\varPi_{W^{\prime\prime}}$ is the $H$-measure of the sequence $W^{\prime\prime}_\epsilon-W^{\prime\prime}_0= 
(\sigma^\epsilon,(A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}\eta)-(\sigma,\widetilde{L}\eta)$. \paragraph{Step 1d : (Upper bound) :} Here onwards we will find the upper bounds by choosing the field $\sigma^\epsilon$ satisfying \eqref{ub9},\eqref{ub10} or equivalently $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$. Under the oscillatory system \eqref{os3} for $W_\epsilon^{\prime\prime\prime}$ where $W_\epsilon^{\prime\prime\prime} = (\sigma^\epsilon, (A^\epsilon)^{-1}B^\epsilon (A^\epsilon)^{-1}\eta, (A^\epsilon)^{-1}\eta ) \rightharpoonup ( \sigma , \widetilde{L}\eta, \underline{A}^{-1}\eta )= W_0^{\prime\prime\prime}$ weakly in $L^2({|\!\!\!O}mega)^{3N}$ and $\varPi_{W^{\prime\prime\prime}}$ is the $H$-measure of the sequence $W^{\prime\prime\prime}_\epsilon-W^{\prime\prime\prime}_0$. Then following the very similar arguments presented in Step $1f$ for computing $Y^\prime$ in order to deduce the `lower trace bound L2', here we compute $Z^\prime$. \begin{align}\label{OP7} Z^\prime\ &=\ \langle\langle\varPi_{W^{\prime\prime}}, Q_5(\varSigma,A^{-1}BA^{-1}\eta)\rangle\rangle\ =\ \langle\langle\varPi_{W^{\prime\prime\prime}}, Q_5( a_2(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}\eta,A^{-1}BA^{-1}) \rangle\rangle \notag\\ &=\ \langle\langle \widetilde{\varPi}^\prime_{AB}, \frac{b_2}{a_1^2} (I-\frac{\xi\otimes \xi}{|\xi|^2})(\frac{a_1^2}{b_2}A^{-1}BA^{-1}-a_2A^{-1})\eta\cdot(\frac{a_1^2}{b_2}A^{-1}BA^{-1}-a_2A^{-1})\eta\rangle\rangle \notag\\ &\qquad -\frac{a_1^2}{b_2}\langle\langle \widetilde{\varPi}^{\prime\prime}_{AB},(I-\frac{\xi\otimes \xi}{|\xi|^2})A^{-1}BA^{-1}\eta\cdot A^{-1}BA^{-1}\eta \rangle\rangle, \notag \\ &= \widetilde{R}^\prime_1 -\frac{a_1^2}{b_2}L^{\prime\prime}_{AB}(I-M^{\prime\prime}_{AB})\eta\cdot\eta \end{align} where $\widetilde{\varPi}^\prime_{AB}\in \mathcal{M}({|\!\!\!O}mega\times \mathbb{S}^{N-1};\mathbb{R}^{N\times N}) $ is $H$-measure of the sequence 
$\{\frac{a_1^2}{b_2}((A^\epsilon)^{-1}B^\epsilon(A^\epsilon)^{-1}-\widetilde{L})-a_2((A^\epsilon)^{-1}-(\underline{A})^{-1})\}\eta$ and the weak* limit $L^{\prime\prime}_{AB}$ and the non-negative matrix $M^{\prime\prime}_{AB}$ with unit trace are defined in \eqref{zz7} and \eqref{zz8} respectively. \\ Then $H$-measure $\widetilde{\varPi}^\prime_{AB}$ reduces to \begin{equation*} (\widetilde{\varPi}^\prime_{AB})_{ij} = (\widetilde{\nu}^\prime_{AB})\eta_i\eta_j \ \ \forall i,j =1,..,N \end{equation*} where $\widetilde{\nu}^\prime_{AB}$ is $H$-measure of the scalar sequence $\{\frac{a_1^2}{b_2}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}$ with \begin{align} &\widetilde{\nu}^\prime_{AB}(x,d\xi)\geq 0\ \mbox{ and }\notag\\ &\int_{\mathbb{S}^{N-1}}\widetilde{\nu}^\prime_{AB}(x,d\xi) = L^\infty({|\!\!\!O}mega)\mbox{ weak* limit of }\{\frac{a_1^2}{b_2}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}^2 := \widetilde{L}^\prime_{AB}. \end{align} So \begin{equation*} \widetilde{R}^\prime_1 = \frac{b_2}{a_1^2}\widetilde{L}^\prime_{AB}\ (I-\widetilde{M}^\prime_{AB})\eta\cdot\eta \end{equation*} where $\widetilde{M}^\prime_{AB}$ is a non-negative matrix with unit trace defined by \begin{equation*} \widetilde{M}^\prime_{AB} = \frac{1}{\widetilde{L}^\prime_{AB}}\int_{\mathbb{S}^{N-1}} \xi \otimes \xi\ \widetilde{\nu}^\prime_{AB} (d\xi). \end{equation*} Therefore from \eqref{OP7} we have \begin{equation}\label{os8} Z^\prime = \frac{b_2}{a_1^2}\widetilde{L}^\prime_{AB}(I-\widetilde{M}^\prime_{AB})\eta\cdot\eta- \frac{a_1^2}{b_2}L^{\prime\prime}_{AB}(I-M^{\prime\prime}_{AB})\eta\cdot\eta \end{equation} with \begin{equation}\label{FL4} trace\ Z^\prime = \{\frac{b_2}{a_1^2}\widetilde{L}^\prime_{AB} -\frac{a_1^2}{b_2}L^{\prime\prime}_{AB}\}(N-1). 
\end{equation} Recall that, \begin{align}\label{FG20} \{\frac{b_2}{a_1^2}\widetilde{L}^\prime_{AB} -\frac{a_1^2}{b_2}L^{\prime\prime}_{AB}\}=&\ \frac{b_2}{a_1^2}\{\mbox{$L^\infty({|\!\!\!O}mega)$ weak* limit of }\{\frac{a_1^2}{b_2}f^\epsilon- a_2(\frac{1}{a_1}-\frac{1}{a_2})(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}^2\}\notag\\ &\qquad-\frac{a_1^2}{b_2}\{\mbox{$L^\infty({|\!\!\!O}mega)$ weak* limit of }(f^\epsilon)^2\}. \end{align} We want to find the upper bound the above quantity \eqref{FG20}. As usual the above quantity involves the parameter $\theta_{AB}(x)$, the $L^{\infty}({|\!\!\!O}mega)$ weak* limit of $(\chi_{{\omega}_{A^{\epsilon}}}(x)B)$. Now keeping the estimate \eqref{OP6} in mind, we maximize $\widetilde{L}^\prime_{AB}$ and according to that the other quantity $L^{\prime\prime}_{AB}$ also gets determined. For that, it is enough to make the choice $\omega_{B^\epsilon}^c\subset \omega_{A^\epsilon}$ which is possible in this present case ($\theta_A +\theta_B>1$), we bound $trace\ Z^\prime$ from above as following : \begin{align}\label{os4} &trace\ Z^\prime\ \leq \ L^\infty({|\!\!\!O}mega) \mbox{ weak* limit of }\{\frac{b_2}{a_1^2}a_2^2(\frac{1}{a_1}-\frac{1}{a_2})^2(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)^2-2a_2(\frac{1}{a_1}-\frac{1}{a_2})(\frac{b_1}{a_1^2}-\frac{b_1}{a_2^2})\notag\\ &\quad\ (\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)^2 + 2a_2(\frac{1}{a_1}-\frac{1}{a_2})\frac{(b_1-b_2)}{a_1^2}((1-\chi_{{\omega}_{B^{\epsilon}}}(x))-(1-\theta_B))(\chi_{{\omega}_{A^{\epsilon}}}(x)-\theta_A)\}(N-1)\notag\\ &\qquad= \{\frac{b_2}{a_1^2}a_2^2(\frac{1}{a_1}-\frac{1}{a_2})^2\theta_A(1-\theta_A)-2a_2(\frac{1}{a_1} - \frac{1}{a_2})(\frac{b_1}{a_1^2} -\frac{b_1}{a_2^2})\theta_A(1-\theta_A)\notag\\ &\quad\qquad\qquad\qquad\qquad\qquad\qquad+2a_2(\frac{1}{a_1}-\frac{1}{a_2})\frac{(b_1-b_2)}{a_1^2}(1-\theta_B)(1-\theta_A)\}(N-1). 
\end{align} \textbf{Matrix upper bound :} Next we consider \eqref{OP5} with $Z^\prime$ in \eqref{os8} and choosing $\sigma = - (\underline{A}^{-1} - a_2^{-1}I)({A^{*}}^{-1}- a_2^{-1}I)^{-1}\eta$ (cf.\eqref{ub5}) and using $\widetilde{L}\leq \Theta^{*}$ (cf.\eqref{OP6}) we obtain \begin{align}\label{OP9} &(\frac{b_2}{a_1^2}I-{A^{*}}^{-1}B^{\#}{A^{*}}^{-1})(\underline{A}^{-1} - a_2^{-1}I)^2({A^{*}} ^{-1}- a_2^{-1}I)^{-2}\eta\cdot\eta + (\frac{b_2}{a_1^2}I-\Theta^{*})\eta\cdot\eta\notag\\ &\qquad\qquad\qquad\qquad\qquad- 2(\frac{b_2}{a_1^2}I-\Theta^{*})(\underline{A}^{-1} - a_2^{-1}I)({A^{*}}^{-1}- a_2^{-1}I)^{-1}\eta\cdot\eta + Z^{\prime} \geq\ 0 \end{align} \textbf{Trace bound U2 : $\theta_A+\theta_B>1$ almost everywhere in $x$ :} We simplify the above bound with using \eqref{eij} as $A^{*}\in\partial\mathcal{G}_{\theta_A}^U$ and then by taking trace using \eqref{os4} to obtain : \begin{align}\label{lb17} tr \ &(\frac{b_2a_2}{a_1^2}{A^{*}}^{-1}-{A^{*}}^{-1}B^{\#}{A^{*}}^{-1})(\underline{A}^{-1} - a_2^{-1}I)^2({A^{*}}^{-1} - a_2^{-1}I)^{-2}\notag\\ &\geq\ tr\ (\frac{b_2}{a_1^2}I-\Theta^{*})+ tr\ \frac{b_2a_2}{a_1^2}(\underline{A}^{-1}-a_2^{-1}I)- 2\frac{(b_2-b_1)(a_2-a_1)}{a_1^3}(1-\theta_A(x))(N-1). \end{align} Thus \eqref{tm} follows. Note that $(\frac{b_2a_2}{a_1}{A^{*}}^{-1}-{A^{*}}^{-1}B^{\#}{A^{*}}^{-1})$ is a positive definite matrix (cf.\eqref{Sd3}), that is why the above inequality turns out to be an upper bound. \qed \begin{example}[Saturation/Optimality] The equality of the above upper bound is achieved by the laminated materials $U^{\#}_2$ (cf. \eqref{tp}), at the composites based on Hashin-Shtrikman construction given in \eqref{bs19} and at the sequential laminates of higher rank. 
\paragraph{Sequential Laminates when $\omega_{B^\epsilon}^c \subset \omega_{A^\epsilon}$ :} Here we define the $(p,p)$-sequential laminates $B^{\#}_{p,p}$ whenever $ ({\omega}_{B^{\epsilon}})^c \subset\omega_{A^\epsilon}$ and with having the same number of $p$ layers in the same directions $\{e_i\}_{1\leq i\leq p}$. By taking $A^{*}=A^{*}_p$ with matrix $a_1 I$ and core $a_2 I$ defined in \eqref{OP3} and considering the equality in \eqref{OP9} we have \begin{align*} &(\frac{b_2a_2}{a_1^2}{A^{*}_p}^{-1}-{A^{*}_p}^{-1}B^{\#}_{p,p}{A^{*}_p}^{-1})(\underline{A}^{-1} - a_2^{-1}I)^2({A^{*}_p} ^{-1}- a_2^{-1}I)^{-2} \\ &\ = (\frac{b_2a_2}{a_1^2}\underline{A}^{-1}-\Theta^{*})- 2\frac{(b_2-b_1)(a_2-a_1)}{a_1^3}(1-\theta_A)(I-\sum_{i=1}^{p} m_i\frac{e_i\otimes e_i}{e_i.e_i}); \mbox{ with }\sum_{i=1}^p m_i =1. \end{align*} The above defined $(N,N)$- sequential laminates give the saturation / optimality of the upper trace bound \eqref{tm}. \qed \end{example} \noindent\textbf{Step 2 :} Proceeding in a exactly same way as we have done for the lower trace bounds L1 and L2, the upper trace bounds U1 and U2 obtained in the above Step $1$ with the hypothesis $A^{*}\in\partial\mathcal{G}_{\theta_A}$ can be generalized for any pair of $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$ with $A^{*}\in\mathcal{G}_{\theta_A}$. \\ \\ This completes our discussion of proof of the main results announced in Section \ref{Sd9}. \qed \begin{remark}\label{OP8} Results analogous to Remark \ref{siz},\ref{siz2} and Remark \ref{zz12} are valid for U1, U2 also. In particular, for $A^{*}=A^{*}_N$ (cf.\eqref{OP2}) and $B^{\#}$ any relative limit corresponding to $A^{*}$, we will have for $\zeta\in\mathbb{R}^N$ : \begin{equation*} B^{\#}\zeta\cdot\zeta\ \leq\ B^{\#}_{N,N}\zeta\cdot\zeta \end{equation*} where, $B^{\#}_{N,N}$ is given in Example \ref{FG5}. \qed\end{remark} \section{Optimality of Regions}\label{qw4} \setcounter{equation}{0} This section is devoted to the proof of Theorem \ref{qw6}. 
Let us recall the sets $\mathcal{K}_{(\theta_A,\theta_B)}$ and $\mathcal{K}^{f}_{(\theta_A,\theta_B)}(A^{*})$ introduced in \eqref{kab} and \eqref{kfab} respectively, to begin with the following result : \begin{lemma} Recall that the Hausdorff distance $d_H$ between two compact sets in a metric space is \begin{equation} d_H\left( K_1,K_2\right) = \underset{x_1\in K_1}{\max}\ \underset{x_2\in K_2}{\min}\ d(x_1,x_2) + \underset{x_2\in K_2}{\max}\ \underset{x_1\in K_1}{\min}\ d(x_2,x_1). \end{equation} There exist positive constants $C>0,\delta_A>0,\delta_B>0$ such that, for any $\theta_{A^1},\theta_{A^2}\in [0,1]$ and $\theta_{B^1},\theta_{B^2}\in [0,1]$, we have \begin{align} d_H\left(\mathcal{G}_{\theta_{A^1}},\mathcal{G}_{\theta_{A^2}}\right) &\leq\ C|\theta_{A^1} -\theta_{A^2}|^{\delta_A};\label{qw9}\\ d_H\left(\mathcal{K}^{f}_{(\theta_{A^1},\theta_{B^1})}(A^{*,1}), \mathcal{K}^{f}_{(\theta_{A^2},\theta_{B^2})}(A^{*,2})\right) &\leq\ C\left(|\theta_{A^1} -\theta_{A^2}|^{\delta_A} + |\theta_{B^1} -\theta_{B^2}|^{\delta_B}\right). \label{qw8} \end{align} Here $\mathcal{G}_{\theta_{A}}$ stands for the \textit{G-closure set} and $\mathcal{K}^f_{(\theta_A,\theta_B)}(A^{*})$ stands for the fibre over $A^{*}$ and it is defined in Remark \ref{qw1}. \end{lemma} \begin{proof} A proof of \eqref{qw9} can be found in \cite{A}, where periodic microstructures are used. Here we argue directly without using them. 
It is enough to prove that, for any sequence of characteristic functions $\chi_{\omega_{A^{\epsilon,1}}}$ with $\chi_{\omega_{A^{\epsilon,1}}}\rightharpoonup \theta_{A^1}$ in $L^\infty(\Omega)$ weak*, there exists another sequence of characteristic functions $\chi_{\omega_{A^{\epsilon,2}}}$ with $\chi_{\omega_{A^{\epsilon,2}}}\rightharpoonup \theta_{A^2}$ in $L^\infty(\Omega)$ weak* such that $|A^{*,1}-A^{*,2}|\leq C|\theta_{A^1}-\theta_{A^2}|^{\delta_A}$ where $A^{*,1}$ and $A^{*,2}$ are defined as $A^{\epsilon,i}\xrightarrow{H} A^{*,i}$ for $i=1,2$ with $A^{\epsilon,i} = \{a_1\chi_{\omega_A^{\epsilon,i}} + a_2(1-\chi_{\omega_A^{\epsilon,i}})\}I$ for $i=1,2$. Without loss of generality we assume that $\theta_{A^1}\geq \theta_{A^2}$, and use the following Lyapunov type result : If there is a sequence $\chi_{\omega_{A^{\epsilon,1}}}\rightharpoonup \theta_{A^1}$ in $L^\infty(\Omega)$ weak* with $\theta_{A^1}\geq \theta_{A^2}$, then there exists a sequence $\chi_{\omega_{A^{\epsilon,2}}}\rightharpoonup \theta_{A^2}$ in $L^\infty(\Omega)$ weak* such that $(\chi_{\omega_{A^{\epsilon,1}}}-\chi_{\omega_{A^{\epsilon,2}}})\geq 0$ for $x\in\Omega$ a.e. Then by using \eqref{FL16} one obtains $|A^{*,1}-A^{*,2}|\leq C|\theta_{A^1}-\theta_{A^2}|^{\delta_A}$ and consequently \eqref{qw9}. 
Similarly, one shows that for any sequence of characteristic functions $\chi_{\omega_{B^{\epsilon,1}}}$ with $\chi_{\omega_{B^{\epsilon,1}}}\rightharpoonup \theta_{B^1}$ in $L^\infty(\Omega)$ weak*, there exists another sequence of characteristic functions $\chi_{\omega_{B^{\epsilon,2}}}$ with $\chi_{\omega_{B^{\epsilon,2}}}\rightharpoonup \theta_{B^2}$ in $L^\infty(\Omega)$ weak* such that together with the above mentioned $\chi_{\omega_{A^{\epsilon,1}}},\chi_{\omega_{A^{\epsilon,2}}}$, one gets $|B^{\#,1}-B^{\#,2}|\leq C\left( |\theta_{A^1}-\theta_{A^2}|^{\delta_A} + |\theta_{B^1}-\theta_{B^2}|^{\delta_B}\right) $ from \eqref{FL14} and it gives \eqref{qw8}. \end{proof} \noindent \paragraph{Characterization of $\mathcal{G}_{(\theta_A,\theta_B)}$/Optimality of the regions $(Li,Uj)$ :} Proving optimality of the regions is equivalent to obtaining the following characterization of $\mathcal{G}_{(\theta_A(x),\theta_B(x))}$, $x$ almost everywhere in terms of $\mathcal{K}_{(\theta_A,\theta_B)}$ which was defined in Remark \ref{qw1} : \begin{align}\label{qw2} \mathcal{G}_{(\theta_A(x),\theta_B(x))}= \{ (A^{*}(x),B^{\#}(x))\in \mathcal{M}(a_1,a_2;\Omega)&\times \mathcal{M}(b_1,\widetilde{b_2};\Omega)\ |\ \notag\\ \ ( A^{*}(x)&, B^{\#}(x))\in \mathcal{K}_{(\theta_A(x),\theta_B(x))}\mbox{ a.e. }x\in \Omega\}. \end{align} \begin{proof}[Proof of \eqref{qw2} :] A similar pointwise characterization of $\mathcal{G}_{\theta_A(x)}$ in terms of bounds on $A^{*}$ is a celebrated theorem in this subject \cite{TF,MT,A}. The above result describes its extension by including relative limits $B^{\#}$ too. Let us underline some new features : there are four possible phase regions in the place of a single region for $A^{*}$. Secondly, due to non-commutativity of $A^{*},B^{\#}$, the bounds are formulated in terms of traces of product of matrices instead of their individual spectra. The proof is presented in three parts. 
\paragraph{First part : Macroscopically homogeneous case i.e. $(A^{*},B^{\#})$, $(\theta_A,\theta_B)$ are constants :} Here we assume that $A^{*}, B^{\#}$ are constant matrices and $\theta_A,\theta_B$ are constant functions. Examples of microstructures which are macroscopically homogeneous include periodic ones and Hashin-Shtrikman structures. Depending upon the values of $\theta_A$ and $\theta_B$, there are four sets of possible lower trace bounds and upper trace bounds $\{Li,Uj\}$, $i,j=1,2$ introduced in Section \ref{ad18}. Each pair defines a region denoted as $(Li,Uj)$. We consider, for instance, the region $(L1,U1)$, where $\theta_A\leq\theta_B$ and $\theta_A+\theta_B\leq 1$. Now let us take $(A^{*},B^{\#})$ lying in this region. Of course $A^{*}\in\mathcal{G}_{\theta_A}$, and $B^{\#}$ satisfies \eqref{Sd3}. We want to show that there exists $(A^\epsilon(x),B^\epsilon(x))$ satisfying \eqref{ta},\eqref{tb} such that $A^\epsilon(x) \xrightarrow{H} A^{*}$ and $B^\epsilon(x)\xrightarrow{A^\epsilon(x)} B^{\#}$ in ${|\!\!\!O}mega$. (Recall that in Section \ref{sil}, the converse of the above assertion was shown). \\ \noindent We divide the proof into several sub-cases. First we treat matrices $A^{*}$ similar to $A^{*}_N= diag(\lambda_1,..,\lambda_N)$ with $\lambda_1\geq\ldots\geq\lambda_N$. Next, we show how the general case can be reduced to the above case. \\ \noindent \textit{Case (1) :} Let us consider $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$ and any pair $(A^{*},B^{\#})$ satisfying the equality in L1 bound. Let's say spectrum of $A^{*} = \{\lambda_1,..,\lambda_N\}$. Without loss of generality, we assume the ordering $\lambda_1\geq\ldots\geq\lambda_i\geq\ldots\geq\lambda_N$. Then there exists an orthogonal matrix $P$ such that $A^{*}= P(diag\ (\lambda_1,..,\lambda_N))P^{-1}$. The required $A^\epsilon$ can be constructed as follows by change of variables technique. Let us denote ${|\!\!\!O}mega_0 = P^{-1}({|\!\!\!O}mega)$. 
It is classical to construct (see \cite{A}) $A^\epsilon_N(x_0)$, $x_0\in{|\!\!\!O}mega_0$ given by $N$-sequential laminated microstructures such that $A^\epsilon_N(x_0)$ $H$-converges to $A^{*}_N$ in ${|\!\!\!O}mega_0$ where $A^{*}_N = diag(\lambda_1,\ldots,\lambda_N)$. Next we define $A^\epsilon(x)\stackrel{def}{=} PA^\epsilon_N(x_0)P^{-1}$, $x\in{|\!\!\!O}mega$, $x_0\in{|\!\!\!O}mega_0$ with $x_0=P^{-1}x$. Then by the covariance property of $H$-convergence \cite[Lemma 21.1]{T}, $A^\epsilon(x)$ $H$-converges to $PA^{*}_NP^{-1} = A^{*}$\ \ in $P({|\!\!\!O}mega_0)={|\!\!\!O}mega$. We constructed (cf. Example \ref{ot5}) $B^\epsilon_{N,N}(x_0)$, $x_0\in{|\!\!\!O}mega_0$ and diagonal matrix $B^{\#}_{N,N}$ given by $N$-sequential laminated microstructures with $\omega_{A^\epsilon_N}\subseteq \omega_{B^\epsilon_{N,N}}$ (possible since $\theta_A\leq \theta_B$) such that $B^\epsilon_{N,N}(x_0)$ converges to $B^{\#}_{N,N}$ relative to $A^{\epsilon}_N(x_0)$ in ${|\!\!\!O}mega_0$ and the pair $(A^{*}_N,B^{\#}_{N,N})$ satisfies the equality in L1 bound. Having treated diagonal matrices representing $N$-rank laminates, let us now consider other matrices $(A^{*},B^{\#})$. Since the right hand side of L1 remains same for both $A^{*}$ and $A^{*}_N$ and since we are considering equality in L1 bound, we have obviously \begin{equation}\label{zz20} tr \ \{(A^{*}-a_1I)^{-1}(B^{\#}-b_1I)(A^{*}-a_1I)^{-1}\} = tr \ \{(A^{*}_N-a_1I)^{-1}(B^{\#}_{N,N}-b_1I)(A^{*}_N-a_1I)^{-1}\}. \end{equation} We now show that $B^{\#}$ and $B^{\#}_{N,N}$ have the same spectra and that $A^{*}$, $B^{\#}$ commute. 
For that, we prove the following result stated as : \begin{lemma}[Optimality-Commutativity]\label{zz14} For $A^{*}\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ and $B^{\#}\in\mathcal{M}(b_1,\widetilde{b_2};{|\!\!\!O}mega)$, we have \begin{align}\label{zz13} &tr \ \{(A^{*}-a_1I)^{-1}(B^{\#}-b_1I)(A^{*}-a_1I)^{-1} \}\notag\\ &\geq tr \{ (diag\ (\lambda_1-a_1,..,\lambda_N-a_1))^{-1}(diag\ (\mu_1-b_1,..,\mu_N-b_1))(diag\ (\lambda_1-a_1,..,\lambda_N-a_1))^{-1}\} \end{align} where spectrum of $A^{*}=\{\lambda_i\}_{i=1}^N$ with $\lambda_1\geq..\geq\lambda_i\geq..\geq\lambda_N$ and spectrum of $B^{\#}=\{\mu_i\}_{i=1}^N$ with $\mu_1\geq..\geq\mu_i\geq..\geq\mu_N$.\\ The equality in the above inequality \eqref{zz13} takes place if and only if $A^{*}$ commutes with $B^{\#}$. \end{lemma} \begin{proof} The above inequality \eqref{zz13} is a simple consequence of the following result from \cite{BT}, which says for any two positive semi-definite matrices $E$ and $F$ : \begin{center} $tr\ EF \geq \sum_{i=1}^N \sigma_i(E)\sigma_{N-i+1}(F)$ \end{center} where $\sigma_1(.)\leq..\leq\sigma_i(.)\leq..\leq\sigma_N(.)$ are the eigenvalues in increasing order. Further the equality holds only if $EF=FE$ in which case there exists an orthogonal matrix $Q$ satisfying $Q^{-1}EQ =diag(\sigma_1(E),..,\sigma_N(E))$ and $Q^{-1}FQ =diag(\sigma_1(F),..,\sigma_N(F))$. We apply the above result with the choice $E= (A^{*}-a_1I)^{-2}$ and $F=(B^{\#}-b_1I).$ \end{proof} \noindent In our case, using successively \eqref{zz13}, the inequality $B^{\#}\geq B^{\#}_{N,N}$ (cf. Remark \ref{zz12}) and finally \eqref{zz20} in that order, we see that equality holds in \eqref{zz13}. As a consequence, it follows that $B^{\#}$ and $B^{\#}_{N,N}$ have the same spectra and that $A^{*}$, $B^{\#}$ commute. Therefore, there exists an orthogonal matrix $Q$ diagonalizing $A^{*}$ and $B^{\#}$ simultaneously to give $A^{*}= Q(diag\ (\lambda_1,..,\lambda_N))Q^{-1}$ and $B^{\#}= Q(diag\ (\mu_1,..,\mu_N))Q^{-1}$. 
Now we can use as before change of variables technique using $Q$ instead of $P$. More precisely, we define ${|\!\!\!O}mega^\prime = Q^{-1}({|\!\!\!O}mega)$ and $A^\epsilon(x) = QA^\epsilon_N(x^\prime)Q^{-1}$, $B^\epsilon(x) = QB^\epsilon_{N,N}(x^\prime)Q^{-1}$, $x\in{|\!\!\!O}mega$, $x^\prime\in{|\!\!\!O}mega^\prime$, with $x^\prime = Q^{-1}x$. Applying Lemma \ref{pol6} it follows that $A^\epsilon \xrightarrow{H} A^{*}$ and $B^\epsilon(x)\xrightarrow{A^\epsilon} QB^{\#}_{N,N}Q^{-1}=B^{\#}$ in ${|\!\!\!O}mega$. \\ \noindent \textit{Case (2) :} Now let us consider $A^{*}\in int(\mathcal{G}_{\theta_A})$ arbitrary and continue to assume the equality in L1 bound. This implies $A^{*}\in \partial\mathcal{G}_{\theta}^L$ for some $\theta<\theta_A$. Recall the spectrum of $A^{*}$ is denoted $\{\lambda_i\}_{i=1}^N$ with $\lambda_1\geq\ldots\geq\lambda_i\geq\ldots\geq\lambda_N$. In this case also, there exists $\widetilde{A}^\epsilon_N(\widetilde{x})$ with $\widetilde{x}\in\widetilde{{|\!\!\!O}mega}$ constructed through sequential laminated microstructures such that $\widetilde{A}^\epsilon_N \xrightarrow{H} \widetilde{A}^{*}_N$ in $\widetilde{{|\!\!\!O}mega}$ with $\widetilde{A}^{*}_N= diag(\lambda_1,..,\lambda_N)$ with local proportion $\theta_A$ (see \cite[Page no. 125]{A}). Using this $\widetilde{A}^\epsilon_N$, we construct $\widetilde{B}^\epsilon_{N,N}$ (see Example \ref{ot5}) such that $\widetilde{B}^\epsilon_{N,N}\xrightarrow{\widetilde{A}^\epsilon_N}\widetilde{B}_{N,N}^{\#}$ in $\widetilde{{|\!\!\!O}mega}$ and the pair $(\widetilde{A}^{*}_N, \widetilde{B}^{\#}_{N,N})$ achieves the equality in the L1 bound. 
Then as in the previous case, we apply Remark \ref{zz12} over the fibre on $A^{*}\in \partial\mathcal{G}_{\theta}^L$ and Lemma \ref{zz14} to deduce that the spectra of $\widetilde{B}^{\#}_{N,N}$ and $B^{\#}$ is same and consequently the only if part of the lemma gives that there exists an orthogonal matrix $R$ such that $R\widetilde{A}^{*}_NR^{-1}=A^{*}$ and $R\widetilde{B}^{\#}_{N,N}R^{-1}=B^{\#}$ in ${|\!\!\!O}mega$. Now by following the change of variables techniques $R: \widetilde{{|\!\!\!O}mega} \mapsto {|\!\!\!O}mega$ as above, we obtain the pair $(A^\epsilon,B^\epsilon)$ satisfying \eqref{ta},\eqref{tb} such that $A^\epsilon\xrightarrow{H} A^{*}$ and $B^\epsilon\xrightarrow{A^\epsilon} B^{\#}$ in ${|\!\!\!O}mega$. Let us remark that the hypothesis $\theta_A\leq\theta_B$ is needed in the construction of $\widetilde{B}^{\epsilon}_{N,N}$ with $\omega_{\widetilde{A}^\epsilon_N}\subseteq \omega_{\widetilde{B}^\epsilon_{N,N}}$.\\ \noindent \textit{Case (3) :} The two cases above complete the proof of the saturation / optimality of the L1 bound. Similarly one can establish the saturation / optimality of the bound U1 using the hypothesis that $\theta_A +\theta_B\leq 1$. \\ \noindent \textit{Case (4) :} Here we prove the optimality of the region $(L1,U1)$, assuming of course that $\theta_A\leq \theta_B$ and $\theta_A+\theta_B\leq 1$. Take $(A^{*},B^{\#})$ lying in the region (L1,U1). Of course we will have $A^{*}\in\mathcal{G}_{\theta_A}$. We treat the special case $A^{*}\in\partial\mathcal{G}_{\theta_A}^L$ (for $A^{*}\in int(\mathcal{G}_{\theta_A})$, we follow the arguments of the Case (2) above). As before, let $A^{*}_N = diag(\lambda_1,\ldots,\lambda_N)$, where $\{\lambda_i\}_{1\leq i\leq N}=$ spectrum of $A^{*}$. It is classical to construct $N$-rank laminate $A^\epsilon_N$ (core $a_2$ and matrix $a_1$) such that $A^\epsilon_N \xrightarrow{H} A^{*}_N$ in ${|\!\!\!O}mega$. 
Using the saturation / optimality of the L1 and U1 individually (case (3)), we can construct as before $B^{\epsilon,1}_{N,N}$ and $B^{\epsilon,2}_{N,N}$ which converge in the relative sense with respect to $A^\epsilon_N$ where $\omega_{A^\epsilon_N}\subseteq \omega_{B^{\epsilon,1}_{N,N}}$ and $\omega_{A^\epsilon_N}\subseteq \omega^c_{B^{\epsilon,2}_{N,N}}$, (i.e. underline $A^\epsilon_N$ is common for both $B^{\epsilon,1}_{N,N}$ and $B^{\epsilon,2}_{N,N}$) to, say, $B^{\#,1}_{N,N}$ and $B^{\#,2}_{N,N}$ respectively, which are diagonal, such that $(A^{*}_N,B^{\#,1}_{N,N})$ and $(A^{*}_N,B^{\#,2}_{N,N})$ achieve the equality in L1 and U1 trace bounds respectively. \noindent As $(A^{*},B^{\#})$ with $A^{*}\in\partial\mathcal{G}^L_{\theta_A}$ satisfies both trace bounds L1 (cf.\eqref{eig}) and U1 (cf.\eqref{FL2}), i.e. \begin{align} &tr\ \{(B^{\#}-b_1I)(A^{*}-a_1I)^{-2}\} = \alpha_1 \ (\mbox{say})\ \geq \ tr\ \{(B^{\#,1}_{N,N}-b_1I)(A^{*}_N-a_1I)^{-2}\} =\widetilde{\alpha}_1 \ (\mbox{say}) \label{FL3}\\ &tr\ \{(b_2I-B^{\#})(A^{*}-a_1I)^{-2}\} = \alpha_2\ (\mbox{say})\ \geq \ tr\ \{(b_2I-B^{\#,2}_{N,N})(A^{*}_N-a_1I)^{-2}\}=\widetilde{\alpha}_2\ (\mbox{say}), \label{bs15} \end{align} we can find some scalars $\beta_1\geq 0$ and $\beta_2\geq 0$ such that : \begin{align} tr\ \{((B^{\#}-\beta_1I)-b_1I)(A^{*}-a_1I)^{-2}\} &= tr\ \{(B^{\#,1}_{N,N}-b_1I)(A^{*}_N-a_1I)^{-2}\} \label{pol3}\\ tr\ \{(b_2I-(B^{\#}+\beta_2I))(A^{*}-a_1I)^{-2}\} & = tr\ \{(b_2I-B^{\#,2}_{N,N})(A^{*}_N-a_1I)^{-2}\}. \label{pol4} \end{align} In fact, they are given by \begin{equation} \beta_1 = \frac{\alpha_1-\widetilde{\alpha}_1}{tr\ (A^{*}-a_1I)^{-2}}\ \mbox{ and } \ \beta_2 = \frac{\alpha_2-\widetilde{\alpha}_2}{tr\ (A^{*}-a_1I)^{-2}}. 
\end{equation} Let us define \begin{equation}\label{ED6} B^\epsilon_{N,N}\stackrel{def}{=} \frac{1}{(\beta_1+\beta_2)}(\beta_2B^{\epsilon,1}_{N,N}+ \beta_1B^{\epsilon,2}_{N,N}) \end{equation} then using Lemma \ref{pol5}, it is easily checked that \begin{equation}\label{ED7} B^{\epsilon}_{N,N} \xrightarrow{A^\epsilon_N} \frac{1}{(\beta_1+\beta_2)}(\beta_2B^{\#,1}_{N,N}+ \beta_1B^{\#,2}_{N,N}) \stackrel{def}{=} B^{\#}_{N,N} \mbox{ in }\Omega. \end{equation} Simple computation using \eqref{pol3},\eqref{pol4} and \eqref{ED7} also shows that \begin{align} tr\ \{(B^{\#}-b_1I)(A^{*}-a_1I)^{-2}\} & = tr\ \{(B^{\#}_{N,N}-b_1I)(A^{*}_N-a_1I)^{-2}\}\label{ED8}\\ tr\ \{(b_2I-B^{\#})(A^{*}-a_1I)^{-2}\} &= tr\ \{(b_2I-B^{\#}_{N,N})(A^{*}_N-a_1I)^{-2}\}.\label{ED9} \end{align} Now from \eqref{pol3}, by using the similar arguments presented in case $(1)$, we get : \begin{equation}\label{eg1} \mbox{Spectrum of $(B^{\#}-\beta_1I)=$ Spectrum of $B^{\#,1}_{N,N}$,} \end{equation} and similarly, from \eqref{pol4}, by using the similar arguments presented in case $(3)$, we get : \begin{equation}\label{eg2} \mbox{Spectrum of $(\beta_2I +B^{\#})=$ Spectrum of $B^{\#,2}_{N,N}$}. \end{equation} We claim now that \begin{equation}\label{ED10} \mbox{Spectrum of $B^{\#}=$ Spectrum of $B^{\#}_{N,N}$. } \end{equation} Proof of the claim \eqref{ED10} : Let us assume that $\mu_k$ is an eigenvalue of $B^{\#}$ with eigenvector $u_k$, i.e. $B^{\#}u_k =\mu_k u_k$ with $\mu_1\geq\ldots\geq\mu_N$. Then \eqref{eg1} shows that the spectrum of $(B^{\#}-\beta_1 I)$ is $\{\mu_i-\beta_1\}$ in decreasing order. Since $(B^{\#}-\beta_1 I)$ is a diagonal matrix, it follows that $(B^{\#}-\beta_1 I) = diag\ \{\mu_i-\beta_1\}$. Similarly \eqref{eg2} shows that $(\beta_2 I+B^{\#}) = diag\ \{\beta_2+\mu_i\}$. Now from the definition \eqref{ED7} of $B^{\#}_{N,N}$, it follows that $B^{\#}_{N,N} = diag\ \{\mu_i\}$. In particular, the claim \eqref{ED10} follows as a consequence. 
\\ Thanks to \eqref{ED10}, we are in a position to apply the converse part of Lemma \ref{zz14} in \eqref{ED8} or \eqref{ED9}, and we conclude that $A^{*}$, $B^{\#}$ commute. So there exists an orthogonal matrix $S$ (say) such that $SA^{*}_NS^{-1} = A^{*}$ and $SB^{\#}_{N,N}S^{-1}=B^{\#}$ in $\Omega$ and consequently, $A^\epsilon \stackrel{def}{=}SA^\epsilon_NS^{-1}\xrightarrow{H} A^{*}$ and $B^\epsilon \stackrel{def}{=}SB^\epsilon_{N,N}S^{-1}\xrightarrow{A^\epsilon} B^{\#}$ in $\Omega$. \begin{remark} It is important that there should be one common sequence $A^\epsilon$ in the proof of optimality of the region (L1,U1). That is why we have to go through these arguments. \end{remark} \noindent \textit{Case (5) :} The optimality of the other regions $(Li,Uj)$ can be shown analogously. \paragraph{Second Part : Reduction to Macroscopically homogeneous case, i.e. $(A^{*},B^{\#})$ and $(\theta_A,\theta_B)$ are constants :} Here we will show how to reduce the general case of variable $A^{*}(x),B^{\#}(x)$ and $\theta_A(x),\theta_B(x)$ to the special case where they are constants. It is based on piecewise constant approximation of functions. Such a procedure was adapted in the context of $H$-limits $A^{*}$ (see \cite[Theorem 2.1.2]{A}). With the same inspiration, we want to extend it to include the relative limits $B^{\#}$ also. To this end, we make use of fibre-wise convexity and Lemma \ref{zz15}, Remarks \ref{ub13}, \ref{sii}, \ref{qw1}. Let us begin by recalling a few elements of the reduction for $A^{*}$. Let $A^{*}(x)\in \mathcal{G}_{\theta_A(x)}$, $x\in\Omega$ a.e. be given. Let $\{\omega^n_k\}_{1\leq k\leq n}$ be an arbitrary family of disjoint open subsets covering $\Omega$ up to null sets, such that the maximal diameter of the collection goes to $0$ as $n\rightarrow\infty$. 
Let us define a piecewise constant function $\theta^n_A\in L^\infty(\Omega)$ by \begin{equation*} \theta^n_A(x) = \sum_{k=1}^n\theta_{A,k}^n\chi_{\omega_k^n}(x) \mbox{ with } \theta_{A,k}^n = \frac{1}{|\omega_k^n|}\int_{\omega_k^n}\theta_A(x) dx. \end{equation*} Then $\theta_A^n(x)\rightarrow \theta_A(x)$ strongly in $L^p(\Omega)$ for $1\leq p<\infty$. We recall the definition of the closed convex set $\mathcal{G}_{\theta_{A,k}^n}$ for $\theta_{A,k}^n$ being a constant : \begin{align*} \mathcal{G}_{\theta_{A,k}^n} = \{ A^{*}\ | \ A^{*} \mbox{ is a constant matrix and it satisfies bounds \eqref{FL11} } \ &\\ \mbox{ with constant in proportion }\theta_{A,k}^n\}.& \end{align*} By what is proved in the First Part above, $\mathcal{G}_{\theta_{A,k}^n}$ can also be characterized as \begin{align*} \mathcal{G}_{\theta_{A,k}^n} = \{ A^{*}\ | \ A^{*} \mbox{ is a constant matrix and it is an $H$-limit in }\omega_k^n \ & \\ \mbox{ with $(a_1,a_2)$ constant proportions } (\theta_{A,k}^n,1-\theta_{A,k}^n)\}. & \end{align*} Now, in each open set $\omega_k^n$, we set \begin{equation*} \widetilde{A}^n(x) \stackrel{def}{=} \mbox{ projection of }A^{*}(x) \mbox{ onto } \mathcal{G}_{\theta_{A,k}^n}. \end{equation*} Then for a.e. $x\in\omega_k^n$, following \eqref{qw9} we have \begin{equation*} |A^{*}(x)-\widetilde{A}^n(x)| \leq C|\theta_A(x)-\theta_{A,k}^n|^{\delta_A}, \end{equation*} for some $C>0,\delta_A>0$ and therefore the sequence $\widetilde{A}^n$ converges strongly to $A^{*}$ in $L^p(\Omega)$ for any $1\leq p<\infty$. Each matrix $\widetilde{A}^n$ is not yet piecewise constant. Therefore, we define a sequence of piecewise constant matrices \begin{equation*} \widehat{A}^n(x) = \sum_{k=1}^n\widehat{A}^n_k\chi_{\omega_k^n}(x) \mbox{ with } \widehat{A}^n_k = \frac{1}{|\omega_k^n|}\int_{\omega_k^n}\widetilde{A}^n(x) dx. \end{equation*} It follows that the sequence $\widehat{A}^n$ also converges strongly to $A^{*}$ in $L^p(\Omega)$. 
Unfortunately, there is no guarantee that each $\widehat{A}^n$ belongs to $\mathcal{G}_{\theta_{A,k}^n}$. Therefore, we define a constant matrix $A^n_k$ as \begin{equation*} A^n_k \stackrel{def}{=} \mbox{ projection of }\widehat{A}^n_k \mbox{ onto } \mathcal{G}_{\theta_{A,k}^n}. \end{equation*} This yields a piecewise constant matrix \begin{equation*} A^n(x) =\sum_{k=1}^nA^n_k\chi_{\omega_k^n}(x). \end{equation*} Then $A^n(x)\in \mathcal{G}_{\theta_{A,k}^n}$, $x\in\omega_k^n$ and $A^n$ converges to $A^{*}$ strongly in $L^p(\Omega)$ for $1\leq p<\infty$ and so $A^n$ $H$-converges to $A^{*}$ (see Remark \ref{sii}). Thanks to Lemma \ref{zz15} and the fibre-wise convexity structure (cf. Remark \ref{qw1}), we can follow the above arguments to obtain the required approximation of $B^{\#}(x)$ too. Indeed, let $(A^{*}(x),B^{\#}(x))\in\mathcal{G}_{(\theta_A(x),\theta_B(x))}$ with $A^{*}(x)\in\mathcal{G}_{\theta_A(x)}$, $x\in\Omega$ a.e. be given. Working with the same partition $\{\omega_k^n\}_k$ and proceeding as before, we define a piecewise constant function $\theta^n_B\in L^\infty(\Omega)$ by \begin{equation*} \theta^n_B(x) = \sum_{k=1}^n\theta_{B,k}^n\chi_{\omega_k^n}(x) \mbox{ with } \theta_{B,k}^n = \frac{1}{|\omega_k^n|}\int_{\omega_k^n}\theta_B(x) dx. \end{equation*} We recall that the closed convex set $\mathcal{K}^f_{(\theta_{A,k}^n,\theta_{B,k}^n)}(A^{*})$ for $\theta_{A,k}^n,\theta_{B,k}^n$ being constants, can be characterized as shown in the First Part above, i.e. \begin{align*} \mathcal{K}^f_{(\theta_{A,k}^n,\theta_{B,k}^n)}(A^{*}) = \{ & B^{\#};\ \ B^{\#} \mbox{ is a constant matrix and it is a relative limit in }\omega_k^n \\ & \mbox{associated with }A^{*}\in \mathcal{G}_{\theta_{A,k}^n}, \mbox{ with constant proportions } (\theta_{A,k}^n,\theta_{B,k}^n)\}. 
\end{align*} Now, in each open set $\omega_k^n$, we define \begin{equation*} \widetilde{B}^n(x) \stackrel{def}{=}\mbox{ projection of }B^{\#}(x) \mbox{ onto } \mathcal{K}^f_{(\theta_{A,k}^n,\theta_{B,k}^n)}(A^{*}(x)). \end{equation*} Note that because the fibre over $A^{*}$ is convex and closed, this is well defined. \\ \\ Then for a.e. $x\in\omega_k^n$, using \eqref{qw8} we have \begin{equation*} |B^{\#}(x)-\widetilde{B}^n(x)| \leq C\left(|\theta_A(x)-\theta_{A,k}^n|^{\delta_A} + |\theta_B(x)-\theta_{B,k}^n|^{\delta_B}\right), \end{equation*} for some $C>0$, $\delta_A,\delta_B>0$ and therefore the sequence $\widetilde{B}^n$ converges strongly to $B^{\#}$ in $L^p(\Omega)$ for any $1\leq p<\infty$. Each matrix $\widetilde{B}^n$ is not yet piecewise constant. Therefore, we define a sequence of piecewise constant matrices \begin{equation*} \widehat{B}^n(x) = \sum_{k=1}^n\widehat{B}^n_k\chi_{\omega_k^n}(x) \mbox{ with } \widehat{B}^n_k = \frac{1}{|\omega_k^n|}\int_{\omega_k^n}\widetilde{B}^n(x) dx . \end{equation*} It follows that the sequence $\widehat{B}^n$ also converges strongly to $B^{\#}$ in $L^p(\Omega)$. Next, we define a constant matrix $B^n_k$ as \begin{equation*} B^n_k \stackrel{def}{=}\mbox{ projection of }\widehat{B}^n_k \mbox{ onto } \mathcal{K}^f_{(\theta_{A,k}^n,\theta_{B,k}^n)}(A^{*}). \end{equation*} This yields a piecewise constant matrix \begin{equation*} B^n(x) =\sum_{k=1}^nB^n_k\chi_{\omega_k^n}(x), \end{equation*} which is a relative-limit, i.e., belongs to $\mathcal{K}^f_{(\theta_{A,k}^n,\theta_{B,k}^n)}(A^{*})$ for fixed $A^{*}\in \mathcal{G}_{\theta_{A,k}^n}$. Let us prove that the sequence $B^n$ converges to $B^{\#}$ strongly in $L^p(\Omega)$. Then, by Remark \ref{sii}, $B^n$ will converge to $B^{\#}$ relative to $A^n$. By construction, the projection $B^n_k$ satisfies \begin{equation} |B^n(x)-\widehat{B}^n(x)| \leq |\widetilde{B}^n(x)-\widehat{B}^n(x)|. 
\end{equation} Therefore, \begin{align*} |B^n(x)-B^{\#}(x)| \leq 2|\widehat{B}^n(x)-\widetilde{B}^n(x)| +|\widetilde{B}^n(x)-B^{\#}(x)|. \end{align*} We know that both of these terms on the right-hand side converge strongly to $0$ in $L^p$, so we deduce that $B^n$ converges strongly to $B^{\#}$ in $L^p(\Omega)$ for any $1\leq p<\infty$. Hence $B^n$ converges to $B^{\#}$ relative to the sequence $A^n$ in $\Omega$. \\ \\ In our problem, we have four regions $(Li,Uj)$ $i,j=1,2$, and the corresponding physical domains $\Omega_{(Li,Uj)}$ $i,j=1,2$ are defined as follows : \begin{equation}\begin{aligned}\label{FL1} \Omega_{(L1,U1)} &= \{ x\in\Omega : \theta_A(x)\leq \theta_B(x), \theta_A(x)+\theta_B(x)\leq 1\},\\ \Omega_{(L1,U2)} &= \{ x\in\Omega : \theta_A(x)\leq \theta_B(x), \theta_A(x)+\theta_B(x)> 1\},\\ \Omega_{(L2,U1)} &= \{ x\in\Omega : \theta_B(x)< \theta_A(x), \theta_A(x)+\theta_B(x)\leq 1\},\\ \Omega_{(L2,U2)} &= \{ x\in\Omega : \theta_B(x)< \theta_A(x), \theta_A(x)+\theta_B(x)> 1\}. \end{aligned}\end{equation} These domains provide a measurable disjoint cover for $\Omega$ : $\Omega = \underset{i,j=1,2}{\cup}\Omega_{(Li,Uj)}$. \\ \\ In the above arguments we may replace the open set $\Omega$ by $int(\Omega_{(Li,Uj)})$ and deal with its open covering given by $\{int(\Omega_{(Li,Uj)})\cap \omega^n_k\}_k$. Combination of the optimality result of the First Part and the approximation result of the Second Part yields the optimality of the region $(Li,Uj)$ in the sub-domain $int(\Omega_{(Li,Uj)})$. Since this step is analogous to the treatment of the classical case $A^{*}$, we will not give details. 
\paragraph{Third part : Case (a) : } If the interior of ${|\!\!\!O}mega_{(Li,Uj)}$, $i,j=1,2$ cover ${|\!\!\!O}mega$ upto a null-set, we can prove the optimality of all the four regions as follows using the optimality of individual four regions established in First and Second parts. Let us assume that $$|{|\!\!\!O}mega\ \smallsetminus\ \underset{i,j=1,2}{\cup}\ int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)})|=0.$$ Then according to First and Second parts, given $(A^{*}(x),B^{\#}(x))$, $x\in {|\!\!\!O}mega$, by restricting it on $int({|\!\!\!O}mega_{(Li,Uj)})$, there exist $ (A^\epsilon_{ij}, B^\epsilon_{ij})$ with the required property such that \begin{align*} A^\epsilon_{ij} \rightarrow A^{*}_{ij} \mbox{ in }L^p(int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)}), \quad B^\epsilon_{ij} \rightarrow B^{\#}_{ij} \mbox{ in }L^p(int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)}), \ \ i,j =1,2, \ \ 1\leq p<\infty. \end{align*} Putting them together, we define ($A^\epsilon,B^\epsilon$) on ${|\!\!\!O}mega$ by \begin{align*} A^\epsilon = A^\epsilon_{ij} \mbox{ on } int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)}),\quad B^\epsilon = B^\epsilon_{ij} \mbox{ on } int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)}). \end{align*} Then we have \begin{align*} A^\epsilon \rightarrow A^{*} \mbox{ in }L^p({|\!\!\!O}mega), \quad B^\epsilon \rightarrow B^{\#} \mbox{ in }L^p({|\!\!\!O}mega), \ \ i,j =1,2. \end{align*} Consequently, \begin{align*} A^\epsilon \xrightarrow{H} A^{*} \mbox{ in }{|\!\!\!O}mega, \quad B^\epsilon \xrightarrow{A^\epsilon} B^{\#} \mbox{ in }{|\!\!\!O}mega, \ \ i,j =1,2. \end{align*} \textbf{Case (b) : } If \begin{center}$|{|\!\!\!O}mega \smallsetminus \underset{i,j=1,2}{\cup}\ int\hspace{2pt}({|\!\!\!O}mega_{(Li,Uj)})|> 0 $. \end{center} Covering arguments given in previous case fail. \\ \\ However,the following covering arguments can be advanced. Indeed using Vitali covering (cf. 
\cite[Theorem 17.1]{DiB}), we have a countable collection of open cubes $\{\omega_{ij}^k\}_{k=1}^{\infty}$ with finite diameter and with pairwise disjoint interiors, such that \begin{equation*} |{|\!\!\!O}mega_{(Li,Uj)}\ \smallsetminus\ \underset{k}{\cup}\ \omega_{ij}^k | =0 \ \mbox{ for each }i,j=1,2. \end{equation*} So, we have \begin{equation*} |{|\!\!\!O}mega\ \smallsetminus\ \underset{i,j=1,2}{\cup}\underset{k}{\cup}\left( {|\!\!\!O}mega \cap \omega_{ij}^k\right) |=0. \end{equation*} Then again following the First and Second parts, given $(A^{*}(x),B^{\#}(x))$, $x\in {|\!\!\!O}mega$, by restricting it on ${|\!\!\!O}mega\cap\omega_{ij}^k$, there exist $((A^\epsilon_{ij})_k, (B^\epsilon_{ij})_k)$ with required properties such that \begin{align*} (A^\epsilon_{ij})_k \rightarrow A^{*} \mbox{ in }L^p({|\!\!\!O}mega\cap \omega_{ij}^k), \mbox{ and } (B^\epsilon_{ij})_k \rightarrow B^{\#} \mbox{ in }L^p({|\!\!\!O}mega\cap \omega_{ij}^k), \ i,j =1,2,\ k=1,2,.. \end{align*} Putting them together, we define $(A^\epsilon,B^\epsilon)$ on ${|\!\!\!O}mega$ via \begin{align*} A^\epsilon = (A^\epsilon_{ij})_k \mbox{ on } {|\!\!\!O}mega_{(Li,Uj)}\cap\omega_{ij}^k \mbox{ a.e. }\quad B^\epsilon = (B^\epsilon_{ij})_k \mbox{ on } {|\!\!\!O}mega_{(Li,Uj)}\cap\omega_{ij}^k \mbox{ a.e. }. \end{align*} Then we have \begin{align*} A^\epsilon \rightarrow A^{*} \mbox{ in }L^p({|\!\!\!O}mega), \quad B^\epsilon \rightarrow B^{\#} \mbox{ in }L^p({|\!\!\!O}mega), \ \ i,j =1,2. \end{align*} Consequently, \begin{align*} A^\epsilon \xrightarrow{H} A^{*} \mbox{ in }{|\!\!\!O}mega, \quad B^\epsilon \xrightarrow{A^\epsilon} B^{\#} \mbox{ in }{|\!\!\!O}mega, \ \ i,j =1,2. \end{align*} \end{proof} \begin{remark}\label{eg3} Before closing this section, we wish to highlight one important result which was intermediary. In the First Part of the proof, we have shown that for any given $(A^{*},B^{\#})\in \mathcal{K}_{(\theta_A,\theta_B)}$ i.e. being in one of the regions $(Li,Uj)$ $i,j=1,2$, i.e. 
$B^{\#}$ lying on the fibre over $A^{*}$, we have the commutation relation : \begin{equation}\label{eg4} A^{*}B^{\#} = B^{\#}A^{*}. \end{equation} The above result is extended later to \begin{equation} A^{*}(x)B^{\#}(x) = B^{\#}(x)A^{*}(x), \ \ x \mbox{ a.e. in }\Omega, \end{equation} by the arguments in the Second and Third Parts. This is a somewhat surprising property. \qed\end{remark} \section{Applications in Calculus of Variations}\label{qw5} \setcounter{equation}{0} In this section we mention two applications of our earlier results. In particular, we make use of the optimality of the bounds. \subsection{Application to an Optimal Design Problem} Here we mention an application to an optimal design problem (ODP). Let us consider the following model problem in ODP already treated in the literature \cite{GRB}. Our contribution is merely to point out the convergence of integrands (and hence integrals) and the advantage of using $I^{\#}$ in the relaxation process. We introduce some notations. Let us denote by Char($\Omega$) the set of all characteristic functions of measurable subsets of $\Omega$, i.e. \begin{equation*} \mbox{Char}(\Omega) = \{\chi : \Omega \to \{0,1\} \mbox{ measurable}\}. 
\end{equation*} For a given $\delta_A\in (0,1)$, let us consider the set $\mathcal{C}_{\delta_A}$ of classical microstructures defined by \begin{equation}\label{hsu} \mathcal{C}_{\delta_A} = \{\chi \in \mbox{Char}({|\!\!\!O}mega) : \frac{1}{|{|\!\!\!O}mega|}\int_{{|\!\!\!O}mega} \chi(x) dx = {\delta_A}\}\end{equation} and for any $\chi_{\omega_A}\in $Char$({|\!\!\!O}mega$), we define the functional $J(\chi_{\omega_A})$ as follows \begin{equation}\begin{aligned}\label{tz} J :\mbox{Char}({|\!\!\!O}mega) &\mapsto \mathbb{R}\\ \chi_{\omega_A} &\mapsto J(\chi_{\omega_A}) := \int_{{|\!\!\!O}mega} \nabla u_{\omega_A} \cdot\nabla u_{\omega_A}\ dx, \end{aligned}\end{equation} where $u_{\omega_A}\in H^1_0({|\!\!\!O}mega)$ is the solution the following state equation with a given $g\in H^{-1}({|\!\!\!O}mega)$ : \begin{equation}\label{tx} \begin{aligned} -div(A\nabla u_{\omega_A})&= g\quad\mbox{in } {|\!\!\!O}mega\\ u_{\omega_A}&= 0 \quad\mbox{on }\partial{|\!\!\!O}mega. \end{aligned} \end{equation} with $A\in \mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ is governed with two-phase medium as \begin{equation*} A(x) = \{a_1\chi_{\omega_A}(x) +a_2(1-\chi_{\omega_A}(x))\}I,\ \ x\in{|\!\!\!O}mega. \end{equation*} We are interested in finding the infimum value and minimizers of \begin{equation}\label{eiv} m=\underset{\chi_{\omega_A}\in \mathcal{C}_{\delta_A}}{\mbox{inf }}J(\chi_{\omega_A}). \end{equation} To this end, we do relaxation. 
For any ${\delta_A}\in (0,1)$, let us consider the set $\mathcal{D}_{\delta_A}$ of generalized microstructures defined by \begin{equation}\label{hsv} \mathcal{D}_{\delta_A} = \{\theta \in L^\infty({|\!\!\!O}mega;[0,1]): \frac{1}{|{|\!\!\!O}mega|}\int_{|\!\!\!O}mega \theta(x) dx = {\delta_A} \}.\end{equation} To find the relaxed version of \eqref{eiv}, we consider a sequence $A^\epsilon\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ as in \eqref{ta} with $\chi_{{\omega}_{A^{\epsilon}}}(x)\in \mathcal{C}_{\delta_A}$ and the corresponding state $u^\epsilon\in H^1_0({|\!\!\!O}mega)$ solving \eqref{tx}. The functional becomes \begin{equation}\label{ot6} J(\chi_{\omega_{A^\epsilon}}) = \int_{{|\!\!\!O}mega}\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}\ dx; \end{equation} The microstructures $\chi_{{\omega}_{A^{\epsilon}}}(x)$ constitute the following admissible set in which $\delta_A$ is given : \begin{align}\label{ED5} \mathcal{S} = &\mbox{ Collection of all microstructures } \chi_{{\omega}_{A^{\epsilon}}}(x) \mbox{ satisfying the following conditions : }\notag\\ &\frac{|\omega_{A^\epsilon}|}{|{|\!\!\!O}mega|}=\delta_A\ \forall \epsilon,\ \chi_{{\omega}_{A^{\epsilon}}}(x) \rightharpoonup \theta_A(x)\mbox{ in $L^\infty({|\!\!\!O}mega)$ weak* with }\frac{1}{|{|\!\!\!O}mega|}\int_{|\!\!\!O}mega \theta_A(x)dx =\delta_A, \notag\\ &\mbox{ and } A^\epsilon(x)\xrightarrow{H} A^{*}(x). 
\end{align} Then $A^{*}\in\mathcal{G}_{\theta_A}$ and \eqref{eiv} becomes \begin{equation}\label{ot7} m = \underset{\chi_{\omega_{A^\epsilon}}\in \mathcal{S}}{inf}\left(\underset{\epsilon\rightarrow 0}{lim\hspace{2pt}inf}\ \int_\Omega \nabla u^\epsilon\cdot\nabla u^\epsilon\ dx\right).\end{equation} Our main claim in this paragraph is that \begin{equation}\label{ot8} m = \underset{\theta_A\in \mathcal{D}_{\delta_A}}{min}\left(\underset{A^{*}\in\mathcal{G}_{\theta_A}}{min}\ \int_\Omega \frac{|(a_2I-A^{*}(x))\nabla u_{\theta_A}(x)|^2}{a_2(a_2-a_1)\theta_A(x)} dx+\frac{1}{a_2}\langle g,u_{\theta_A}\rangle \right),\end{equation} where $u_{\theta_A}\in H^1_0(\Omega)$ is the solution of the following homogenized equation with $A^{*}\in\mathcal{G}_{\theta_A}$ : \begin{equation}\label{ot19}\begin{aligned} -div(A^{*}\nabla u_{\theta_A}) &= g \mbox{ in }\Omega\\ u_{\theta_A} &=0 \mbox{ on } \partial\Omega . \end{aligned} \end{equation} In order to show the above relaxation identity \eqref{ot8}, we pass to the limit $\epsilon\rightarrow 0$ in \eqref{ot6} and we have \begin{equation} J(\theta_A,A^{*},I^{\#}):=\ \underset{\epsilon\rightarrow 0}{lim\hspace{2pt}inf}\ J(\chi_{\omega_{A^\epsilon}}) = \int_{\Omega}I^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}\ dx \end{equation} where $ u^{\epsilon} \rightharpoonup u_{\theta_A} \mbox{ weakly in }H^{1}_{0}(\Omega),\ A^{\epsilon}\nabla u^{\epsilon} \rightharpoonup A^{*}\nabla u_{\theta_A} \mbox{ weakly in }(L^{2}(\Omega))^N$, with $A^{*}\in\mathcal{G}_{\theta_A}$ and the pair $(A^{*},I^{\#})\in \mathcal{G}_{(\theta_A,1)}$, the set defined by the lower trace bound L (cf.~\eqref{tw}) and upper trace bound U (cf.~\eqref{tq}) with $b=1$. (See \eqref{qw2} for the characterization of $\mathcal{G}_{(\theta_A,1)}$). 
The optimality of the region defined by the above bounds gives that the relaxation of \eqref{eiv} : \begin{equation}\label{lb13} m =\ \mbox{min }\{ J(\theta_A, A^{*},I^{\#});\ \theta_A\in\mathcal{D}_{\delta_A},\ (A^{*},I^{\#})\in\mathcal{G}_{(\theta_A,1)}\}. \end{equation} Now using the lower bound \eqref{bs1} in Remark \ref{hsg}, we have the pointwise lower bound on the integrand $I^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}$ : \begin{equation}\label{os1} I^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A} \geq\ {a_2}^{-1}\{A^{*} + (a_2I-\overline{A})^{-1}(a_2I-A^{*})^2\}\nabla u_{\theta_A}\cdot \nabla u_{\theta_A}. \end{equation} Therefore, \begin{equation}\label{eg8} m \geq \underset{\theta_A\in \mathcal{D}_{\delta_A}}{min}\left(\underset{A^{*}\in\mathcal{G}_{\theta_A}}{min}\ \int_{|\!\!\!O}mega h(A^{*}) \nabla u_{\theta_A}\cdot \nabla u_{\theta_A}dx\right) \end{equation} where $h$ is the map $$ A^{*}\mapsto h(A^{*}):= {a_2}^{-1}\{A^{*} + (a_2I-\overline{A})^{-1}(a_2I-A^{*})^2\}.$$ Now we vary $A^{*}\in\mathcal{G}_{\theta_A}$ along the horizontal segment connecting $A^{*}\in\mathcal{G}_{\theta_A}$ to the $N$-rank laminate $A^{*}_N\in\partial\mathcal{G}^{U}_{\theta_A}$ in the phase space of eigenvalues of $A^{*}$. Note that, the eigenvalues $\{\lambda_2,..,\lambda_N\}$ of matrices of $A^{*}$ do not change whereas $\lambda_1$ increases along the segment. Let us restrict $h$ to the above segment. Such a restriction is a function of a single variable $t$ along the segment. It is easy to check that $$\frac{d h}{d t}|_{t=\lambda_1} = \frac{(2t-a_2-\overline{a})}{a_2(a_2-\overline{a})}|_{t=\lambda_1} = \frac{(2\lambda_1-a_2-\overline{a})}{a_2(a_2-\overline{a})} <0 $$ as $\lambda_1\leq\overline{a}=(a_1\theta_A+(1-\theta_A)a_2) \mbox{ and }\overline{a}<a_2$.\\ \\ Thus $h$ is decreasing as $\lambda_1$ increases. 
Hence we get $h(A^{*})\geq h(A^{*}_N)$.\\ \\ Now from Remark \ref{hsg}, for each $A^{*}_N\in\partial\mathcal{G}^{U}_{\theta_A}$ given by \eqref{OP3} with matrix $a_2I$ and core $a_1I$, the corresponding $N$-sequential laminates $I^{\#}_N$ given in \eqref{bs13} achieve the equality in the minimization problem \eqref{eg8}. Thus from \eqref{lb13}, it follows that \[m\ \ = \int_{\Omega} I^{\#}_N \nabla u^{*}_{\theta_A}\cdot\nabla u^{*}_{\theta_A}\ dx. \] Simple computations show that \[m = \int_\Omega \frac{|(a_2I-A^{*}_N(x))\nabla u^{*}_{\theta_A}(x)|^2}{a_2(a_2-a_1)\theta_A(x)}dx +\frac{1}{a_2}\langle g(x),u^{*}_{\theta_A}(x)\rangle \] where $u^{*}_{\theta_A}$ is the solution of \eqref{ot19} corresponding to the $N$-sequential laminates $A^{*}_N$ with matrix $a_2I$ and core $a_1I$. In particular, \eqref{ot8} follows, describing its minimum value as well as the minimizers and the underlying microstructures. \qed \subsection{Application to an Optimal Oscillation-Dissipation Problem}\label{qw10} Here we mention another application to a minimization problem, where the results of Section \ref{ad18} are useful. 
Let us define the functional $J(\chi_{\omega_A},\chi_{\omega_B})$ as \begin{equation}\begin{aligned}\label{ub11} J :\mbox{Char}({|\!\!\!O}mega)\times \mbox{Char}({|\!\!\!O}mega) &\mapsto \mathbb{R}\\ (\chi_{\omega_A},\chi_{\omega_B}) &\mapsto J(\chi_{\omega_A},\chi_{\omega_B}) := \int_{{|\!\!\!O}mega} B\nabla u_{\omega_A} \cdot\nabla u_{\omega_A} dx, \end{aligned}\end{equation} where $u_{\omega_A}$ solves \eqref{tx}, and $B$ is governed with the two phase medium as \begin{equation*} B(x) = \{b_1\chi_{\omega_B} +b_2(1-\chi_{\omega_B})\}I, \ x\in{|\!\!\!O}mega \end{equation*} with $\chi_{\omega_B}(x)\in \mathcal{C}_{\delta_B}$ for some given ${\delta_B}\in (0,1)$, as introduced in \eqref{hsu}.\\ \\ We are interested in solving the following non-convex minimization problem : \begin{equation}\label{ub20} m^\prime =\underset{(\chi_{\omega_A},\chi_{\omega_B})\in (\mathcal{C}_{{\delta_A}}\times \mathcal{C}_{{\delta_B}})}{\mbox{inf }}J(\chi_{\omega_A},\chi_{\omega_B}). \end{equation} In classical problems in Calculus of Variations in which the homogenization theory is applied, we usually have minimization with respect to $\chi_{\omega_A}$ but not with respect to $\chi_{\omega_B}$. Minimization with respect to $\chi_{\omega_B}$ is a new aspect. Interpretation of such problems was given in Introduction. As usual, there are no minimizers among characteristic functions and there is a need to relax the problem. To find the relaxation of \eqref{ub20}, we consider sequence $A^\epsilon\in\mathcal{M}(a_1,a_2;{|\!\!\!O}mega)$ given in \eqref{ta} with $\chi_{{\omega}_{A^{\epsilon}}}(x)\in \mathcal{C}_{\delta_A}$, and the corresponding state sequence $u^\epsilon\in H^1_0({|\!\!\!O}mega)$ which solves \eqref{tx}. 
Let us also consider a sequence $B^\epsilon\in\mathcal{M}(b_1,b_2;\Omega)$ given in \eqref{tb} with $\chi_{{\omega}_{B^{\epsilon}}}(x)\in\mathcal{C}_{\delta_B}$ and the functional becomes \begin{equation}\label{Sd16} J(\chi_{\omega_{A^\epsilon}},\chi_{\omega_{B^\epsilon}}) = \int_{\Omega}B^\epsilon\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}\ dx. \end{equation} The pair of microstructures $(\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x))$ constitute the following admissible set in which $\delta_A$, $\delta_B$ are given : \begin{align}\label{ub17} \mathcal{S} = &\mbox{ Collection of all pairs of microstructures } (\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x)) \mbox{ having the properties : }\notag\\ &(\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x)) \ |\ \left(\frac{|\omega_{A^\epsilon}|}{|\Omega|},\frac{|\omega_{B^\epsilon}|}{|\Omega|}\right)=({\delta_A},{\delta_B})\ \forall\epsilon, (\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x)) \rightharpoonup (\theta_A(x),\theta_B(x))\notag\\ &\mbox{in $L^\infty(\Omega)$ weak*, with }\left(\frac{1}{|\Omega|}\int_\Omega \theta_A(x)dx,\frac{1}{|\Omega|}\int_\Omega \theta_B(x)dx\right) =({\delta_A},{\delta_B}),\notag\\ &\mbox{ and }\ A^\epsilon(x) \xrightarrow{H}A^{*}(x),\ B^\epsilon(x)\xrightarrow{A^\epsilon}B^{\#}(x). \end{align} Then $A^{*}\in\mathcal{G}_{\theta_A}$ and $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$, a set which has been introduced in Section \ref{ad18}. Note that we need here the convergence of $B^\epsilon$ towards $B^{\#}$ relative to $A^\epsilon$.\\ Let $(\chi_{{\omega}_{A^{\epsilon}}}(x),\chi_{{\omega}_{B^{\epsilon}}}(x))\in\mathcal{S}$ and let $u^\epsilon\in H^1_0(\Omega)$ solve \eqref{tx}. 
Then \eqref{ub20} becomes : \begin{equation}\label{Sd17} m^\prime = \underset{(\chi_{\omega_{A^\epsilon}},\chi_{\omega_{B^\epsilon}})\in \mathcal{S}}{inf}\left(\underset{\epsilon\rightarrow 0}{lim\hspace{2pt}inf}\ J(\chi_{\omega_{A^\epsilon}},\chi_{\omega_{B^\epsilon}})\right)=\underset{(\chi_{\omega_{A^\epsilon}},\chi_{\omega_{B^\epsilon}})\in \mathcal{S}}{inf}\left(\underset{\epsilon\rightarrow 0}{lim\hspace{2pt}inf}\ \int_{{|\!\!\!O}mega}B^\epsilon\nabla u^{\epsilon}\cdot\nabla u^{\epsilon}\ dx\right).\end{equation} By passing to the limit as $\epsilon\rightarrow 0$ in \eqref{Sd17} we obtain \begin{equation}\label{ub14} J(\theta_A,\theta_B,A^{*},B^{\#}):= \underset{\epsilon\rightarrow 0}{lim\hspace{2pt}inf} J(\chi_{\omega_{A^\epsilon}},\chi_{\omega_{B^\epsilon}}) = \int_{{|\!\!\!O}mega}B^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}\ dx \end{equation} where, $u_{\theta_A}\in H^1_0({|\!\!\!O}mega)$ is the solution of \eqref{ot19} with $A^{*}\in\mathcal{G}_{\theta_A}$ and as a pair $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$.\\ \\ The optimality result (cf. 
Remark \ref{ED4} and $(5)$ of Theorem \ref{qw6}) shows that the relaxation of \eqref{ub20} is given by : \begin{equation}\label{lb11} m^\prime = \mbox{min\ }\{ J(\theta_A,\theta_B,A^{*},B^{\#}) ; \ (\theta_A,\theta_B)\in\mathcal{D}_{\delta_A}\times \mathcal{D}_{\delta_B},\ A^{*}\in\mathcal{G}_{\theta_A},(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}\} \end{equation} where, $\mathcal{D}_{\delta_A},\mathcal{D}_{\delta_B}$ are defined for given ${\delta_A},{\delta_B}\in(0,1)$, as introduced in \eqref{hsv}.\\ \\ We split the integral $J(\theta_A,\theta_B,A^{*},B^{\#})$ in \eqref{ub14} into two parts as follows : \begin{equation}\label{ub16} J(\theta_A,\theta_B,A^{*},B^{\#}) = \int_{{|\!\!\!O}mega_{L1}} B^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}\ dx + \int_{{|\!\!\!O}mega_{L2}}{A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma_{\theta_A}\cdot\sigma_{\theta_A}\ dx \end{equation} where, ${|\!\!\!O}mega_{L1} = \{x\in{|\!\!\!O}mega\ |\ \theta_A(x) \leq \theta_B(x)\}$ and ${|\!\!\!O}mega_{L2} = \{x\in{|\!\!\!O}mega\ |\ \theta_B(x)< \theta_A(x)\}$ and $\sigma_{\theta_A}=A^{*}\nabla u_{\theta_A}$. 
Let us assume that $|int\ {|\!\!\!O}mega_{L1}|=|{|\!\!\!O}mega_{L1}|$ and $|int\ {|\!\!\!O}mega_{L2}|=|{|\!\!\!O}mega_{L2}|$.\\ \\ Now using the pointwise bound on the above integrands $B^{\#}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}$ and ${A^{*}}^{-1}B^{\#}{A^{*}}^{-1}\sigma_{\theta_A}\cdot\sigma_{\theta_A}$ obtained in Remark \ref{siz}, \ref{siz2} for $A^{*}\in\mathcal{G}_{\theta_A}$ and the associated pair $(A^{*},B^{\#})\in\mathcal{G}_{(\theta_A,\theta_B)}$, we have : \begin{equation}\label{tu} J(\theta_A,\theta_B,A^{*},B^{\#})\geq J_1(\theta_A,\theta_B,A^{*},M_{AB},M_B) + J_2(\theta_A,\theta_B,A^{*},M^{\prime}_{AB},M^{\prime\prime}_{AB}) \end{equation} where using \eqref{FG19} we define \begin{align*} J_1(\theta_A,\theta_B,A^{*},M_{AB},M_B)= \int_{{|\!\!\!O}mega_{L1}}& \{ b_1I + 2 (\overline{B}-b_1I)(\overline{A}_{\theta}-a_1I)^{-1}(A^{*}-a_1I)\notag\\ &+(Y_{\theta}- (\overline{B}-b_1 I))(\overline{A}_{\theta}-a_1I)^{-2}(A^{*}-a_1I)^2\}\nabla u_{\theta_A}\cdot\nabla u_{\theta_A}\ dx \end{align*} with $\overline{A}_\theta = (a_1\theta +a_2(1-\theta))I$, \begin{align*}Y_\theta &=\{\frac{b_1(a_2-a_1)^2}{a_1^2}\theta(1-\theta) +\frac{(b_2-b_1)^2}{b_1}\theta_B(1-\theta_B)-\frac{2(b_2-b_1)(a_2-a_1)}{a_1}\theta(1-\theta_B)\}M_{AB}\eta\cdot\eta\\ &\qquad- \frac{\theta_B(1-\theta_B)(b_2-b_1)^2}{b_1}M_B\eta\cdot\eta. 
\end{align*} $M_B,M_{AB}$ are symmetric non-negative definite matrices with unit trace, whose expression can be found in \eqref{siv} and \eqref{dc19} respectively replacing $\theta_A$ by $\theta$, where $\theta$ is uniquely determined by \eqref{eib}.\\ \\ Analogously using \eqref{siu} we define \begin{align*} J_2(\theta_A,\theta_B,A^{*},M^{\prime}_{AB},M^{\prime\prime}_{AB})= \int_{{|\!\!\!O}mega_{L2}}& \{ cI + 2(L_\theta-cI)(\underline{A}_{\theta}^{-1} - a_2^{-1}I)^{-1}({A^{*}}^{-1}- a_2^{-1}I)\notag\\ &+(Y^\prime_\theta- (L_\theta-cI))(\underline{A}_\theta^{-1} - a_2^{-1}I)^{-2}({A^{*}}^{-1}- a_2^{-1}I)^{2}\}\sigma_{\theta_A}\cdot\sigma_{\theta_A}\ dx \end{align*} with \begin{align*} &\underline{A}_\theta^{-1} =(\frac{\theta}{a_1} +\frac{1-\theta}{a_2})I,\ c = min\{\frac{b_2}{a_2^2},\frac{b_1}{a_1^2}\}, \ L_\theta= \{\frac{b_1}{a_1^2}\theta_B(x) + \frac{b_2}{a_1^2}(\theta(x) - \theta_B(x)) + \frac{b_2}{a_2^2}(1-\theta(x))\}I,\\ &Y^\prime_\theta = cL^\prime_{AB}(I-M^\prime_{AB})\eta\cdot\eta- \frac{1}{c}L^{\prime\prime}_{AB}(I-M^{\prime\prime}_{AB})\eta\cdot\eta \ \ (\mbox{cf. \eqref{FG12} where $\theta_A$ is replaced by $\theta$.}) \end{align*} where $M^\prime_{AB},M^{\prime\prime}_{AB}$ are symmetric non-negative definite matrices with unit trace can be found in \eqref{zz9} and \eqref{zz8} respectively replacing $\theta_A$ by $\theta$ and here $\theta$ is uniquely determined by \eqref{eic} and it depends only on $A^{*}$. For the $J_1$- term, we need to use the optimality of the equality in L1 bound described in (1) of Theorem \ref{qw6}. In fact, the construction in Example \ref{ot5} shows that the optimal microstructure is given by $A^{*,1}_N$, the $N$ sequential laminate with core $a_2I$ and matrix $a_1I$ with $\theta_A\in \mathcal{D}_{\delta_A}$, and the corresponding $(N,N)$-sequential laminates $B^{\#,1}_{N,N}$ (cf.\eqref{sie}) associated with $A^{*,1}_N$ under the condition $\omega_{A^\epsilon}\subset\omega_{B^\epsilon}$. 
This construction can be carried out in $int({|\!\!\!O}mega_{L1})$ because $\theta_A\leq\theta_B$. We have also computed the associated matrices $M_B,M_{AB}$ as $M_B=M_{AB}=\sum_{i=1}^N m_i\frac{e_i\otimes e_i}{e_i\cdot e_i}$ where, $m_i\geq 0$ for $i=1,..,N$ and $\sum_i m_i =1$. And further it is found that $$ J_1 = \int_{{|\!\!\!O}mega_{L1}}B^{\#,1}_{N,N}\nabla \widetilde{u_{\theta_A}}\cdot\nabla \widetilde{u_{\theta_A}}\ dx. $$ Similarly, for the $J_2$- term, we need to use the optimality of the equality in L2 bound described in (2) of Theorem \ref{qw6}. In fact, the construction in Example \ref{Sd20} shows that the optimal microstructure is given by $A^{*,2}_N$ the $N$ sequential laminate with core $a_1I$ and matrix $a_2I$ with $\theta_A\in \mathcal{D}_{\delta_A}$, and the corresponding $(N,N)$-sequential laminates $B^{\#,2}_{N,N}$ (cf.\eqref{ot16}) associated with $A^{*,2}_N$ under the condition $\omega_{B^\epsilon}\subset\omega_{A^\epsilon}$. This construction is possible in ${|\!\!\!O}mega_{L2}$ since $\theta_B<\theta_A$. Moreover, we have $$J_2 = \int_{{|\!\!\!O}mega_{L2}}(A^{*,2}_N)^{-1}B^{\#,2}_{N,N}(A^{*,2}_N)^{-1}\widetilde{\sigma_{\theta_A}}\cdot\widetilde{\sigma_{\theta_A}}\ dx.$$ In the above we have used $\widetilde{u_{\theta_A}}$ ($\widetilde{\sigma}_{\theta_A}$ is the associated homogenized flux) for the homogenized limit of $\widetilde{u^\epsilon_{\theta_A}}$ which is the solution of \eqref{tx} with coefficient matrix $\widetilde{A^{\epsilon}}$ defined in ${|\!\!\!O}mega$ by \begin{align*} \widetilde{A^{\epsilon}} &= A^{\epsilon,1} \mbox{ in }{|\!\!\!O}mega_{L1} \mbox{ \ (the $N$-sequential laminate microstructure with core $a_2I$ and matrix $a_1I$)},\\ &= A^{\epsilon,2} \mbox{ in }{|\!\!\!O}mega_{L2} \mbox{ \ (the $N$-sequential laminate microstructure with core $a_1I$ and matrix $a_2I$)}. 
\end{align*} The associated homogenized matrix $\widetilde{A^{*}}$ in ${|\!\!\!O}mega$ is also given by \begin{align*} \widetilde{A^{*}} &= A^{*,1}_N \mbox{ in }{|\!\!\!O}mega_{L1} \mbox{ \ (the $N$-sequential laminate with core $a_2I$ and matrix $a_1I$)},\\ &= A^{*,2}_N \mbox{ in }{|\!\!\!O}mega_{L2} \mbox{ \ (the $N$-sequential laminate with core $a_1I$ and matrix $a_2I$)}. \end{align*} Thus, \begin{equation*}J \geq \int_{{|\!\!\!O}mega_{L1}}B^{\#,1}_{N,N}\nabla \widetilde{u_{\theta_A}}\cdot\nabla\widetilde{u_{\theta_A}}\ dx + \int_{{|\!\!\!O}mega_{L2}}(A^{*,2}_N)^{-1}B^{\#,2}_{N,N}(A^{*,2}_N)^{-1}\widetilde{\sigma_{\theta_A}}\cdot\widetilde{\sigma_{\theta_A}}\ dx.\end{equation*} Therefore, from \eqref{lb11} it follows that \begin{equation*} m^\prime = \int_{{|\!\!\!O}mega_{L1}}B^{\#,1}_{N,N}\nabla\widetilde{u_{\theta_A}}\cdot\nabla\widetilde{u_{\theta_A}}\ dx + \int_{{|\!\!\!O}mega_{L2}}B^{\#,2}_{N,N}\nabla\widetilde{u_{\theta_A}}\cdot\nabla\widetilde{u_{\theta_A}}\ dx.\end{equation*} Thus we have solved the problem \eqref{ub20} completely describing the minimum value as well the minimizers $(A^{*},B^{\#})$ and the underlying microstructures for both $A^{*}$ and $B^{\#}$. They are found among ``piecewise'' $N$-laminates. It is interesting to see minimizer admitting an interface defined by $\{\theta_A=\theta_B\}$ across which it interchanges core and matrix values. \qed \begin{remark}\label{bs6} We have solved the minimization problem \eqref{ub20} as an application of Theorem \ref{qw6} $(1),(2),(5)$. One may solve other types of optimization problems as an application of other parts of Theorem \ref{qw6}. 
\begin{equation*} (i)\ \underset{\chi_{\omega_A}\in \mathcal{C}_{{\delta_A}}}{\mbox{inf }}\ \underset{\chi_{\omega_B}\in \mathcal{C}_{{\delta_B}}}{\mbox{sup }}J(\chi_{\omega_A},\chi_{\omega_B}), \ \mbox{ or, }\ (ii)\ \underset{\chi_{\omega_B}\in \mathcal{C}_{{\delta_B}}}{\mbox{inf }}\ \underset{\chi_{\omega_A}\in \mathcal{C}_{{\delta_A}}}{\mbox{sup }}J(\chi_{\omega_A},\chi_{\omega_B}) \end{equation*} or, \begin{equation*} (iii)\ \underset{(\chi_{\omega_A},\chi_{\omega_B})\in (\mathcal{C}_{{\delta_A}}\times \mathcal{C}_{{\delta_B}})}{\mbox{sup }}J(\chi_{\omega_A},\chi_{\omega_B}). \end{equation*} For instance, resolution of $(i)$ requires optimality of the region $(Li,Uj)$, $i,j=1,2$ described in Theorem \ref{qw6} $(3),(4)$. It is more complicated than \eqref{ub20}. For details, see \cite{TG-MV}. \qed\end{remark} \paragraph{Acknowledgement :} This work has been carried out within a project supported by the Airbus Group Corporate Foundation Chair ``Mathematics of Complex Systems'' established at the Tata Institute of Fundamental Research (TIFR) -- Centre for Applicable Mathematics. \end{document}
\begin{document} \title{Linear families of smooth hypersurfaces over finitely generated fields} \author{Shamil Asgarli} \address{Department of Mathematics and Computer Science \\ Santa Clara University \\ 500 El Camino Real \\ USA 95053} \email{[email protected]} \author{Dragos Ghioca} \address{Department of Mathematics, University of British Columbia, Vancouver, BC V6T 1Z2} \email{[email protected]} \author{Zinovy Reichstein} \address{Department of Mathematics, University of British Columbia, Vancouver, BC V6T 1Z2} \email{[email protected]} \subjclass[2020]{Primary 14N05; Secondary 14J70, 14G15} \keywords{linear system, hypersurface, finite fields, smoothness} \begin{abstract} Let $K$ be a finitely generated field. We construct an $n$-dimensional linear system $\mathcal{L}$ of hypersurfaces of degree $d$ in $\operatorname{\mathbb{P}}^n$ defined over $K$ such that each member of $\mathcal{L}$ defined over $K$ is smooth, under the hypothesis that the characteristic $p$ does not divide $\gcd(d, n+1)$ (in particular, there is no restriction when $K$ has characteristic $0$). Moreover, we exhibit a counterexample when $p$ divides $\gcd(d, n+1)$. \end{abstract} \maketitle \section{Introduction}\label{sect:intro} The study of hypersurfaces varying in a pencil, or more generally, in a linear system of arbitrary dimension, is an active research area. For instance, determining the number of reducible members in a pencil is already a challenging problem \cite{Ste89}, \cite{Vis93}, \cite{PY08}. When the base field is a number field, the study of pencils has deep connections to Diophantine geometry; see, for example \cite{DGH21}. Linear systems of hypersurfaces over finite fields have been studied by Ballico~\cite{Bal07}, \cite{Bal09}. Our primary goal in the present paper is to address the following question from a recent paper~\cite{AG22} by the first two authors. 
While the version stated in~\cite{AG22} was concerned with linear systems of hypersurfaces over finite fields, in this paper we will work over an arbitrary \emph{finitely generated field}. Recall that a field $K$ is called finitely generated if it is generated by a finite number of elements as a field (or equivalently, as a field extension of its prime subfield). \begin{question}\label{quest:main} Let $K$ be a finitely generated field and $r \geqslant 1$, $n\geqslant 2$, $d\geqslant 2$ be integers. Do there exist $r+1$ linearly independent homogeneous polynomials $F_0, F_1, ..., F_r \in K[x_0, \ldots, x_n]$ of degree $d$ such that the hypersurface $$ X_{[a_0:a_1:\ldots:a_r]} = \{a_0 F_0 + a_1 F_1 + ... + a_r F_r = 0\} \subset \mathbb{P}^n $$ is smooth for every $[a_0:a_1:\ldots:a_r]\in\mathbb{P}^r(K)$? \end{question} Here, as usual, ``smooth" means ``smooth at every $\overline{K}$-point", not just at every $K$-point. Question~\ref{quest:main} can be rephrased in geometric terms as follows. Consider the linear system $\mathcal{L}=\langle F_0, ..., F_r\rangle$ of (projective) dimension $r$ spanned by $F_0, \ldots, F_r$. We say that $\mathcal{L}$ is $K$-\emph{smooth} if for every $[a_0:a_1:\ldots:a_r]\in \mathbb{P}^r(K)$, the hypersurface cut out by $a_0 F_0 + a_1 F_1 + ... + a_r F_r$ is smooth in $\mathbb{P}^n$. In other words, Question~\ref{quest:main} asks for existence of a $K$-smooth linear system $\mathcal{L}$ in $\mathbb P^n$ of prescribed degree and dimension. We show that, under a mild assumption on the characteristic, the maximum value of $r$ for which Question~\ref{quest:main} has a positive answer is $r=n$. \begin{theorem}~\label{thm:main} Let $K$ be an arbitrary field. \begin{enumerate} \item \label{main-thm-item-1} If $r\geqslant n+1$, then there does not exist a $K$-smooth linear system of (projective) dimension $r$ (of any degree $d \geqslant 2$). \item \label{main-thm-item-2} Suppose $K$ is a finitely generated field of characteristic $p \geqslant 0$. 
If $r \leqslant n$ and $p\nmid \gcd(d, n+1)$, then there exist homogeneous polynomials $F_0, \ldots, F_r$ in $x_0, \ldots, x_n$ of degree $d$ such that $\mathcal{L} = \langle F_0, \ldots, F_r \rangle$ is a $K$-smooth linear system of (projective) dimension $r$. \end{enumerate} \end{theorem} Note that the assumption $p\nmid \gcd(d, p+1)$ on the characteristic of $K$ holds automatically when $\operatorname{char}(K)=0$. On the other hand, we will show in Section~\ref{sect:quadrics} that this assumption \emph{cannot} be dropped in general. More precisely, we will show that no $n$-dimensional linear system of degree $2$ hypersurfaces in $\mathbb P^n$ can be $K$-smooth in the case where $K$ is a field of characteristic $2$ and $n \geqslant 1$ is an odd integer; see~Theorem~\ref{thm:quadrics}. The case where $r=1$, which corresponds to a pencil of hypersurfaces, is of particular interest. For any given $n$, the condition that $p\nmid \gcd(d, n+1)$ is satisfied for all but finitely many characteristics $p$. In particular, Theorem~\ref{main-thm-item-2} tells us that for every value of $d \geqslant 1$ and every finitely generated field $K$ there exists \begin{itemize} \item a $K$-smooth pencil of degree $d$ in $\mathbb{P}^2$ if $\operatorname{char}(K)\neq 3$. \item a $K$-smooth pencil of degree $d$ in $\mathbb{P}^3$ if $\operatorname{char}(K)\neq 2$. \item a $K$-smooth pencil of degree $d$ in $\mathbb{P}^4$ if $\operatorname{char}(K)\neq 5$. \item a $K$-smooth pencil of degree $d$ in $\mathbb{P}^5$ if $\operatorname{char}(K)\neq 2, 3$. \end{itemize} On the other hand, the main result of~\cite{AG22}*{Theorem 1.3} proves the existence of a $K$-smooth pencil $\mathcal{L}$ of degree $d$ hypersurfaces in $\mathbb{P}^n$ defined over the field $K = \mathbb{F}_q$ under a different hypothesis: \begin{align*} q > \left(\frac{1+\sqrt{2}}{2}\right)^2 \left((n+1)(d-1)^{n}\right)^2 \left((n+1)(d-1)^{n}-1\right)^2\left((n+1)(d-1)^{n}-2\right)^2. 
\end{align*} In particular, an $\mathbb{F}_q$-smooth pencil of degree $d$ hypersurfaces exists in any characteristic as long as $q$ is sufficiently large. It is reasonable to ask if smooth pencils of every degree exist over every finitely generated field. \textbf{Acknowledgements.} In an earlier version of this paper our main result, Theorem~\ref{thm:main}\eqref{main-thm-item-2}, was only stated for finite fields. We are grateful to Angelo Vistoli for suggesting that it can be extended to finitely generated fields and contributing the inductive argument of Section~\ref{sect:induction}. The first author is supported by a postdoctoral research fellowship from the University of British Columbia and the NSERC PDF award. The second and third authors are supported by NSERC Discovery grants. \section{Proof of Theorem~\ref{thm:main}\eqref{main-thm-item-1}}\label{sect:main-result} In this section $K$ will denote an arbitrary field. We will denote by $K[x_0, \ldots, x_n]_d$ the space of homogeneous polynomials of degree $d$ in $x_0, \ldots, x_n$ with coefficients in $K$. This is a $K$-vector space of dimension $N=\binom{n+d}{d}$. Points of the projective space $\mathbb{P}(K[x_0, \ldots, x_n]_d)$ are naturally identified with degree $d$ hypersurfaces in $\mathbb{P}^n$. We proceed with the proof of part \eqref{main-thm-item-1} of Theorem~\ref{thm:main}. Assume the contrary: there exists a $K$-smooth linear system $\mathcal{L} \subset K[x_0, \ldots, x_n]_d$ of (affine) dimension $\geqslant n + 2$. Let $x_0^{d-1} K[x_0, \ldots, x_n]_1$ denote the ($n+1$)-dimensional $K$-vector space of degree $d$ forms divisible by $x_0^{d-1}$. Any such form can be written as $x_0^{d-1} l(x_0, \ldots, x_n)$, where $l \in K[x_0, \ldots, x_n]_1$. Consider the $K$-linear map $$ \Psi \colon K[x_0, \ldots, x_n]_d \to x_0^{d-1} K[x_0, \ldots, x_n]_1 $$ which removes from $F \in \mathcal{L}(K)$ all monomials which are not multiples of $x_0^{d-1}$. 
In other words, for any non-negative integers $i_0, \ldots, i_n$ satisfying $i_0 + \ldots + i_n = d$, \[ \Psi(x_0^{i_0} x_1^{i_1} \ldots x_n^{i_n}) = \begin{cases} \text{$x_0^{i_0} x_1^{i_1} \ldots x_n^{i_n}$, if $i_0 \geqslant d - 1$, and} \\ \text{$0$, otherwise}. \end{cases} \] The kernel, $\operatorname{Ker}(\Psi)$, is precisely the set of polynomials $F \in K[x_0, \ldots, x_n]_d$ with the property that the associated hypersurface in $\mathbb{P}^n$ is singular at $P = [1: 0: \ldots : 0]$. Since the codimension of $\operatorname{Ker}(\Psi)$ in $K[x_0, \ldots, x_n]_d$ is at least $\dim(x_0^{d-1} K[x_0, \ldots, x_n]_1) = n + 1$ and $\dim(\mathcal{L}) \geqslant n + 2$, we see that $\mathcal{L}\cap \operatorname{Ker}(\Psi)$ must contain a non-zero $K$-point of $\mathcal{L}$. In other words, $\mathcal{L}(K)$ contains a hypersurface which is singular at $P$. This shows that $\mathcal{L}$ cannot be $K$-smooth. \qed \section{Proof of Theorem~\ref{thm:main}\eqref{main-thm-item-2} in the case, where $K$ is a finite field} \label{sect:finite} We begin by exhibiting two families of smooth hypersurfaces of degree $d \geqslant 2$ over an arbitrary field $K$ of characteristic $p \geqslant 0$. \begin{lemma}\label{lemma:smoothness-fermat} Suppose $p\nmid d$. Set $F=c_0 x_0^{d}+ c_1 x_1^{d} + ... + c_n x_n^{d}$. If $c_0, c_1, \ldots, c_n \neq 0$, then $F$ cuts out a smooth hypersurface in $\mathbb{P}^n$. \end{lemma} \begin{proof} This is clear from the Jacobian criterion: the equations \[ \frac{\partial F}{\partial x_i}= d c_i x_i^{d-1} = 0 \; \; \text{($i = 0, 1, \ldots, n$)} \] have no common solution in $\mathbb{P}^n$. \end{proof} \begin{lemma}\label{lemma:smoothness-klein} Suppose $p\mid d$ but $p\nmid (n+1)$. Set $F= c_0 x_0^{d-1} x_1 + c_1 x_1^{d-1} x_2 + ... + c_n x_n^{d-1} x_0$. If $c_0, c_1, \ldots, c_n \neq 0$, then $F$ cuts out a smooth hypersurface in $\mathbb{P}^n$. 
\end{lemma} \begin{proof} Assume the contrary: the hypersurface cut out by $F$ in $\mathbb P^n$ is singular at some point $P=[u_0:u_1:...:u_n] \in \mathbb{P}^n$. By symmetry we may assume without loss of generality that $u_1\ne 0$. Using the Jacobian criterion, and remembering that $p\mid d$, we obtain: \begin{equation}\label{eq:smoothness-klein-partial} \frac{\partial{F}}{\partial x_i}(P) = c_{i-1} u_{i-1}^{d-1} - c_i u_i^{d-2} u_{i+1} = 0 \end{equation} for each $0\leqslant i\leqslant n$, where the subscripts are taken modulo $n+1$. Multiplying both sides of~\eqref{eq:smoothness-klein-partial} by $u_i$, we obtain \begin{equation}\label{eq:smoothness-klein} c_{i-1} u_{i-1}^{d-1} u_i = c_i u_i^{d-1} u_{i+1} . \end{equation} Now recall that \[ F(P) = c_0 u_0^{d-1} u_1 + c_1 u_1^{d-1} u_2 + ... + c_n u_n^{d-1} u_0 = 0 . \] By~\eqref{eq:smoothness-klein}, the $n$ terms in this sum are all equal to each other. Hence, $$ 0 = F(P) = \sum_{i=0}^{n} c_i u_i^{d-1} u_{i+1} = (n+1) c_0 u_0^{d-1} u_{1}. $$ Since $p\nmid (n+1)$, $c_0\neq 0$, and $u_1 \neq 0$, we conclude that $u_0=0$. We will divide the remainder of the proof into two cases, according to whether $d=2$ or $d\geqslant 3$. If $d \geqslant 3$, then~\eqref{eq:smoothness-klein-partial} tells us that $u_i = 0$ implies $u_{i-1} = 0$ for any $i \in \mathbb Z/(n+1) \mathbb Z$. (Recall that the subscripts in~\eqref{eq:smoothness-klein-partial} are viewed modulo $n + 1$.) Using this implication recursively, starting from $u_0 = 0$, we see that $u_0 = u_n = u_{n-1} = \ldots = u_1 = 0$, a contradiction. Now assume $d=2$. In this case~\eqref{eq:smoothness-klein-partial} tells us that $u_{i - 1} = 0$ implies $u_{i+1} = 0$ for any $i \in \mathbb Z/(n+1) \mathbb Z$. Since we know that $u_0 = 0$, this tells us that $u_{i}=0$ for every even $i$. Since $d=2$, the assumption that $p$ divides $d$ tells us that $p = 2$ and the assumption that $p$ does not divide $n + 1$ tells us that that $n = 2k$ is even. 
Thus, $2k + 2 \equiv 1$ modulo $n + 1$ and hence, $u_1 = u_{2k+2} = 0$, a contradiction.
To prove this claim, let \begin{equation}\label{eq:new-coord} y_j = \alpha^{q^j} x_0 + \alpha^{q^{j+1}} x_1 + \alpha^{q^{j+2}} x_2 + ... + \alpha^{q^{j+i}} x_i + ... + \alpha^{q^{j+n}}x_n \end{equation} for each $0\leqslant j\leqslant n$, and observe that $F_i=y_i^d$. The linear map $x_i\mapsto y_i$ is a linear automorphism of $\mathbb{P}^n$. Indeed, the matrix of this linear transformation, known as a \emph{Moore matrix}, is non-singular; see, e.g., \cite{Go96}*{Corollary 1.3.4}. Thus, $y_0, \ldots, y_n$ are algebraically independent over $\mathbb{F}_q$ and hence, over $\overline{\mathbb{F}_q}$. Consequently, $F_0, F_1, \ldots, F_n$ are linearly independent over $\overline{\mathbb F_q}$. This proves the claim. In summary, $\mathcal{L}=\langle F_0, F_1, \ldots, F_n\rangle$ is a linear system of degree $d$ hypersurfaces in $\mathbb{P}^n$ defined over $\mathbb F_q$ of (projective) dimension $r=n$. It remains to show that $\mathcal{L}$ is $\mathbb F_q$-smooth. Indeed, suppose \begin{equation}\label{eq:checking-singularity} X = \{ c_0 F_0 + c_1 F_1 + ... + c_n F_n = 0 \} \end{equation} is a singular hypersurface $X$ which belongs to $\mathcal{L}$, for some $c_i\in\overline{\mathbb{F}_q}$ where not all $c_i$ are zero. Our goal is to show that $X$ is not defined over $\mathbb F_q$. In the new coordinates $y_i$, we can express \eqref{eq:checking-singularity} as: \begin{equation*} X=\{c_0 y_0^d + c_1 y_1^d + ... + c_n y_n^d = 0\}. \end{equation*} Since $X$ is singular, we can apply Lemma~\ref{lemma:smoothness-fermat} to deduce that $c_i=0$ for some $i$. Without loss of generality, we may assume that $c_0=0$. By applying the Frobenius map, we see that $X$ is sent to: \begin{equation*} X^{\sigma} = \{c_1^q F_2 + ... + c_n^q F_0 = 0 \}. \end{equation*} We claim that $X$ and $X^{\sigma}$ are distinct. 
Indeed, their defining equations are not multiples of one another: otherwise, there would exist a nonzero constant $b \in \overline{\mathbb F_q}$ such that $c_i^{q}= b\cdot c_{i+1}$ for each $0\leqslant i \leqslant n$ taken modulo $n+1$. As $c_0=0$, this would force $c_i=0$ for each $0\leqslant i\leqslant n$, which is a contradiction. Thus, $X$ is not defined over $\mathbb{F}_q$, as desired. We conclude that the linear system $\mathcal{L}$ is $\mathbb F_q$-smooth. {\bf Case 2:} $p\mid d$ but $p\nmid (n+1)$. Define $y_0, \ldots, y_n$ by the formula $\eqref{eq:new-coord}$, and set $F_i=y_i^{q} y_{i+1}$ for $0\leqslant i\leqslant n-1$ and $F_n=y_n^{q} y_{0}$. Arguing as in Case 1, one readily checks that $\mathcal{L}=\langle F_0, F_1, ..., F_n\rangle$ is a linear subspace of (projective) dimension $n$ defined over $\mathbb F_q$. Moreover, the same argument as in Case 1, with Lemma~\ref{lemma:smoothness-klein} used in place of Lemma~\ref{lemma:smoothness-fermat}, shows that $\mathcal{L}$ is $\mathbb{F}_q$-smooth. This completes the proof of Theorem~\ref{thm:main}\eqref{main-thm-item-2} in the case, where $K=\mathbb{F}_q$ is a finite field. \qed \section{Conclusion of the proof of Theorem~\ref{thm:main}\eqref{main-thm-item-2}} \label{sect:induction} Given a finitely generated field $K$, we define its dimension $\dim(K)$ to be the Krull dimension of any finitely generated $\mathbb{Z}$-algebra whose fraction field is $K$. In other words, $\dim(K)=\operatorname{tr deg}_{\mathbb{F}_p}(K)$ if $\operatorname{char}(K)=p>0$ and $\dim(K)=1+\operatorname{tr deg}_{\mathbb{Q}}(K)$ if $\operatorname{char}(K)=0$. In this section we will prove Theorem~\ref{thm:main}\eqref{main-thm-item-2} over an arbitrary finitely generated field $K$ by induction on $\dim(K)$. The inductive step will be based on the following lemma. 
\begin{lemma}\label{lem:lifting} Let $R$ be discrete valuation ring with fraction field $K$ and residue field $L$, and let $F_0, \ldots, F_r\in L[x_0, ..., x_n]$ be linearly independent homogeneous polynomials of degree $d$. Denote their liftings to $R$ by $\overline{F_0}, \ldots, \overline{F_r}\in R[x_0, ..., x_n]\subset K[x_0, ..., x_n]$, respectively. If the linear system $\langle F_0, \ldots, F_r \rangle$ is $L$-smooth, then the linear system $\langle \overline{F_0}, \ldots, \overline{F_r} \rangle$ is $K$-smooth. \end{lemma} \begin{proof} Let $(a_0, \ldots, a_r)$ be in $K^{r+1}\setminus \{(0, \ldots, 0)\}$. We will show that the hypersurface in $\mathbb{P}^{n}_{K}$ defined by the form $a_0\overline{F_0}+\ldots + a_r \overline{F_r}$ is smooth. By scaling the $a_i$, we may assume that $a_i\in R$ for all $i$ and $a_i$ is invertible in $R$ for at least one $i$. Consider the hypersurface $X\subset \mathbb{P}^n_{K}$ defined by $a_0 \overline{F_0}+\cdots +a_r \overline{F_r}=0$. Then $X$ is flat over $\operatorname{Spec}(R)$ and its fiber over $\mathcal{L}$ is smooth by hypothesis. Since the smooth locus of the projection $X\to\operatorname{Spec}(R)$ is open in $X$, its complement must be empty. It follows that the fiber over the generic point of $\operatorname{Spec}(R)$ is smooth, as desired. \end{proof} We are now ready to finish the proof of Theorem~\ref{thm:main}\eqref{main-thm-item-2} by induction on the dimension of the finitely generated field $K$. If $\operatorname{dim}(K)=0$, then $K$ is a finite field. In this case Theorem~\ref{thm:main}\eqref{main-thm-item-2} is proved in Section~\ref{sect:finite}. If $\operatorname{dim}(K)>0$, then it is easy to see that $K$ admits a discrete valuation with finitely generated residue field $L$ such that $\dim(L)=\dim(K)-1$. Furthermore, if $\operatorname{char}(K)=0$, then this valuation can be chosen so that $\operatorname{char}(L)$ is positive and arbitrarily large. 
By applying Lemma~\ref{lem:lifting}, we can lift an $L$-smooth linear system of hypersurfaces in $\mathbb P^n$ to a $K$-smooth linear system of hypersurfaces in $\mathbb P^n$ of the same degree and the same dimension. \qed
It remains to show that a skew-symmetric square $n \times n$ matrix $M$ over any commutative ring has zero determinant, when $n$ is odd. Indeed, consider the universal skew-symmetric matrix $n \times n$ matrix $A$ over the polynomial ring $R = \mathbb{Z}[x_{ij} | 1 \leqslant i < j \leqslant n]$. By definition, the $(i, j)$-th entry of $A$ is $x_{ij}$ if $i < j$, $0$ if $i = j$ and $- x_{ij}$ if $i > j$. Taking the determinant on both sides of $A^T = - A$, and remembering that $n$ is odd, we obtain $\det(A) = - \det(A)$ in $R$. Since $R$ is an integral domain of characteristic $0$, this implies that $\det(A) = 0$. A simple specialization argument (specializing $x_{ij}$ to the $(i, j)$-th entry of $M$) now shows that $\det(M) = 0$, as desired. Thus, we have found $(0, \ldots, 0) \neq (t_1, \ldots, t_n) \in K^n$ such that for any point $P \in \mathbb{P}^n$ of the form $P=[t_0:\ldots:t_n]$, we have \begin{equation} \label{e.jacobian} \frac{\partial F}{\partial x_0}(P)= \ldots = \frac{\partial{F}}{\partial x_n}(P) = 0. \end{equation} Note that since $\deg(F)$ is even and we are in characteristic $2$, conditions~\eqref{e.jacobian} do not guarantee that $F(P) = 0$. On the other hand, the partial derivatives of $F(x_0, \ldots, x_n)$ depend only on $x_1, \ldots, x_n$ and not on $x_0$. We thus want to choose $t_0$ so that the resulting point $P = [t_0: \ldots : t_n]$ lies on the hypersurface $X$ cut out by $F$. To achieve this goal, we choose $t_0 \in \overline{K}$ so that $$ t_0^2 = - G(t_1, t_2, ..., t_n). $$ Then $P = [t_0:\ldots:t_n]\in\mathbb{P}^n(\overline{K})$ satisfies both \eqref{e.jacobian} and $F(P)=0$. In other words, $X$ is singular at $P$. \end{proof} \begin{remark} If $K$ is a perfect field of characteristic $2$, then the above construction gives rise to a singular point $P = [t_0: \ldots: t_n]$ of $X$ defined over $K$. Indeed, since $K$ is closed under taking square roots, we can always choose $t_0 \in K$ in the last step. 
\end{remark} \begin{remark} The conclusion of Lemma~\ref{lemma:sing-quadrics} is false when $n=2k$ is even. Indeed, the quadric hypersurface in $\mathbb{P}^n$ defined by the polynomial $$ x_0^2 + x_1x_2+x_3x_4 + ... + x_{2k-1}x_{2k}=0 $$ is smooth. \end{remark} We now proceed with a proof of Theorem~\ref{thm:quadrics}. \begin{proof}[Proof of Theorem~\ref{thm:quadrics}] Suppose, to the contrary, that $\mathcal{L}=\langle F_0, \ldots, F_n\rangle$ is a $K$-smooth linear system of quadric hypersurfaces of (projective) dimension $n$. Let $\mathcal{L}(K)$ denote the set of $K$-members of the system. Consider the $K$-linear map $$ \Psi: K[x_0, \ldots, x_n]_2 \to x_0 K[x_0, \ldots, x_n]_1 $$ introduced in Section~\ref{sect:main-result} (with $d = 2$). Recall that $x_0 K[x_0, \ldots, x_n]_1$ denotes the $(n+1)$-dimensional $K$-vector space of quadratic forms in $x_0, \ldots, x_n$ divisible by $x_0$ and that $\Psi$ removes from $F \in K[x_0, \ldots, x_n]$ all monomials which are not multiples of $x_0$. When $d = 2$, the map $\Psi$ is given by the simple formula \[ (\Psi F)(x_0, \ldots, x_n) = F(x_0, x_1, \ldots, x_n) - F(0, x_1, \ldots, x_n). \] As we noted in Section~\ref{sect:main-result}, $F$ lies in the kernel of $\Psi$ if and only if the hypersurface in $\mathbb{P}^n$ cut out by $F$ is singular at the point $[1:0: \ldots: 0]$. Since the linear system $\mathcal{L}$ is $K$-smooth, this tells us that the restricted map $$ \Psi: \mathcal{L}(K) \to x_0 K[x_0, \ldots, x_n]_1 $$ is injective. Since the vector spaces $\mathcal{L}(K)$ and $x_0 K[x_0, \ldots, x_n]_1$ are of the same dimension $n + 1$, we conclude that $\Psi$ must also be surjective. In particular, there exists some $F\in\mathcal{L}(K)$ whose image under $\Psi$ is $x_0^2$. In other words, \[ F (x_0, \ldots, x_n) = x_0^2 +G(x_1, ..., x_n) \] for some quadratic form $G$ in $x_1, \ldots, x_n$. By Lemma~\ref{lemma:sing-quadrics}, $F$ cuts out a singular quadric hypersurface. 
This contradicts the assumption that each $K$-member of $\mathcal{L}$ is smooth. We conclude that a $K$-smooth linear system $\mathcal{L}$ of quadric hypersurfaces in $\mathbb P^n$ of dimension $n$ does not exist. \end{proof} We have shown that the hypothesis $p\nmid \gcd(d, n+1)$ of Theorem~\ref{thm:main}\eqref{main-thm-item-2} cannot be removed in the case $p=2$. We do not know whether this assumption can be dropped for other primes $p$. We finish the paper with an example, which shows that it can be for one particular choice of $K$, $p$, $d$, and $n$. \begin{example} Set $d=3$ and $n=2$ and consider the following cubic homogeneous polynomials with coefficients in $K = \mathbb{F}_3$: \begin{align*} F_0 &= x^{3}+x^{2}y-x y^{2}+y^{3}+x^{2} z+x y z+y^{2} z-x z^{2}+z^{3} \\ F_1 &= x^{3}+x^{2} y-x^{2}z-x y z+y^{2} z+z^{3} \\ F_2 &= x^{3}-x^{2}y+x y^{2}+y^{3}+x^{2}z+x y z+y^{2} z-y z^{2} \end{align*} A computer calculation shows that $aF_0+bF_1+cF_2=0$ defines a smooth plane curve for each of the possible $3^2+3+1=13$ choices $[a:b:c]\in\mathbb{P}^2(\mathbb{F}_3)$. In other words, $\langle F_0, F_1, F_2 \rangle$ is a $\mathbb{F}_3$-smooth linear system of (projective) dimension $n=2$. Thus, the conclusion of Theorem~\ref{thm:main}\eqref{main-thm-item-2} holds in this example, even though $p$ divides $\gcd(d,n+1)$. \end{example} \begin{bibdiv} \begin{biblist} \bib{AG22}{article}{ AUTHOR = {Asgarli, Shamil}, AUTHOR={Ghioca, Dragos}, TITLE = {Smoothness in pencils of hypersurfaces over finite fields}, JOURNAL={Bulletin of the Australian Mathematical Society}, Publisher={Cambridge University Press}, YEAR={2022}, PAGES={1–10}, DOI={10.1017/S0004972722000776}, URL={https://doi.org/10.1017/S0004972722000776}, } \bib{Bal07}{article}{ AUTHOR = {Ballico, E.}, TITLE = {Bertini's theorem over a finite field for linear systems of quadrics}, JOURNAL = {Int. J. Pure Appl. 
Math.}, VOLUME = {35}, YEAR = {2007}, NUMBER = {4}, PAGES = {453--455}, } \bib{Bal09}{article}{ AUTHOR = {Ballico, E.}, TITLE = {Vanishings and non-vanishings of homogeneous forms over a finite field}, JOURNAL = {Int. J. Pure Appl. Math.}, VOLUME = {57}, YEAR = {2009}, NUMBER = {2}, PAGES = {219--224}, } \bib{DGH21}{article}{ AUTHOR = {Dimitrov, Vesselin}, AUTHOR = {Gao, Ziyang}, AUTHOR = {Habegger, Philipp}, TITLE = {Uniform bound for the number of rational points on a pencil of curves}, JOURNAL = {Int. Math. Res. Not. IMRN}, YEAR = {2021}, NUMBER = {2}, PAGES = {1138--1159}, ISSN = {1073-7928}, DOI = {10.1093/imrn/rnz248}, URL = {https://doi.org/10.1093/imrn/rnz248} } \bib{Go96}{book}{ AUTHOR = {Goss, David}, TITLE = {Basic structures of function field arithmetic}, SERIES = {Ergebnisse der Mathematik und ihrer Grenzgebiete (3) [Results in Mathematics and Related Areas (3)]}, VOLUME = {35}, PUBLISHER = {Springer-Verlag, Berlin}, YEAR = {1996}, PAGES = {xiv+422}, ISBN = {3-540-61087-1}, DOI = {10.1007/978-3-642-61480-4}, URL = {https://doi.org/10.1007/978-3-642-61480-4}, } \bib{PY08}{article}{ AUTHOR = {Pereira, Jorge Vitório}, AUTHOR={Yuzvinsky, Sergey}, TITLE = {Completely reducible hypersurfaces in a pencil}, JOURNAL = {Adv. Math.}, VOLUME = {219}, YEAR = {2008}, NUMBER = {2}, PAGES = {672--688}, DOI = {10.1016/j.aim.2008.05.014}, URL = {https://doi.org/10.1016/j.aim.2008.05.014}, } \bib{Ste89}{article}{ AUTHOR = {Stein, Yosef}, TITLE = {The total reducibility order of a polynomial in two variables}, JOURNAL = {Israel J. Math.}, VOLUME = {68}, YEAR = {1989}, NUMBER = {1}, PAGES = {109--122}, DOI = {10.1007/BF02764973}, URL = {https://doi.org/10.1007/BF02764973}, } \bib{Vis93}{article}{ AUTHOR = {Vistoli, Angelo}, TITLE = {The number of reducible hypersurfaces in a pencil}, JOURNAL = {Invent. 
Math.}, VOLUME = {112}, YEAR = {1993}, NUMBER = {2}, PAGES = {247--262}, DOI = {10.1007/BF01232434}, URL = {https://doi.org/10.1007/BF01232434}, } \end{biblist} \end{bibdiv} \end{document}
\begin{document} \author{Roy Meshulam\thanks{Department of Mathematics, Technion, Haifa 32000, Israel. e-mail: [email protected]~. Supported by ISF and BSF grants. } } \title{Homology of Balanced Complexes \ via the Fourier Transform} \pagestyle{plain} \begin{abstract} Let $G_0,\ldots,G_k$ be finite abelian groups and let $G_0* \cdots *G_k$ be the join of the $0$-dimensional complexes $G_i$. We give a characterization of the integral $k$-coboundaries of subcomplexes of $G_0* \cdots *G_k$ in terms of the Fourier transform on the group $G_0 \times \cdots \times G_k$. This leads to an extension of a recent result of Musiker and Reiner on a topological interpretation of the cyclotomic polynomial. \end{abstract} \section{Introduction} \ \ \ \ Let $G_0,\ldots,G_k$ be finite abelian groups with the discrete topology and let $N=\prod_{i=0}^k (|G_i|-1)$. The simplicial join $Y=G_0* \cdots *G_k$ is homotopy equivalent to a wedge of $N$ $k$-dimensional spheres. Subcomplexes of $Y$ are called {\it balanced complexes} (see e.g. \cite{Stanley}). Denote the $(k-1)$-dimensional skeleton of $Y$ by $Y^{(k-1)}$. Let $A$ be a subset of $G_0 \times \cdots \times G_k$. Regarding each $a \in A$ as an oriented $k$-simplex of $Y$, we consider the balanced complex $$X(A)=X_{G_0,\ldots,G_k}(A)=Y^{(k-1)} \cup A.$$ In this note we characterize the integral $k$-coboundaries of $X(A)$ in terms of the Fourier transform on the group $G_0 \times \cdots \times G_k$. As an application we give a short proof of an extension of a recent result of Musiker and Reiner \cite{MR10} on a topological interpretation of the cyclotomic polynomial. We recall some terminology. Let $R[G]$ denote the group algebra of a finite abelian group $G$ with coefficients in a ring $R$. By writing $f=\sum_{x \in G} f(x) x\in R[G]$ we identify elements of $R[G]$ with $R$-valued functions on $G$. For a subset $A \subset G$ let $R[A]=\{f \in R[G]: {\rm supp}(f) \subset A\}$. Let $\widehat{G}$ be the character group of $G$. 
The Fourier transform is the linear bijection ${\cal F}:{\fam\bbfam\twelvebb C}[G] \rightarrow {\fam\bbfam\twelvebb C}[\widehat{G}]$ given on $f \in {\fam\bbfam\twelvebb C}[G]$ and $\chi \in \widehat{G}$ by $$ {\cal F}(f)(\chi)=\widehat{f}(\chi)=\sum_{x \in G} f(x) \chi(x)~. $$
Let $p_0,\ldots,p_k$ be distinct primes and for $0 \leq i \leq k$ let $G_i={\fam\bbfam\twelvebb Z}/p_i {\fam\bbfam\twelvebb Z}={\fam\bbfam\twelvebb Z}_{p_i}$. Writing $n=\prod_{i=0}^k p_i$, let $\theta:{\fam\bbfam\twelvebb Z}_n \rightarrow G=G_0 \times \cdots \times G_k$ be the standard isomorphism given by $$\theta(x)=(x({\rm mod}~p_0),\ldots, x({\rm mod}~p_k)).$$ For any $\ell$ let ${\fam\bbfam\twelvebb Z}_{\ell}^*=\{m \in {\fam\bbfam\twelvebb Z}_{\ell}: \text{gcd}(m,{\ell})=1\}$. Let $\varphi(n)=|{\fam\bbfam\twelvebb Z}_n^*|=\prod_{i=0}^k (p_i-1)$ be the Euler function of $n$ and let $A_0=\{\varphi(n)+1,\ldots,n-1\}$. For $A \subset \{0,\ldots,\varphi(n)\}$ consider the complex $$K_A=X(\theta(A \cup A_0)) \subset {\fam\bbfam\twelvebb Z}_{p_0} * \cdots * {\fam\bbfam\twelvebb Z}_{p_k}~.$$ Let $\omega=\exp(\frac{2\pi i}{n})$ be a fixed primitive $n$-th root of unity. The $n$-th cyclotomic polynomial (see e.g. \cite{Lang}) is given by $$\Phi_n(z)=\prod_{j \in {\fam\bbfam\twelvebb Z}_n^*} (z-w^j)=\sum_{j=0}^{\varphi(n)} c_j z^j \in {\fam\bbfam\twelvebb Z}[z].$$ Musiker and Reiner \cite{MR10} discovered the following remarkable connection between the coefficients of $\Phi_n(z)$ and the homology of the complexes $K_{\{j\}}$. \begin{theorem}[Musiker and Reiner] \label{mr} For any $j \in \{0,\ldots,\varphi(n)\}$ $$\tilde{\rm H}_i(K_{\{j\}};{\fam\bbfam\twelvebb Z}) \cong \left\{ \begin{array}{ll} {\fam\bbfam\twelvebb Z}/c_j {\fam\bbfam\twelvebb Z} & i=k-1 \\ {\fam\bbfam\twelvebb Z} & i=k~\text{and}~c_j=0 \\ 0 & \text{otherwise.} \end{array} \right.~~ $$ \end{theorem} \noindent The next result extends Theorem \ref{mr} to general $K_A$'s. 
Let $$c_A=(c_j: j \in A) \in {\fam\bbfam\twelvebb Z}^{A}$$ and $$d_A= \left\{ \begin{array}{ll} \text{gcd}(c_A) & c_A \neq 0 \\ 0 & c_A=0 \end{array} \right.~~ $$ \begin{theorem} \label{hka} For any $A \subset \{0,\ldots,\varphi(n)\}$ $$ \tilde{\rm H}^i(K_A;{\fam\bbfam\twelvebb Z})\cong \left\{ \begin{array}{ll} {\fam\bbfam\twelvebb Z} & i=k-1 ~\text{and}~ d_A=0 \\ {\fam\bbfam\twelvebb Z}^{|A|-1}\oplus {\fam\bbfam\twelvebb Z}/d_A {\fam\bbfam\twelvebb Z} & i=k \\ 0 & \text{otherwise} \end{array} \right.~~ $$ and $$ \tilde{\rm H}_i(K_A;{\fam\bbfam\twelvebb Z})\cong \left\{ \begin{array}{ll} {\fam\bbfam\twelvebb Z}/d_A{\fam\bbfam\twelvebb Z} & i=k-1 \\ {\fam\bbfam\twelvebb Z}^{|A|} & i=k ~\text{and}~ d_A=0 \\ {\fam\bbfam\twelvebb Z}^{|A|-1} & i=k ~\text{and}~ d_A \neq 0 \\ 0 & \text{otherwise.} \end{array} \right.~~ $$ \end{theorem} \ \\ \\ Proposition \ref{cob} is proved in Section \ref{s:cob}. It is then used in Section \ref{s:hka} to obtain an explicit form of the $k$-coboundaries of $K_A$ (Proposition \ref{isoh}) that directly implies Theorem \ref{hka}. \section{$k$-Coboundaries and Fourier Transform} \label{s:cob} \noindent {\bf Proof of Proposition \ref{cob}:} It suffices to consider the case $A=G$. Let $\psi=(\psi_0,\ldots,\psi_k) \in C^{k-1}(X(G);{\fam\bbfam\twelvebb Z})$. 
Then for any $\chi=(\chi_0, \ldots,\chi_k) \in \widehat{G}$ $$\widehat{d_{k-1} \psi}(\chi)=\sum_{g=(g_0,\ldots,g_k) \in G} d_{k-1} \psi(g) \chi(g)=$$ $$\sum_{(g_0,\ldots,g_k)} \sum_{i=0}^k (-1)^i \psi_i(g_0,\ldots,g_{i-1},g_{i+1},\ldots,g_k) \prod_{j=0}^k \chi_j(g_j)=$$ $$\sum_{i=0}^k (-1)^i \sum_{(g_0,\ldots,g_{i-1},g_{i+1},\ldots ,g_k)}\psi_i(g_0,\ldots,g_{i-1},g_{i+1},\ldots,g_k) \prod_{j \neq i} \chi_j(g_j) \sum_{g_i} \chi_i(g_i)=$$ $$\sum_{i=0}^k (-1)^i \widehat{\psi_i}(\chi_0,\ldots,\chi_{i-1},\chi_{i+1},\ldots,\chi_k) |G_i| \delta(\chi_i,{\bf 1}_i)$$ where $\delta(\chi_i,{\bf 1}_i)=1$ if $\chi_i={\bf 1}_i$ and is zero otherwise. \\ Therefore ${\rm supp}(\widehat{d_{k-1} \psi}) \subset \widehat{G} -\widehat{G}^+$ and so $$U_1\stackrel{\text{def}}{=}{\rm B}^k(X(G);{\fam\bbfam\twelvebb Z}) \subset \{f \in {\fam\bbfam\twelvebb Z}[G]:{\rm supp}(\widehat{f}) \subset \widehat{G} -\widehat{G}^+\}\stackrel{\text{def}}{=}U_2.$$ Since $X(G)$ is homotopy equivalent to a wedge of $\prod_{i=0}^k (|G_i|-1) =|\widehat{G}^+|~$ $k$-dimensional spheres, it follows that ${\rm H}^k(X(G);{\fam\bbfam\twelvebb Z})={\fam\bbfam\twelvebb Z}[G]/U_1$ is free of rank $|\widehat{G}^+|$ and hence $rank~U_1=|\widehat{G}|-|\widehat{G}^+|$. On the other hand, the injectivity of the Fourier transform implies that $$rank~ U_2 \leq \dim_{{\fam\bbfam\twelvebb C}} \{f \in {\fam\bbfam\twelvebb C}[G]:{\rm supp}(\widehat{f}) \subset \widehat{G} -\widehat{G}^+\}=|\widehat{G}|-|\widehat{G}^+|$$ and therefore $rank~U_2/U_1=0$. Since $U_2/U_1 \subset {\rm H}^k(X(G);{\fam\bbfam\twelvebb Z})$ is free it follows that $U_1=U_2$. {\begin{flushright} $\Box$ \end{flushright}} \section{The Homology of $K_A$} \label{s:hka} \ \ \ \ Recall that $G={\fam\bbfam\twelvebb Z}_{p_0} \times \cdots \times {\fam\bbfam\twelvebb Z}_{p_k}$ and $n=\prod_{j=0}^k p_j$. 
For $h \in {\fam\bbfam\twelvebb Z}[G]$ let $\theta^{*}h \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n]$ be the pullback of $h$ given by $\theta^*h(x)=h(\theta(x))$. For any $\ell$ we identify the character group $\widehat{{\fam\bbfam\twelvebb Z}_{\ell}}$ with ${\fam\bbfam\twelvebb Z}_{\ell}$ via the isomorphism $\eta_{\ell}:{\fam\bbfam\twelvebb Z}_{\ell} \rightarrow \widehat{{\fam\bbfam\twelvebb Z}_{\ell}}$ given by $\eta_{\ell}(x)(y)= \exp(2 \pi i x y/{\ell}).$ Proposition \ref{cob} implies the following characterization of the integral $k$-coboundaries of $K_A$. For $A\subset \{0,\ldots,\varphi(n)\}$ let $\theta_A$ denote the restriction of $\theta$ to $A \cup A_0$ and let $\theta_A^*$ be the induce isomorphism from ${\fam\bbfam\twelvebb Z}[\theta(A \cup A_0)]$ to ${\fam\bbfam\twelvebb Z}[A \cup A_0].$ Let $${\cal B}(A)=\{f_{|A \cup A_0}: f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n] ~~{such~that}~~ \widehat{f}(1)=0 \}.$$ \begin{proposition} \label{isoh} $$ \theta_A^*{\rm B}^k(K_A;{\fam\bbfam\twelvebb Z})={\cal B}(A). $$ \end{proposition} \noindent {\bf Proof:} We first examine the relation between the Fourier transforms on ${\fam\bbfam\twelvebb Z}_n$ and on $G$. Let $$\lambda=\sum_{j=0}^k \prod_{t \neq j} p_t \in {\fam\bbfam\twelvebb Z}_n^*.$$ For any $h \in {\fam\bbfam\twelvebb Z}[G]$ and $m \in {\fam\bbfam\twelvebb Z}_n$ $$\widehat{\theta^*h}(\lambda m)=\sum_{x \in {\fam\bbfam\twelvebb Z}_n} \theta^*h(x) \exp(\frac{2 \pi i x \lambda m}{n})= $$ \begin{equation} \label{fourh} \sum_{x \in {\fam\bbfam\twelvebb Z}_n} h(\theta(x))\exp(\sum_{j=0}^k \frac{2 \pi i x m}{p_j})=\widehat{h}(\theta(m)). \end{equation} \noindent Noting that $$\theta^{-1}(\widehat{G}^+)=\theta^{-1}({\fam\bbfam\twelvebb Z}_{p_0}^* \times \cdots \times {\fam\bbfam\twelvebb Z}_{p_k}^*)={\fam\bbfam\twelvebb Z}_n^*=\lambda {\fam\bbfam\twelvebb Z}_n^*$$ it follows from Proposition \ref{cob} and Eq. 
(\ref{fourh}) that $${\rm B}^k(K_A;{\fam\bbfam\twelvebb Z}) =\{h_{|\theta(A \cup A_0)}: h \in {\fam\bbfam\twelvebb Z}[G] ~~{such~that}~~{\rm supp}(\widehat{h}) \subset \widehat{G}-\widehat{G}^+\}= $$ \begin{equation} \label{tran} (\theta_A^*)^{-1}\{f_{|A \cup A_0}: f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n] ~~{such~that}~~{\rm supp}(\widehat{f}) \subset {\fam\bbfam\twelvebb Z}_n - {\fam\bbfam\twelvebb Z}_n^* \}. \end{equation} \ \\ \\ Let ${\cal P}_n=\{\omega^m: m \in {\fam\bbfam\twelvebb Z}_n^*\}$ be the set of primitive $n$-th roots of $1$. The Galois group ${\rm Gal}({\fam\bbfam\twelvebb Q}(\omega)/{\fam\bbfam\twelvebb Q})$ acts transitively on ${\cal P}_n$. Hence, by Eq. (\ref{tran}): $$ \theta_A^*{\rm B}^k(K_A;{\fam\bbfam\twelvebb Z}) = \{f_{|A \cup A_0}: f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n] ~~{such~that}~~{\rm supp}(\widehat{f}) \subset {\fam\bbfam\twelvebb Z}_n - {\fam\bbfam\twelvebb Z}_n^* \}= $$ $$ \{f_{|A \cup A_0}: f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n] ~~{such~that}~~ \widehat{f}(m)=0 {~~for~all~~} m \in {\fam\bbfam\twelvebb Z}_n^*\}= $$ $$ \{f_{|A \cup A_0}: f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n] ~~{such~that}~~ \widehat{f}(1)=0 \}={\cal B}(A). $$ {\begin{flushright} $\Box$ \end{flushright}} \ \\ \\ {\bf Proof of Theorem \ref{hka}:} Proposition \ref{isoh} implies that $\theta_A^*$ induces an isomorphism between ${\rm H}^k(K_A;{\fam\bbfam\twelvebb Z})$ and $${\cal H}(A) \stackrel{\text{def}}{=} {\fam\bbfam\twelvebb Z}[A \cup A_0]/{\cal B}(A).$$ For $j \in A \cup A_0$ let $g_j \in {\fam\bbfam\twelvebb Z}[A \cup A_0]$ be given by $g_j(i)=1$ if $i=j$ and $g_j(i)=0$ otherwise. Let $[g_j]$ be the image of $g_j$ in ${\cal H}(A)$. The computation of ${\cal H}(A)$ depends on the following two observations: \ \\ \\ (i) ${\cal H}(A)$ is generated by $\{[g_j]: j \in A\}$. \ \\ \\ {\bf Proof:} Let $t \in A_0$. 
There exist $u_0,\ldots,u_{\varphi(n)-1} \in {\fam\bbfam\twelvebb Z}$ such that $$\sum_{\ell=0}^{\varphi(n)-1} u_{\ell} \omega^{\ell} +\omega^t=0.$$ Let $f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n]$ be given by $$f(\ell)= \left\{ \begin{array}{ll} u_{\ell} & 0 \leq \ell \leq \varphi(n)-1 \\ 1 & \ell=t \\ 0 & \text{otherwise.} \end{array} \right.~~ $$ Since $$\widehat{f}(1)=\sum_{\ell=0}^{\varphi(n)-1} u_{\ell} \omega^{\ell} +\omega^t=0$$ it follows that $$ \sum_{j \in A} u_j g_j+g_t=f_{|A \cup A_0} \in {\cal B}(A). $$ Hence $[g_t]=-\sum_{j \in A} u_j [g_j]$. \ \\ \\ (ii) The minimal relation between $\{[g_j]\}_{j \in A}$ is $\sum_{j \in A} c_j[g_j]=0$. \ \\ \\ {\bf Proof:} Let $f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n]$ be given by $f(\ell)=c_{\ell}$ if $0 \leq \ell \leq \varphi(n)$ and zero otherwise. Since $\widehat{f}(1)=\Phi_n(\omega)=0$, it follows that $$ \sum_{j \in A}c_j g_j = f_{|A \cup A_0} \in {\cal B}(A). $$ Hence $\sum_{j \in A} c_j [g_j]=0$. On the otherhand, if $\sum_{j \in A} \alpha_j [g_j]=0$ then there exists an $h \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n]$ such that $\widehat{h}(1)=0$ and $h_{|A \cup A_0}=\sum_{j \in A} \alpha_j g_j$. In particular $h(\ell)=0$ for $\ell \geq \varphi(n)+1$. Let $p(z)=\sum_{\ell=0}^{\varphi(n)} h(\ell) z^{\ell}$ then $p(\omega)=\widehat{h}(1)=0$. Hence $p(z)=r\Phi_n(z)$ for some $r \in {\fam\bbfam\twelvebb Z}$. Therefore $\alpha_j=h(j)=rc_j$ for all $j \in A$. \ \\ \\ It follows from (i) and (ii) that \begin{equation} \label{cohomk} {\rm H}^k(K_A;{\fam\bbfam\twelvebb Z})\cong {\cal H}(A)={\fam\bbfam\twelvebb Z}[A]/{\fam\bbfam\twelvebb Z} c_A \cong {\fam\bbfam\twelvebb Z}^{|A|-1}\oplus {\fam\bbfam\twelvebb Z}/d_A {\fam\bbfam\twelvebb Z} ~. \end{equation} The remaining parts of Theorem \ref{hka} are formal consequences of (\ref{cohomk}) and the universal coefficient theorem (see e.g. 
\cite{Munkres}): \begin{equation} \label{uct} 0 \leftarrow {\rm Hom}({\rm H}_p(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow {\rm H}^p(K_A;{\fam\bbfam\twelvebb Z}) \leftarrow {\rm Ext}({\rm H}_{p-1}(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow 0~. \end{equation} First consider the case $c_A=0$. By (\ref{cohomk}) and (\ref{uct}) $$ 0 \leftarrow {\rm Hom}({\rm H}_k(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow {\fam\bbfam\twelvebb Z}^{|A|} \leftarrow {\rm Ext}({\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow 0~. $$ Therefore ${\rm H}_k(K_A;{\fam\bbfam\twelvebb Z})\cong {\fam\bbfam\twelvebb Z}^{|A|}$ and ${\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z})$ is torsion free. The Euler-Poincar\'{e} relation \begin{equation} \label{EPH} rank~ {\rm H}_k(K_A;{\fam\bbfam\twelvebb Z})=rank~ \tilde{\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z})+|A|-1~ \end{equation} then implies that $\tilde{\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z})\cong {\fam\bbfam\twelvebb Z}$ and $$\tilde{\rm H}^{k-1}(K_A;{\fam\bbfam\twelvebb Z})\cong{\rm Hom}(\tilde{\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z})\cong {\fam\bbfam\twelvebb Z}.$$ Next assume that $c_A \neq 0$. By (\ref{cohomk}) and (\ref{uct}) $$ 0 \leftarrow {\rm Hom}({\rm H}_k(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow {\fam\bbfam\twelvebb Z}^{|A|-1} \oplus {\fam\bbfam\twelvebb Z}/d_A {\fam\bbfam\twelvebb Z} \leftarrow {\rm Ext}({\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z}) \leftarrow 0~. $$ Therefore ${\rm H}_k(K_A;{\fam\bbfam\twelvebb Z})\cong {\fam\bbfam\twelvebb Z}^{|A|-1}$ and ${\rm Ext}({\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z}),{\fam\bbfam\twelvebb Z})= {\fam\bbfam\twelvebb Z}/d_A {\fam\bbfam\twelvebb Z}$. Together with (\ref{EPH}) this implies that $rank~ \tilde{\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z})=0$. 
Therefore $\tilde{\rm H}_{k-1}(K_A;{\fam\bbfam\twelvebb Z})= {\fam\bbfam\twelvebb Z}/d_A {\fam\bbfam\twelvebb Z}$ and $\tilde{\rm H}^{k-1}(K_A;{\fam\bbfam\twelvebb Z})=0$. {\begin{flushright} $\Box$ \end{flushright}} \noindent {\bf Remark:} In the proof of (ii) it was observed that the function $f \in {\fam\bbfam\twelvebb Z}[{\fam\bbfam\twelvebb Z}_n]$ given by $f(\ell)=c_{\ell}$ if $0 \leq \ell \leq \varphi(n)$ and zero otherwise, is the image under $\theta^*$ of a $k$-coboundary of $X(G)$. This fact also appears (with a different proof) in Proposition 24 of \cite{MR10} and is attributed there to D. Fuchs. \ \\ \\ {\bf Acknowledgment} \\ I would like to thank Vic Reiner for helpful discussions and comments. \end{document}
\begin{document} \title{Distributed shape derivative via averaged adjoint method and applications} \author[1]{Antoine Laurain\thanks{Universidade de S\~{a}o Paulo, Instituto de Matem\'atica e Estat\'istica, Departamento de Matem\'atica Aplicada, Rua do Mat\~{a}o, 1010 Cidade Universitaria, CEP 05508-090, S\~{a}o Paulo, SP, Brazil}} \author[2]{Kevin Sturm\thanks{Universit\"at Duisburg-Essen, Fakult\"at f\"ur Mathematik, Thea-Leymann-Stra\ss e 9, D-45127 Essen, Germany; {\bf email}: [email protected]}} \affil[1]{Department of Mathematics, University of S\~{a}o Paulo} \affil[2]{Faculty of Mathematics, University of Essen-Duisburg} \maketitle \begin{abstract} The structure theorem of Hadamard-Zol\'esio states that the derivative of a shape functional is a distribution on the boundary of the domain depending only on the normal perturbations of a smooth enough boundary. Actually the domain representation, also known as distributed shape derivative, is more general than the boundary expression as it is well-defined for shapes having a lower regularity. It is customary in the shape optimization literature to assume regularity of the domains and use the boundary expression of the shape derivative for numerical algorithms. In this paper we describe several advantages of the distributed shape derivative in terms of generality, easiness of computation and numerical implementation. We identify a tensor representation of the distributed shape derivative, study its properties and show how it allows to recover the boundary expression directly. We use a novel Lagrangian approach, which is applicable to a large class of shape optimization problems, to compute the distributed shape derivative. We also apply the technique to retrieve the distributed shape derivative for electrical impedance tomography. Finally we explain how to adapt the level set method to the distributed shape derivative framework and present numerical results. 
\end{abstract} \subjclass{49Q10, 35Q93, 35R30, 35R05}\newline \keywords{Shape optimization, distributed shape gradient, electrical impedance tomography, Lagrangian method, level set method} \maketitle \section*{Introduction} In his research on elastic plates \cite{Ha} in 1907, Hadamard showed how to obtain the derivative of a shape functional $J(\Omega)$ by considering normal perturbations of the boundary $\partial\Omega$ of a smooth set $\Omega$. This fundamental result of shape optimization was made rigorous later by Zol\'esio \cite{MR2731611} in the so-called ``structure theorem''. When $J(\Omega)$ and the domain are smooth enough, one may also write the shape derivative as an integral over $\partial\Omega$, which is the canonical form in the shape optimization literature. However, when $\Omega$ is less regular, the shape derivative can often be written as a domain integral even when the boundary expression is not available. The {\it domain expression}, also known as {\it distributed shape derivative}, has been generally ignored in the shape optimization literature for several reasons: firstly the boundary representation provides a straightforward way of determining an explicit descent direction since it depends linearly on the boundary perturbation $\theta$ and not on its gradient, secondly this descent direction only needs to be defined on the boundary. When considering the domain expression, these two advantages disappear as the shape derivative is defined on $\Omega$ and depends on the gradient of $\theta$, so that a partial differential equation needs to be solved to obtain a descent direction $\theta$ on $\Omega$. It seems that these drawbacks would definitely rule out the distributed shape derivative, however they turn out to be less dramatic than expected in many situations and the domain formulation has other less foreseeable advantages over the boundary representation. 
In this paper we advocate for the use of the distributed shape derivative and discuss the advantages of this formulation. The boundary representation has the following drawbacks. First of all if the data is not smooth enough the integral representation does not exist so that the more general domain representation is the only rigorous alternative. Even when the boundary representation exists and has the form $\int_{\partial\mathcal{B}ega} g\, \theta\cdot n $, it is usually not legitimate to choose $\theta\cdot n = -g$ on $\partial\mathcal{B}ega$ for a descent direction if $g$ is not smooth enough, for instance if $g\in L^1(\partial\mathcal{B}ega)$. Therefore, a smoother $\theta$ must be chosen, which requires to solve a partial differential equation on the boundary $\partial\mathcal{B}ega$. When taking $\theta\cdot n = -g$ is legitimate, it might still not be desirable as this may yield a $\theta$ with low regularity, in which case one needs to regularize $\theta$ on the boundary as well. In these cases the first advantage of the boundary representation disappears. The second advantage of the boundary representation is that the perturbation field only needs to be defined on the boundary instead of on the whole domain, reducing the cost of the computation. Actually, the distributed shape derivative also has its support on the boundary, and may be computed in a small neighborhood of the boundary so that the additional cost is minimal. In addition, in most shape optimization applications, $g$ is the restriction of a function defined in a neighborhood of the boundary and not a quantity depending only on the boundary such as the curvature. Therefore from a practical point of view, $g$ must be evaluated in a neighborhood of $\partial\mathcal{B}ega$ anyway. Also, in many numerical applications, $\theta$ must be extended to a neighborhood of $\Gamma$ or even to the entire domain $\mathcal{B}ega$. 
This is the case for level set methods for instance, where the level set function must be updated on $\mathcal{B}ega$, or when one wishes to update the mesh along with the domain update, to avoid re-meshing the new domain. The distributed shape derivative then directly gives an extension of $\theta$ well-suited to the optimization problem. Recent results have shown that the distributed shape derivative is also more accurate than the boundary representation from a numerical point of view; see \cite{MR3348199} for a comparison. Indeed functions such as gradients of the state and adjoint state appearing in the distributed shape derivative only need to be defined at grid points and not on the interface. Therefore one avoids interpolation of these irregular terms. This is particularly useful for transmission problems where the boundary representation requires to compute the jump of a function over the interface, a delicate and error-prone operation from the numerical point of view. Having considered these equivalent expressions of the shape derivative (i.e. boundary and domain expression) leads to a general form of the shape derivative using tensors. We introduce such a tensor representation in Section \ref{sec:tensor_representation} which covers a large class of problems and in particular contains the boundary and domain expression. We show how this abstract form allows to identify simple relations between the domain and boundary expressions of the shape derivative. In this paper we also extend and simplify the averaged adjoint method from \cite{sturm13}, a Lagrangian-type method which is well-suited to compute the shape derivative of a cost function in an efficient way. Lagrangian methods are commonly used in shape optimization and have the advantage of providing the shape derivative without the need to compute the material derivative of the state; see \cite{MR862783,MR948649,MR2166150,sturm13,MeftBelh13,SturmHeomHint13}. 
Compared to these known shape-Lagrangian methods, the averaged adjoint method is fairly general due to minimal required conditions. The assumptions are for instance less restrictive than those required for the theorem of Correa-Seeger \cite{MR948649}, therefore it can be applied to more general situations such as non-convex functionals. As the direct approach our method can also be applied for problems depending on nonlinear partial differential equations. In this paper we give an example of application to a transmission problem (in electrical impedance tomography - see Section \ref{section4}). Our method provides the domain expression of the shape derivative and the boundary expression can be computed easily from the tensor representation of the domain expression. To complete the numerical implementation aspect, we also show how the domain expression of the shape derivative can be used in the level set method framework \cite{MR2033390,FLS,MR965860,MR2806573,HLsegmentation,MR2472382,MR2459656,MR2356899}. The level set method can be modified to use the domain expression which leads to a method which is actually easier to implement. Combining all these techniques, we obtain a straightforward and general way of solving the shape optimization problem, from the rigorous theoretical computation of the shape derivative to the numerical implementation. In Section \ref{section1} we recall the concept of shape derivative and the structure theorem on an abstract level. In Section \ref{section2} a shape-Lagrangian method, the averaged adjoint method, is described. In Section \ref{sec:tensor_representation} we identify a general tensor representation of the shape derivative, establish some of its properties, and give a few examples. In Section \ref{section3} we explain how to compute descent directions for the distributed shape derivative for use in gradient methods. 
In Section \ref{section4} we apply the results of Sections \ref{section2} and \ref{sec:tensor_representation} to the inverse problem of electrical impedance tomography. In Section \ref{section5} we extend the level set method to the case of the distributed shape derivative and finally in Section \ref{section6} we show numerical results for various problems including the problem of electrical impedance tomography. \section{The structure theorem revisited}\label{section1} Our aim in this section is to describe properties of the shape derivative on an abstract level and to emphasize that all representations of the shape derivative satisfy the same {\it structure theorem}. Let $\mathcal{P}(D)$ be the set of subsets of $D\subset\mathbf{R}^d$ compactly contained in $D$, where the so-called ``universe'' $D\subset \mathbf{R}^d$ is assumed to be open and bounded. Define for $k\geq 0$ and $0\leq \alpha \leq 1$, \begin{align} C^{k,\alpha}_c(D,\mathbf{R}^d) &:=\{\theta\in C^{k,\alpha}(D,\mathbf{R}^d)|\quad \theta\text{ has compact support in } D\}. \end{align} Also for given domain $\Omega\subset D$ with at least a $C^1$ boundary we introduce the space of vector fields \begin{align} C^{k,\alpha}_{\partial \Omega}(D ,\mathbf{R}^d) &:=\{\theta\in C^{k,\alpha}_c( D,\mathbf{R}^d)|\quad \theta\cdot n =0 \text{ on } \partial \Omega\} \end{align} where $n$ is the outward unit normal vector to $\Omega$. Consider a vector field $\theta\in C^{0,1}_c(D,\mathbf{R}^d)$ and the associated flow $\Phi_t^{\theta}:\overline{D}\rightarrow \mathbf{R}^d$, $t\in [0,\tau]$ defined for each $x_0\in \overline{D}$ as $\Phi_t^{\theta}(x_0):=x(t)$, where $x:[0,\tau]\rightarrow \mathbf{R}^d$ solves \begin{align}\label{Vxt} \begin{split} \dot{x}(t)&= \theta(x(t)) \quad \text{ for } t\in (0,\tau),\quad x(0) =x_0. \end{split} \end{align} We will sometimes use the simpler notation $\Phi_t=\Phi_t^{\theta}$ when no confusion is possible. 
Since $\theta\in C^{0,1}_c(D,\mathbf{R}^d)$ we have by Nagumo's theorem \cite{MR0015180} that for fixed $t\in [0,\tau]$ the flow $\Phi_t$ is a homeomorphism from $D$ into itself and maps boundary onto boundary and interior onto interior. Further, we consider the family \begin{equation}\label{domain} \Omega_t := \Phi_t^{\theta}(\Omega) \end{equation} of perturbed domains. In the following let $J : \mathfrak P \rightarrow \mathbf{R}$ be a shape function defined on some admissible set $\mathfrak P\subset \mathcal P(D)$. \begin{definition}\label{def1} The Eulerian semiderivative of $J$ at $\Omega$ in direction $\theta \in C^{0,1}_c(D,\mathbf{R}^d)$, when the limit exists, is defined by \begin{equation} dJ(\Omega)(\theta):= \lim_{t \searrow 0}\frac{J(\Omega_t)-J(\Omega)}{t}. \end{equation} \begin{itemize} \item[(i)] $J$ is said to be \textit{shape differentiable} at $\Omega$ if it has a Eulerian semiderivative at $\Omega$ for all $\theta \in C^\infty_c(D,\mathbf{R}^d)$ and the mapping \begin{align*} dJ(\Omega): C^\infty_c(D,\mathbf{R}^d) & \to \mathbf{R},\; \theta \mapsto dJ(\Omega)(\theta) \end{align*} is linear and continuous, in which case $dJ(\Omega)(\theta)$ is called the \textit{shape derivative} at $\Omega$. \item[(ii)] The shape derivative $dJ(\Omega)$ is of finite order if there is an integer $l\ge 0$ and a constant $c>0$ such that for each compact $K\subset D$ $$|dJ(\Omega)(\theta)| \le c \|\theta\|_{l} \quad \forall \theta\in C^\infty_c(K,\mathbf{R}^d),$$ where $\|\theta\|_{l} := \sum_{|\alpha| \le l} |D^\alpha \theta|_\infty$. The smallest such integer $l\ge 0$ is called the order of $dJ(\Omega)$. \end{itemize} \end{definition} The shape derivative from Definition \ref{def1} has a particular structure. 
Intuitively, it is clear that the shape functional stays constant for a transformation $\Phi$ that leaves $\Omega$ unchanged, that is $\Phi(\Omega)=\Omega$, even if some points inside $\Omega$ move and consequently the shape derivative is zero in this case. This property is valid when $\Omega$ is open or closed; cf. \cite{MR2731611}. Mathematically, this is expressed in the following basic theorem proved in \cite{zolesio1979identification}. \begin{theorem}\label{lem:1} Let $\Omega \in \mathfrak P$ be open or closed. Let $\theta \in C^{0,1}_c(D,\mathbf{R}^d)$ be a vector field with compact support in $\Omega$ and denote by $\Phi_t$ its flow defined in \eqref{Vxt}. Then we have $$ dJ(\Omega)(\theta)=0. $$ \end{theorem} Note that the shape derivative of $J(\Omega)$ always exists for vector fields with compact support in $\Omega$, even if it does not exist for other vector fields. An important consequence of Theorem \ref{lem:1}, also for numerical methods, is that {\it independently of the representation} of the shape derivative and the regularity of the domain $\Omega$, the values of $\theta$ outside the boundary of $\Omega$ have no influence on the shape derivative. \begin{corollary} Let $\Omega \in \mathfrak P$ be a set with $C^{1}$-boundary. Assume that $J$ is shape differentiable on $\mathfrak P$. Let $\theta \in C^{0,1}_{\partial\Omega}(D,\mathbf{R}^d)$. Then we have $$ dJ(\Omega)(\theta)=0. $$ \end{corollary} The previous discussion immediately yields the following fundamental result of shape optimization. \begin{theorem}[Structure Theorem]\label{thm:structure_theorem} Assume $\Gamma :=\partial \Omega $ is compact and $J$ is shape differentiable. 
Denote the \textit{shape derivative} by \begin{equation} dJ(\mathcal{B}ega ):C^\infty_c(D,\mathbf{R}^d)\rightarrow \mathbf{R},\quad \theta \mapsto dJ(\mathcal{B}ega)(\theta). \varepsilonnd{equation} Assuming $dJ(\mathcal{B}ega )$ is of order $k\ge 0$ and $\Gamma$ of class $C^{k+1}$, then there exists a linear and continuous functional $g: C^k(\Gamma)\rightarrow \mathbf{R}$ such that \begin{equation}\label{volume} dJ(\mathcal{B}ega)(\theta)=g(\theta_{|\Gamma}\cdot n), \varepsilonnd{equation} \varepsilonnd{theorem} \begin{proof} See \cite[pp. 480-481]{MR2731611}. \varepsilonnd{proof} \section{Shape derivatives via averaged adjoint method}\label{section2} Lagrangian methods in shape optimization allow to compute the shape derivative of functions depending on the solution of partial differential equations without the need to compute the material derivative of the partial differential equations; see \cite{MR2731611} for a description of such a method in the linear case. Here we extend and simplify the averaged adjoint method, a Lagrangian-type method introduced in \cite{sturm13}. With this approach the computation of the domain representation of the shape derivative is fast, the retrieval of the boundary form is convenient, and no saddle point assumptions is required unlike in \cite{MR2731611}. Let two vector spaces $E = E(\mathcal{B}ega), F=F(\mathcal{B}ega)$ and $\tau>0$ be given, and consider a parameterization $\mathcal{B}ega_t = \Phi_t(\mathcal{B}ega)$ for $t\in [0,\tau]$. Ultimately, our goal is to differentiate shape functions of the type $J(\mathcal{B}ega_t)$ which can be written using a Lagrangian as $J(\mathcal{B}ega_t) = \mathcal{L}(\mathcal{B}ega_t, u^t,\hat\psi)$, where $u^t\in E(\mathcal{B}ega_t)$ and $\hat\psi\in F(\mathcal{B}ega_t) $. 
The main appeal of the Lagrangian is that we actually only need to compute the derivative with respect to $t$ of $\mathcal{L}(\mathcal{B}ega_t,\hat\varphi,\hat\psi)$ to compute the derivative of $J(\mathcal{B}ega_t)$, indeed this is the main result of Theorem \ref{thm:sturm}, but this requires a few explanations. Since $\mathcal{L}(\mathcal{B}ega_t,\hat\varphi,\hat\psi)$, is often constituted of integrals on $\Phi_t(\mathcal{B}ega)$, using a change of variable we can rewrite these integrals to integrals on the fixed domain $\mathcal{B}ega$, and consequently transfer the dependence on $t$ to the integrand. However, in the process appear the composed functions $\hat\varphi\circ\Phi_t\in E(\mathcal{B}ega)$ and $\hat\psi\circ\Phi_t\in F(\mathcal{B}ega)$, whose derivatives are not straightforward to compute since $\hat\varphi$ and $\hat\psi$ are defined on the moving spaces $E(\mathcal{B}ega_t)$ and $F(\mathcal{B}ega_t)$. Fortunately, and this is the crucial point of the shape-Lagrangian approach, to compute the shape derivative we can reparameterize the problem by considering $\mathcal{L}(\mathcal{B}ega_t, \Psi_t\circ \varphi, \Psi_t\circ \psi)$ instead of $\mathcal{L}(\mathcal{B}ega_t,\hat\varphi,\hat\psi)$, where $\Psi_t$ is an appropriate bijection between $E(\mathcal{B}ega)$ and $E(\mathcal{B}ega_t)$, and $\varphi\in E(\mathcal{B}ega)$, $\psi\in F(\mathcal{B}ega)$. Now the change of variable in the integrals yields functions $\varphi$ and $\psi$ in the integrand, which are defined on fixed spaces. In this paper $E$ and $F$ are $H^1$-spaces, and in this case we may consider the particular reparameterization $\mathcal{L}(\mathcal{B}ega_t,\varphi\circ\Phi_t^{-1},\psi\circ\Phi_t^{-1})$. For spaces such as $H(\text{curl};\mathcal{B}ega)$, other transformations $\Psi_t$ can be used; see \cite{HLY,MR2971616,MR2002150}. 
Thus we are led to consider general functions of the type $G:[0,\tau]\times E\times F \rightarrow \mathbf{R}$ with $$ G(t,\varphi,\psi): =\mathcal{L}(\Phi_t(\mathcal{B}ega),\varphi\circ\Phi_t^{-1},\psi\circ\Phi_t^{-1}).$$ This is precisely what we do in \varepsilonqref{eq:G_on_the_moved_domain} when showcasing an application of the method. The main result of this section, Theorem \ref{thm:sturm}, shows that to obtain the shape derivative of $\mathcal{L}$, it is enough to compute the derivative with respect to $t$ of $G$ while assigning the proper values to $\varphi$ and $\psi$. The main ingredient is the introduction of the averaged adjoint equation. In addition, in this paper we consider the following specific form \begin{equation} \label{G_lag} G(t,\varphi,\psi):= a(t,\varphi,\psi) + b(t,\varphi), \varepsilonnd{equation} where $$ a:[0,\tau]\times E \times F \rightarrow \mathbf{R}, \qquad b:[0,\tau]\times E \rightarrow \mathbf{R} ,$$ are functions such that $\psi \mapsto a(t,\varphi,\psi)$ is linear for all $t\in [0,\tau]$ and $\varphi\in E$. The function $G$ is commonly called {\it Lagrangian}, hence the name of the method. In the applications we have in mind, the function $b$ arises from the objective function while $a$ corresponds to the constraint, after transporting back to the fixed domain $\mathcal{B}ega$. Throughout the paper, the Greek letters $\varphi$ and $\psi$ are used for variables, while the roman letters $u,p$ are used for the solutions of the state and adjoint states, respectively. Let us assume that for each $t\in [0,\tau]$ the equation \begin{equation}\label{eq:state_G} d_\psi G(t,u^t,0;\hat\psi)= a(t,u^t, \hat\psi) = 0\;\text{ for all } \hat\psi \in F. \varepsilonnd{equation} admits a unique solution $u^t\in E$. Further we make the following assumptions for $G$. 
\begin{thmx} \label{amp:gateaux_diffbar_G} \label{amp:affine-linear} For every $(t,\psi)\in [0,\tau]\times F$ \begin{enumerate} \item[(i)] $[0,1]\ni s\mapsto G(t,su^t+(1-s)u^0,\psi)$ is absolutely continuous. \item[(ii)] $[0,1]\ni s\mapsto d_\varphi G(t,su^t+(1-s)u^0,\psi;\hat{\varphi})$ belongs to $L^1(0,1)$ for all $\hat{\varphi}\in E$. \end{enumerate} \end{thmx} When Assumption \ref{amp:affine-linear} is satisfied, for $t\in [0,\tau]$ we introduce the \textit{averaged adjoint equation} associated with $u^t$ and $u^0$: Find $p^t\in F$ such that \begin{equation}\label{averated_} \int_0^1 d_\varphi G(t,su^t+(1-s)u^0,p^t;\hat{\varphi})\, ds =0 \quad \text{ for all } \hat{\varphi}\in E. \end{equation} Notice that, in view of Assumption \ref{amp:affine-linear}, for all $t\in[0,\tau]$, \begin{equation}\label{eq:main_averaged} G(t,u^t,p^t)-G(t,u^0,p^t) = \int_0^1 d_\varphi G(t,su^t+(1-s)u^0,p^t;u^t-u^0)\, ds =0. \end{equation} We can now state the main result of this section. \begin{thmy} We assume that $$ \lim_{t\searrow 0} \frac{G(t,u^0,p^t)-G(0,u^0,p^t)}{t}=\partial_tG(0,u^0,p^0).$$ \end{thmy} \begin{theorem} \label{thm:sturm} Let (H0) and (H1) be satisfied and assume there exists a unique solution $p^t$ of the averaged adjoint equation \eqref{averated_}. Then for $\psi \in F$ we obtain \begin{equation}\label{eq:dt_G_single} \dt b(t,u^t) |_{t=0} = \dt(G(t,u^t,\psi))|_{t=0}=\partial_t G(0,u^0,p^0). \end{equation} \end{theorem} \begin{proof} Put $g(t) := G(t,u^t,0)-G(0,u^0,0)$, and note that $g(t) = G(t,u^t,\psi)-G(0,u^0,\psi)$ for all $\psi \in F$ and $g(0)=0$. We have to show that $$g'(0):= \lim_{t\searrow 0}\frac{G(t,u^t,0)-G(0,u^0,0)}{t} \quad \text{ exists. 
}$$ Thanks to Assumption \ref{amp:affine-linear} we can define the averaged adjoint $p^t$ and using that $G$ is affine with respect to the third argument, we obtain $$g(t) = \underbrace{G(t,u^t,p^t)-G(t,u^0,p^t)}_{=0 \mbox{ in view of } \eqref{eq:main_averaged}} + G(t,u^0,p^t)-G(0,u^0,p^t) .$$ Dividing by $t>0$ and using Assumption (H1) yields $$ g'(0) = \lim_{t\searrow 0} \frac{g(t) - g(0)}{t} = \lim_{t\searrow 0}\frac{G(t,u^0,p^t)-G(0,u^0,p^t)}{t} = \partial_t G(0,u^0,p^0) $$ which concludes the proof. \end{proof} \begin{remark} In terms of $a$ and $b$, equation \eqref{averated_} reads: $$ \int_0^1 d_\varphi a(t,su^t+(1-s)u^0,p^t;\hat{\varphi})\, ds = - \int_0^1 d_\varphi b(t,su^t+(1-s)u^0;\hat{\varphi})\, ds $$ for all $\hat{\varphi}\in E$. If $\varphi \mapsto a(t,\varphi,\psi)$ is in addition linear, then \eqref{averated_} becomes $$ a(t,\hat{\varphi},p^t) = - \int_0^1 d_\varphi b(t,su^t+(1-s)u^0;\hat{\varphi})\, ds $$ for all $\hat{\varphi}\in E$. \end{remark} \section{Tensor representation of the shape derivative}\label{sec:tensor_representation} In this section we identify tensor representations of the shape derivative that correspond to a large class of problems studied in the literature for PDE-constrained shape optimization. This tensor representation has several interesting properties that we investigate. In particular we exhibit the link between this tensor representation and the usual boundary expression of the shape derivative. \subsection{Definition and properties}\label{def_prop} \begin{definition}\label{def:tensor} Let $\Omega \in \mathfrak P$ be a set with $C^k$-boundary, $k\ge 1$. 
A shape differentiable function $J$ of order $k$ is said to admit a tensor representation if there exist tensors $\mathbf{S}_l\in L^1(D, \mathcal L^l(\mathbf{R}^d,\mathbf{R}^d))$ and $\mathfrak{S}_l\in L^1(\partial \Omega; \mathcal L^l(\mathbf{R}^d,\mathbf{R}^d))$, $l=0,\dots,k$, such that \begin{equation}\label{ea:volume_from} dJ(\Omega)(\theta) = \sum_{l=0}^k \int_D \mathbf{S}_l\cdot D^l\theta \,dx + \int_{\partial \Omega} \mathfrak{S}_l\cdot D^l_\Gamma \theta\, ds \quad \text{ for all } \theta\in C^k_c(D,\mathbf{R}^d), \end{equation} where $D_\Gamma \theta := D\theta -(D\theta n)\otimes n$ is the tangential derivative of $\theta$ along $\partial\Omega$. Here $\mathcal L^l(\mathbf{R}^d, \mathbf{R}^d) $ denotes the space of multilinear maps from $\mathbf{R}^d \times \cdots \times \mathbf{R}^d$ to $\mathbf{R}^d$. \end{definition} Most if not all examples involving PDEs from classical textbooks \cite{MR2731611,MR2512810,SokZol92} can be written in the form \eqref{ea:volume_from}. \begin{remark} \begin{itemize} \item[(a)] A particular case of the tensor representation \eqref{ea:volume_from} is the Eshelby energy momentum tensor in continuum mechanics introduced in \cite{MR0489190}; see also \cite{MR3013681}. In this case only $\mathbf{S}_1$ is not zero. \item[(b)] When $J$ is shape differentiable in $\Omega$ then by definition $\theta \mapsto dJ(\Omega)(\theta)$ is a distribution, and if $\partial \Omega$ is compact, the distribution $\theta \mapsto dJ(\Omega)(\theta)$ is of finite order. 
\item[(c)] If $dJ(\Omega)$ is of order $k=1$ and $ |dJ(\Omega)(\theta)| \le C\|\theta\|_{H^1(D,\mathbf{R}^d)} $ for all $\theta\in C^\infty_c(D,\mathbf{R}^d)$ then by density of $C^\infty_c(D,\mathbf{R}^d)$ in $H^1_0(D,\mathbf{R}^d)$ the derivative $dJ(\Omega)$ extends to a continuous functional on $H^1_0(D,\mathbf{R}^d)$, that is, $$ |\widehat{dJ(\Omega)}(\theta)| \le c\|\theta\|_{H^1(D,\mathbf{R}^d)} \quad \text{ for all } \theta\in H^1_0(D,\mathbf{R}^d). $$ Therefore by the theorem of Riesz, we obtain a vector field $W$ in $H^1_0(D,\mathbf{R}^d)$ such that $$ \forall \theta\in H^1_0(D,\mathbf{R}^d),\quad \widehat{dJ(\Omega)}(\theta) = \int_D DW \cdot D \theta + W \cdot \theta \, dx $$ and this defines a tensor representation with $\mathbf{S}_1= DW$, $\mathbf{S}_0=W$, $\mathfrak{S}_1=0$ and $\mathfrak{S}_0=0$. \item[(d)] The assumption that $\Omega$ be a set of class $C^k$ can be reduced if $\mathfrak{S}_l\equiv 0$ for all $0\leq k_0\leq l\leq k$. \end{itemize} \end{remark} The tensor representation \eqref{ea:volume_from} is not unique in the sense that there might be several ways to choose the tensors $\mathbf{S}_l$ and $\mathfrak{S}_l$. This is expressed by the fact that these tensors are correlated. We describe these relations below in the case $k=1$ in Proposition \ref{tensor_relations}, which also describes the link between the tensor representation and the usual boundary representation \eqref{volume} of the shape derivative. \begin{proposition}\label{tensor_relations} Let $\Omega $ be a subset of $D$ with $C^1$-boundary. Suppose that the derivative $dJ(\Omega)$ has the representation \begin{equation}\label{eq:first_order_tensor} dJ(\Omega)(\theta) = \int_D \mathbf{S}_1\cdot D\theta + \mathbf{S}_0\cdot \theta\,dx + \int_{\partial \Omega} \mathfrak{S}_1\cdot D_\Gamma \theta + \mathfrak{S}_0\cdot \theta\, ds . 
\end{equation} If $\mathbf{S}_l$ is of class $W^{1,1}$ in $\Omega$ and $D\setminus \overline \Omega$ then indicating by $+$ and $-$ the restrictions of the tensors to $\Omega$ and $D\setminus\overline \Omega$, respectively, we get \begin{equation} \label{eq:equvilibrium_strong} \begin{split} -\operatorname{div}(\mathbf{S}_1^+) + \mathbf{S}_0^+ &= 0 \quad \text{ in } \Omega \\ -\operatorname{div}(\mathbf{S}_1^-) + \mathbf{S}_0^- &= 0 \quad \text{ in } D\setminus\overline \Omega. \end{split} \end{equation} Moreover, we can rewrite the tensor representation as a distribution on the boundary: $$dJ(\Omega)(\theta) = \int_{\partial \Omega} [(\mathbf{S}_1^+-\mathbf{S}_1^-)n] \cdot\theta + \mathfrak{S}_1\cdot D_\Gamma \theta + \mathfrak{S}_0\cdot \theta \, ds $$ where $n$ denotes the outward unit normal vector to $\Omega$. If the boundary $\partial \Omega$ is $C^2$ and $\mathfrak{S}_1\in W^{1,1}(\partial \Omega; \mathcal L^1(\mathbf{R}^d,\mathbf{R}^d))$, then we obtain a more regular distribution, the so-called boundary expression of the shape derivative: \begin{equation}\label{eq:general_boundary_exp} dJ(\Omega)(\theta) = \int_{\partial \Omega} g_1\, \theta\cdot n\, ds, \end{equation} where \begin{equation} \label{g1} g_1 := [(\mathbf{S}_1^+-\mathbf{S}_1^-)n]\cdot n + \mathfrak{S}_0\cdot n + \mathfrak{S}_1\cdot D_\Gamma n - \operatorname{div}_\Gamma (\mathfrak{S}_1^T n) + \mathcal{H}(\mathfrak{S}_1^T n\cdot n), \end{equation} and $\mathcal{H}=\operatorname{div}_\Gamma n$ denotes the mean curvature\footnote{We define the mean curvature as the sum of the principal curvatures $\kappa_i$, that is, $\mathcal H := \sum_{i=1}^d \kappa_i$.} of $\partial\Omega$ while $\operatorname{div}_\Gamma := \operatorname{tr}(D_\Gamma)$ is the tangential divergence. 
\end{proposition} \begin{proof} Applying Theorem \ref{lem:1} we have $$dJ(\Omega)(\theta) = \int_D \mathbf{S}_1\cdot D\theta + \mathbf{S}_0\cdot \theta\,dx + \int_{\partial \Omega} \mathfrak{S}_1\cdot D_\Gamma \theta + \mathfrak{S}_0\cdot \theta\, ds =0 \quad \text{ for all }\theta\in C^1_c(\Omega\cup (D\setminus \overline \Omega), \mathbf{R}^d). $$ An integration by parts shows \eqref{eq:equvilibrium_strong}. Then, when $\partial\Omega$ is $C^1$, replacing \eqref{eq:equvilibrium_strong} in the expression of the shape derivative and using Green's formula we obtain \begin{equation}\label{eq:domain} \begin{split} dJ(\Omega)(\theta) =& \int_{\partial \Omega} \mathfrak{S}_1\cdot D_\Gamma \theta + \mathfrak{S}_0\cdot \theta\,\,ds + \int_{\partial \Omega} [(\mathbf{S}_1^+-\mathbf{S}_1^-)n]\cdot \theta \, ds\\ &+ \int_\Omega( \underbrace{-\operatorname{div}(\mathbf{S}_1^+) + \mathbf{S}_0^+}_{=0})\cdot \theta \, dx + \int_{D\setminus \overline \Omega} (\underbrace{-\operatorname{div}(\mathbf{S}_1^-) + \mathbf{S}_0^-}_{=0})\cdot \theta \, dx\\ \stackrel{\eqref{eq:equvilibrium_strong}}{=} & \int_{\partial \Omega} [(\mathbf{S}_1^+-\mathbf{S}_1^-)n]\cdot \theta +\mathfrak{S}_1\cdot D_\Gamma \theta + \mathfrak{S}_0\cdot \theta\, \, ds\quad \text{ for all } \theta\in C^1_c(D, \mathbf{R}^d). \end{split} \end{equation} With a slight abuse of notation we keep the same notation $n$ for the extension of the normal to a neighborhood of $\partial \Omega$. Let $\theta\in C^1(\overline D,\mathbf{R}^d)$ and define $\theta_\tau :=\theta - (\theta\cdot n) n$ the tangential part of $\theta$. 
Then $\theta_\tau \cdot n =0$ on $\partial \Omega$ and hence from the structure theorem we get $dJ(\Omega)(\theta_\tau)=0$ which yields in view of \eqref{eq:domain}: \begin{equation}\label{eq:boundary_exp} \begin{split} dJ(\Omega)(\theta) &= dJ(\Omega)((\theta\cdot n)n) \\ &= \int_{\partial \Omega} ((\mathbf{S}_1^+-\mathbf{S}_1^-)n\cdot n) (\theta\cdot n) +\mathfrak{S}_1\cdot D_\Gamma (n(\theta\cdot n)) + (\mathfrak{S}_0\cdot n) (\theta\cdot n) \, ds\\ & = \int_{\partial \Omega} ((\mathbf{S}_1^+-\mathbf{S}_1^-)n\cdot n) (\theta\cdot n) + (\mathfrak{S}_0\cdot n) (\theta\cdot n)\, ds\\ & + \int_{\partial \Omega} \mathfrak{S}_1\cdot D_\Gamma n (\theta\cdot n) + n\cdot \mathfrak{S}_1\nabla_\Gamma (\theta\cdot n) \, ds, \end{split} \end{equation} where we used that for all functions $f\in C^1(\mathbf{R}^d,\mathbf{R}^d)$ and $g\in C^1(\mathbf{R}^d)$ we have \begin{equation} \begin{split} D(gf) & = g Df + f\otimes \nabla g. \end{split} \end{equation} Finally using $\mathfrak{S}_1\in W^{1,1}(\partial \Omega; \mathcal L^1(\mathbf{R}^d,\mathbf{R}^d))$ we integrate by parts on the boundary $\partial \Omega$ to transform the last term in \eqref{eq:boundary_exp} $$ \int_{\partial \Omega} n\cdot \mathfrak{S}_1\nabla_\Gamma (\theta\cdot n) \, ds = \int_{\partial \Omega} (- \operatorname{div}_\Gamma (\mathfrak{S}_1^T n) + \mathcal{H}(\mathfrak{S}_1^T n\cdot n) )(\theta\cdot n) ds. 
$$ Therefore \eqref{eq:boundary_exp} reads \begin{equation}\label{eq:boundary_exp_2} \begin{split} dJ(\Omega)(\theta) = & \int_{\partial \Omega} ((\mathbf{S}_1^+-\mathbf{S}_1^-)n\cdot n) (\theta\cdot n) + (\mathfrak{S}_0\cdot n) (\theta\cdot n)\, ds\\ & + \int_{\partial \Omega} \mathfrak{S}_1\cdot D_\Gamma n (\theta\cdot n) + (- \operatorname{div}_\Gamma (\mathfrak{S}_1^T n) + \mathcal{H}(\mathfrak{S}_1^T n\cdot n) )(\theta\cdot n)\, ds, \end{split} \end{equation} which we can rewrite as \eqref{eq:general_boundary_exp}. \end{proof} \begin{remark} In Proposition \ref{tensor_relations}, if $\mathfrak{S}_1\equiv 0$, one can still obtain \eqref{eq:equvilibrium_strong} when $\Omega$ is only Lipschitz instead of $C^1$. \end{remark} \begin{corollary}\label{corollary2} Let the assumptions of Proposition~\ref{tensor_relations} be satisfied. Suppose that the tensor $\mathfrak{S}_1:\partial\Omega\rightarrow \mathcal L(\mathbf{R}^d,\mathbf{R}^d)$ has the form $\mathfrak{S}_1 = \alpha(I -n\otimes n)$, where $\alpha\in C^0(\partial\Omega)$. Then \eqref{eq:general_boundary_exp} simplifies to \begin{equation} \label{shape_der_simplified} dJ(\Omega)(\theta) = \int_{\partial \Omega} g_1 \, \theta\cdot n\, \, ds, \end{equation} where $g_1$ is given by $$ g_1 := [(\mathbf{S}_1^+-\mathbf{S}_1^-)n]\cdot n + \mathfrak{S}_0\cdot n + \alpha\mathcal{H}. $$ \end{corollary} \begin{proof} First $\mathfrak{S}_1^T = \alpha(I -n\otimes n)^T = \mathfrak{S}_1$ and $ \mathfrak{S}_1^T n = \alpha(I - n\otimes n)n = \alpha(n- (n\cdot n)n) = 0$, thus the two last terms in \eqref{g1} vanish. 
Concerning the third term in \eqref{g1} we write \begin{align*} \mathfrak{S}_1\cdot D_\Gamma n = \alpha (I -n\otimes n)\cdot D_\Gamma n & = \alpha(\operatorname{tr}(D_\Gamma n) - (n\otimes n)\cdot D_\Gamma n )=\alpha(\operatorname{div}_\Gamma n - (D_\Gamma n n)\cdot n)= \alpha\mathcal{H}, \end{align*} where we have used $(D_\Gamma n n)\cdot n=0$. \end{proof} \begin{remark} The particular tensor $\mathfrak{S}_1 = \alpha(I -n\otimes n)$ in Corollary \ref{corollary2} is commonly encountered in shape optimization problems. In fact, \eqref{shape_der_simplified} corresponds to a standard formula that can be found in most textbooks on shape optimization. \end{remark} \begin{remark} Recall that for given vector fields $\theta, \zeta$, the second order shape derivative is defined by $$d^2J(\Omega)(\theta)(\zeta) := \dt dJ(\Phi_t^\zeta(\Omega))(\theta)|_{t=0}.$$ Once we have identified a tensor representation \eqref{ea:volume_from} for the shape derivative $dJ(\Omega)(\theta)$ for fixed $\theta$, it is convenient to differentiate it to also obtain a tensor representation for the second-order shape derivative. Further, Proposition \ref{tensor_relations} or Corollary \ref{corollary2} can also be applied to obtain a boundary expression for the second order shape derivative. \end{remark} Similar relations as in Proposition \ref{tensor_relations} could be obtained for any tensor representation of order $k$. 
For instance in the case $k = 2$ we obtain the relations \begin{equation} \label{eq:equvilibrium_strong_second} \begin{split} \mathcal A\mathbf{S}_2^+-\operatorname{div}(\mathbf{S}_1^+) + \mathbf{S}_0^+ &= 0 \quad \text{ in } \Omega, \\ \mathcal A\mathbf{S}_2^- -\operatorname{div}(\mathbf{S}_1^-) + \mathbf{S}_0^- &= 0 \quad \text{ in } D\setminus\overline \Omega, \end{split} \end{equation} where $(\mathcal A\mathbf{S}_2)_l = \sum_{i,j=1}^d \partial_{x_ix_j}(\mathbf{S}_2)_{ijl}$. Using the averaged adjoint approach from Theorem \ref{thm:sturm} yields the tensor representation \eqref{ea:volume_from} of the shape derivative. Then Proposition \ref{tensor_relations} can be used to immediately derive the standard boundary expression of the shape gradient from this tensor representation. \subsection{Examples of tensor representations} In this section we present several examples of representations corresponding to Definition \ref{def:tensor} and apply the observations from Section \ref{def_prop}. \subsubsection*{First order tensor representation} A basic example of a first order tensor representation of the shape derivative is for $$J(\Omega) = \int_\Omega f \, dx + \int_{\partial \Omega} g \, ds$$ with $f,g\in C^2(\mathbf{R}^d)$. Then one easily computes $$ dJ(\Omega)(\theta) = \int_\Omega \nabla f \cdot \theta + f\operatorname{div}(\theta)\,dx + \int_{\partial \Omega} \nabla g\cdot \theta + g\operatorname{div}_\Gamma \theta\, ds . $$ The corresponding tensor representation \eqref{ea:volume_from} is \begin{align*} \mathbf{S}_1^+:= f &I,\quad \mathbf{S}_1^-:= 0 , \quad \mathbf{S}_0^+:= \nabla f, \quad \mathbf{S}_0^-:= 0, \quad \mathfrak{S}_1:= g (I-n\otimes n),\quad \mathfrak{S}_0 := \nabla g. \end{align*} Note that $\mathfrak{S}_1$ has the form assumed in Corollary \ref{corollary2}. 
Applying this Corollary, assuming the domain has enough regularity, we obtain in view of \eqref{shape_der_simplified} the classical formula: $$ dJ(\Omega)(\theta) =\int_{\partial\Omega} g_1 \, \theta\cdot n\, \, ds, $$ where $g_1$ is given by $$ g_1 := f + \partial_n g + g\mathcal{H}. $$ Note that in the particular case $f=0$ we have obtained as a byproduct the formula \begin{equation}\label{tangential_green} \int_{\partial \Omega} \nabla g\cdot \theta + g\operatorname{div}_\Gamma \theta\, ds =\int_{\partial\Omega} (\partial_n g + g\mathcal{H}) \, \theta\cdot n\, \, ds, \end{equation} and when in addition $\partial_n g=0$ or $g$ is defined only on $\partial\Omega$, \eqref{tangential_green} becomes the classical {\it tangential Green's formula}; see for instance \cite[Proposition 5.4.9]{MR2512810}. \subsubsection*{Non-homogeneous Dirichlet problem} The following problem was already considered for instance in \cite{MR2731611}. Here we present a fairly easy way to compute the shape derivative. Let $\Omega$ be an open and bounded subset of $\mathbf{R}^d$ that is contained in an open and bounded set $D$. Consider \begin{align}\label{eq:state_non_homo} -\Delta v &= f\mbox{ in }\Omega,\\ v & = g \mbox{ on }\partial\Omega,\label{eq:state_non_homo_} \end{align} where $f\in L^2(D)$ and $g\in H^2(D)$. Introducing the variable $u:=v-g$, we observe that \eqref{eq:state_non_homo}--\eqref{eq:state_non_homo_} is equivalent to the homogeneous Dirichlet problem \begin{align}\label{eq:state_non_homo-2} -\Delta u &= \Delta g + f\mbox{ in }\Omega,\\\label{eq:state_non_homo-2_} u & = 0 \mbox{ on }\partial\Omega. \end{align} Consider the cost function \begin{equation}\label{eq:cost_dirichlet_non} J(\Omega)=\int_\Omega |v-u_d|^2\,dx=\int_\Omega |u+g-u_d|^2\,dx. 
\end{equation} The weak formulation of \eqref{eq:state_non_homo-2}--\eqref{eq:state_non_homo-2_} reads: \begin{equation}\label{eq:non_dir} \mbox{Find } u\in H^1_0(\Omega): \int_\Omega \nabla u\cdot \nabla \psi \, dx = \int_\Omega -\nabla g\cdot \nabla \psi + f\psi\, dx \quad \text{ for all } \psi \in H^1_0(\Omega). \end{equation} Note that the previous weak formulation is already well-defined for an arbitrary open and bounded set $\Omega$. We do not need to impose any regularity on $\Omega$. The perturbed problem of the previous equation, which is obtained by considering \eqref{eq:non_dir} on $\Phi_t(\Omega)$ and performing a change of variables, reads: find $u^t\in H^1_0(\Omega)$ such that \begin{equation}\label{eq:non_dir_per} \int_\Omega A(t)\nabla u^t\cdot \nabla \psi \, dx = \int_\Omega - A(t)\nabla g^t\cdot \nabla \psi + \xi(t)f^t \psi\, dx \quad \text{ for all } \psi \in H^1_0(\Omega), \end{equation} where $\xi(t) := \det(D\Phi_t)$ and $A(t) :=\xi(t)D\Phi_t^{-1}D\Phi_t^{-T}$. The following continuity result is standard: \begin{lemma} There exists a constant $c>0$ such that $ \|u^t-u^0\|_{H^1_0(\Omega)} \le ct$ for all $ t\in [0,\tau]. $ \end{lemma} Introduce \begin{align*} a(t,\varphi,\psi) & := \int_\Omega A(t)\nabla \varphi\cdot \nabla \psi \, dx + \int_\Omega A(t)\nabla g^t\cdot \nabla \psi - \xi(t)f^t \psi\, dx, \\ b(t,\varphi) & :=\int_\Omega \xi(t) |\varphi +g^t-u_d^t|^2\,dx, \end{align*} where $g^t:=g\circ \Phi_t$ and $u_d^t:=u_d\circ \Phi_t$. Recall that the associated Lagrangian \eqref{G_lag} is $G(t,\varphi,\psi) = a(t,\varphi,\psi) + b(t,\varphi). 
$ The averaged adjoint equation \eqref{averated_} reads $$\int_\Omega A(t)\nabla \varphi\cdot \nabla p^t \, dx = \int_\Omega (u^t+u^0 + 2g^t - 2u_d^t)\varphi\,dx \quad \text{ for all } \varphi\in H^1_0(\Omega). $$ The following continuity result for the adjoint is standard: \begin{lemma} There exists a constant $c>0$ such that $$ \|p^t-p^0\|_{H^1(\Omega)} \le ct \quad \text{ for all } t\in [0,\tau].$$ \end{lemma} One readily verifies that all assumptions of Theorem~\ref{thm:sturm} are satisfied, except for (H1) which we now prove. Indeed using $p^t\rightarrow p^0$ in $H^1_0(\Omega)$ as $t$ goes to zero and the strong differentiability of $t\mapsto A(t)$ and $t\mapsto \xi(t)$, we get \begin{align*} &\lim_{t\searrow 0 } \frac{ G(t,u^0,p^t) - G(0,u^0,p^t)}{t} \\ &\hspace{1cm} =\lim_{t\searrow 0} \bigg(\int_{\Omega} \left(\frac{ A(t) - I }{t} \right) \nabla u^0 \cdot \nabla p^t \, dx + \int_{\Omega} \left(\frac{ A(t)\nabla g^t - \nabla g }{t}\right) \cdot \nabla p^t - \left(\frac{\xi(t)f^t - f}{t} \right) p^t \, dx \\ & \hspace{1cm} + \int_{\Omega} \frac{ \xi(t)| u^0 + g^t - u_d^t |^2 - | u^0 + g - u_d |^2}{t}\, dx\bigg)\\ &\hspace{1cm} = \partial_t G(0,u^0,p^0), \end{align*} which shows that (H1) is satisfied. Hence, applying Theorem~\ref{thm:sturm} yields $$ dJ(\Omega)(\theta) = \partial_t a(0,u,p) + \partial_tb(0,u) ,$$ which is by definition equivalent to \begin{equation} \begin{split} dJ(\Omega)(\theta) = &\int_\Omega A'(0)(\nabla u \cdot \nabla p + \nabla g\cdot \nabla p) \, dx + \int_\Omega \nabla(\nabla g\cdot \theta)\cdot \nabla p - \operatorname{div}(f\theta) p \, dx\\ & + \int_\Omega \operatorname{div}(\theta) |u+g-u_d|^2 + (\nabla (g-u_d)\cdot \theta)(u+g-u_d)\,dx. 
\end{split} \end{equation} Since $A'(0) = (\operatorname{div} \theta) I - D\theta^T - D\theta$ we obtain the tensor representation \eqref{eq:first_order_tensor} with: \begin{align*} \mathbf{S}_1 &= I(\nabla u\cdot \nabla p + \nabla g \cdot \nabla p - fp + |u+g-u_d|^2) -\nabla u\otimes \nabla p - \nabla p \otimes \nabla u - \nabla p \otimes \nabla g, \\ \mathbf{S}_0 &= D^2 g\nabla p - p\nabla f +(u+g-u_d) \nabla (g-u_d), \\ \mathfrak{S}_1 &= 0, \quad \mathfrak{S}_0 = 0. \end{align*} Now applying \eqref{shape_der_simplified} we get immediately \begin{align*} g_1 = \nabla u\cdot \nabla p +\nabla g \cdot \nabla p - fp + |u+g-u_d|^2 -2\partial_n u\partial_n p - \partial_n p \partial_n g. \end{align*} Using the definition of the tangential gradient and $p=0,u=0$ on $\Gamma$ implies $\nabla_\Gamma u=\nabla_\Gamma p=0$, so we obtain the simpler expression \begin{align*} g_1 = -\partial_n u\partial_n p + |u+g-u_d|^2 = -\partial_n (v-g)\partial_n p + |v-u_d|^2 . \end{align*} Finally, substituting back $u=v-g$ we obtain the formula $$dJ(\Omega)(\theta) = \int_{\partial \Omega} (-\partial_n (v-g)\partial_n p + |v-u_d|^2)\, \theta\cdot n\, ds .$$ This formula can be found for instance in \cite[p. 566, Formula 6.38]{MR2731611}, where the adjoint has the sign opposite to our case. \subsubsection*{Elliptic problem: first order tensor representation} Suppose that $\Omega \subset D\subset \mathbf{R}^d$ is a smooth bounded domain, where $D\subset \mathbf{R}^d$ is the smooth ``universe''. Let us consider the Dirichlet problem: \begin{align}\label{eq:state} \begin{split} -\operatorname{div}(M\nabla u) + u&= f\mbox{ in }\Omega,\\ u & = 0 \mbox{ on }\partial\Omega, \end{split} \end{align} where $M\in \mathbf{R}^{d\times d}$ is a positive definite matrix. 
Consider the cost function \begin{equation}\label{eq:cost_dirichlet} J(\Omega)=\int_\Omega |u-u_d|^2\,dx, \end{equation} where $u_d\in C^1(\mathbf{R}^d)$. Let us introduce \begin{align*} a(t,\varphi,\psi) & := \int_\Omega (M Q^t\nabla \varphi \cdot Q^t\nabla \psi + \varphi \psi )\xi(t) \, dx - \int_\Omega \xi(t) f^t\psi \, dx, \\ b(t,\varphi) & :=\int_\Omega \xi(t) |\varphi-u_d^t|^2\,dx, \end{align*} where $Q^t:=D\Phi_t^{-T}$ and $\xi(t) := \det(D\Phi_t)$. Then the weak formulation of \eqref{eq:state} on the perturbed domain $\Omega_t$, once transported back to $\Omega$, is \begin{equation*} a(t,u^t,\psi) = 0 \quad \text{ for all } \psi\in H^1_0(\Omega). \end{equation*} The Lagrangian corresponding to the minimization of $J(\Omega)$ and the PDE constraint \eqref{eq:state} is \begin{equation}\label{eq:lagrange} G(t,\varphi,\psi) = b(t,\varphi) + a(t,\varphi,\psi). \end{equation} It can be shown using Theorem \ref{thm:sturm} that $dJ(\Omega)(\theta) = \partial_t G(0,u,p)$, where $p\in H^1_0(\Omega)$ denotes the adjoint state: \begin{equation}\label{eq:adjoint_state_simple_dirichlet} \begin{split} \int_\Omega M\nabla \psi\cdot \nabla p +p\psi\, dx=-\int_\Omega 2 (u-u_d)\psi\, dx\quad \text{ for all } \psi \in H^1_0(\Omega) . \end{split} \end{equation} The tensor representation \eqref{ea:volume_from} of the shape derivative of $J(\Omega)$ in direction $\theta\in C^2_c(D,\mathbf{R}^d)$ is given by \begin{equation}\label{eq:shape_derivative} dJ(\Omega)(\theta) = \int_\Omega \mathbf{S}_1\cdot D\theta + \mathbf{S}_0\cdot \theta \, dx. 
\end{equation} where we use the relation $(\nabla p \otimes \nabla u)\cdot D\theta = D\theta \nabla u\cdot \nabla p$ to get the tensors \begin{align} \label{f_mathbf} \mathbf{S}_0 & = -2(u-u_d)\nabla u_d - p\nabla f ,\\ \mathbf{S}_1 & = - \nabla p \otimes M \nabla u - \nabla u \otimes M^T\nabla p + (M\nabla u\cdot \nabla p+ up-fp+(u-u_d)^2)I. \end{align} In the simple case where $M=I$, assuming $u,p\in C^2(\overline \Omega)$, we know from the previous discussion that \eqref{eq:equvilibrium_strong} is satisfied. Noting that \begin{align*} \operatorname{div}(\nabla p\otimes \nabla u) &= \Delta u \nabla p + (D^2p)^T\nabla u, \\ \operatorname{div}(\nabla u\otimes \nabla p) &= \Delta p \nabla u + (D^2u)^T\nabla p, \\ \nabla (\nabla u\cdot \nabla p) &= D^2u\nabla p + D^2 p \nabla u, \end{align*} the relation \begin{equation}\label{nes} - \operatorname{div}(\mathbf{S}_1) + \mathbf{S}_0 = 0 \quad \text{ in } \Omega \end{equation} is equivalent to $$ (-\Delta u + u -f)\nabla p + (-\Delta p + p + 2(u-u_d))\nabla u = 0 \quad \text{ in } \Omega.$$ Therefore, we observe that the fundamental relation \eqref{eq:equvilibrium_strong} between the tensors $\mathbf{S}_1$ and $\mathbf{S}_0$ corresponds to the strong solvability of the state and adjoint state equations. \section{Descent directions}\label{section3} In this paper we are interested in numerical methods for shape optimization problems of the type \begin{equation} \min_{\Omega \in \mathfrak{P}}J(\Omega), \end{equation} where $\mathfrak{P}\subset\mathcal{P}(D)$ is the admissible set. Assume $J:\mathfrak{P}\rightarrow \mathbf{R}$ is shape differentiable at $\Omega\subset D\subset \mathbf{R}^d$. 
\begin{definition}[descent direction]\label{def:descent} The vector field $\theta\in C^{0,1}_c(D,\mathbf{R}^d)$ is called a {\it descent direction} for $J$ at $\Omega$ if there exists an ${\varepsilon}>0$ such that $$ J(\Phi_t^\theta(\Omega))< J(\Omega)\mbox{ for all } t\in (0,{\varepsilon}).$$ If the Eulerian semiderivative of $J$ at $\Omega$ in direction $\theta$ exists and if $\theta$ is a descent direction then by definition \begin{equation}\label{eq:descent} dJ(\Omega)(\theta) < 0. \end{equation} \end{definition} Descent directions are used in iterative methods for finding approximate (possibly local) minimizers of $J(\Omega)$. Typically, at a given starting point $\Omega$, one determines a descent direction $\theta$ and proceeds along this direction as long as the cost functional $J$ reduces sufficiently using a step size strategy. In this section we give a general setting for computing descent directions in the framework of gradient methods using the domain and boundary representations of the shape derivative according to Theorem \ref{thm:structure_theorem}. We show how a descent direction $\theta$ with any regularity $H^s$, $s\geq 1$, can be obtained by solving an appropriate partial differential equation. We also show how to deal with bound constraints on $\theta$. In order to develop a setting allowing to define general descent directions, we recall sufficient conditions for the solvability of the following operator equation \begin{equation*} A\theta=f, \end{equation*} where $A:\textbf{E}\rightarrow \textbf{E}'$ is an operator between a Banach space $\textbf{E}$ and its dual $\textbf{E}'$. Sufficient conditions for the bijectivity of $A$ are given by the theorem of Minty--Browder \cite[p.\,364, Theorem 10.49]{MR2028503}. 
\begin{theorem}[Minty--Browder]\label{MB} Let $(\textbf{E},\|\cdot\|_{\textbf{E}})$ be a reflexive separable Banach space and $A:\textbf{E}\rightarrow \textbf{E}'$ a bounded, hemi-continuous, monotone and coercive operator. Then $A$ is surjective, i.e.\ for each $f\in \textbf{E}'$ there exists $\theta\in \textbf{E}$ such that $A\theta=f$. Moreover, if $A$ is strictly monotone then it is bijective. \end{theorem} Let $A:\textbf{E}\rightarrow \textbf{E}'$ be an operator on a reflexive, separable Banach space $\textbf{E}$ satisfying the assumptions of Theorem \ref{MB} with $\langle A\theta,\theta \rangle_{\textbf{E}',\textbf{E}}\ge 0$ for all $\theta\in \textbf{E}$. Assume $dJ(\Omega)$ can be extended to $\textbf{E}'$ if necessary; for simplicity we keep the same notation for the extension. Introduce the bilinear form \begin{equation}\label{eq:bilinear} \mathcal B:\textbf{E}\times \textbf{E}\rightarrow \mathbf{R},\qquad \mathcal B(\theta,\zeta):=\langle A\theta,\zeta \rangle_{\textbf{E}',\textbf{E}}. \end{equation} Consider the variational problem: \begin{equation} \label{VP_1} (\text{VP})\qquad \mbox{Find } \theta_1\in \textbf{E}\mbox{ such that } \mathcal B(\theta_1,\zeta)=- dJ(\Omega)(\zeta) \mbox{ for all } \zeta\in \textbf{E} . \end{equation} Then the solution $\theta_1$ of (VP) is a descent direction since $dJ(\Omega)(\theta_1) = -\mathcal B(\theta_1,\theta_1)\leq 0$. In certain situations it is desirable to have bound constraints on the shape perturbation. This may be handled by considering the more general case of a variational inequality. Given a subset $K\subset \textbf{E}$ with $0\in K$, consider the variational inequality: \begin{equation*} (\text{VI})\qquad \mbox{Find }\theta_2\in K\mbox{ such that } \mathcal B(\theta_2,\theta_2-\zeta) \le dJ(\Omega)(\zeta-\theta_2) \mbox{ for all } \zeta\in K . 
\varepsilonnd{equation}n The solution $\theta_2$ of (VI) yields a descent direction for $J$ at $\mathcal{B}ega$ since taking $\zeta = 0\in K$ we get $$dJ(\mathcal{B}ega)(\theta_2) \le -\mathcal B(\theta_2,\theta_2)\leq 0.$$ In view of Theorem \ref{thm:structure_theorem}, we choose $\textbf{E}\subset H^{s}(D)$ where $s$ is such that $dJ(\mathcal{B}ega):H^s(D,\mathbf{R}^d)\rightarrow \mathbf{R}^d$ is continuous. When $\textbf{E}$ is a Hilbert space, one may identify $\textbf{E}'$ with $\textbf{E}$. Therefore if $\mathcal B$ is bilinear, coercive, and continuous, then Lax Milgram's lemma ensures that (VP) has a unique solution. For all other cases we may have to use Theorem \ref{MB} or similar results. \begin{remark} \begin{itemize} \item[(a)] Let $\textbf{E}:= H^1_0(D,\mathbf{R}^d)$, $\mathcal B(\theta, \zeta) := \int_D D\theta:D\zeta \,dx $ and $\mathcal{B}ega^+\Subset D$. Then \varepsilonqref{VP_1} reads: find $\theta \in H^1_0(D,\mathbf{R}^d)$, such that $ \mathcal B(\theta, \zeta) = - dJ(\mathcal{B}ega^+)(\zeta)$ for all $\zeta\in H^1_0(D,\mathbf{R}^d)$. Under the assumption that $\partial \mathcal{B}ega^+ \in C^2$, $\theta|_{\mathcal{B}ega^+} \in H^2(\mathcal{B}ega^+)$, and $\theta|_{D\setminus \overline{\mathcal{B}ega^+}} \in H^2(D\setminus \overline{\mathcal{B}ega^+})$, Proposition \ref{tensor_relations} yields $$ \int_{\partial \mathcal{B}ega^+} g \; \zeta\cdot n \, ds = dJ(\mathcal{B}ega^+)(\zeta) \quad \text{ for all }\zeta\in H^1_0(D,\mathbf{R}^d), \quad \text{ where } \quad g= -(D\theta^+ n -D\theta^- n)\cdot n. $$ This shows that the restriction to $\partial \mathcal{B}ega^+$ of the obtained descent direction $\theta$ is more regular than the function $g$. \item[(b)] Let $n$ be an extension of the unit normal to $\mathcal{B}ega^+$ in $D$. If $\theta$ defined on $D$ is a descent direction then $(\theta\cdot n)n|_{\partial\mathcal{B}ega^+}$ is also a descent direction, for the tangential part of $\theta$ does not influence the derivative. 
Indeed, define $\theta_{\tau}:= \theta - (\theta\cdot n)n$; then by Nagumo's theorem $dJ(\Omega^+)(\theta_{\tau}) = 0$ and thus $dJ(\Omega^+)(\theta) = dJ(\Omega^+)((\theta\cdot n)n)$. However, $\theta$ and $(\theta\cdot n)n$ lead to different transformations of the domains in general; indeed the tangential term actually has an influence for large deformations, which means $\Phi_t^\theta(\Omega^+) \ne \Phi_t^{(\theta \cdot n)n}(\Omega^+)$. This influence appears for instance in the shape Hessian. \end{itemize} \end{remark} \section{Electrical impedance tomography}\label{section4} We consider an application of the results above to a typical and important interface problem: the inverse problem of electrical impedance tomography (EIT), also known as the inverse conductivity or Calder\'on's problem \cite{MR590275} in the mathematical literature. It is an active field of research with an extensive literature; for further details we point the reader toward the survey papers \cite{MR1955896,MR1669729} as well as \cite{MR2986262} and the references therein. We consider the particular case where the objective is to reconstruct a piecewise constant conductivity $\sigma$, which amounts to determining an interface ${\Gamma^+}$ between some inclusions and the background. We refer the reader to \cite{MR2329288, MR2132313, MR2536481,HintLauNov,MR2886743, 0266-5611-31-7-075009,Canelas2013} for more details on this approach. The main interest of studying EIT is to apply the approach developed in this paper to a problem which epitomizes general interface problems and simultaneously covers the entire spectrum of difficulties encountered with severely ill-posed inverse problems. 
\subsection{Problem statement}\label{sec:eit} Let $D\subset \mathbf{R}^d$ be a Lipschitz domain, and $\Omega^+,\Omega^-\subset D$ open sets such that $D=\Omega^+\cup \Omega^-\cup \Gamma^+$, where ${\Gamma^+}=\partial \Omega^+=\overline{\Omega^+}\cap \overline{\Omega^-}$ and $\Gamma = \partial D=\partial\Omega^-\setminus\Gamma^+$; see Figure \ref{partition}. In this section $n$ denotes either the outward unit normal vector to $D$ or the outward unit normal vector to $\Omega^+$. Decompose $\Gamma$ as $\Gamma = \Gamma_d\cup\Gamma_n$. Let $\sigma=\sigma^+\chi_{\Omega^+}+\sigma^-\chi_{\Omega^-}$ where $\sigma^\pm$ are scalars and $f=f^+\chi_{\Omega^+}+f^-\chi_{\Omega^-}$ where $f^\pm\in H^1(D)$. \begin{figure} \begin{center} \includegraphics[width=0.3\textwidth]{./EIT_sketch_small2} \caption{Partition $D=\Omega^+\cup \Omega^-\cup \Gamma^+$. }\label{partition} \end{center} \end{figure} Consider the following problems: find $u_n\in H^1_d (D)$ such that \begin{equation}\label{varun} \int_D \sigma \nabla u_n \cdot \nabla z = \int_D fz + \int_{\Gamma_n} gz\ \mbox{ for all }\ z\in H^1_d (D) \end{equation} and find $u_d\in H^1_{dn}(D)$ such that \begin{equation}\label{varud} \int_D \sigma \nabla u_d \cdot \nabla z = \int_D fz\ \mbox{ for all }\ z\in H^1_0(D) \end{equation} where \begin{align*} H^1_d (D)&:=\{v\in H^1(D)\ |\ \; v=0\mbox{ on }\Gamma_d\}, \\ H^1_{dn}(D)&:=\{v\in H^1(D)\ |\ \; v=0\mbox{ on }\Gamma_d, v=h\mbox{ on }\Gamma_n\},\\ H^1_{0}(D)&:=\{v\in H^1(D)\ |\ \; v=0\mbox{ on }\Gamma\} \end{align*} and $g\in H^{-1/2}(\Gamma_n)$ represents the input, in this case the electric current applied on the boundary, and $h\in H^{1/2}(\Gamma_n)$ is the measurement of the potential on $\Gamma_n$, or the other way around, i.e. $h$ can be the input and $g$ the measurement. 
Define also the space $$PH^k(D) := \{ u = u^+\chi_{\Omega^+} + u^-\chi_{\Omega^-}|\ u^+\in H^k(\Omega^+),\ u^-\in H^k(\Omega^-) \}. $$ Consider the following assumption which will be used only to derive the boundary expression of the shape derivative but is not required for the domain expression: \begin{assumption}\label{assump1} The domains $D, \Omega^+,\Omega^-$ are of class $C^k$, $f\in PH^{\max(k-2,1)}(D)$, $g\in H^{k-\frac{3}{2}}(\Gamma_n)$ and $h\in H^{k-\frac{1}{2}}(\Gamma_n)$ for $k\geq 2$. \end{assumption} Applying Green's formula under Assumption \ref{assump1}, equations \eqref{varun} and \eqref{varud} are equivalent to the following transmission problems where $u_n = u_n^+\chi_{\Omega^+} + u_n^-\chi_{\Omega^-}$ and $u_d = u_d^+\chi_{\Omega^+} + u_d^-\chi_{\Omega^-}$: \begin{align} \label{eit1.1_}-\sigma^+\Delta u_n^+ & = f \mbox{ in }\Omega^+, \quad -\sigma^-\Delta u_n^- = f \mbox{ in }\Omega^-, \\ \label{eit1.2_} u_n^- & = 0 \mbox{ on }\Gamma_d ,\\ \label{eit1.3_}\sigma^- \partial_n u_n^- & = g \mbox{ on }\Gamma_n , \end{align} \begin{align} \label{eit2.1_}-\sigma^+\Delta u_d^+ & = f \mbox{ in }\Omega^+,\quad -\sigma^-\Delta u_d^- = f \mbox{ in }\Omega^-,\\ \label{eit2.2_}u_d^- & = 0 \mbox{ on }\Gamma_d, \\ \label{eit2.3_}u_d^- & = h \mbox{ on }\Gamma_n, \end{align} with the transmission conditions \begin{equation} \begin{split} \sigma^+\partial_nu_n^+=\sigma^-\partial_nu_n^-, & \qquad \sigma^+\partial_nu_d^+=\sigma^-\partial_nu_d^- \quad \text{ on } {\Gamma^+},\\ u_n^+=u_n^-, &\qquad u_d^+=u_d^-\quad \text{ on } {\Gamma^+}. 
\end{split} \end{equation} On $\Gamma_d$ we impose homogeneous Dirichlet conditions, meaning that the voltage is fixed and no measurement is performed. One may take $\Gamma_d =\emptyset$, in which case \eqref{varun} becomes a pure Neumann problem and additional care must be taken for the uniqueness and existence of a solution. The situation $\Gamma_d \neq \emptyset$ corresponds to partial measurements. Alternatively, it is also possible to consider a slightly different problem where each function $u_n$ and $u_d$ has both the boundary conditions \eqref{eit1.3_} and \eqref{eit2.3_} on different parts of the boundary. Several measurements can be made by choosing sets of functions $\{g_i\}_{i=1}^I$ and $\{h_i\}_{i=1}^I$. Writing $u_{n,i}$ and $u_{d,i}$ for the corresponding states, the problem of electrical impedance tomography is: \begin{equation}\label{EIT_pb} \mbox{(EIT): Given $\{g_i\}_{i=1}^I$ and $\{h_i\}_{i=1}^I$, find $\sigma$ such that $u_{n,i} = u_{d,i}$ in $D$ for $i=1,\dots,I$.} \end{equation} Note that $u_{n,i} = u_{n,i}(\Omega^+)$ and $u_{d,i} = u_{d,i}(\Omega^+)$ actually depend on $\Omega^+$ through $\sigma=\sigma(\Omega^+)$, however we often write $u_{n,i}$ and $u_{d,i}$ for simplicity. In this section, we assume that the conductivities $(\sigma^+,\sigma^-)$ are known, therefore the EIT problem \eqref{EIT_pb} reduces to the following shape optimization problem where $\Omega^+$ is the unknown. 
\begin{align}\label{EIT-SO} \begin{split} \mbox{Given }& \mbox{$\{g_i\}_{i=1}^I$, $\{h_i\}_{i=1}^I$ and $(\sigma^+,\sigma^-)$ with $\sigma=\sigma^+\chi_{\Omega^+}+\sigma^-\chi_{\Omega^-}$,} \\ & \mbox{find $\Omega^+$ such that $u_{n,i} = u_{d,i}$ in $D$ for $i=1,\dots,I$.} \end{split} \end{align} Actually, the result for several measurements can be straightforwardly deduced from the case of one measurement by summing the cost functionals corresponding to each measurement, therefore in this section we stick to the case $I=1$ of one measurement $g$ for simplicity of presentation. In Section \ref{section6} we consider several measurements for the numerics. The notion of well-posedness due to Hadamard requires the existence and uniqueness of a solution and the continuity of the inverse mapping. The severe ill-posedness of EIT is well-known: uniqueness and continuity of the inverse mapping depend on the regularity of $\sigma$, the latter being responsible for the instability of the reconstruction process. Additionally, partial measurements often encountered in practice render the inverse problem even more ill-posed. We refer to the reviews \cite{MR1955896,MR1669729, MR2986262} and the references therein for more details. A standard cure against the ill-posedness is to regularize the inverse mapping. In this paper the regularization is achieved by considering smooth perturbations of the domains $\Omega^+$. To solve problem \eqref{EIT-SO}, we use an optimization approach by minimizing the shape functionals \begin{align} \label{eit3.1}J_1(\Omega^+) &= \frac{1}{2}\int_D (u_d(\Omega^+) - u_n(\Omega^+))^2 ,\\ \label{eit3.2}J_2(\Omega^+) &= \frac{1}{2}\int_{\Gamma_n} (u_n(\Omega^+) - h)^2. \end{align} Since $u_d,u_n\in H^1(D)$ and $h\in H^{1/2}(\Gamma_n)$, $J_1$ and $J_2$ are well-defined. 
Note that $J_1$ and $J_2$ are redundant for the purpose of the reconstruction, but our aim is to provide an efficient way of computing the shape derivative of two functionals which are often encountered in the literature. To compute these derivatives we use the approach described in Section \ref{section2}. First of all introduce \begin{align} \label{eit4.1}F_1(\varphi_d,\varphi_n) & := \frac{1}{2}\int_D (\varphi_d - \varphi_n)^2, \\ \label{eit4.2}F_2(\varphi_n) & := \frac{1}{2}\int_{\Gamma_n} (\varphi_n - h)^2. \end{align} Note that $J_1(\Omega^+) = F_1(u_d(\Omega^+),u_n(\Omega^+))$ and $J_2(\Omega^+) =F_2(u_n(\Omega^+))$. Next consider $\mathfrak{P}$ a subset of $\mathcal{P}(D)$ and the Lagrangian $\mathcal{L}:\mathfrak{P}\times H^1_d(D)\times H^1_d (D)\times H^1_0 (D)\times H^1_d (D)\rightarrow \mathbf{R}$: \begin{align} \label{G} \begin{split} \mathcal{L}(\Omega^+,\mathbf{{\varphi}},\mathbf{{\psi}}) & := \alpha_1 F_1(\varphi_d,\varphi_n) + \alpha_2 F_2(\varphi_n)\\ & + \int_D \sigma \nabla \varphi_d\cdot\nabla \psi_d - f\psi_d + \int_{\Gamma_n} -\sigma^-\partial_n\psi_d (\varphi_d - h)\\ &+ \int_D \sigma \nabla \varphi_n\cdot\nabla \psi_n - f\psi_n - \int_{\Gamma_n} g\psi_n, \end{split} \end{align} where $\mathbf{{\varphi}} :=(\varphi_d,\varphi_n)$ and $\mathbf{{\psi}} :=(\psi_d,\psi_n)$. The term $-\sigma^-\partial_n\psi_d$ in the second integral of \eqref{G} is used to enforce the boundary condition \eqref{eit2.3_}. Introduce the objective functional \begin{align*} J(\Omega^+) := \alpha_1 J_1(\Omega^+) + \alpha_2 J_2(\Omega^+). \end{align*} To compute the derivative of the Lagrangian depending on \eqref{eit1.1_}-\eqref{eit2.3_} we apply the averaged adjoint method from Section \ref{section2}. 
\subsection{State and adjoint equations} The state $\mathbf{u}:=(u_d,u_n)$ and adjoint state $\mathbf{p}:=(p_d,p_n)$ are solutions of the equations: \begin{align} \label{lag1} \partial_{\mathbf{{\psi}}} \mathcal{L}(\Omega^+,\mathbf{u}, \mathbf{p})(\hat \mathbf{{\psi}}) &= 0\mbox{ for all } \hat \mathbf{{\psi}} \in H^1_0(D)\times H^1_d(D),\\ \label{lag2} \partial_{\mathbf{{\varphi}}} \mathcal{L}(\Omega^+, \mathbf{u} ,\mathbf{p})(\hat \mathbf{{\varphi}}) &= 0\mbox{ for all } \hat \mathbf{{\varphi}} \in H^1_d(D)\times H^1_d(D) . \end{align} Writing \eqref{lag1} explicitly, one can easily obtain the state equations \eqref{varun} and \eqref{varud}. Then \eqref{lag2} yields the equation for the adjoint $p_d$: $$\partial_{\varphi_d} \mathcal{L}(\Omega^+,\mathbf{u},\mathbf{p})(\hat{\varphi}_d) = 0,\mbox{ for all } \hat{\varphi}_d \in H^1_d (D),$$ which leads to \begin{align}\label{eq:lambda} &\int_D \sigma \nabla p_d\cdot \nabla \hat{\varphi}_d \, dx = - \alpha_1 \int_D (u_d - u_n) \hat{\varphi}_d \, dx -\int_{\Gamma_n} -\sigma^-\partial_np_d\hat{\varphi}_d \, ds \quad \text{ for all } \hat{\varphi}_d \in H^1_d (D) \end{align} which is the variational formulation for the adjoint state $p_d$. This yields the following variational formulation when test functions are restricted to $H^1_0(D)$: \begin{align}\label{eit4b.0} & \int_D \sigma \nabla p_d\cdot \nabla \widetilde\varphi \, dx = -\alpha_1 \int_D (u_d - u_n) \widetilde\varphi\, dx\quad \text{ for all } \widetilde\varphi\in H^1_0(D). 
\end{align} If we use Assumption~\ref{assump1}, we get $p_d\in PH^k(D)$ and using Green's formula in $\Omega^+$ and $\Omega^-$ with $\widetilde\varphi\in C_c^\infty(\Omega^+)$ and $\widetilde\varphi \in C_c^\infty(\Omega^-)$, we obtain the strong form \begin{align} \label{eit4b.1}-\operatorname{div}(\sigma \nabla p_d) & = -\alpha_1(u_d - u_n)\mbox{ in }\Omega^+ \mbox{ and } \Omega^-. \end{align} Hence using now Green's formula in \eqref{eq:lambda} and using \eqref{eit4b.1} gives \begin{align*} & \int_{\Gamma^+} [\sigma \partial_n p_d]_{\Gamma^+} \hat{\varphi}_d \, ds + \int_{\Gamma_n} (\sigma\partial_n p_d - \sigma^-\partial_np_d) \hat{\varphi}_d\, ds =0 \quad \text{ for all } \hat{\varphi}_d\in H^1_d (D), \end{align*} where $[\sigma \partial_n p_d]_{\Gamma^+} = \sigma^+ \partial_n p_d^+ - \sigma^- \partial_n p_d^-$ is the jump of $\sigma \partial_n p_d$ across $\Gamma^+$. Since the integral on $\Gamma_n$ above vanishes and $p_d\in H^1_0(D)$, we obtain \begin{align} \label{eit4b.3} p_d & = 0 \mbox{ on }\Gamma,\\ \label{eit4b.4} \sigma^+ \partial_n p_d^+ & = \sigma^- \partial_n p_d^- \mbox{ on }\Gamma^+. \end{align} Finally solving $$\partial_{\varphi_n} \mathcal{L}(\Omega^+,\mathbf{u},\mathbf{p})(\hat{\varphi}_n) = 0, \mbox{ for all } \hat{\varphi}_n\in H^1_d (D),$$ leads to the variational formulation \begin{align}\label{eit5} & \int_D -\alpha_1(u_d - u_n)\hat{\varphi}_n +\sigma \nabla p_n\cdot \nabla \hat{\varphi}_n + \int_{\Gamma_n} \alpha_2(u_n - h )\hat{\varphi}_n =0 \end{align} for all $\hat{\varphi}_n \in H^1_d (D)$. 
Similarly as for $p_d$ we get, under Assumption~\ref{assump1}, $p_n\in PH^k(D)$ and the strong form \begin{align} \label{eit4.3} -\operatorname{div}(\sigma \nabla p_n) & = \alpha_1(u_d - u_n)\mbox{ in }\Omega^+ \mbox{ and } \Omega^-,\\ \label{eit4.4} \sigma\partial_n p_n & = -\alpha_2(u_n - h )\mbox{ on }\Gamma_n,\\ \label{eit4.5} p_n & = 0\mbox{ on }\Gamma_d,\\ \sigma^+\partial_np_n^+ & =\sigma^-\partial_n p_n^- \text{ on } {\Gamma^+}, \quad p_n^+=p_n^- \text{ on } {\Gamma^+}. \end{align} \subsection{Shape derivatives} Let us consider a transformation $\Phi_t^{\theta}$ defined by \eqref{Vxt} with $\theta\in C^1_c(D,\mathbf{R}^d)$. Note that $\Phi_t^{\theta}(D) = D$ but in general $\Phi_t^{\theta}(\Omega^+) \neq \Omega^+$. We use the notation $\Omega^+(t):= \Phi_t^{\theta}(\Omega^+)$. Our aim is to show the shape differentiability of $J(\Omega^+)$ with the help of Theorem~\ref{thm:sturm}. Following the methodology described in Section \ref{section2} we introduce \begin{equation}\label{eq:G_on_the_moved_domain} G(t,\mathbf{{\varphi}},\mathbf{{\psi}}):=\mathcal{L}(\Omega^+(t),\mathbf{{\varphi}}\circ\Phi_t^{-1},\mathbf{{\psi}}\circ\Phi_t^{-1}). \end{equation} We proceed to the change of variables $\Phi_t(x)=y$ in \eqref{eq:G_on_the_moved_domain} to get the canonical form \eqref{G_lag}. First of all let us denote $f_{\Omega^+(t)}=f^+\chi_{\Omega^+(t)}+f^-\chi_{\Omega^-(t)}$ and $\sigma_{\Omega^+(t)}=\sigma^+\chi_{\Omega^+(t)}+\sigma^-\chi_{\Omega^-(t)}$; recall that $\sigma^\pm$ are scalars but $f^{\pm}$ are functions. 
Then note that the change of variables $\Phi_t(x)=y$ leads to considering the following functions inside the integrals: \begin{align*} \sigma_{\Omega^+(t)}\circ \Phi_t & =\sigma^+\chi_{\Omega^+(t)}\circ \Phi_t+\sigma^-\chi_{\Omega^-(t)}\circ \Phi_t = \sigma^+\chi_{\Omega^+}+\sigma^-\chi_{\Omega^-}= \sigma,\\ f_{\Omega^+(t)}\circ \Phi_t & =f^+\circ \Phi_t\, \chi_{\Omega^+(t)}\circ \Phi_t +f^-\circ \Phi_t\, \chi_{\Omega^-(t)}\circ \Phi_t =f^+\circ \Phi_t\, \chi_{\Omega^+} +f^-\circ \Phi_t\, \chi_{\Omega^-}. \end{align*} Thus we introduce the function $\tilde f_t := f^+\circ \Phi_t\, \chi_{\Omega^+} +f^-\circ \Phi_t\, \chi_{\Omega^-}$. Now we obtain the canonical form \eqref{G_lag} for the Lagrangian: \begin{align} \label{G_Omega} G(t,\mathbf{{\varphi}},\mathbf{{\psi}}) & = a(t,\mathbf{{\varphi}},\mathbf{{\psi}}) + b(t,\mathbf{{\varphi}}), \end{align} with \begin{align*} a(t,\mathbf{{\varphi}},\mathbf{{\psi}}) :=& \int_D \sigma A(t)\nabla \varphi_d\cdot\nabla \psi_d -\tilde f_t \psi_d \xi(t) - \int_{\Gamma_n} \sigma^- \partial_n\psi_d(\varphi_d - h) \\ \notag & + \int_D \sigma A(t)\nabla \varphi_n\cdot\nabla \psi_n -\tilde f_t \psi_n \xi(t) - \int_{\Gamma_n} g\psi_n,\\ b(t,\mathbf{{\varphi}}) :=& \frac{\alpha_1}{2}\int_D (\varphi_d - \varphi_n)^2 \xi(t) + \frac{\alpha_2}{2}\int_{\Gamma_n} (\varphi_n - h)^2 \end{align*} where the Jacobian $\xi(t)$ and $A(t)$ are defined as $ \xi(t) := \det (D\Phi_t)$ and $A(t) := \xi(t)D\Phi_t^{-1}D\Phi_t^{-T}$. In the previous expression \eqref{G_Omega}, one should note that the integrals on subsets of $\Gamma$ are unchanged since $\Phi_t^{-1} = I$ on $\Gamma$. Thus we have $\Phi_t^{\theta}(D) = D$; however, the terms inside the integrals on $D$ are modified by the change of variables since $\Phi_t^{-1}\neq I$ inside $D$. 
Note that \begin{align*} J(\Omega^+(t))=G(t,\mathbf{u}^t,\mathbf{{\psi}}),\text{ for all } \mathbf{{\psi}}\in H^1_0 (D)\times H^1_d (D), \end{align*} where $\mathbf{u}^t=(u_n^t,u_d^t):=(u_{n,t}\circ\Phi_t,u_{d,t}\circ\Phi_t)$ and $u_{n,t},u_{d,t}$ solve \eqref{varun},\eqref{varud}, respectively, with the domains $\Omega^+$ and $\Omega^-$ replaced by $\Omega^+(t)$ and $\Omega^-(t)$. As one can verify by applying a change of variables to \eqref{varun} and \eqref{varud} on the domain $\Omega^+(t)$, the functions $u_n^t,u_d^t$ satisfy \begin{align}\label{varun_t} \int_D \sigma A(t)\nabla u_n^t \cdot \nabla \hat{\psi}_n &= \int_D \tilde f_t\hat{\psi}_n + \int_{\Gamma_n} g\hat{\psi}_n \mbox{ for all } \hat{\psi}_n\in H^1_d (D), \\ \label{varud_t} \int_D \sigma A(t) \nabla u_d^t \cdot \nabla \hat{\psi}_d &= \int_D \tilde f_t \hat{\psi}_d \mbox{ for all } \hat{\psi}_d\in H^1_0(D) . \end{align} Applying standard estimates for elliptic partial differential equations and the fact that $A(t)$ is uniformly bounded from below and above for $t$ small enough, we infer from equations \eqref{varun_t},\eqref{varud_t} the existence of constants $C_1,C_2>0$ independent of $t$ and $\tau>0$ such that for all $t\in [0,\tau]$: \begin{equation}\label{apriori_u_D_u_N} \|u_d^t\|_{H^1(D)}\le C_1,\quad \text{ and } \|u_n^t\|_{H^1(D)}\le C_2. \end{equation} From these estimates, we get $u_d^t\rightharpoonup w_d \text{ and } u_n^t\rightharpoonup w_n \text{ in } H^1(D)\mbox{ as }t\to 0.$ Passing to the limit in \eqref{varun_t} and \eqref{varud_t} yields $w_d=u_d$ and $w_n=u_n$ by uniqueness. 
Let us now check Assumption \ref{amp:gateaux_diffbar_G} and the conditions of Theorem~\ref{thm:sturm} for the function $G$ given by \eqref{G_Omega} and the Banach spaces $E = H^1_d (D)\times H^1_d (D)$ and $F = H^1_0 (D)\times H^1_d (D)$. First of all, equation \eqref{eq:state_G} admits a unique solution $\mathbf{u}^t := (u_n^t,u_d^t)$ for each $t\in[0,\tau]$. The conditions of Assumption \ref{amp:gateaux_diffbar_G} are readily satisfied and also the function $G$ is affine with respect to $\psi = (\psi_d,\psi_n)$. Regarding the conditions of Theorem~\ref{thm:sturm}, first note that applying Lax--Milgram's lemma, we check that both equations \eqref{eq_bar_p_t_1} and \eqref{eq_bar_p_t_2} have indeed a unique solution in $F = H^1_0 (D)\times H^1_d (D)$: \begin{align} \label{eq_bar_p_t_1} &\int_D \sigma A(t)\nabla p_d^t\cdot\nabla \hat{\varphi}_d +\frac{\alpha_1}{2}\int_D \xi(t) (u_d^t+u_d - (u_n^t+u_n)) \hat{\varphi}_d - \int_{\Gamma_n} \sigma^-\partial_n p_d^t\hat{\varphi}_d =0,\\ \label{eq_bar_p_t_2} &\int_D \sigma A(t)\nabla p_n^t\cdot\nabla \hat{\varphi}_n-\frac{\alpha_1}{2}\int_D \xi(t)(u_d^t+u_d - (u_n^t+u_n)) \hat{\varphi}_n + \frac{\alpha_2}{2}\int_{\Gamma_n} (u_n^t+u_n - 2h)\hat{\varphi}_n =0, \end{align} for all $\hat{\varphi}_d$, $\hat{\varphi}_n$ in $H^1_d (D)$. Therefore there exists a unique solution $\mathbf{p}^t= (p_n^t,p_d^t)$ of the averaged adjoint equation \eqref{averated_}. Now we check Assumption (H1). 
Testing \eqref{eq_bar_p_t_1} with $\hat{\varphi}_d=p_d^t$ and \eqref{eq_bar_p_t_2} with $\hat{\varphi}_n=p_n^t$, we conclude by an application of H\"older's inequality together with \eqref{apriori_u_D_u_N} the existence of constants $C_1,C_2$ and $\tau>0$ such that for all $t\in [0,\tau]$ \begin{equation*} \|p_d^t\|_{H^1(D)}\le C_1,\quad \text{ and } \|p_n^t\|_{H^1(D)}\le C_2. \end{equation*} We get that for each sequence $t_k$ converging to zero, there exists a subsequence also denoted $t_k$ such that $p_d^{t_k}\rightharpoonup q_d$ and $p_n^{t_k}\rightharpoonup q_n$ for two elements $q_d,q_n\in H^1(D)$. Passing to the limit in \eqref{eq_bar_p_t_1} and \eqref{eq_bar_p_t_2} yields $q_d=p_d$ and $q_n=p_n$ by uniqueness, where $p_d$ and $p_n$ are solutions of the adjoint equations. Since the limit is unique, we have in fact $p_d^{t}\rightharpoonup p_d$ and $p_n^{t}\rightharpoonup p_n$ as $t\to 0$. Finally, differentiating $G$ with respect to $t$ yields \begin{align*} \partial_tG(t,\mathbf{{\varphi}},\mathbf{{\psi}}) & = \frac{\alpha_1}{2}\int_D (\varphi_d - \varphi_n)^2 \xi(t)\operatorname{tr}(D\theta_t D \Phi_t^{-1})\\ &\hspace{-1.5cm} + \int_D \sigma A'(t)\nabla \varphi_d\cdot\nabla \psi_d -\tilde f_t \psi_d \xi(t)\operatorname{tr}(D\theta_t D \Phi_t^{-1}) -\psi_d \widetilde{\nabla} f_t\cdot \theta_t \xi(t)\\ &\hspace{-1.5cm} + \int_D \sigma A'(t)\nabla \varphi_n\cdot\nabla \psi_n - \tilde f_t\psi_n \xi(t)\operatorname{tr}(D\theta_t D \Phi_t^{-1}) -\psi_n \widetilde{\nabla} f_t\cdot \theta_t \xi(t), \end{align*} where $$\widetilde{\nabla} f_t := \nabla f^+\circ \Phi_t \, \chi_{\Omega^+} + \nabla f^-\circ \Phi_t\, \chi_{\Omega^-},\qquad \theta_t = \theta\circ\Phi_t,$$ $$A'(t) = \operatorname{tr}(D\theta_t D\Phi_t^{-1}) A(t) - D\Phi_t^{-T} D\theta_t A(t) -(D\Phi_t^{-T} D\theta_t A(t) )^T$$ and $D\theta_t$ is the Jacobian matrix of $\theta_t$. 
In view of $\theta\in C^1_c(D,\mathbf{R}^d)$, the functions $t\mapsto D\theta_t$ and $t\mapsto \operatorname{tr}( D\theta_t D\Phi_t^{-1})= \operatorname{div}(\theta)\circ \Phi_t$ are continuous on $[0,T]$. Moreover $\varphi_d,\varphi_n,\psi_d,\psi_n$ are in $H^1(D)$ and $f\in PH^1(D)$, so that $\partial_tG(t,\mathbf{{\varphi}},\mathbf{{\psi}}) $ is well-defined for all $t\in [0,T]$. Using the weak convergence of $\mathbf{p}^t$ and the strong differentiability of $t\mapsto A(t)$ and $t\mapsto \xi(t)$ it follows \begin{equation} \lim_{t \searrow 0 } \frac{G( t,\mathbf{u}^0,\mathbf{p}^t ) - G(0,\mathbf{u}^0, \mathbf{p}^t) }{t}=\partial_t G(0,\mathbf{u}^0,\mathbf{p}^0). \end{equation} Thus we have verified all assumptions from Theorem \ref{thm:sturm}. This yields \begin{equation*} dJ(\Omega^+)(\theta)=\dt \left(G(t,\mathbf{u}^t,\mathbf{{\psi}})\right)|_{t=0}=\partial_t G(0,\mathbf{u}^0,\mathbf{p}^0)\text{ for all } \mathbf{{\psi}}\in F= H^1_0 (D)\times H^1_d (D) , \end{equation*} and therefore we have proved the following result. 
\begin{proposition}[distributed shape derivative]\label{domexpr} Let $D\subset\mathbf{R}^d$ be a Lipschitz domain, $\theta\in C^1_c(D,\mathbf{R}^d)$, $f~\in~PH^1(D)$, $g\in H^{-1/2}(\Gamma_n)$, $h\in H^{1/2}(\Gamma_n)$, and let $\Omega^+\subset D$ be an open set; then the shape derivative of $J(\Omega^+)$ is given by \begin{align}\label{volexpr} \begin{split} dJ(\Omega^+)(\theta) & = \int_D \left(\frac{\alpha_1}{2}(u_d - u_n)^2 -f(p_n+p_d)\right)\operatorname{div} \theta \\ &\hspace{-1cm}+ \int_D - (p_d+p_n)\widetilde{\nabla} f\cdot \theta + \sigma A'(0)(\nabla u_d\cdot\nabla p_d + \nabla u_n\cdot\nabla p_n) , \end{split} \end{align} where $\widetilde{\nabla} f := \nabla f^+ \, \chi_{\Omega^+} + \nabla f^-\, \chi_{\Omega^-}$, $A'(0) = (\operatorname{div} \theta) I - D\theta^T - D\theta$, $u_n,u_d$ are solutions of \eqref{varun},\eqref{varud} and $p_n,p_d$ of \eqref{eit5}, \eqref{eq:lambda}. The shape derivative \eqref{volexpr} also has the tensor representation corresponding to \eqref{ea:volume_from}: \begin{align}\label{volexpr2} \begin{split} dJ(\Omega^+)(\theta) & = \int_D \mathbf{S}_1 \cdot D\theta + \mathbf{S}_0\cdot \theta , \end{split} \end{align} where \begin{align*} \mathbf{S}_1 & = -\sigma (\nabla u_d \otimes \nabla p_d + \nabla p_d \otimes \nabla u_d + \nabla u_n \otimes \nabla p_n + \nabla p_n \otimes \nabla u_n) \\ &\quad +\sigma(\nabla u_d\cdot\nabla p_d + \nabla u_n\cdot\nabla p_n)I + \left(\frac{\alpha_1}{2}(u_d - u_n)^2 -f(p_n+p_d)\right)I,\\ \mathbf{S}_0 & = - (p_d+p_n)\widetilde{\nabla} f. \end{align*} \end{proposition} Note that the volume expression of the shape gradient in Proposition \ref{domexpr} has been obtained without any regularity assumption on $\Omega^+$. 
In order to obtain a boundary expression on the interface $\Gamma^+$ we need more regularity of $\Omega^+$, provided by Assumption \ref{assump1}. If it is satisfied, we can apply Corollary \ref{corollary2} to obtain directly the boundary expression of the shape derivative, using mainly the standard tensor relation $(\nabla u_d \otimes \nabla p_d) n = (\nabla p_d\cdot n)\nabla u_d$, which yields Proposition \ref{prop2}. \begin{proposition}[boundary expression]\label{prop2} Under Assumption \ref{assump1} and $\theta\in C^1_c(D,\mathbf{R}^d)$ the shape derivative of $J(\Omega^+)$ is given by \begin{align*} dJ(\Omega^+)(\theta) =& \int_{\Gamma^+} \left[ \sigma(-\partial_n u_d\partial_n p_d - \partial_n u_n\partial_n p_n )\right]_{\Gamma^+} \theta\cdot n \\ & + \int_{\Gamma^+} ( [\sigma]_{\Gamma^+}(\nabla_{\Gamma^+} u_d\cdot\nabla_{\Gamma^+} p_d + \nabla_{\Gamma^+} u_n\cdot \nabla_{\Gamma^+} p_n ) -[f]_{\Gamma^+}(p_n+p_d)) \theta\cdot n. \end{align*} \end{proposition} Note that our results cover and generalize several results that can be found in the literature of shape optimization approaches for EIT, including \cite{MR2329288,MR2536481}. For instance, taking $\alpha_2 = 1$, $\alpha_1 = 0$ in Proposition \ref{prop2} we get $p_d \equiv 0$, which yields the same formula as the one obtained in \cite[p. 533]{MR2329288}. Note also that from a numerical point of view, the boundary expression in Proposition \ref{prop2} is delicate to compute compared to the domain expression in Proposition \ref{domexpr}, for which the gradients of the state and adjoint states can be straightforwardly computed at grid points when using the finite element method for instance. 
The boundary expression, on the other hand, needs here the computation of the normal vector and the interpolation of the gradients on the interface $\Gamma^+$, which requires a precise description of the boundary and introduces an additional error. \section{Level set method}\label{section5} The level set method, originally introduced in \cite{MR965860}, gives a general framework for the computation of evolving interfaces using an implicit representation of these interfaces. The core idea of this method is to represent the boundary of the moving domain $\Omega^+(t)\subset D\subset \mathbf{R}^d$ as the level set of a continuous function $\phi(\cdot,t): D\to\mathbf{R}$. Let us consider the family of domains $\Omega^+(t)\subset D$ as defined in \eqref{domain}. Each domain $\Omega^+(t)$ can be defined as \begin{equation} \Omega^+(t) :=\{x\in D,\ \phi(x,t) < 0\} \end{equation} where $\phi: D\times \mathbf{R}^+ \to \mathbf{R}$ is continuous and called {\it level set function}. Indeed, if we assume $|\nabla\phi(\cdot,t)|\neq 0$ on the set $\{x\in D,\ \phi(x,t) = 0\}$ then we have \begin{equation} \partial \Omega^+(t) = \{x\in D,\ \phi(x,t) = 0\}, \end{equation} i.e. the boundary $\partial \Omega^+(t)$ is the zero level set of $\phi(\cdot,t)$. Let $x(t)$ be the position of a particle on the boundary $\partial \Omega^+(t)$ moving with velocity $\dot{x}(t)=\theta(x(t))$ according to \eqref{Vxt}. 
Differentiating the relation $\phi(x(t),t)=0$ with respect to $t$ yields the Hamilton-Jacobi equation: \begin{equation*} \partial_t\phi (x(t),t)+ \theta(x(t))\cdot \nabla \phi(x(t),t) = 0 \quad \mbox{ in } \partial \Omega^+(t)\times\mathbf{R}^+, \end{equation*} which is then extended to all of $D$ via the equation \begin{equation} \partial_t\phi(x,t) + \theta(x)\cdot \nabla \phi(x,t) = 0 \quad \mbox{ in } D\times\mathbf{R}^+, \label{eq:transport} \end{equation} or alternatively to $U\times\mathbf{R}^+$ where $U$ is a neighbourhood of $\partial \Omega^+(t)$. Traditionally, the level set method has been designed to track smooth interfaces moving along the normal direction to the boundary. Theoretically, this is supported by Theorem \ref{thm:structure_theorem}, i.e. if the domain $\Omega^+(t)$ and the shape gradient are smooth enough then the shape derivative only depends on $\theta\cdot n$ on $\partial\Omega^+(t)$. In this case, we may choose for the optimization a vector field $\theta = \vartheta_n n$ on $\partial\Omega^+(t)$. Then, noting that an extension to $D$ of the unit outward normal vector $n$ to $\Omega^+(t)$ is given by $n =\nabla \phi /|\nabla \phi|$, and extending $\vartheta_n$ to all of $D$, one obtains from \eqref{eq:transport} the level set equation \begin{equation} \partial_t \phi + \vartheta_n |\nabla \phi| = 0 \quad \mbox{ in } D\times\mathbf{R}^+. \label{eq:HJ1} \end{equation} The initial data $\phi(x,0)=\phi_0(x)$ accompanying the Hamilton-Jacobi equation \eqref{eq:transport} or \eqref{eq:HJ1} is chosen as the signed distance function to the initial boundary $\partial \Omega^+(0)$ in order to satisfy the condition $|\nabla \phi_0|\neq 0$ on $\partial\Omega^+(0)$, i.e. 
\begin{equation} \phi_0(x) = \left\{\begin{array}{rl} d(x,\partial \Omega^+(0)), & \mbox{ if } x\in (\Omega^+(0))^c, \\ -d(x,\partial \Omega^+(0)), & \mbox{ if } x\in \Omega^+(0) . \end{array} \right. \end{equation} \subsection{Level set method and domain expression} In the case of the distributed shape derivative, for instance \eqref{volexpr} or \eqref{volexpr2}, $\phi$ is not governed by \eqref{eq:HJ1} but rather by the Hamilton-Jacobi equation \eqref{eq:transport}. Indeed we obtain a descent direction $\theta$ defined in $D$ by solving \eqref{VP_1}, where $dJ(\Omega^+)$ is given by Proposition \ref{domexpr}, which can subsequently be used in \eqref{eq:transport} to compute the evolution of $\phi$. On the other hand, in the usual level set method, one solves a PDE \emph{on the boundary} $\partial\Omega^+$ in an analogous way as for \eqref{VP_1} (for instance using a Laplace-Beltrami operator), and uses the boundary expression from Proposition \ref{prop2} to obtain $\vartheta_n = \theta\cdot n$ on $\partial\Omega^+$. Numerically it is actually more straightforward in many cases to use \eqref{eq:transport} instead of \eqref{eq:HJ1}. Indeed, when using \eqref{eq:HJ1}, $\vartheta_n$ is initially only given on $\partial \Omega^+(t)$ and must be extended to the entire domain $D$ or at least to a narrow band around $\partial \Omega^+(t)$. Therefore it is convenient to use \eqref{eq:transport} with $\theta$ already defined in $D$, as is the case of the distributed shape derivative, which provides an extension to $D$ or to a narrow band around $\partial \Omega^+(t)$. In shape optimization, $\vartheta_n$ usually depends on the solution of one or several PDEs and their gradient.
Since the boundary $\partial\Omega^+(t)$ in general does not match the grid nodes where $\phi$ and the solutions of the partial differential equations are defined in the numerical application, the computation of $\vartheta_n$ requires the interpolation on $\partial\Omega^+(t)$ of functions defined at the grid points only, complicating the numerical implementation and introducing an additional interpolation error. This is an issue in particular for interface problems where $\vartheta_n$ is the jump of a function across the interface, as in Proposition \ref{prop2}, which requires multiple interpolations and is error-prone. In the distributed shape derivative framework $\theta$ only needs to be defined at grid nodes. \subsection{Discretization of the level set equation} Let $D$ be the unit square $D=(0,1)\times(0,1)$ to fix ideas. For the discretization of the Hamilton-Jacobi equation \eqref{eq:transport}, we first define the mesh grid corresponding to $D$. We introduce the nodes $P_{ij}$ whose coordinates are given by $(i\Delta x, j \Delta y)$, $1\leq i,j\leq N$, where $\Delta x$ and $\Delta y$ are the discretization steps in the $x$ and $y$ directions, respectively. Let us also write $t^k = k\Delta t$ for the discrete time, $k\in \mathbb{N}$, where $\Delta t$ is the time step. We seek an approximation $\phi_{ij}^k \simeq \phi(P_{ij},t^k)$. In the usual level set method, the level set equation \eqref{eq:HJ1} is discretized using an explicit upwind scheme proposed by Osher and Sethian \cite{MR1939127,MR965860,MR1700751}. This scheme applies to the specific form \eqref{eq:HJ1} but is not suited to discretize \eqref{eq:transport}, which is required for our application. Equation \eqref{eq:transport} is of the form \begin{equation} \partial_t\phi + H(\nabla \phi)= 0 \quad \mbox{ in } D\times\mathbf{R}^+, \end{equation} where $ H(\nabla \phi) := \theta\cdot \nabla \phi $ is the so-called Hamiltonian.
We use the Local Lax-Friedrichs flux originally conceived in \cite{MR1111446}, which reduces in our case to: \[\hat H^{LLF}(p^-,p^+,q^-,q^+) = H\left(\frac{p^- + p^+}{2},\frac{q^- +q^+}{2}\right) -\frac{1}{2} (p^+-p^-)\alpha^x -\frac{1}{2}(q^+-q^-)\alpha^y \] where $\alpha^x = |\theta_x|$, $\alpha^y = |\theta_y|$, $\theta = (\theta_x,\theta_y)$ and \begin{align*} p^- &= D_x^{-}\phi_{ij}=\dfrac{\phi_{ij}-\phi_{i-1,j}}{\Delta x}, & p^+ = D_x^{+}\phi_{ij}=\dfrac{\phi_{i+1,j}-\phi_{ij}}{\Delta x},\\ q^- &= D_y^{-}\phi_{ij}=\dfrac{\phi_{ij}-\phi_{i,j-1}}{\Delta y}, & q^+ = D_y^{+}\phi_{ij}=\dfrac{\phi_{i,j+1}-\phi_{ij}}{\Delta y} \end{align*} are the backward and forward approximations of the $x$-derivative and $y$-derivative of $\phi$ at $P_{ij}$, respectively. Using a forward Euler time discretization, the numerical scheme corresponding to \eqref{eq:transport} is \begin{equation} \phi_{ij}^{k+1}=\phi_{ij}^k - \Delta t \ \hat H^{LLF}(p^-,p^+,q^-,q^+). \end{equation} For numerical accuracy, the solution of the level set equation \eqref{eq:transport} should not be too flat or too steep. This is fulfilled for instance if $\phi$ is the distance function, i.e.\ $|\nabla \phi| = 1$. Even if one initializes $\phi$ using a signed distance function, the solution $\phi$ of the level set equation \eqref{eq:transport} does not generally remain close to a distance function. We may occasionally perform a reinitialization of $\phi$ by solving a parabolic equation up to the stationary state; see \cite{MR2356899,MR2459656,MR1723321}. Although in the level set method this reinitialization is standard, in the case of the distributed shape gradient, we observe experimentally that the level set function $\phi$ stays close to a distance function during the iterations and we do not need to reinitialize.
The regularization of the shape gradient could explain this observed stability of the level set function. The computational efficiency of the level set method can be improved by using the so-called ``narrow band'' approach introduced in \cite{MR1329634}, which consists in computing and updating the level set function only on a thin region around the interface. This allows to reduce the complexity of the problem to $N\log(N)$ instead of $N^2$ in two dimensions. In this paper we do not implement this approach but we mention that it could also be applied to the distributed shape derivative approach and equation \varepsilonqref{eq:transport} by taking $\theta$ with a support in a narrow band around the moving interface, which can be achieved by choosing the appropriate space $\textbf{E}$ in \varepsilonqref{eq:bilinear}. \section{Application and numerical results}\label{section6} \subsection{Electrical impedance tomography} In this section we give numerical results for the problem of electrical impedance tomography presented in Section \ref{sec:eit}, precisely we look for an approximate solution of the shape optimization problem \varepsilonqref{EIT-SO}. Using the notations of Section \ref{sec:eit} we take $D = (0,1)\times (0,1)$ and $\Gamma_d = \varepsilonmptyset$, i.e. we have measurements on the entire boundary $\Gamma$. For easiness of implementation, we consider a slightly different problem than the one in Section \ref{sec:eit}. Denote $\Gamma_{t}$, $\Gamma_{b}$, $\Gamma_{l}$ and $\Gamma_{r}$ the four sides of the square, where the indices $t,b,l,r$ stands for top, bottom, left and right, respectively. 
We consider the following problems: find $u_n\in H^1_{tb}(D)$ such that \begin{equation}\label{varun-b} \int_D \sigma \nabla u_n \cdot \nabla \varphi = \int_D f\varphi + \int_{\Gamma_{l}\cup\Gamma_{r}} g\varphi\ \mbox{ for all }\ \varphi\in H^1_{0,tb}(D) \end{equation} and find $u_d\in H^1_{lr}(D)$ such that \begin{equation}\label{varud-b} \int_D \sigma \nabla u_d \cdot \nabla \varphi = \int_D f\varphi + \int_{\Gamma_{t}\cup\Gamma_{b}} g\varphi\ \mbox{ for all }\ \varphi\in H^1_{0,lr}(D) \end{equation} where \begin{align*} H^1_{tb}(D)&:=\{v\in H^1(D)\ |\ \; v=h\mbox{ on }\Gamma_t \cup \Gamma_b\}, \\ H^1_{lr}(D)&:=\{v\in H^1(D)\ |\ \; v=h\mbox{ on }\Gamma_l \cup \Gamma_r\}, \\ H^1_{0,tb}(D)&:=\{v\in H^1(D)\ |\ \; v=0\mbox{ on }\Gamma_t \cup \Gamma_b\}, \\ H^1_{0,lr}(D)&:=\{v\in H^1(D)\ |\ \; v=0\mbox{ on }\Gamma_l \cup \Gamma_r\}. \end{align*} In our experiments we choose $f\equiv 0$. The results of Section \ref{sec:eit} can be straightforwardly adapted to equations \eqref{varun-b}, \eqref{varud-b}. We use the software package FEniCS for the implementation; see \cite{fenics:book}. The domain $D$ is meshed using a regular grid of $128\times 128$ elements and we describe the evolution of the interface $\Gamma^+$ using the level set method from Section \ref{section5}. The conductivity values are set to $\sigma_0 = 1$ and $\sigma_1 = 10$. We obtain measurements $h_i$ corresponding to fluxes $g_i$, $i=1,\dots,I$, by taking the trace on $\Gamma$ of the solution of a Neumann problem where the fluxes are equal to $g_i$. To simulate real noisy EIT data, the measurements $h_i$ are corrupted by adding a normal Gaussian noise with mean zero and standard deviation $\delta\,\|h_i\|_\infty$, where $\delta$ is a parameter.
The noise level is computed as \begin{equation}\label{noise_compute} \mathit{noise} = \frac{\sum_{i=1}^I\|h_i - \tilde h_i\|_{L^2(\Gamma)}}{\sum_{i=1}^I\|h_i\|_{L^2(\Gamma)}} \end{equation} where $\tilde h_i$ is the noisy measurement and $h_i$ the synthetic measurement without noise on $\Gamma$. We use a variation of the functional \eqref{eit3.1}, i.e.\ in our context: \begin{align}\label{funcJ_num} J(\Omega^+) &= \sum_{i=1}^I \mu_i \int_\Omega \frac{1}{2} (u_{d,i}(\Omega^+) - u_{n,i}(\Omega^+))^2 , \end{align} where $u_{d,i}$ and $u_{n,i}$ correspond to the different fluxes $g_i$. Here the coefficients $\mu_i$ are weights associated to the fluxes $g_i$. In our experiments we choose the weights $\mu_i$ such that each term of the sum in \eqref{funcJ_num} is equal to $1$ at initialization in order to have a well-distributed influence of each term. Practically, the $\mu_i$ are thus calculated during the first iteration. We use the distributed shape derivative $dJ(\Omega^+)$ from Proposition \ref{domexpr}. We obtain a descent direction by solving \eqref{VP_1} with $\textbf{E}$ a finite dimensional subspace of $H^1_0(D)$ and $\mathcal B(v,w) = \int_D D v \cdot D w.$ We choose $\textbf{E}$ to be the space of linear Lagrange elements. Since we use a gradient-based method we implement an Armijo line search to adjust the time-stepping. The algorithm is stopped when the decrease of the functional becomes insignificant, practically when the following stopping criterion is repeatedly satisfied: \[J(\Omega^+_{k}) - J(\Omega^+_{k+1}) < \gamma (J(\Omega^+_{0}) - J(\Omega^+_{1})) \] where $\Omega^+_{k}$ denotes the $k$-th iterate of $\Omega^+$. We take $\gamma = 5\cdot 10^{-5}$ in our tests.
\begin{figure}[ht] \begin{center} \includegraphics[width=0.32\textwidth]{./initialization01} \includegraphics[width=0.32\textwidth]{./reconstruction00} \includegraphics[width=0.32\textwidth]{./reconstruction003}\\ \includegraphics[width=0.32\textwidth]{./reconstruction01} \includegraphics[width=0.32\textwidth]{./reconstruction02} \includegraphics[width=0.32\textwidth]{./reconstruction05} \varepsilonnd{center} \caption{Reconstruction (continuous contours) of two ellipses (dashed contours) with different noise levels and using three measurements. From left to right and top to bottom: initialization (continuous contours - top left), $0\%$ noise (367 iterations), $0.43\%$ noise (338 iterations), $1.44\%$ noise (334 iterations), $2.83\%$ noise (310 iterations), $7\%$ noise (356 iterations).}\label{rec_twoellipses_noise} \varepsilonnd{figure} In Figure \ref{rec_twoellipses_noise} we compare the reconstruction for different noise levels computed using \varepsilonqref{noise_compute}. We take in this example $I=3$, i.e. we use three fluxes $g_i$, $i=1,2,3$, defined as follows: \begin{align*} g_1 &= 1\mbox{ on }\Gamma_l\cup\Gamma_r \mbox{ and } g_1 = -1\mbox{ on }\Gamma_t\cup\Gamma_b, \\ g_2 &= 1\mbox{ on }\Gamma_l\cup\Gamma_t \mbox{ and } g_2 = -1\mbox{ on }\Gamma_r\cup\Gamma_b,\\ g_3 &= 1\mbox{ on }\Gamma_l\cup\Gamma_b \mbox{ and } g_3 = -1\mbox{ on }\Gamma_r\cup\Gamma_t. \varepsilonnd{align*} Without noise, the reconstruction is very close to the true object and degrades as the measurements become increasingly noisy, as is usually the case in EIT. However, the reconstruction is quite robust with respect to noise considering that the problem is severely ill-posed. We reconstruct two ellipses and initialize with two balls placed at the wrong location. The average number of iterations until convergence is around $340$ iterations. In Figure \ref{rec_three_inclusions} we reconstruct three inclusions this time using $I=7$ different measurements, with $1.55\%$ noise. 
The reconstruction is close to the true inclusion and is a bit degraded due to the noise. Figure \ref{rec_three_inclusions2} shows the convergence history of the cost functional in log scale for this example. Our algorithm gives good results in comparison to existing results in the literature using level set methods to solve the EIT problem. In \cite{MR2132313} the EIT problem has been treated numerically using a level set method, which is not based on the use of shape derivatives but on the differentiation of a smooth approximation of the Heaviside function to represent domains. In \cite{MR2536481} the level set method using the boundary expression of the shape derivative is used based on equation \varepsilonqref{eq:HJ1}. Our algorithm converges fast in comparison to \cite{MR2132313,MR2536481}: convergence occurs after around 300 iterations. In \cite{MR2132313} convergence occurs between 200 iterations for one inclusion and up to 50000 iterations for two inclusions. In \cite{MR2536481} convergence occurs after 2000 or 10000 iterations on two examples with three inclusions. Concerning measurements we obtain good reconstruction of two inclusions with $I=3$ and three inclusions with $I=7$, while in \cite{MR2132313} sets of $4,12,28$ and $60$ measurements are used but usually $60$ measurements are required for complicated shapes such as two inclusions. In \cite{MR2536481}, $60$ measurements are used. Nevertheless, our results are not directly comparable since the conductivities are unknown in \cite{MR2132313, MR2536481}, which makes the inverse problem harder and might explain the slower convergence. Also, the reconstructed shapes are not the same although the complexity of the unknown shapes is comparable since we also consider two and three inclusions as in \cite{MR2132313, MR2536481}. Only an exact comparison using the same problem, test case, initialization, noise level, number and type of measurements could allow to conclude. 
\begin{figure}[ht] \begin{center} \includegraphics[width=0.4\textwidth]{./initialization_three} \includegraphics[width=0.4\textwidth]{./reconstruction_three} \varepsilonnd{center} \caption{Initialization (continuous contours - left) and reconstruction (continuous contours - right) of two ellipses and a ball (dashed contours) with $1.55\%$ noise (371 iterations) and using seven measurements.}\label{rec_three_inclusions} \varepsilonnd{figure} \begin{figure}[ht] \begin{center} \includegraphics[width=0.5\textwidth]{./cost_history} \varepsilonnd{center} \caption{History of cost functional corresponding to Figure \ref{rec_three_inclusions} in logarithmic scale.}\label{rec_three_inclusions2} \varepsilonnd{figure} {\bf Acknowledgments.} The authors would like to thank the reviewers for their helpful comments. Antoine Laurain acknowledges support from the DFG research center MATHEON (MATHEON - Project C37, Shape/Topology optimization methods for inverse problems). Kevin Sturm acknowledges support from the DFG research center MATHEON, Project C11. \varepsilonnd{document}
\begin{document} \begin{abstract} In this work, we consider an inverse problem of determining a source term for a structural acoustic partial differential equation (PDE) model, comprised of a two- or three-dimensional interior acoustic wave equation coupled to a Kirchhoff plate equation, with the coupling being accomplished across a boundary interface. For this PDE system, we obtain the uniqueness and a stability estimate for the source term from a single measurement of boundary values of the ``structure''. The proof of uniqueness is based on a Carleman estimate. Then, by means of an observability inequality and a compactness/uniqueness argument, we can get the stability result. Finally, an operator theoretic approach gives us the regularity needed for the initial conditions in order to get the desired stability estimate. \end{abstract} \title{Inverse problem for a structural acoustic interaction} \textbf{Keywords}: Structural acoustic interaction, inverse problem, Carleman estimate, continuous observability inequality \section{Introduction and Main Results} \subsection{Statement of the Problem} Let $\Omega$ be an open bounded subset of $\mathbb{R}^2$ or $\mathbb{R}^3$ with smooth boundary $\Gamma$ of class $C^2$, and we designate a nonempty simply connected segment of $\Gamma$ as $\Gamma_0$, so that $\Gamma=\Gamma_0\cup\Gamma_1$ and $\Gamma_0\cap\Gamma_1=\emptyset$.
We consider here the following system comprised of a ``coupling'' between a wave equation and an elastic plate-like equation: \begin{equation}\label{nonlinear} \begin{cases} z_{tt}(x,t) = \Delta z(x,t) + q(x)z(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial z}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ z_t(x,t) = -v_{tt}(x,t)-\Delta^2 v(x,t)-\Delta^2 v_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ v(x,t)=\frac{\partial v}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ \frac{\partial z}{\partial \nu}(x,t)=v_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ z(\cdot,\frac{T}{2}) = z_0(x) & \mbox{in } \Omega \\ z_t(\cdot,\frac{T}{2}) = z_1(x) & \mbox{in } \Omega \\ v(\cdot,\frac{T}{2}) = v_0(x) & \mbox{on } \Gamma_0 \\ v_t(\cdot,\frac{T}{2}) = v_1(x) & \mbox{on } \Gamma_0 \end{cases} \end{equation} where the coupling occurs across the boundary interface $\Gamma_0$. Here $[z_0,z_1,v_0,v_1]$ are the given initial conditions and $q(x)$ is a time-independent unknown coefficient. For this system, notice that the map $\{q\}\to \{z(q),v(q)\}$ is nonlinear, therefore we consider the following \textit{nonlinear inverse problem}: Let $\{z=z(q), v=v(q)\}$ be the weak solution to system \eqref{nonlinear}. Under suitable geometrical conditions on $\Gamma_1=\Gamma\setminus\Gamma_0$, is it possible to retrieve $q(x)$, $x\in\Omega$, from measurement of $v_{tt}(q)$ on $\Gamma_0\times[0,T]$? In other words, is it possible to recover the internal wave potential from the observation of the acceleration of the elastic plate? Our emphasis here on determining the interior acoustic property from observing the acceleration of the elastic wall (a portion of the boundary) is not only due to physical considerations, but also to the implications of such inverse type analysis related to the coupling nature of the structural acoustic flow.
In many structural acoustics applications, the problem of controlling interior acoustic properties is directly correlated with the problem of controlling structural vibrations since the interior noise fields are often generated by the vibrations of an enclosing structure. An important example of this is the problem of controlling interior aircraft cabin noise which is caused by fuselage vibrations that are induced by the low frequency high magnitude exterior noise fields generated by the engines. The primary goal in this paper is to study the uniqueness and stability of the interior time-independent unknown coefficient $q(x)$ in some appropriate function space. More precisely, we consider the follow uniqueness and stability problems: \textbf{Uniqueness in the nonlinear inverse problem} Let $\{z=z(q), v=v(q)\}$ be the weak solution to system $\eqref{nonlinear}$. Under geometrical conditions on $\Gamma_1$, does the acceleration of the wall $v_{tt}|_{\Gamma_0\times [0,T]}$ determine $q(x)$ uniquely? In other words, does $$v_{tt}(q)|_{\Gamma_0\times [0,T]}=v_{tt}(p)|_{\Gamma_0\times [0,T]}$$ imply $q(x)=p(x)$ in $\Omega$? \textbf{Stability in the nonlinear inverse problem} Let $\{z(q), v(q)\}$, $\{z(p), v(p)\}$ be weak solutions to system $\eqref{nonlinear}$ with corresponding coefficients $q(x)$ and $p(x)$. Under geometric conditions on $\Gamma_1$, is it possible to estimate $\displaystyle\|q-p\|_{L^2(\Omega)}$ by some suitable norms of $\displaystyle (v_{tt}(q)-v_{tt}(p))|_{\Gamma_0\times[0,T]}$? 
In order to study the \textit{nonlinear inverse problem}, we first linearize \eqref{nonlinear} and hence we consider the following system: \begin{equation}\label{linear} \begin{cases} w_{tt}(x,t) - \Delta w(x,t) - q(x)w(x,t) = f(x)R(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial w}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ w_t(x,t) = -u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ u(x,t)=\frac{\partial u}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ \frac{\partial w}{\partial \nu}(x,t)=u_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ w(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \\ w_t(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \\ u(\cdot,\frac{T}{2}) = 0 & \mbox{on } \Gamma_0 \\ u_t(\cdot,\frac{T}{2}) = 0 & \mbox{on } \Gamma_0 \end{cases} \end{equation} where $q\in L^{\infty}(\Omega)$ is given, $R(x,t)$ is fixed suitably while $f(x)$ is an unknown time-independent coefficient. For this linearized system, we have the advantage that the map $\{f\}\to \{w(f),u(f)\}$ is linear, hence we consider the corresponding \textit{linear inverse problem}: \textbf{Uniqueness in the linear inverse problem} Let $\{w=w(f), u=u(f)\}$ be the weak solution to system \eqref{linear}. Under geometrical conditions on $\Gamma_1$, does $u_{tt}|_{\Gamma_0\times [0,T]}$ determine $f(x)$ uniquely? In other words, does \[u_{tt}(f)|_{\Gamma_0\times [0,T]}=0\] imply $f(x)=0$ in $\Omega$? \textbf{Stability in the linear inverse problem} Let $\{w=w(f), u=u(f)\}$ be the weak solution to system \eqref{linear}. Under geometrical conditions on $\Gamma_1$, is it possible to estimate $\displaystyle\|f\|_{L^2(\Omega)}$ by some suitable norms of $\displaystyle u_{tt}|_{\Gamma_0\times[0,T]}$? \begin{remark} In our models \eqref{nonlinear} and \eqref{linear} we regard $t=\frac{T}{2}$ as the initial time.
This is not essential, because the change of independent variables $t\to t-\frac{T}{2}$ transforms $t=\frac{T}{2}$ to $t=0$. However, this is convenient for us to apply the Carleman estimate established in \cite{l-t-z}. In fact, one can keep $t=0$ as initial moment by doing an even extension of $w$ and $u$ to $\Omega\times[-T, T]$, but then the Carleman estimate in \cite{l-t-z} needs to be modified accordingly. \end{remark} \subsection{Literature and Motivation} The PDE system $\eqref{nonlinear}$ is an example of a \emph{structural acoustic interaction}. It mathematically describes the interaction of a vibrating beam/plate in an enclosed acoustic field or chamber. In this situation, the boundary segment $\Gamma_1$ represents the ``hard'' walls of the chamber $\Omega$, with $\Gamma_0$ being the flexible portion of the chamber wall. The flow with in the chamber is assumed to be of acoustic wave type, and hence the presence of the wave equation in $\Omega$, satisfied by $z$ in $\eqref{nonlinear}$, coupled to a structural plate equation (in variable $v$) on the flexible boundary portion $\Gamma_0$. This type of PDE models has long existed in the literature and has been an object of intensive experimental and numerical studies at the Nasa Langley Research Center \cite{l-l, b-f-s-s,b-s}. Moreover, recent innovations in smart material technology and the potential applications of these innovations in control engineering design have greatly increased the interest in studying these structural acoustic models. As a result, there has been a lot of recent contributions to the literature deal with various topis; e.g., optimal control, stability, controllability, regularity \cite{a.1,a.2,a-l.1,a-l.2,a-l.3,a-l.4,a-l-r.1,a-l-r.2,c-t,l}. However, to the best of our knowledge, there are no results available in the literature for our inverse type analysis on the model. 
On the other hand, the interest to the inverse problem has been stimulated by the studies of applied problems such as geophysics, medical imaging, scattering, nondestructive testing and so on. These problems are of the determination of unknown coefficients of differential equations which are the functions depending on the point of the space \cite{b,is.1,is.2}. For the uniqueness in multidimensional inverse problem with a single boundary observation, the pioneering paper by Bukhgeim and Klibanov \cite{b-k} provides a methodology based on a type of exponential weighted energy estimate, which is usually referred as the Carleman estimate since the original work \cite{c} by Carleman. After \cite{b-k}, several papers concerning inverse problems by using Carleman estimate have been published (e.g. \cite{is.3,k.1}). In particular, for the inverse hyperbolic type problems that is related to our concern in this paper, there has been intensively studies \cite{i-y,is-y,kh,p-y,y}. However, we mentioned again that there is not any such uniqueness and stability analysis for the structural acoustic models or even in general coupled PDE systems. This motivates the work of the present paper. The usual problem setting for inverse hyperbolic problem includes determining a coefficient from measurements on the whole boundary or part of the boundary, either Dirichlet type \cite{b-k,kh,p-y,y} or Neumann type \cite{i-y,is-y}. Usually the coefficient describes a physical property of the medium (e.g. the elastic modulus in Hooke's law), and the inverse problem is to determine such a property. In our formulation of the inverse problem, we need to determine the time-independent wave potential $q(x)$ by observing the acceleration from the flexible portion of the boundary $\Gamma_0$. 
The mathematical challenge in this problem stems from the fact that we are dealing with the ``coupling'' on the part of the boundary and the main technical difficulty associated with this structure is the lack of the compactness of the resolvent. As a result, the space regularity for the solution of the wave equation component is limited by the structure on the plate and hence this will prevent us going to higher dimension ($n>7$) no matter how smooth the initial data is. This is a distinguished feature of this structural acoustic model comparing to the purely wave equation model as in that case the solution can be as smooth as we want as long as the initial data is smooth enough. In this present paper, we prove the cases where the dimension $n=2$ and $3$ (physical meaningful cases) by using the Carleman estimate for the Neumann problem in \cite{l-t-z} and an operator theoretic formulation. We show that indeed the observation of the acceleration on the plate can determine the potential $q$ under some restrictions on the initial data and some geometrical conditions on the boundary. As we mentioned, the argument will also work for dimension up to $n=7$. \subsection{Main Assumptions and Preliminaries} In this section we state the main geometrical assumptions throughout this paper. These assumptions are essential in order to establish the Carleman estimate stated in section 2. 
Let $\nu=[\nu_1,\cdots,\nu_n]$ be the unit outward normal vector on $\Gamma$, and let $\frac{\partial}{\partial \nu}=\nabla\cdot\nu$ denote the corresponding normal derivative. Moreover, we assume the following geometric conditions on $\Gamma_1=\Gamma\setminus\Gamma_0$: (A.1) There exists a strictly convex (real-valued) non-negative function $\displaystyle d:\overline{\Omega}\to\mathbb{R}^+$, of class $C^3(\overline{\Omega})$, such that, if we introduce the (conservative) vector field $h(x)=[h_1(x),\cdots,h_n(x)]\equiv\nabla d(x), x\in\Omega$, then the following two properties hold true: (i)\begin{equation}\label{assume1} \frac{\partial d}{\partial\nu}\bigg|_{\Gamma_1}=\nabla d\cdot\nu=h\cdot\nu=0; \quad h\equiv\nabla d \end{equation} (ii) the (symmetric) Hessian matrix $\mathcal{H}_d$ of $d(x)$ [i.e., the Jacobian matrix $J_h$ of $h(x)$] is strictly positive definite on $\overline{\Omega}$: there exists a constant $\rho>0$ such that for all $x\in\overline{\Omega}$: \begin{equation}\label{assume2} \mathcal{H}_d(x)=J_h(x)=\left[\begin{array}{ccc} d_{x_1x_1} & \cdots & d_{x_1x_n} \\ \vdots & & \vdots \\ d_{x_nx_1} & \cdots & d_{x_nx_n} \\ \end{array}\right] =\left[\begin{array}{ccc} \frac{\partial h_1}{\partial x_1} & \cdots & \frac{\partial h_1}{\partial x_n} \\ \vdots & & \vdots \\ \frac{\partial h_n}{\partial x_1} & \cdots & \frac{\partial h_n}{\partial x_n} \\ \end{array}\right] \geq\rho I \end{equation} (A.2) $d(x)$ has no critical point on $\overline{\Omega}$: \begin{equation}\label{assume3} \inf_{x\in\Omega}|h(x)|=\inf_{x\in\Omega}|\nabla d(x)|=s>0 \end{equation} \begin{remark} One canonical example is that $\Gamma_1$ is flat (not the case in our problem setting here), where then we can take $d(x)=|x-x_0|^2$, with $x_0$ on the hyperplane containing $\Gamma_1$ and outside $\Omega$, then $h(x)=\nabla d(x)= 2(x-x_0)$ is radial. However, in general $h(x)$ is not necessarily radial.
In particularly in our case where $\Gamma_1$ is convex, the corresponding required $d(x)$ can also be explicitly constructed. For more examples of such function $d(x)$ with different geometries of $\Gamma_1$, we refer to the appendix of \cite{l-t-z}. \end{remark} Next we introduce an abstract operator theoretic formulation associated to $\eqref{nonlinear}$ for which we will need the following facts and definitions: Let the operator $A$ be \begin{equation}\label{operatorA} Az=-\Delta z-q(x)z,\quad D(A)=\{z: \Delta z+q(x)z\in L^2(\Omega), \frac{\partial z}{\partial \nu}\bigg|_{\Gamma}=0\}\end{equation} Notice the lower-order part is a perturbation which preserves generation of the self-adjoint principle part $A_N$ (e.g. \cite{l-t.4}), where $A_N:L^2(\Omega)\supset D(A_N)\rightarrow L^2(\Omega)$ is defined by: \begin{equation}\label{operatorAN} A_Nz=-\Delta z,\quad D(A_N)=\{z: \Delta z\in L^2(\Omega), \frac{\partial z}{\partial \nu}\bigg|_{\Gamma}=0\}\end{equation} Then $A_N$ is positive self-adjoint and \begin{equation}\label{AN1/2} D(A_N^{\frac{1}{2}})=H^1_{\Gamma_1}(\Omega)=\{z: z\in H^1(\Omega), \frac{\partial z}{\partial\nu}=0 \ \text{on} \ \Gamma_1\}\end{equation} Then we define the Neumann map $N$ by: \begin{equation} z=Ng\;\Leftrightarrow\;\begin{equation}gin{cases} \Delta z=0&\text{in}\;\;\;\Omega\\ \frac{\partial z}{\partial\nu} =0 &\text{on}\;\;\;\Gamma_1\\ \frac{\partial z}{\partial \nu}=g&\text{on}\;\;\;\Gamma_0 \end{cases}\end{equation} By elliptic theory \begin{equation}gin{equation} N\in\mathcal{L}(L^2(\Gamma_0),H^{3/2}_{\Gamma_1}(\Omega)) \end{equation} Now we define \begin{equation}gin{equation} \mathcal{B}=A_NN:L^2(\Gamma_0)\rightarrow D(A_N^{1\over2})' \end{equation} via the conjugation $\mathcal{B}^{*}=N^{*}A_N$. 
Then with $v\in L^2(\Gamma)$ and for any $y\in D(A_N^{1\over2})$ we have \begin{equation}gin{multline} -(\mathcal{B}^{*}y,v)_{\Gamma}=-(N^*A_Ny,v)_{\Gamma}=-(A_Ny,Nv)_{\Omega}=(\Delta y,Nv)_{\Omega}\\ =(y,\Delta(Nv))_{\Omega}+(\frac{\partial y}{\partial\nu},Nv)_{\Gamma}-(y,\frac{\partial(Nv)}{\partial\nu})_{\Gamma}=-(y,v)_{\Gamma_0} \end{multline} by Green's theorem, the definition of $N$ and the fact $\displaystyle\frac{\partial y}{\partial\nu}=0$ on $\Gamma_1$ when $y\in D(A_N^{1\over2})$. In other words, we have \begin{equation} N^{*}A_Ny=\begin{equation}gin{cases} y,&\text{on}\;\;\;\Gamma_0\\ 0,&\text{on}\;\;\;\Gamma_1 \end{cases}\;\;\;\;\;\text{for}\;y\in D(A_N^{1\over2})\end{equation} i.e. $\mathcal{B}^{*}=N^{*}A_N$ is the restriction of the trace map from $H^1(\Omega)$ to $H^{\frac{1}{2}}(\Gamma_0)$. Last we set $\textbf{\AA}:L^2(\Gamma_0)\supset D(\textbf{\AA})\rightarrow L^2(\Gamma_0)$ to be \begin{equation}\label{laplace2} \textbf{\AA}=\Delta^2,D(\textbf{\AA})=\{v\in H^2_0(\Gamma_0):\Delta^2v\in L^2(\Gamma_0)\} \end{equation} where $H^2_0(\Gamma_0)=\{v\in H^2(\Omega):v=\frac{\partial v}{\partial\nu}=0 \ \text{on} \ \partial\Gamma_0\}$. 
$\textbf{\AA}$ is self-adjoint, positive definite, and we have the characterization \begin{equation}\label{domainA} D(\textbf{\AA}^{\frac{1}{2}})=H^2_0(\Gamma_0) \end{equation} Now set \begin{equation}\mathcal{A}=\left[\begin{array}{cccc} 0 & I & 0 & 0 \\ -A_N+q & 0 & 0 & \mathcal{B} \\ 0 & 0 & 0 & I \\ 0 & -\mathcal{B}^* & -\textbf{\AA} & -\textbf{\AA} \end{array} \right]\label{A}\end{equation} on the energy space \begin{equation} \begin{split} H & = D(A_N^{\frac{1}{2}})\times L^2(\Omega) \times D(\textbf{\AA}^{\frac{1}{2}}) \times L^2(\Gamma_0) \\ & = H^1_{\Gamma_1}(\Omega)\times L^2(\Omega) \times H^2_0(\Gamma_0) \times L^2(\Gamma_0) \end{split} \end{equation} Then we have the domain of the operator $\mathcal{A}$ \begin{equation}\label{DomA} \begin{split} D(\mathcal{A})& =\{[z_0,z_1,v_0,v_1]^T\in [D(A_N^{\frac{1}{2}})]^2 \times [D(\textbf{\AA}^{\frac{1}{2}})]^2 \ \text{such that} \\ & \qquad -z_0+Nv_1\in D(A_N) \ \text{and} \ v_0+v_1\in D(\textbf{\AA})\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^1_{\Gamma_1}(\Omega), z_1\in H^1_{\Gamma_1}(\Omega), v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \qquad (\Delta+q)z_0\in L^2(\Omega), \frac{\partial z_0}{\partial\nu}=v_1 \ \text{on} \ \Gamma_0 \ \text{and} \ v_0+v_1\in D(\textbf{\AA})\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^2_{\Gamma_1}(\Omega), z_1\in H^1_{\Gamma_1}(\Omega), v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \qquad \frac{\partial z_0}{\partial\nu}=v_1 \ \text{on} \ \Gamma_0 \ \text{and} \ v_0+v_1\in D(\textbf{\AA})\} \end{split} \end{equation} where in the last step we get $z_0\in H^2(\Omega)$ from $q\in L^{\infty}(\Omega)$ and $(\Delta+q)z_0\in L^2(\Omega)$ due to elliptic theory. Therefore with these notations, the original system $\eqref{nonlinear}$ becomes the first-order abstract differential equation \begin{equation} \frac{dy}{dt}=\mathcal{A}y\end{equation} where $y=[z,z_t,v,v_t]^T$.
From semigroup theory, when the initial conditions $[z_0,z_1,v_0,v_1]$ are in $D(\mathcal{A})$ we have that the solution $y$ satisfies \begin{equation} y\in D(\mathcal{A}), \quad y_t\in H \end{equation} \begin{remark} The structure of $\mathcal{A}$ reflects the coupled nature of this structural acoustic system $\eqref{nonlinear}$. One distinguished feature of the system is that the resolvent of $\mathcal{A}$ is not compact. However, it can still be shown that $\mathcal{A}$ generates a $C_0$-semigroup of contractions $\{e^{\mathcal{A}t}\}_{t\geq0}$ which establishes the well-posedness of the system \cite{a-l.2}. \end{remark} \subsection{Main results} For the inverse problems stated in section 1.1, we have the following results: \begin{theorem}\label{th1}(Uniqueness for the linear inverse problem) Under the main assumptions (A.1), (A.2), let \begin{equation}\label{time} T>2\sqrt{\max_{x\in\overline{\Omega}}d(x)} \end{equation} Moreover, let \begin{equation}\label{regR} R\in W^{3,\infty}(Q) \end{equation} and \begin{equation}\label{crucialR} \bigg|R\left(x,\frac{T}{2}\right)\bigg|\geq r_0>0,\qquad \bigg|R_t\left(x,\frac{T}{2}\right)\bigg|\geq r_1>0 \end{equation} for some positive constants $r_0$, $r_1$ and $x\in\overline{\Omega}$. In addition, let \begin{equation}\label{regq} q\in L^{\infty}(\Omega) \end{equation} If the weak solution $\{w=w(f), u=u(f)\}$ to system $\eqref{linear}$ satisfies \begin{equation}\label{h2reg} w,w_t,w_{tt}\in H^{2}(Q)=H^2(0,T;L^2(\Omega))\cap L^2(0,T;H^2(\Omega)) \end{equation} and \begin{equation}\label{linearuniqueness} u_{tt}(f)(x,t)=0, \quad x\in\Gamma_0, t\in[0,T] \end{equation} then $f(x)=0$, $x\in\Omega$.
\end{theorem} \begin{theorem}\label{th2}(Uniqueness for the nonlinear inverse problem) Under the main assumptions (A.1), (A.2), assume $\eqref{time}$ and \begin{equation}\label{regqp} q,p\in L^{\infty}(\Omega) \end{equation} Let either of $z(q)$ and $z(p)$ satisfy \begin{equation}\label{regw} z\in W^{3,\infty}(Q) \end{equation} Moreover, let \begin{equation}\label{crucialz}|z_0(x)|\geq s_0>0,\qquad |z_1(x)|\geq s_1>0 \end{equation} for some positive constants $s_0$, $s_1$ and $x\in\overline{\Omega}$. If the weak solutions $\{z(q), v(q)\}$ and $\{z(p), v(p)\}$ to system $\eqref{nonlinear}$ satisfy \begin{equation}\label{differenceh2} z(q)-z(p), z_t(q)-z_t(p), z_{tt}(q)-z_{tt}(p)\in H^{2}(Q) \end{equation} and \begin{equation}\label{nonlinearuniqueness} v_{tt}(q)(x,t)=v_{tt}(p)(x,t), \quad x\in\Gamma_0, t\in[0,T] \end{equation} then $q(x)=p(x)$, $x\in\Omega$. \end{theorem} \begin{theorem}\label{th3}(Stability for the linear inverse problem) Under the main assumptions (A.1), (A.2), assume $\eqref{time}$, $\eqref{regR}$, $\eqref{crucialR}$ and $\eqref{regq}$. Moreover, let \begin{equation}\label{regRt} R_{t}\in H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega))\end{equation} for some $0<\epsilon<\frac{1}{2}$. Then there exists a constant $C=C(\Omega,T,\Gamma_0,\varphi,q,R)>0$ such that \begin{equation}\label{stability} \|f\|_{L^2(\Omega)}\leq C\left(\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}+\|u_{ttt}\|_{L^2(\Gamma_0\times[0,T])}+\|\Delta^2u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \end{equation} for all $f\in L^2(\Omega)$. \end{theorem} \begin{theorem}\label{th4}(Stability for the nonlinear inverse problem) Under the main assumptions (A.1), (A.2), assume $\eqref{time}$, $\eqref{regqp}$, $\eqref{regw}$ and $\eqref{crucialz}$.
Moreover, let the initial data satisfy the compatibility condition \begin{enumerate} \item When $n=2$, $[z_0,z_1,v_0,v_1]\in D(\mathcal{A}^2)$ where \begin{equation*} \begin{split} D(\mathcal{A}^2) & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^3_{\Gamma_1}(\Omega),z_1\in H^2_{\Gamma_1}(\Omega),v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \qquad\textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1\in D(\textbf{\AA}), \\ & \qquad\frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1 \} \end{split} \end{equation*} \item When $n=3$, $[z_0,z_1,v_0,v_1]\in D(\mathcal{A}^3)$ where \begin{equation*} \begin{split} D(\mathcal{A}^3) & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^{\frac{7}{2}}_{\Gamma_1}(\Omega), z_1\in H^3_{\Gamma_1}(\Omega), v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), \frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1 \\ & \textbf{\AA}(v_0+v_1)+B^{*}z_1+\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]+B^{*}[(-A_N+q)z_0+Bv_1]\in D(\textbf{\AA}) \\ & \textbf{\AA}(v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1)+B^{*}[(-A_N+q)z_0+Bv_1]\in H^2_0(\Gamma_0), \\ & \frac{\partial [(-A_N+q)z_0+Bv_1]}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]-B^{*}[(-A_N+q)z_0+Bv_1]\} \end{split} \end{equation*} \noindent Then there exists a constant $C=C(\Omega,T,\Gamma_0,\varphi,q,p,z_0,z_1,v_0,v_1)>0$ such that \begin{multline}\label{nonlinearstability} \|q-p\|_{L^2(\Omega)}\leq C\left(\|v_{tt}(q)-v_{tt}(p)\|_{L^2(\Gamma_0\times[0,T])}+\|v_{ttt}(q)-v_{ttt}(p)\|_{L^2(\Gamma_0\times[0,T])}\right. \\ \left.
\qquad +\|\Delta^2(v_{tt}(q)-v_{tt}(p))\|_{L^2(\Gamma_0\times[0,T])}\right) \end{multline} for all $q,p\in W^{1,\infty}(\Omega)$ when $n=2$ and all $q,p\in W^{2,\infty}(\Omega)$ when $n=3$. \end{enumerate} \end{theorem} The rest of this paper is organized as follows: In section 2 we give the key Carleman estimate that is used in the proof of the uniqueness results. Based on the same Carleman estimate, we also prove an observability inequality that is needed in section 5. Sections 3 to 6 are devoted to the proofs of our main results Theorems \ref{th1} to \ref{th4}. Some concluding remarks will be given in section 7. \section{Carleman estimate and observability inequality} \subsection{Carleman Estimate} In this section, we state a Carleman estimate result that plays a key role in the proof of our uniqueness theorem. The result is due to \cite{l-t-z}. We first introduce the pseudo-convex function $\varphi(x,t)$ defined by \begin{equation}\label{defphi} \varphi(x,t)=d(x)-c\left(t-\frac{T}{2}\right)^2; \quad x\in\Omega, t\in[0,T]\end{equation} where $T$ is as in $\eqref{time}$ and $0<c<1$ is selected as follows: By $\eqref{time}$, there exists $\delta>0$ such that \begin{equation}\label{timesquare} T^2>4\max_{x\in\overline{\Omega}}d(x)+4\delta \end{equation} For this $\delta>0$, there exists a constant $c$, $0<c<1$, such that \begin{equation}\label{ctsquare} cT^2>4\max_{x\in\overline{\Omega}}d(x)+4\delta\end{equation} Henceforth, with $T$ and $c$ chosen as described above, this function $\varphi(x,t)$ has the following properties: (a) For the constant $\delta>0$ fixed in $\eqref{timesquare}$ and for any $t>0$ \begin{equation}\label{propertya} \varphi(x,t)\leq\varphi(x,\frac{T}{2}),\quad\varphi(x,0)=\varphi(x,T)\leq d(x)-c\frac{T^2}{4}\leq-\delta \end{equation} uniformly in $x\in\Omega$.
(b) There are $t_0$ and $t_1$, with $0<t_0<\frac{T}{2}<t_1<T$, such that we have \begin{equation}\label{propertyb} \min_{x\in\overline{\Omega},t\in[t_0,t_1]}\varphi(x,t)\geq\sigma \end{equation} where $0<\sigma<\min_{x\in\overline{\Omega}}d(x)$. Moreover, if we introduce the space $Q(\sigma)$ that is defined by the following \begin{equation}\label{qsigma} Q{(\sigma)}=\{(x,t)|x\in\Omega, 0\leq t\leq T, \varphi(x,t)\geq\sigma>0\} \end{equation} Then an important property of $Q(\sigma)$ is that (see \cite{l-t-z}): \begin{equation}\label{qsigmaproperty} [t_0,t_1]\times\Omega\subset Q(\sigma)\subset[0,T]\times\Omega\end{equation} Then for the wave equation of the form \begin{equation}\label{carlemaneqn} w_{tt}(x,t)-\Delta w(x,t)-q(x)w(x,t) = F(x,t), \quad x\in\Omega, t\in[0,T]\end{equation} we have the following Carleman-type estimate: \begin{theorem}\label{prop1} Under the main assumptions (A.1) and (A.2), with $\varphi(x,t)$ defined in $\eqref{defphi}$. Let $w\in H^{2}(Q)$ be a solution of the equation $\eqref{carlemaneqn}$ where $q\in L^{\infty}(\Omega)$ and $F\in L^2(Q)$. Then the following one parameter family of estimates hold true, with $\rho>0$, $\beta>0$, for all $\tau>0$ sufficiently large and $\epsilon>0$ small: \begin{multline}\label{carleman} BT|_w+2\int_Qe^{2\tau\varphi}|F|^2dQ+C_{1,T}e^{2\tau\sigma}\int_Qw^2dQ \geq (\tau\epsilon\rho-2C_T)\int_Qe^{2\tau\varphi}\left(w_t^2+|\nabla w|^2\right)dQ\\ +\left(2\tau^3\beta+\mathcal{O}(\tau^2)-2C_T\right)\int_{Q{(\sigma)}}e^{2\tau\varphi}w^2dxdt - c_T\tau^3e^{-2\tau\delta}[E_w(0)+E_w(T)] \end{multline} Here $\delta>0$, $\sigma>0$ are the constants in $\eqref{timesquare}$, $\eqref{propertyb}$, while $C_T$, $c_T$ and $C_{1,T}$ are positive constants depending on $T$ and $d$.
In addition, the boundary terms $BT|_{w}$ are given explicitly by \begin{equation}\label{boundary} \begin{split} BT|_{w}& =2\tau\int_0^T\int_{\Gamma_0}e^{2\tau\varphi}(w_t^2-|\nabla w|^2)h\cdot\nu d\Gamma dt \\ & +8c\tau\int_0^T\int_{\Gamma}e^{2\tau\varphi}(t-\frac{T}{2})w_t\frac{\partial w}{\partial\nu} d\Gamma dt \\ & +4\tau\int_0^T\int_{\Gamma}e^{2\tau\varphi}(h\cdot\nabla w)\frac{\partial w}{\partial\nu} d\Gamma dt \\ & +4\tau^2\int_0^T\int_{\Gamma}e^{2\tau\varphi}\left(|h|^2-4c^2(t-\frac{T}{2})^2+\frac{\alpha}{2\tau}\right)w\frac{\partial w}{\partial\nu} d\Gamma dt \\ & +2\tau\int_0^T\int_{\Gamma_0}e^{2\tau\varphi}\bigg[2\tau^2\left(|h|^2-4c^2(t-\frac{T}{2})^2\right) \\ & \quad +\tau(\alpha-\Delta d-2c)\bigg]w^2h\cdot\nu d\Gamma dt \end{split} \end{equation} where $\alpha=\Delta d-2c-1+k$, with $0<k<1$ a constant, and $E_w$ is defined as follows: \begin{equation}\label{energy} E_w(t)=\int_{\Omega}[w^2(x,t)+w_t^2(x,t)+|\nabla w(x,t)|^2]d\Omega \end{equation} \end{theorem} An immediate corollary of the estimate is the following (Theorem 6.1 in \cite{l-t-z}) \begin{corollary} Under the assumptions in Theorem \ref{prop1}, the following one-parameter family of estimates hold true, for all $\tau$ sufficiently large, and for any $\epsilon>0$ small: \begin{equation}\label{carleman2} \overline{BT}|_{w}+2\int_0^T\int_{\Omega}e^{2\tau\varphi}F^2dQ+const_{\varphi}\int_0^T\int_{\Omega}F^2dQ\geq k_{\varphi}[E_w(0)+E_w(T)] \end{equation} for a constant $k_{\varphi}>0$ while $\overline{BT}|_{w}$ is given by: \begin{multline}\label{btbar} \overline{BT}|_{w}=BT|_{w}+const_{\varphi}\left[\int_0^T\int_{\Gamma}\bigg|\frac{\partial w}{\partial\nu}w_t\bigg|d\Gamma dt+\int_{t_0}^{t_1}\int_{\Gamma_0}w^2d\Gamma_0 dt\right. \\ + \left.
\int_0^T\int_{\Gamma_0}|ww_t|d\Gamma_0 dt\right] \end{multline} \end{corollary} \begin{remark} For the proof of the above Carleman estimate and the corollary, we refer to \cite{l-t-z} and we omit the details here. \end{remark} \subsection{Continuous Observability Inequality} Using the Carleman estimate in the last section, we can prove the following observability inequality: \begin{theorem}\label{theoremobserve} Under the main assumptions (A.1) and (A.2), for the following initial boundary value problem \begin{equation}\label{observe} \begin{cases} w_{tt}(x,t) = \Delta w(x,t) + q(x)w(x,t) & \mbox{in } \Omega \times [0,T] \\ w(\cdot,\frac{T}{2}) = w_0(x) & \mbox{in } \Omega \\ w_t(\cdot,\frac{T}{2}) = w_1(x) & \mbox{in } \Omega \\ \frac{\partial w}{\partial \nu}(x,t) = 0 & \mbox{on } \Gamma_1 \times [0,T] \\ \frac{\partial w}{\partial \nu}(x,t) = g(x,t) & \mbox{on } \Gamma_0 \times [0,T] \end{cases} \end{equation} where $w_0\in H^1(\Omega)$, $w_1\in L^2(\Omega)$, $g\in L^2(\Gamma\times[0,T])$ and $q\in L^{\infty}(\Omega)$. We have the following continuous observability inequality: \begin{equation}\label{observeineq} \|w_0\|^2_{H^1(\Omega)}+\|w_1\|^2_{L^2(\Omega)}\leq C\left(\|w\|^2_{L^2(\Gamma_0\times[0,T])}+\|w_t\|^2_{L^2(\Gamma_0\times[0,T])}+\|g\|^2_{L^2(\Gamma_0\times[0,T])}\right) \end{equation} where $T$ is as in $\eqref{time}$ and $C=C(\Omega,T,\Gamma_0,\varphi,\tau,q)$ is a positive constant. \end{theorem} \begin{proof} For the case when $g=0$, we refer to \cite{l-t-z} where the continuous observability inequality is established for zero Neumann data on the whole boundary. Here we give the proof for the case of general $g\in L^2(\Gamma_0\times[0,T])$, which is still based on the proof in \cite{l-t-z}. We first introduce the following result that is from section 7.2 of \cite{l-t.5}.
\begin{lemma}\label{tangential} Let $w$ be a solution of the equation \begin{equation}\label{wave} w_{tt}(x,t)=\Delta w(x,t)+q(x)w(x,t)+f(x,t) \ \text{in} \ Q=\Omega\times[0,T] \end{equation} with $q\in L^{\infty}(\Omega)$ and $w$ in the following class: \begin{equation} \left\{\begin{aligned} w\in L^2(0,T;H^1(\Omega))\cap H^1(0,T;L^2(\Omega)) \\ w_t, \frac{\partial w}{\partial\nu}\in L^2(0,T;L^2(\Gamma)) \end{aligned}\right. \end{equation} Given $\epsilon>0$, $\epsilon_0>0$ arbitrary, given $T>0$, there exists a constant $C=C(\epsilon,\epsilon_0,T)>0$ such that \begin{multline}\label{tangential1} \int_{\epsilon}^{T-\epsilon}\int_{\Gamma}|\nabla_{tan}w|^2d\Gamma dt\leq C\left(\int_0^T\int_{\Gamma}w_t^2+\left(\frac{\partial w}{\partial\nu}\right)^2d\Gamma dt+\|w\|^2_{L^2(0,T;H^{\frac{1}{2}+\epsilon_0}(\Omega))} \right. \\ \left. +\|f\|^2_{H^{\frac{1}{2}+\epsilon_0}(Q)}\right) \end{multline} \end{lemma} Now to prove $\eqref{observeineq}$, we first establish the following weaker conclusion under the assumptions (A.1) and (A.2) \begin{equation}\label{polluted} E\left(\frac{T}{2}\right)\leq C\left(\int_0^T\int_{\Gamma_0}[w^2+w_t^2+g^2]d\Gamma_0dt+\|w\|^2_{L^2(0,T;H^{\frac{1}{2}+\epsilon_0}(\Omega))}\right) \end{equation} which is the desired inequality $\eqref{observeineq}$ polluted by the interior lower order term $\|w\|$. To see this, we introduce a preliminary equivalence first. Let $u\in H^1(\Omega)$, then the following inequality holds true: there exist positive constants $0<k_1<k_2<\infty$, independent of $u$, such that \begin{equation}\label{equivalence} k_1\int_{\Omega}[u^2+|\nabla u|^2]d\Omega\leq\int_{\Omega}|\nabla u|^2d\Omega+\int_{\tilde{\Gamma_0}}u^2d\Gamma\leq k_2\int_{\Omega}[u^2+|\nabla u|^2]d\Omega \end{equation} where $\tilde{\Gamma_0}$ is any (fixed) portion of the boundary $\Gamma$ with positive measure.
Inequality $\eqref{equivalence}$ is obtained by combining the following two inequalities: \begin{equation}\label{equivalence1} \int_{\Omega}u^2 d\Omega\leq c_1\left[\int_{\Omega}|\nabla u|^2d\Omega+\int_{\tilde{\Gamma_0}}u^2d\Gamma\right];\quad \int_{\tilde{\Gamma_0}}u^2d\Gamma\leq c_2\int_{\Omega}[u^2+|\nabla u|^2]d\Omega \end{equation} The inequality on the left of $\eqref{equivalence1}$ replaces Poincar\'{e}'s inequality, while the inequality on the right of $\eqref{equivalence1}$ stems from (a conservative version of) trace theory. Thus, for $w\in H^2(Q)$, if we introduce \begin{equation} \varepsilon(t)=\int_{\Omega}\left[|\nabla w(t)|^2+w_t^2(t)\right]d\Omega+\int_{\Gamma_0}w^2(t)d\Gamma_0 \end{equation} where $\Gamma_0=\Gamma\setminus\Gamma_1$ is as defined in the main assumptions, then $\eqref{equivalence}$ yields the equivalence \begin{equation}\label{equivalence2} a E(t)\leq\varepsilon(t)\leq b E(t) \end{equation} for some positive constants $a>0$, $b>0$. Now in a standard way, we multiply equation $\eqref{wave}$ by $w_t$ and integrate over $\Omega$. After an application of the first Green's identity, we have \begin{multline}\label{ibp} \frac{1}{2}\frac{\partial}{\partial t}\left(\int_{\Omega}[w_t^2+|\nabla w|^2]d\Omega+\int_{\Gamma_0}w^2d\Gamma_0\right)=\int_{\Gamma}\frac{\partial w}{\partial\nu}w_td\Gamma+\int_{\Gamma_0}ww_td\Gamma_0 \\ +\int_{\Omega}\left[q(x)w+f\right]w_td\Omega \end{multline} Notice that on both sides of (\ref{ibp}) we have added the term $\displaystyle\frac{1}{2}\frac{\partial}{\partial t}\int_{\Gamma_0}w^2d\Gamma_0 =\int_{\Gamma_0}ww_td\Gamma_0$.
Recalling $\varepsilon(t)$ in $\eqref{equivalence2}$, we integrate (\ref{ibp}) over $(s,t)$ and obtain \begin{equation}\label{varepsilonrelation} \varepsilon(t)=\varepsilon(s)+2\int_s^t\left[\int_{\Gamma}\frac{\partial w}{\partial\nu}w_td\Gamma+\int_{\Gamma_0}ww_td\Gamma_0\right]dr+ 2\int_s^t\int_{\Omega}\left[q(x)w+f\right]w_td\Omega dr \end{equation} We apply the Cauchy--Schwarz inequality on $[q(x)w+f]w_t$, invoke the left hand side $\displaystyle E(t)\leq\frac{1}{a}\varepsilon(t)$ of $\eqref{equivalence2}$, and obtain \begin{equation}\label{varepsilont} \varepsilon(t)\leq [\varepsilon(s)+N(T)]+C_T\int^t_s\varepsilon(r)dr \end{equation} \begin{equation}\label{varepsilons} \varepsilon(s)\leq [\varepsilon(t)+N(T)]+C_T\int^t_s\varepsilon(r)dr \end{equation} where we have set \begin{equation}\label{nt} N(T)=\int^T_0\int_{\Omega}f^2dQ+2\int^T_0\int_{\Gamma}\bigg|\frac{\partial w}{\partial\nu}w_t\bigg|d\Gamma dt+2\int^T_0\int_{\Gamma_0}|ww_t|d\Gamma_0dt \end{equation} Gronwall's inequality applied on $\eqref{varepsilont}$, $\eqref{varepsilons}$ then yields for $0\leq s\leq t\leq T$, \begin{equation}\label{gronwall} \varepsilon(t)\leq [\varepsilon(s)+N(T)]e^{C_T(t-s)}; \quad \varepsilon(s)\leq [\varepsilon(t)+N(T)]e^{C_T(t-s)} \end{equation} We consider the following three cases here: \textbf{Case 1:} $0\leq s\leq t\leq \frac{T}{2}$.
In this case we set $t=\frac{T}{2}$ and $s=t$ in the first inequality of $\eqref{gronwall}$; and set $s=0$ in the second inequality of $\eqref{gronwall}$, to obtain \begin{equation}\label{case1} \varepsilon(\frac{T}{2})\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}}; \quad \varepsilon(0)\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}} \end{equation} Summing up these two inequalities in $\eqref{case1}$ yields for $0\leq t\leq \frac{T}{2}$, \begin{equation}\label{desired1} \begin{split} \varepsilon(t)& \geq \frac{\varepsilon(\frac{T}{2})+\varepsilon(0)}{2}e^{-C_T\frac{T}{2}}-N(T) \\ & \geq \frac{a}{2}[E(\frac{T}{2})+E(0)]e^{-C_T\frac{T}{2}}-N(T) \end{split} \end{equation} after recalling the left hand side of the equivalence in $\eqref{equivalence2}$. \textbf{Case 2:} $\frac{T}{2}\leq s\leq t\leq T$. In this case we set $t=T$ and $s=t$ in the first inequality of $\eqref{gronwall}$; and set $s=\frac{T}{2}$ in the second inequality of $\eqref{gronwall}$, to obtain \begin{equation}\label{case2} \varepsilon(T)\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}}; \quad \varepsilon(\frac{T}{2})\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}} \end{equation} Summing up these two inequalities in $\eqref{case2}$ yields for $\frac{T}{2}\leq t\leq T$, \begin{equation}\label{desired2} \begin{split} \varepsilon(t)& \geq \frac{\varepsilon(\frac{T}{2})+\varepsilon(T)}{2}e^{-C_T\frac{T}{2}}-N(T) \\ & \geq \frac{a}{2}[E(\frac{T}{2})+E(T)]e^{-C_T\frac{T}{2}}-N(T) \end{split} \end{equation} after recalling the left hand side of the equivalence in $\eqref{equivalence2}$. \textbf{Case 3:} $0\leq s\leq\frac{T}{2}\leq t\leq T$.
In this case we set $t=0$ and $s=t$ in the first inequality of $\eqref{gronwall}$; and set $s=\frac{T}{2}$ in the second inequality of $\eqref{gronwall}$, to obtain \begin{equation}\label{case3} \varepsilon(0)\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}}; \quad \varepsilon(\frac{T}{2})\leq[\varepsilon(t)+N(T)]e^{C_T\frac{T}{2}} \end{equation} Summing up these two inequalities in $\eqref{case3}$ yields for $\frac{T}{2}\leq t\leq T$, \begin{equation}\label{desired3} \begin{split} \varepsilon(t)& \geq \frac{\varepsilon(\frac{T}{2})+\varepsilon(0)}{2}e^{-C_T\frac{T}{2}}-N(T) \\ & \geq \frac{a}{2}[E(\frac{T}{2})+E(0)]e^{-C_T\frac{T}{2}}-N(T) \end{split} \end{equation} after recalling the left hand side of the equivalence in $\eqref{equivalence2}$. In summary, we get for any $0\leq t\leq T$, \begin{equation}\label{desired} \varepsilon(t)\geq\frac{a}{2}E(\frac{T}{2})e^{-C_T\frac{T}{2}}-N(T) \end{equation} We now apply Corollary 2.2 of the Carleman estimate, except on the interval $[\epsilon, T-\epsilon]$, rather than on $[0,T]$ as in $\eqref{carleman2}$. Thus, since $f=0$, we obtain: \begin{equation}\label{corollary1} \overline{BT}|_{[\epsilon,T-\epsilon]\times\Gamma}\geq k_{\varphi}E(\epsilon) \end{equation} where $\overline{BT}|_{[\epsilon,T-\epsilon]\times\Gamma}$ is given as in (\ref{btbar}). Since we have $\displaystyle\frac{\partial w}{\partial \nu}=0$ on $\Gamma_1\times[0,T]$ and $\displaystyle\frac{\partial w}{\partial \nu}=g(x,t)$ on $\Gamma_0\times[0,T]$ by $\eqref{observe}$, with the additional information that $h\cdot\nu=0$ on $\Gamma_1$ by the assumption (A.1).
Thus, by using the explicit expression $\eqref{boundary}$ for $BT|_{w}$, we have that $\overline{BT}|_{[\epsilon,T-\epsilon]\times\Gamma}$ is given by: \begin{equation}\label{btbar1} \begin{split} \overline{BT}|_{[\epsilon,T-\epsilon]\times\Gamma}& =2\tau\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}e^{2\tau\varphi}(w_t^2-|\nabla w|^2)h\cdot\nu d\Gamma dt \\ & +8c\tau\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}e^{2\tau\varphi}(t-\frac{T}{2})w_tg d\Gamma dt \\ & +4\tau\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}e^{2\tau\varphi}(h\cdot\nabla w)g d\Gamma dt \\ & +4\tau^2\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}e^{2\tau\varphi}\left(|h|^2-4c^2(t-\frac{T}{2})^2+\frac{\alpha}{2\tau}\right)wg d\Gamma dt \\ & +2\tau\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}e^{2\tau\varphi}\left[2\tau^2\left(|h|^2-4c^2(t-\frac{T}{2})^2\right)+\tau(\alpha-\Delta d-2c)\right]w^2h\cdot\nu d\Gamma dt \\ & +const_{\varphi}\left[\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}|gw_t|d\Gamma dt+\int_{t_0}^{t_1}\int_{\Gamma_0}w^2d\Gamma_0 dt+\int_{\epsilon}^{T-\epsilon}\int_{\Gamma_0}|ww_t|d\Gamma_0 dt\right] \end{split} \end{equation} Next, by the right side of the equivalence $\eqref{equivalence2}$ and by $\eqref{desired}$, we obtain \begin{equation}\label{finishing} E(\epsilon)\geq\frac{\varepsilon(\epsilon)}{b}\geq\frac{a}{2b}E\left(\frac{T}{2}\right)e^{-C_T\frac{T}{2}}-2\int_0^T\int_{\Gamma}|gw_t|d\Gamma dt-2\int_0^T\int_{\Gamma_0}|ww_t|d\Gamma_0 dt \end{equation} recalling $N(T)$ in $\eqref{nt}$. We use $\eqref{finishing}$ in $\eqref{corollary1}$. Finally, we invoke estimate (\ref{tangential1}) of Lemma \ref{tangential} on the first and the third integral terms of $\eqref{btbar1}$. This way, we readily obtain $\eqref{polluted}$, which is our desired inequality polluted by $\|w\|^2_{L^2(0,T;H^{\frac{1}{2}+\epsilon_0}(\Omega))}$.
To eliminate this interior lower order term, we can apply the standard compactness/uniqueness argument (e.g.\ \cite{l-t.1}) by invoking the global uniqueness Theorem 7.1 in \cite{l-t-z}. \end{proof} \section{Proof of Theorem \ref{th1}} We let $\bar{w}=\bar{w}(f)=w_{t}(f)$; then from $\eqref{linear}$ we have that $\bar{w}$, $u$ satisfy \begin{equation}\label{lineary} \begin{cases} \bar{w}_{tt}(x,t) - \Delta \bar{w}(x,t) - q(x)\bar{w}(x,t) = f(x)R_{t}(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial\bar{w}}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ \bar{w}(x,t) = -u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ u(x,t)=\frac{\partial u}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ \frac{\partial \bar{w}}{\partial \nu}(x,t)=u_{tt}(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \bar{w}(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \\ \bar{w}_t(\cdot,\frac{T}{2}) = f(x)R(x,\frac{T}{2}) & \mbox{in } \Omega \\ u(\cdot,\frac{T}{2}) = 0 & \mbox{on } \Gamma_0 \\ u_t(\cdot,\frac{T}{2}) = 0 & \mbox{on } \Gamma_0 \end{cases} \end{equation} Under the assumptions in Theorem \ref{th1}, we can apply the Carleman estimate to the wave equation in the system $\eqref{lineary}$ $\bar{w}_{tt}(x,t) - \Delta \bar{w}(x,t) - q(x)\bar{w}(x,t) = f(x)R_{t}(x,t)$ and get \begin{multline*}\label{carlemany} BT|_{\bar{w}}+2\int_Qe^{2\tau\varphi}|fR_{t}|^2 dQ+C_{1,T}e^{2\tau\sigma}\int_Q \bar{w}^2dQ \geq (\tau\epsilon\rho-2C_T)\int_Qe^{2\tau\varphi}[\bar{w}_t^2+|\nabla \bar{w}|^2]dQ \\ + [2\tau^3\beta+\mathcal{O}(\tau^2)-2C_T]\int_{Q{(\sigma)}}e^{2\tau\varphi}\bar{w}^2dxdt - c_T\tau^3e^{-2\tau\delta}[E_{\bar{w}}(0)+E_{\bar{w}}(T)] \end{multline*} where the boundary terms are given explicitly by \begin{equation}\label{boundaryy} \begin{split} BT|_{\bar{w}}& =2\tau\int_0^T\int_{\Gamma_0}e^{2\tau\varphi}(\bar{w}_t^2-|\nabla \bar{w}|^2)h\cdot\nu d\Gamma dt \\ &
+8c\tau\int_0^T\int_{\Gamma}e^{2\tau\varphi}(t-\frac{T}{2})\bar{w}_t\frac{\partial \bar{w}}{\partial\nu} d\Gamma dt \\ & +4\tau\int_0^T\int_{\Gamma}e^{2\tau\varphi}(h\cdot\nabla \bar{w})\frac{\partial \bar{w}}{\partial\nu} d\Gamma dt \\ & +4\tau^2\int_0^T\int_{\Gamma}e^{2\tau\varphi}\left(|h|^2-4c^2(t-\frac{T}{2})^2+\frac{\alpha}{2\tau}\right)\bar{w}\frac{\partial \bar{w}}{\partial\nu} d\Gamma dt \\ & +2\tau\int_0^T\int_{\Gamma_0}e^{2\tau\varphi}\bigg[2\tau^2\left(|h|^2-4c^2(t-\frac{T}{2})^2\right) \\ & \quad + \tau(\alpha-\Delta d-2c)\bigg]\bar{w}^2h\cdot\nu d\Gamma dt \end{split} \end{equation} Since we have the extra observation that $u_{tt}(x,t)=0$ on $\Gamma_0\times[0,T]$, and the initial conditions satisfy $u(x,\frac{T}{2})=u_t(x,\frac{T}{2})=0$ on $\Gamma_0$, by the fundamental theorem of calculus we have $u(x,t)=0$ on $\Gamma_0\times[0,T]$ and hence from the coupling in the system $\eqref{lineary}$ we get \begin{equation}\label{boundaryterm1} \bar{w}(x,t)=-u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t)=0 \ \textrm{on} \ \Gamma_0\times[0,T] \end{equation} and \begin{equation}\label{boundaryterm2} \frac{\partial\bar{w}}{\partial\nu}(x,t)=u_{tt}(x,t)=0 \ \textrm{on} \ \Gamma_0\times[0,T] \end{equation} Plugging $\eqref{boundaryterm1}$ and $\eqref{boundaryterm2}$ into $\eqref{boundaryy}$, note also that $\displaystyle\frac{\partial\bar{w}}{\partial\nu}=0$ on $\Gamma_1\times[0,T]$, therefore we get $BT|_{\bar{w}}\equiv0$. In addition, in view of $\eqref{regR}$, $\eqref{crucialR}$, we have $|fR_{t}|\leq C|f|$ for some positive constant $C$ depending on $R_t$. Moreover, notice that $\displaystyle\lim_{\tau\to\infty}\tau^3e^{-2\tau\delta}=0$.
Hence when $\tau$ is sufficiently large, the above Carleman estimate can be rewritten as the following: \begin{equation}\label{ineq2} C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_t^2+|\nabla \bar{w}|^2]dQ+C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}\bar{w}^2dxdt \leq C\int_Qe^{2\tau\varphi}|f|^2dQ+Ce^{2\tau\sigma} \end{equation} where we set \begin{equation}\label{ctau} C_{1,\tau}=\tau\epsilon\rho-2C_T, \quad C_{2,\tau}=2\tau^3\beta+\mathcal{O}(\tau^2)-2C_T\end{equation} and $C$ denotes generic constants which do not depend on $\tau$; henceforth we will use this notation for the rest of this paper. In addition, note that $f$ is time-independent, so if we differentiate the system $\eqref{lineary}$ in time twice, we can get the following wave equations for $\bar{w}_t$ and $\bar{w}_{tt}$: \begin{equation}\label{eqnfrtt} (\bar{w}_t)_{tt}(x,t) - \Delta \bar{w}_t(x,t) - q(x)\bar{w}_t(x,t) = f(x)R_{tt}(x,t) \end{equation} and \begin{equation}\label{eqnfrttt} (\bar{w}_{tt})_{tt}(x,t) - \Delta \bar{w}_{tt}(x,t) - q(x)\bar{w}_{tt}(x,t) = f(x)R_{ttt}(x,t) \end{equation} Notice the assumptions $\eqref{regR}$, $\eqref{h2reg}$, therefore we have similarly as $\eqref{ineq2}$ the following estimates for the two new systems: \begin{multline}\label{ineq3} C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_{tt}^2+|\nabla \bar{w}_t|^2]dQ+C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}\bar{w}_t^2dxdt \leq C\int_Qe^{2\tau\varphi}|f|^2dQ+Ce^{2\tau\sigma} \end{multline} and \begin{multline}\label{ineq4} C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_{ttt}^2+|\nabla \bar{w}_{tt}|^2]dQ+C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}\bar{w}_{tt}^2dxdt \leq C\int_Qe^{2\tau\varphi}|f|^2dQ+Ce^{2\tau\sigma} \end{multline} where $\tau$ is sufficiently large and $C_{1,\tau}$, $C_{2,\tau}$ are defined as in $\eqref{ctau}$.
Adding $\eqref{ineq2}$, $\eqref{ineq3}$ and $\eqref{ineq4}$ together we then have \begin{multline}\label{ineq5} C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_t^2+\bar{w}_{tt}^2+\bar{w}_{ttt}^2+|\nabla \bar{w}|^2+|\nabla \bar{w}_t|^2+|\nabla \bar{w}_{tt}|^2]dQ \\+ C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt \leq C\left(\int_Qe^{2\tau\varphi}|f|^2 dQ+e^{2\tau\sigma}\right) \end{multline} Again we use the wave equation $\bar{w}_{tt}(x,t)-\Delta \bar{w}(x,t)-q(x)\bar{w}(x,t)=f(x)R_{t}(x,t)$, plugging in the initial time $t=\frac{T}{2}$ and using the zero initial condition $\bar{w}(\cdot,\frac{T}{2})=0$, we have \begin{equation}\label{initial} \bar{w}_{tt}(x,\frac{T}{2})-\Delta\bar{w}(x,\frac{T}{2})-q(x)\bar{w}(x,\frac{T}{2})=\bar{w}_{tt}(x,\frac{T}{2})=f(x)R_t(x,\frac{T}{2})\end{equation} Since $|R_t(x,\frac{T}{2})|\geq r_1>0$ from $\eqref{crucialR}$, therefore we have $|f(x)|\leq C|\bar{w}_{tt}(x,\frac{T}{2})|$ and hence we have the following estimates on $\displaystyle\int_Qe^{2\tau\varphi}|f|^2dQ$: \begin{equation}\label{mainineq} \begin{split} \int_Qe^{2\tau\varphi}|f|^2dQ& =\int_0^T\int_{\Omega}e^{2\tau\varphi(x,t)}|f(x)|^2d\Omega dt \\ & \leq C\int_0^T\int_{\Omega}e^{2\tau\varphi(x,t)}|\bar{w}_{tt}(x,\frac{T}{2})|^2d\Omega dt \\ & \leq C\int_{\Omega}e^{2\tau\varphi(x,\frac{T}{2})}|\bar{w}_{tt}(x,\frac{T}{2})|^2d\Omega \\ & = C\left(\int_{\Omega}\int_0^{\frac{T}{2}} \frac{d}{ds}(e^{2\tau\varphi(x,s)}|\bar{w}_{tt}(x,s)|^2)ds d\Omega+\int_{\Omega}e^{2\tau\varphi(x,0)}|\bar{w}_{tt}(x,0)|^2d\Omega \right) \\ & = C\left(4c\tau\int_{\Omega}\int_0^{\frac{T}{2}}(\frac{T}{2}-s)e^{2\tau\varphi(x,s)}|\bar{w}_{tt}(x,s)|^2dsd\Omega \right.
\\ & \quad + \left.2\int_{\Omega}\int_0^{\frac{T}{2}}e^{2\tau\varphi}|\bar{w}_{tt}(x,s)||\bar{w}_{ttt}(x,s)|dsd\Omega+\int_{\Omega}e^{2\tau\varphi(x,0)}|\bar{w}_{tt}(x,0)|^2d\Omega\right) \\ & \leq C\left(\tau\int_{\Omega}\int^{\frac{T}{2}}_0e^{2\tau\varphi}|\bar{w}_{tt}|^2 dt d\Omega + \int_{\Omega}\int^{\frac{T}{2}}_0 e^{2\tau\varphi}(|\bar{w}_{tt}|^2+|\bar{w}_{ttt}|^2)dt d\Omega \right. \\ & \quad + \left.\int_{\Omega}|\bar{w}_{tt}(x,0)|^2d\Omega \right) \\ & \leq C\left(\tau\int_Q e^{2\tau\varphi}|\bar{w}_{tt}|^2dQ+\int_Qe^{2\tau\varphi}(|\bar{w}_{tt}|^2+|\bar{w}_{ttt}|^2)dQ\right) \\ & = C\left((\tau+1)\int_Q e^{2\tau\varphi}|\bar{w}_{tt}|^2dQ+\int_Qe^{2\tau\varphi}|\bar{w}_{ttt}|^2dQ\right) \end{split} \end{equation} where in the above estimates we use the definition $\eqref{defphi}$ and the property $\eqref{propertya}$ of $\varphi$ as well as the Cauchy--Schwarz inequality. Combining $\eqref{mainineq}$ with $\eqref{ineq5}$, we have \begin{multline}\label{ineq6} C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_t^2+\bar{w}_{tt}^2+\bar{w}_{ttt}^2+|\nabla \bar{w}|^2+|\nabla \bar{w}_t|^2+|\nabla \bar{w}_{tt}|^2]dQ \\+ C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt \leq C\left((\tau+1)\int_Q e^{2\tau\varphi}|\bar{w}_{tt}|^2dQ+\int_Qe^{2\tau\varphi}|\bar{w}_{ttt}|^2dQ+e^{2\tau\sigma}\right) \end{multline} Note that in $\eqref{ineq6}$, the right hand side term $C\int_Qe^{2\tau\varphi}|\bar{w}_{ttt}|^2dQ$ can be absorbed by the term $C_{1,\tau}\int_Qe^{2\tau\varphi}[\bar{w}_t^2+\bar{w}_{tt}^2+\bar{w}_{ttt}^2]dQ$ on the left hand side when $\tau$ is large enough.
In addition, since $e^{2\tau\varphi}<e^{2\tau\sigma}$ on $Q\setminus Q(\sigma)$ by the definition of $Q(\sigma)$, we have \begin{equation}\label{absorb} \begin{split} C(\tau+1)\int_Q e^{2\tau\varphi}|\bar{w}_{tt}|^2dQ & =C(\tau+1)\left(\int_{Q(\sigma)}e^{2\tau\varphi}|\bar{w}_{tt}|^2dtdx+\int_{Q\setminus Q(\sigma)}e^{2\tau\varphi}|\bar{w}_{tt}|^2dxdt\right) \\ & \leq C(\tau+1)\left(\int_{Q(\sigma)}e^{2\tau\varphi}|\bar{w}_{tt}|^2dtdx+e^{2\tau\sigma}\int_{Q\setminus Q(\sigma)}|\bar{w}_{tt}|^2dxdt\right) \\ & \leq C(\tau+1)\int_{Q(\sigma)}e^{2\tau\varphi}|\bar{w}_{tt}|^2dtdx+C(\tau+1)e^{2\tau\sigma} \end{split} \end{equation} Again $C(\tau+1)\int_{Q(\sigma)}e^{2\tau\varphi}|\bar{w}_{tt}|^2dtdx$ on the right hand side of $\eqref{absorb}$ can be absorbed by $C_{2,\tau}\int_{Q(\sigma)}e^{2\tau\varphi}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt$ on the left hand side of $(\ref{ineq6})$ when taking $\tau$ large enough. Therefore $(\ref{ineq6})$ becomes \begin{multline}\label{ineq7} C_{1,\tau}^{'}\int_Qe^{2\tau\varphi}[\bar{w}_t^2+\bar{w}_{tt}^2+\bar{w}_{ttt}^2+|\nabla \bar{w}|^2+|\nabla \bar{w}_t|^2+|\nabla \bar{w}_{tt}|^2]dQ \\+ C_{2,\tau}^{'}\int_{Q(\sigma)}e^{2\tau\varphi}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt \leq C\left((\tau+1)e^{2\tau\sigma}+e^{2\tau\sigma}+\tau^3e^{-2\tau\delta}\right) \end{multline} where we have \begin{equation}\label{ctauprime} C_{1,\tau}^{'}=\tau\epsilon\rho-C, \quad C_{2,\tau}^{'}=2\tau^3\beta+\mathcal{O}(\tau^2) \end{equation} Now we take $\tau$ sufficiently large such that $C_{1,\tau}^{'}>0$, $C_{2,\tau}^{'}>0$.
Then in $(\ref{ineq7})$ we can drop the first term on the left hand side and get \begin{equation}\label{ineq8} \begin{split} C_{2,\tau}^{'}\int_{Q(\sigma)}e^{2\tau\varphi}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt& \leq C[(\tau+1)e^{2\tau\sigma}+e^{2\tau\sigma}] \\ & \leq C(\tau+2)e^{2\tau\sigma} \end{split} \end{equation} Note again from $\eqref{qsigma}$, the definition of $Q(\sigma)$, we have $e^{2\tau\varphi}\geq e^{2\tau\sigma}$ on $Q(\sigma)$, therefore $(\ref{ineq8})$ implies \begin{equation}\label{ineq9} C_{2,\tau}^{'}\int_{Q(\sigma)}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt \leq C(\tau+2) \end{equation} Dividing both sides of $\eqref{ineq9}$ by $\tau+2$, we get \begin{equation}\label{ineq10} \frac{C_{2,\tau}^{'}}{\tau+2}\int_{Q(\sigma)}[\bar{w}^2+\bar{w}_t^2+\bar{w}_{tt}^2]dxdt \leq C \end{equation} By $\eqref{ctauprime}$, $\frac{C_{2,\tau}^{'}}{\tau+2}\to\infty$ as $\displaystyle\tau\to\infty$, thus $\eqref{ineq10}$ implies that we must have $\bar{w}\equiv0$ on $Q(\sigma)$ and hence we have \begin{equation}\label{identity} f(x)R_t(x,t)=\bar{w}_{tt}(x,t)-\Delta \bar{w}(x,t)-q(x)\bar{w}(x,t)=0, \quad (x,t)\in Q(\sigma) \end{equation} Recall again that $|R_t(x,\frac{T}{2})|\geq r_1>0$ from $\eqref{crucialR}$ and the property that $Q\supset Q(\sigma)\supset[t_0,t_1]\times\Omega$ from $\eqref{qsigmaproperty}$. Thus we have from $\eqref{identity}$ that $f(x)\equiv0$, for all $x\in\Omega$. $\qquad\Box$ \section{Proof of Theorem \ref{th2}} Setting $f(x)=q(x)-p(x)$, $w(x,t)=z(q)(x,t)-z(p)(x,t)$, $u(x,t)=v(q)(x,t)-v(p)(x,t)$ and $R(x,t)=z(p)(x,t)$, we then obtain $\eqref{linear}$ after the subtraction of $\eqref{nonlinear}$ with $p$ from $\eqref{nonlinear}$ with $q$. Since $R(x,\frac{T}{2})=z(p)(x,\frac{T}{2})=z_0(x)$ and $R_t(x,\frac{T}{2})=z_t(p)(x,\frac{T}{2})=z_1(x)$, the conditions $\eqref{crucialz}$ imply $\eqref{crucialR}$.
In addition, the condition $v(q)(x,t)=v(p)(x,t)$, $x\in\Gamma_0$, $t\in[0,T]$ implies that $u(x,t)=0$ on $\Gamma_0\times[0,T]$ and $\eqref{differenceh2}$ implies $\eqref{h2reg}$. Therefore from the above Theorem \ref{th1} we conclude $f(x)=q(x)-p(x)=0$, i.e., $q(x)=p(x)$, $x\in\Omega$. $\Box$ \section{Proof of Theorem \ref{th3}} In relation to the system $\eqref{lineary}$, we define $\psi$ which satisfies the following equation \begin{equation}\label{eqpsi} \begin{cases} \psi_{tt}(x,t) = \Delta \psi(x,t) + q(x)\psi(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial\psi}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ \frac{\partial \psi}{\partial \nu}(x,t) = u_{tt}(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \psi(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \\ \psi_t(\cdot,\frac{T}{2}) = f(x)R(x,\frac{T}{2}) & \mbox{in } \Omega \end{cases} \end{equation} Set $y=\bar{w}-\psi$; then $y$ satisfies the following initial-boundary value problem \begin{equation}\label{eqy} \begin{cases} y_{tt}(x,t) - \Delta y(x,t) - q(x)y(x,t) = f(x)R_t(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial y}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma\times[0,T] \\ y(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \\ y_t(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Omega \end{cases} \end{equation} It is easy to see that both $\eqref{eqpsi}$ and $\eqref{eqy}$ are well-posed.
For the system $\eqref{eqpsi}$, we apply the continuous observability inequality in Theorem \ref{theoremobserve} to get \begin{equation}\label{ineqfr} \|fR(\cdot,\frac{T}{2})\|^2_{L^2(\Omega)}\leq C\left(\|\psi\|^2_{L^2(\Gamma_0\times[0,T])}+\|\psi_t\|^2_{L^2(\Gamma_0\times[0,T])}+\|\frac{\partial\psi}{\partial\nu}\|^2_{L^2(\Gamma_0\times[0,T])}\right)\end{equation} Notice that $|R(x,\frac{T}{2})|\geq r_0>0$, $\frac{\partial \psi}{\partial \nu}(x,t) = u_{tt}(x,t)$ on $\Gamma_0\times[0,T]$ and $\frac{\partial \psi}{\partial \nu}(x,t) = 0$ on $\Gamma_1\times[0,T]$, therefore we have from $\eqref{ineqfr}$ \begin{equation}\label{ineqfr1} \|f\|_{L^2(\Omega)}\leq C\left(\|\psi\|_{L^2(\Gamma_0\times[0,T])}+\|\psi_t\|_{L^2(\Gamma_0\times[0,T])}+\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right)\end{equation} On the other hand, for the system $\eqref{eqy}$, we have the following lemma: \begin{lemma} Let $q\in L^{\infty}(\Omega)$ and let $R(x,t)$ satisfy $R_t\in H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega))$ for some $0<\epsilon<\frac{1}{2}$ as in Theorem \ref{th3}. If we define the operators $K$ and $K_1$ by $K, K_1: L^2(\Omega)\rightarrow L^2(\Gamma_0\times[0,T])$, such that \begin{equation}\label{operator} (Kf)(x,t)=y(x,t), \quad (K_1f)(x,t)=y_t(x,t), \quad x\in\Gamma_0,t\in[0,T]\end{equation} where $y$ is the unique solution of the equation $\eqref{eqy}$, then $K$ and $K_1$ are both compact operators. \end{lemma} \begin{proof} It suffices to show that $K_1$ is compact; it then follows similarly that $K$ is also compact. Since $f\in L^2(\Omega)$ and $R_t\in H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega))$, we have \begin{equation}\label{regfRt} fR_t\in H^{\frac{1}{2}+\epsilon}(0,T;L^2(\Omega))\end{equation} Therefore the solution $y$ satisfies (e.g.
Corollary 5.3 in \cite{l-t.4}) \begin{equation}\label{sharpy} y\in C([0,T];H^{\frac{3}{2}+\epsilon}(\Omega)), \quad y_t\in C([0,T];H^{\frac{1}{2}+\epsilon}(\Omega)) \end{equation} Hence by $\eqref{regfRt}$, $q\in L^{\infty}(\Omega)$ and $y_{tt}=\Delta y+q(x)y+fR_t$ we can get \begin{equation}\label{regytt} y_{tt}\in L^2(0,T;H^{-\frac{1}{2}+\epsilon}(\Omega)) \end{equation} In addition, by $\eqref{sharpy}$ and the trace theorem we have $y_t\in C([0,T];H^{\epsilon}(\Gamma))$. Since the embedding $H^{\epsilon}(\Gamma)\to L^2(\Gamma)$ is compact, we have by the Aubin--Lions compactness criterion (e.g. Proposition III.1.3 in \cite{s}) that the operator $K_1$ is a compact operator. \end{proof} Now the inequality $\eqref{ineqfr1}$ becomes \begin{equation}\label{ineq11} \begin{split} \|f\|_{L^2(\Omega)} & \leq C\left(\|\psi\|_{L^2(\Gamma_0\times[0,T])}+\|\psi_t\|_{L^2(\Gamma_0\times[0,T])}+\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \\ & \leq C\left(\|\bar{w}-y\|_{L^2(\Gamma_0\times[0,T])}+\|\bar{w}_t-y_t\|_{L^2(\Gamma_0\times[0,T])}+\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \\ & \leq C\left(\|\bar{w}\|_{L^2(\Gamma_0\times[0,T])}+\|\bar{w}_t\|_{L^2(\Gamma_0\times[0,T])}+\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \\ & \qquad +C\|y\|_{L^2(\Gamma_0\times[0,T])}+C\|y_t\|_{L^2(\Gamma_0\times[0,T])} \\ & = C\left(\|\bar{w}\|_{L^2(\Gamma_0\times[0,T])}+\|\bar{w}_t\|_{L^2(\Gamma_0\times[0,T])}+\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \\ & \qquad +C\|Kf\|_{L^2(\Gamma_0\times[0,T])}+C\|K_1f\|_{L^2(\Gamma_0\times[0,T])} \\ & \leq C\left(\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}+\|u_{ttt}\|_{L^2(\Gamma_0\times[0,T])}+\|\Delta^2u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \\ & \qquad +C\|Kf\|_{L^2(\Gamma_0\times[0,T])}+C\|K_1f\|_{L^2(\Gamma_0\times[0,T])} \end{split} \end{equation} where in the last step we use the coupling $\bar{w}(x,t) = -u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t)$ on $\Gamma_0\times[0,T]$ from $\eqref{lineary}$ and again the initial
conditions $u(\cdot,\frac{T}{2})=u_t(\cdot,\frac{T}{2})=0$ on $\Gamma_0\times[0,T]$ so that by the fundamental theorem of calculus, we have \begin{equation}\label{poincare} \|u\|_{L^2(\Gamma_0\times[0,T])}\leq C\|u_t\|_{L^2(\Gamma_0\times[0,T])}\leq C\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])} \end{equation} To complete the proof, we need to absorb the last two terms in $\eqref{ineq11}$. To achieve that, we apply the compactness-uniqueness argument. For simplicity we denote \begin{equation*} \|u\|_X=\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}+\|u_{ttt}\|_{L^2(\Gamma_0\times[0,T])} +\|\Delta^2u_{tt}\|_{L^2(\Gamma_0\times[0,T])} \end{equation*} Suppose, to the contrary, that the inequality $\eqref{stability}$ does not hold. Then there exist $f_n\in L^2(\Omega)$, $n\geq 1$ such that \begin{equation}\label{contrary1} \|f_n\|_{L^2(\Omega)}=1, \quad n\geq 1 \end{equation} and \begin{equation}\label{contrary2} \lim_{n\to\infty}\|u(f_n)\|_{X}=0 \end{equation} From $\eqref{contrary1}$, there exists a subsequence, denoted again by $\{f_n\}_{n\geq1}$, such that $f_n$ converges to some $f_0\in L^2(\Omega)$ weakly in $L^2(\Omega)$.
Moreover, since $K$ and $K_1$ are compact, we have \begin{equation}\label{compactidentity} \lim_{m,n\to\infty}\|Kf_n-Kf_m\|_{L^2(\Gamma_0\times[0,T])}=0, \quad \lim_{m,n\to\infty}\|K_1f_n-K_1f_m\|_{L^2(\Gamma_0\times[0,T])}=0 \end{equation} On the other hand, it follows from $\eqref{ineq11}$ that \begin{equation}\label{cauthyf} \begin{split} \|f_n-f_m\|_{L^2(\Omega)}& \leq C\|u(f_n)-u(f_m)\|_{X}+C\|Kf_n-Kf_m\|_{L^2(\Gamma_0\times[0,T])} \\ & \qquad +C\|K_1f_n-K_1f_m\|_{L^2(\Gamma_0\times[0,T])} \\ & \leq C\|u(f_n)\|_{X}+C\|u(f_m)\|_{X}+C\|Kf_n-Kf_m\|_{L^2(\Gamma_0\times[0,T])} \\ & \qquad +C\|K_1f_n-K_1f_m\|_{L^2(\Gamma_0\times[0,T])} \end{split} \end{equation} Thus by $\eqref{contrary2}$ and $\eqref{compactidentity}$, we have that \begin{equation}\label{iden1} \lim_{m,n\to\infty}\|f_n-f_m\|_{L^2(\Omega)}=0 \end{equation} and hence $f_n$ converges strongly to $f_0$ in $L^2(\Omega)$. So by $\eqref{contrary1}$ we obtain \begin{equation}\label{iden3} \|f_0\|_{L^2(\Omega)}=1 \end{equation} On the other hand, by $\eqref{crucialR}$ and a standard a priori estimate, we have that \begin{equation} \begin{split} \|\bar{w}(f)\|_{C([0,T];H^1(\Omega))}+\|\bar{w}_t(f)\|_{C([0,T];L^2(\Omega))} & \leq C\|fR_t\|_{L^1(0,T;L^2(\Omega))} \\ & \leq C\|R_t\|_{L^1(0,T;L^{\infty}(\Omega))}\|f\|_{L^2(\Omega)} \end{split} \end{equation} Hence the trace theorem implies that \begin{equation}\label{traceineq} \|\bar{w}(f)\|_{L^2(\Gamma_0\times[0,T])}\leq C\|f\|_{L^2(\Omega)} \end{equation} where $C>0$ depends on $\|R_t\|_{L^1(0,T;L^{\infty}(\Omega))}$.
Therefore by $\eqref{traceineq}$ we have \begin{equation}\label{ineq12} \lim_{n\to\infty}\|\bar{w}(f_n)-\bar{w}(f_0)\|_{L^2(\Gamma_0\times[0,T])}\leq C\lim_{n\to\infty}\|f_n-f_0\|_{L^2(\Omega)}=0 \end{equation} Moreover, by $\eqref{contrary2}$ and the coupling $\bar{w}(x,t) = -u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t)$ on $\Gamma_0\times[0,T]$, we have \begin{equation}\label{iden4} \lim_{n\to\infty}\|\bar{w}(f_n)\|_{L^2(\Gamma_0\times[0,T])}\leq \lim_{n\to\infty}\|u\|_X=0 \end{equation} Thus by $\eqref{ineq12}$ and $\eqref{iden4}$, we obtain \begin{equation}\label{last} \bar{w}(f_0)(x,t)=0, \quad x\in\Gamma_0, t\in[0,T] \end{equation} Therefore from $\eqref{lineary}$ we have that $u=u(f_0)$ satisfies the initial boundary problem: \begin{equation}\label{equationu} \begin{cases} -u_{tt}(x,t)-\Delta^2 u(x,t)-\Delta^2 u_t(x,t) = 0 & \mbox{in } \Gamma_0\times[0,T] \\ u(x,t)=\frac{\partial u}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ u(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Gamma_0 \\ u_t(\cdot,\frac{T}{2}) = 0 & \mbox{in } \Gamma_0 \end{cases} \end{equation} which has only the zero solution, namely, we have $u(f_0)(x,t)=0, x\in\Gamma_0, t\in [0,T]$. Therefore by the uniqueness Theorem \ref{th1}, we have $f_0\equiv0$ in $\Omega$ which contradicts $\eqref{iden3}$. Thus we must have \begin{equation} \|f\|_{L^2(\Omega)}\leq C\left(\|u_{tt}\|_{L^2(\Gamma_0\times[0,T])}+\|u_{ttt}\|_{L^2(\Gamma_0\times[0,T])}+\|\Delta^2u_{tt}\|_{L^2(\Gamma_0\times[0,T])}\right) \end{equation} and the proof of the theorem is complete. $\Box$ \section{Proof of Theorem \ref{th4}} We now go back to the original system $\eqref{nonlinear}$. \textbf{Case 1: $n=2$}.
Let $\bar{z}=z_t$ and $\bar{v}=v_t$; then the system $\eqref{nonlinear}$ becomes \begin{equation}\label{nonlinear2} \begin{cases} \bar{z}_{tt}(x,t) = \Delta \bar{z}(x,t) + q(x)\bar{z}(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial \bar{z}}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ \bar{z}_t(x,t) = -\bar{v}_{tt}(x,t)-\Delta^2 \bar{v}(x,t)-\Delta^2 \bar{v}_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \bar{v}(x,t)=\frac{\partial \bar{v}}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ \frac{\partial \bar{z}}{\partial \nu}(x,t)=\bar{v}_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \bar{z}(\cdot,\frac{T}{2}) = z_1(x) & \mbox{in } \Omega \\ \bar{z}_t(\cdot,\frac{T}{2}) = \Delta z_0(x)+q(x)z_0 & \mbox{in } \Omega \\ \bar{v}(\cdot,\frac{T}{2}) = v_1(x) & \mbox{on } \Gamma_0 \\ \bar{v}_t(\cdot,\frac{T}{2}) = -z_1(x)-\Delta^2 v_0(x)-\Delta^2 v_1(x) & \mbox{on } \Gamma_0 \end{cases} \end{equation} By using a similar operator setting as in Section 1.2 and noticing the new initial conditions, we can compute the domain of the operator $\mathcal{A}^2$: \begin{equation}\label{DomA2} \begin{split} D(\mathcal{A}^2)& =\{[z_0,z_1,v_0,v_1]^T:(z_1,(-A_N+q)z_0+Bv_1,v_1,-\textbf{\AA}(v_0+v_1)-B^{*}z_1)\in D(\mathcal{A})\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_1\in H^2_{\Gamma_1}(\Omega), (-A_N+q)z_0+Bv_1\in H^1_{\Gamma_1}(\Omega), v_0\in H^2_0(\Gamma_0), \\ & \qquad v_1\in H^2_0(\Gamma_0), \textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), \\ & \qquad v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1\in D(\textbf{\AA}), \frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_1\in H^2_{\Gamma_1}(\Omega),(\Delta+q)z_0\in H^1_{\Gamma_1}(\Omega), \frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \\ & \qquad v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0),\textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), \\ & \qquad v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1\in D(\textbf{\AA}),
\frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^3_{\Gamma_1}(\Omega),z_1\in H^2_{\Gamma_1}(\Omega),v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \qquad\textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1\in D(\textbf{\AA}), \\ & \qquad\frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1\} \end{split} \end{equation} where in the last step $z_0\in H^3_{\Gamma_1}(\Omega)$ is from elliptic theory when provided that $q(x)\in W^{1,\infty}(\Omega)$. Therefore when $z_0\in H^3_{\Gamma_1}(\Omega)$, $z_1\in H^2_{\Gamma_1}(\Omega)$, $v_0\in H^2_0(\Gamma_0)$, $v_1\in H^2_0(\Gamma_0)$ with compatible conditions as in $D(\mathcal{A}^2)$ and $q\in W^{1,\infty}(\Omega)$, then from semigroup theory we have that the solution of $\eqref{nonlinear2}$ satisfies \begin{equation}\label{barztztt} \bar{z}_t\in C([0,T];H^1(\Omega)), \quad \bar{z}_{tt}\in C([0,T];L^2(\Omega)) \end{equation} Hence we have on the one hand \begin{equation}\label{regzt11} z_t\in H^1(0,T;H^1(\Omega)) \end{equation} On the other hand, from $\eqref{barztztt}$ and $\bar{z}_{tt}(x,t) = \Delta \bar{z}(x,t) + q(x)\bar{z}(x,t)$, we have by elliptic theory that \begin{equation}\label{regzt02} z_t=\bar{z}\in L^2(0,T;H^2(\Omega)) \end{equation} Interpolate between $\eqref{regzt11}$ and $\eqref{regzt02}$, we have for $0<\epsilon<\frac{1}{2}$, \begin{equation} z_t\in H^{\frac{1}{2}+\epsilon}(0,T;H^{\frac{3}{2}-\epsilon}(\Omega))\subset H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega)) \end{equation} where the inclusion is by Sobolev embedding theorem. \textbf{Case 2: n=3}. 
We let $\bar{\bar{z}}=z_{tt}$, $\bar{\bar{v}}=v_{tt}$; then $\bar{\bar{z}}$, $\bar{\bar{v}}$ satisfy \begin{equation}\label{nonlinear3} \begin{cases} \bar{\bar{z}}_{tt}(x,t) = \Delta \bar{\bar{z}}(x,t) + q(x)\bar{\bar{z}}(x,t) & \mbox{in } \Omega \times [0,T] \\ \frac{\partial \bar{\bar{z}}}{\partial\nu}(x,t) = 0 & \mbox{on } \Gamma_1\times[0,T] \\ \bar{\bar{z}}_t(x,t) = -\bar{\bar{v}}_{tt}(x,t)-\Delta^2 \bar{\bar{v}}(x,t)-\Delta^2 \bar{\bar{v}}_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \bar{\bar{v}}(x,t)=\frac{\partial \bar{\bar{v}}}{\partial \nu}(x,t)=0 & \mbox{on } \partial\Gamma_0\times[0,T] \\ \frac{\partial \bar{\bar{z}}}{\partial \nu}(x,t)=\bar{\bar{v}}_t(x,t) & \mbox{on } \Gamma_0\times[0,T] \\ \bar{\bar{z}}(\cdot,\frac{T}{2}) = \Delta z_0(x)+q(x)z_0 & \mbox{in } \Omega \\ \bar{\bar{z}}_t(\cdot,\frac{T}{2}) = \Delta z_1(x)+q(x)z_1 & \mbox{in } \Omega \\ \bar{\bar{v}}(\cdot,\frac{T}{2}) = -z_1(x)-\Delta^2 v_0(x)-\Delta^2 v_1(x) & \mbox{on } \Gamma_0 \\ \bar{\bar{v}}_t(\cdot,\frac{T}{2}) = -\Delta z_0(x)-q(x)z_0(x)-\Delta^2 v_1(x) \\ \hspace*{1.0 in} +\Delta^2 z_1(x)+\Delta^4 v_0(x)+\Delta^4 v_1(x) & \mbox{on } \Gamma_0 \end{cases} \end{equation} Then, still using the similar operator setting as before, we can compute the domain of $\mathcal{A}^3$: \begin{equation}\label{DomA3} \begin{split} D(\mathcal{A}^3)& =\{[z_0,z_1,v_0,v_1]^T:(z_1,(-A_N+q)z_0+Bv_1,v_1,-\textbf{\AA}(v_0+v_1)-B^{*}z_1)\in D(\mathcal{A}^2)\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_1\in H^3_{\Gamma_1}(\Omega), (\Delta+q)z_0\in H^2_{\Gamma_1}(\Omega), \frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \\ & v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), \\ & \textbf{\AA}(v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1)+B^{*}[(-A_N+q)z_0+Bv_1]\in H^2_0(\Gamma_0), \\ & \textbf{\AA}(v_0+v_1)+B^{*}z_1+\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]+B^{*}[(-A_N+q)z_0+Bv_1]\in D(\textbf{\AA}) \\ &
\frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1, \frac{\partial [(-A_N+q)z_0+Bv_1]}{\partial\nu}|_{\Gamma_0}= \\ & -\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]-B^{*}[(-A_N+q)z_0+Bv_1]\} \\ & =\{[z_0,z_1,v_0,v_1]^T: z_0\in H^{\frac{7}{2}}_{\Gamma_1}(\Omega), z_1\in H^3_{\Gamma_1}(\Omega), v_0\in H^2_0(\Gamma_0), v_1\in H^2_0(\Gamma_0), \\ & \textbf{\AA}(v_0+v_1)+B^{*}z_1\in H^2_0(\Gamma_0), \frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1, \frac{\partial z_1}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}(v_0+v_1)-B^{*}z_1 \ \text{on} \ \Gamma_0, \\ & \textbf{\AA}(v_0+v_1)+B^{*}z_1+\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]+B^{*}[(-A_N+q)z_0+Bv_1]\in D(\textbf{\AA}) \\ & \textbf{\AA}(v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1)+B^{*}[(-A_N+q)z_0+Bv_1]\in H^2_0(\Gamma_0), \\ & \frac{\partial [(-A_N+q)z_0+Bv_1]}{\partial\nu}|_{\Gamma_0}=-\textbf{\AA}[v_1-\textbf{\AA}(v_0+v_1)-B^{*}z_1]-B^{*}[(-A_N+q)z_0+Bv_1]\} \end{split} \end{equation} where in the last step $z_0\in H^{\frac{7}{2}}_{\Gamma_1}(\Omega)$ is from trace theory of solving $\frac{\partial z_0}{\partial\nu}|_{\Gamma_0}=v_1\in H^{2}(\Gamma_0)$. 
Therefore when $z_0\in H^{\frac{7}{2}}_{\Gamma_1}(\Omega)$, $z_1\in H^3_{\Gamma_1}(\Omega)$, $v_0\in H^2_0(\Gamma_0)$, $v_1\in H^2_0(\Gamma_0)$ with compatible conditions as in $D(\mathcal{A}^3)$ and $q\in W^{2,\infty}(\Omega)$, then from semigroup theory we have that the solution of $\eqref{nonlinear3}$ satisfies \begin{equation}\label{barbarztztt} \bar{\bar{z}}_t\in C([0,T];H^1(\Omega)), \quad \bar{\bar{z}}_{tt}\in C([0,T];L^2(\Omega)) \end{equation} Hence we have on the one hand \begin{equation}\label{regzt21} z_t\in H^2(0,T;H^1(\Omega)) \end{equation} On the other hand, from $\eqref{barbarztztt}$ and $\bar{\bar{z}}_{tt}(x,t) = \Delta \bar{\bar{z}}(x,t) + q(x)\bar{\bar{z}}(x,t)$, we have by elliptic theory that \begin{equation} z_{tt}=\bar{\bar{z}}\in L^2(0,T;H^2(\Omega)) \end{equation} which implies \begin{equation}\label{regzt12} z_t\in H^1(0,T;H^2(\Omega)) \end{equation} Now interpolating between $\eqref{regzt21}$ and $\eqref{regzt12}$, we have for $0<\epsilon<\frac{1}{2}$, \begin{equation} z_t\in H^{\frac{3}{2}}(0,T;H^{\frac{3}{2}}(\Omega))\subset H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega)) \end{equation} where the inclusion is again by Sobolev embedding. Hence in either case $n=2$ or $n=3$, under the assumptions on the initial data $[z_0,z_1,v_0,v_1]$ and $q(x)$, $p(x)$ in Theorem \ref{th4}, we have that $z_t\in H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega))$. Thus when we again set $f(x)=q(x)-p(x)$, $w(x,t)=z(q)(x,t)-z(p)(x,t)$, $u(x,t)=v(q)(x,t)-v(p)(x,t)$ and $R(x,t)=z(p)(x,t)$ as in Section 4, we obtain $\eqref{regRt}$ that $R_{t}\in H^{\frac{1}{2}+\epsilon}(0,T;L^{\infty}(\Omega))$ and hence all the assumptions in Theorem \ref{th3} are satisfied. Therefore, we get the desired stability $\eqref{nonlinearstability}$ from the stability $\eqref{stability}$ of the linear inverse problem in Theorem \ref{th3}.
$\Box$ \section{Concluding remark} As we mentioned at the beginning and the calculations of $D(\mathcal{A}^2)$ and $D(\mathcal{A}^3)$ show, the lack of compactness of the resolvent limits the space regularity of the solutions for the wave equation parts since we always have the elliptic problem for $z$ or $z_t$ such that $(\Delta+q)z\in L^2(\Omega)$ with $\frac{\partial z}{\partial\nu}\in H^2_0(\Gamma_0)$ provided $q$ in some suitable space. Therefore the best space regularity that $z$ could get is $2+\frac{3}{2}=\frac{7}{2}$ from elliptic and trace theory. As a result, our argument of the stability in the nonlinear inverse problem will only work for dimension up to $n=7$ as we need the Sobolev embedding $H^{\frac{n}{2}}(\Omega)\subset L^{\infty}(\Omega)$ in order to achieve the space regularity of $z_t$ in $L^{\infty}(\Omega)$ which is needed in the proof. \begin{thebibliography}{99} \bibitem{a.1} G. Avalos, The exponential stability of a coupled hyperbolic/parabolic system arising in structural acoustics, \emph{Appl. Abstr. Anal.} \textbf{1} (2), 203-217. \bibitem{a.2} G. Avalos, Exact-approximate boundary reachability of thermoelastic plates under variable thermal coupling, \emph{Inverse Problems}, \textbf{16} (2000), 979-996. \bibitem{a-l.1} G. Avalos and I. Lasiecka, Differential Riccati equation for the active control of a problem in structural acoustics, \emph{J. Optim. Theory. Appl.}, \textbf{91} (3) (2001), 695-728. \bibitem{a-l.2} G. Avalos and I. Lasiecka, The strong stability of a semigroup arising from a coupled hyperbolic/parabolic system, \emph{Semigroup Forum}, Vol. \textbf{57} (1998), 278-292. \bibitem{a-l.3} G. Avalos and I. Lasiecka, Exact controllability of structural acoustic interactions, \emph{J. Math. Pures. Appl.}, \textbf{82} (2003), 1047-1073. \bibitem{a-l.4} G. Avalos and I.
Lasiecka, Exact reachability of finite energy states for an acoustic wave/plate interaction under the influence of boundary and localized controls, \emph{IMA Preprint Series $\sharp$2017}, (January, 2005). \bibitem{a-l-r.1} G. Avalos, I. Lasiecka and R. Rebarber, Well-posedness of a structural acoustics control model with point observation of the pressure, \emph{Journal of Differential Equations}, \textbf{173} (2001), 40-78. \bibitem{a-l-r.2} G. Avalos, I. Lasiecka and R. Rebarber, Boundary controllability of a coupled wave/Kirchoff system, \emph{System and Control Letters}, \textbf{50} (2003), 331-341. \bibitem{b-f-s-s} H. T. Banks, W. Fang, R. J. Silcox and R.C. Smith, Approximation methods for control of acoustic/structure models with piezoceramic actuators, \emph{Contract Report 189578 NASA} (1991). \bibitem{b-s} H. T. Banks and R.C. Smith, Feedback control of noise in a 2-D nonlinear structural acoustics model, \emph{Discrete and Continuous Dynamical Systems} Vol \textbf{1}, No. \textbf{1}, (1995), 119-149. \bibitem{b} A. Bukhgeim, \emph{Introduction to the Theory of Inverse Problems}, VSP, Utrecht, 2000. \bibitem{b-k} A. Bukhgeim and M. Klibanov, Global uniqueness of a class of multidimensional inverse problems, \emph{Sov. Math.-Dokl.} \textbf{24}(1981), 244-7. \bibitem{c} T. Carleman, Sur un probl\`{e}me d'unicit\'{e} pour les syst\`{e}mes d'\'{e}quations aux deriv\'{e}es partielles \`{a} deux variables independantes, \emph{Ark. Mat. Astr. Fys.}, \textbf{2B} (1939), 1-9. \bibitem{c-t} M. Camurdan and R. Triggiani, Sharp regularity of a coupled system of a wave and a Kirchoff plate with point control arising in noise reduction, \emph{Differential and Integral Equations}, \textbf{12} (1999), 101-107. \bibitem{is.1} V. Isakov, \emph{Inverse Source Problems}, American Mathematical Society, 2000. \bibitem{is.2} V. Isakov, \emph{Inverse Problems for Partial Differential Equations}, Second Edition, Springer, New York, 2006. \bibitem{is.3} V.
Isakov, Uniqueness and stability in multi-dimensional inverse problems, \emph{Inverse Problems}, \textbf{9} (1993), 579-621. \bibitem{i-y} O. Imanuvilov and M. Yamamoto, Global Lipschitz stability in an inverse hyperbolic problem by interior observations, \emph{Inverse Problems}, \textbf{17}(2001), 717-728. \bibitem{is-y} V. Isakov and M. Yamamoto, Carleman estimate with the Neumann boundary condition and its application to the observability inequality and inverse hyperbolic problems, \emph{Contemp. Math.}, \textbf{268}(2000), 191-225. \bibitem{kh} A. Kha\u{i}darov, Carleman estimates and inverse problems for second order hyperbolic equations, \emph{Math. USSR Sbornik}, \textbf{58}(1987), 267-277. \bibitem{k.1} M. Klibanov, Inverse problems and Carleman estimates, \emph{Inverse Problems}, \textbf{8}(1992), 575-596. \bibitem{k.2} M. Klibanov, Carleman estimates and inverse problems in the last two decades, \emph{Surveys on Solutions Methods for Inverse Problems}, Springer, Wien, 2000, pp 119-146. \bibitem{l} I. Lasiecka, Mathematical Control Theory of Coupled PDE's, \emph{CBMS-NSF Regional Conference Series in Applied Mathematics}, SIAM Publishing, Philadelphia (2002). \bibitem{l-t.1} I. Lasiecka and R. Triggiani, Exact controllability of the wave equation with Neumann boundary control, \emph{Appl. Math. \& Optimiz.}, \textbf{19}(1989), 243-290. \bibitem{l-t.2} I. Lasiecka and R. Triggiani, Carleman estimates and exact boundary controllability for a system of coupled, nonconservative second order hyperbolic equations, in Partial Differential Equations Methods in Control and Shape Analysis, \emph{Lecture Notes in Pure and Applied Mathematics}, Marcel Dekker, New York, Vol. \textbf{188}, 215-243. \bibitem{l-t.3} I. Lasiecka and R. Triggiani, Sharp regularity theory for second order hyperbolic equations of Neumann type. Part I. $L_2$ Nonhomogeneous data, \emph{Ann. Mat. Pura. Appl. (IV)}, \textbf{CLVII}(1990), 285-367. \bibitem{l-t.4} I. Lasiecka and R. 
Triggiani, Regularity theory of hyperbolic equations with non-homogeneous Neumann boundary conditions. II. General boundary data, \emph{Journal of Differential Equations}, \textbf{94}(1991), 112-164. \bibitem{l-t.5} I. Lasiecka and R. Triggiani, Uniform stabilization of the wave equation with Dirichlet or Neumann feedback control without geometrical conditions, \emph{Appl. Math. \& Optimiz.}, \textbf{25}(1992), 189-244. \bibitem{l-t-z} I. Lasiecka, R. Triggiani and X. Zhang, Nonconservative wave equations with unobserved Neumann B.C.:global uniqueness and observability in one shot, \emph{Contemp. Math.}, \textbf{268}(2000), 227-325. \bibitem{l-m} J.L.Lions and E. Magenes, \emph{Non-homogeneous Boundary Value Problems and Applications}, Vol. I, Springer-Verlag, Berlin, 1972. \bibitem{l-l} W. Littman and B. Liu, On the spectral properties and stabilization of acoustic flow, \emph{IMA Preprint Series $\sharp$1436}, (November, 1996). \bibitem{p-y} J-P Puel and M. Yamamoto, On a global estimate in a linear inverse hyperbolic problem, \emph{Inverse Problems}, \textbf{12}(1996), 995-1002. \bibitem{s} R.E.Showalter, \emph{Monotone Operators in Banach Space and Nonlinear Partial Differential Equations}, Mathematical Surveys and Monographs Volume 49, American Mathematical Society, 1997. \bibitem{ta.1} D. Tataru, On the regularity of boundary traces for the wave equation, \emph{Annali Scuola Normale di Pisa}, Classe Scienze (4), \textbf{26}(1998), no. 1, 355-387. \bibitem{ta.2} D. Tataru, A priori estimates of Carleman's type in domains with boundary, \emph{J.Math.Pures Appl}, (9), \textbf{73}(1994), no. 4, 185-206. \bibitem{t} R. Triggiani, Wave equation on a bounded domain with boundary dissipation: an operator approach, \emph{Journal of Mathematical Analysis and Applications}, \textbf{137}(1989), 438-461. \bibitem{y} M. Yamamoto, Uniqueness and stability in multidimensional hyperbolic inverse problems, \emph{J. Math. Pures Appl.}, \textbf{78}(1999), 65-98. 
\end{thebibliography} \end{document}
\begin{document} \title[Schauder estimates up to the boundary on h-type groups]{Schauder estimates up to the boundary on h-type groups: an approach via the double layer potential} \author[G.~Citti]{Giovanna Citti} \address{Dipartimento di Matematica, Piazza di Porta S. Donato 5, 40126 Bologna, Italy} \email{[email protected]} \author[G.~Giovannardi]{Gianmarco Giovannardi} \address{Dipartimento di Matematica e Informatica ``U. Dini'', Universit\`a degli Studi di Firenze, Viale Morgagni 67/A, 50134, Firenze, Italy} \email{[email protected]} \author[Y.~Sire]{Yannick Sire} \address{Department of Mathematics, Johns Hopkins University, Baltimore, MD 21218, USA} \email{[email protected]} \date{\today} \thanks{The first author has been supported by Horizon 2020 Project ref. 777822: GHAIA. The second author has been supported by MEC-Feder grant PID2020-118180GB-I00, Horizon 2020 Project ref. 777822: GHAIA and INdAM-GNAMPA Project 2022 CUP-E55F22000270001. The third author is partially funded by NSF grant DMS 2154219 and the Simons foundation. } \subjclass[2000]{35B65, 35J25, 35R03} \keywords{The double layer potential, Schauder estimates at the boundary, Heisenberg-type groups, reflection-type argument} \maketitle \thispagestyle{empty} \begin{abstract} We establish the Schauder estimates at the boundary away from the characteristic points for the Dirichlet problem by means of the double layer potential in { a Heisenberg-type group ${\mathbb{G}}$.} Despite its singularity we manage to invert the double layer potential restricted to the boundary thanks to a reflection technique for an approximate operator in {${\mathbb{G}}$. } This is the first instance where a reflection-type argument appears to be useful in the sub-Riemannian setting. \end{abstract} \section{Introduction} Schauder estimates at the boundary are important tools in regularity theory and applications to PDEs.
On a Euclidean (smooth) domain, they are obtained classically by combining a flattening of the boundary together with a reflection argument that allows one to use interior estimates. This classical reflection technique does not work when the ambient space, instead of ${\mathbb{R}}^n$, is the simplest sub-Riemannian manifold: the Heisenberg group $\mathbb{H}^n$, $n\ge1$. Given $f,g \in C^{\infty}$, Kohn and Nirenberg in \cite{KN65} proved that the solution of the Dirichlet problem is smooth up to the boundary at the non-characteristic points of the boundary $\partial \Omega$, where the projection of the Euclidean normal to $\partial \Omega$ onto the horizontal distribution is different from zero. Then Jerison in \cite{Jerison1}, using the method of the \textit{single layer potential}, was able to invert the operator restricted to the boundary to construct a Poisson kernel that directly yields the Schauder estimates around a non-characteristic point. Later in \cite{Jerison2}, under suitable conditions on the characteristic point, Jerison obtained the Schauder estimates around a strongly isolated characteristic boundary point. On the other hand, again in \cite[Section 3]{Jerison2} Jerison showed the celebrated example of the paraboloid where the Schauder estimate fails, since the solution of the Dirichlet problem with real analytic datum $g$ and $f=0$ may be no better than H\"older continuous near a characteristic boundary point. Recently Baldi, Citti and Cupini \cite{BCC19}, assuming a geometric hypothesis on the boundary, obtained Schauder estimates at the boundary in a neighborhood of non-characteristic points for the problem $\Delta_{\mathbb{G}} u= f$ in $\Omega$ with Dirichlet boundary condition $u=g $ on $\partial \Omega$. Here $ \Delta_{\mathbb{G}}= \sum_{i=1}^k X_i^2$ is the sub-Laplacian in a generic Carnot group $\mathbb{G}$ with distribution $V^1$ generated by the vector fields $X_1,\ldots,X_k$ and $\Omega $ is a bounded open set of $\mathbb{G}$.
Here $f$ and $g$ belong to suitable classes of H\"{o}lderian functions. The additional hypothesis that Baldi, Citti and Cupini \cite{BCC19} assume is that the induced distribution on the boundary, generated by the vector fields tangent to the boundary that belong to the distribution $V^1$, verifies the H\"{o}rmander condition. However, even in the simplest case of the Heisenberg group $\mathbb{H}^1$ this hypothesis is not verified. Another relevant paper concerning the $C^{1,\alpha}$ Schauder estimates in Carnot groups near a non-characteristic portion of the boundary of regularity $C^{1,\alpha}$ is \cite{MR3948989}. The full Schauder estimates have been subsequently obtained in the recent paper \cite{2022arXiv221012950B} where the authors obtain estimates in $\Gamma^{k,\alpha}$ for $k\ge2$ near a $C^{k,\alpha}$ non-characteristic portion of the boundary in a Carnot group by means of a compactness method going back to seminal works of Caffarelli (see e.g. \cite{MR1005611}).\footnote{A first version of the current paper considered only the case of the first Heisenberg group. We decided in this new version to deal with all possible H-type groups; the paper \cite{2022arXiv221012950B} appeared while this work was in preparation.} The aim of this work is to establish the Schauder estimates at the boundary away from the characteristic points for the Dirichlet problem \begin{equation} \label{eq:PP} \begin{cases} \Delta_{{\mathbb{G}}} u= f & \quad \text{in} \quad \Omega\\ u= g & \quad \text{on} \quad \partial \Omega \end{cases} \end{equation} by means of the \textit{double layer potential} in a group of Heisenberg type ${\mathbb{G}}$. Here $\Omega \subset {\mathbb{G}}$ is a bounded open set and $f\in C^{\alpha}(\bar{\Omega})$, $g \in C^{2,\alpha}(\partial \Omega)$.
Since the function $f$ in \eqref{eq:PP} concerns only interior estimates, it is not restrictive to study the associated homogeneous problem $\Delta_{{\mathbb{G}}} u=0$ in $\Omega$ and $ u=g$ on $\partial \Omega$. By Green's representation formula, the solution of the previous problem is given by \[ u(x)=\int_{\partial \Omega} g(y) \escpr{ \nabla_{{\mathbb{G}}} G(x,y), \nu_h(y)} d\sigma(y), \] where $\nabla_{{\mathbb{G}}}$ is the horizontal gradient and $G(x,y)$ is the Green's function such that $G=0$ on $\partial \Omega $. Then we consider a harmonic approximation of $u(x)$ given by $\mathcal{D}(g)(x)$ where instead of $\nabla_{{\mathbb{G}}} G(x,y)$ we consider $\nabla_{{\mathbb{G}}} \Gamma(x,y)$, where $\Gamma$ is the fundamental solution for $\Delta_{{\mathbb{G}}}$. We will call $\mathcal{D}(g)$ the \textit{double layer potential}. Clearly the harmonic function $\mathcal{D}(g)$ does not assume the boundary datum $g$, but when $x \rightarrow \xi \in \partial \Omega$ and $\xi$ is a non-characteristic point, we get that the limit of the double layer potential coincides with $T(g)(\xi):=\frac{1}{2} g(\xi)+ K(g)(\xi)$, where $K$ is a singular operator from $C^{2,\alpha}$ into $C^{2,\alpha}$. Jerison in \cite{Jerison1} already pointed out the singularity of $K$ and chose to use instead the single layer potential to derive Schauder estimates up to the boundary. On one hand, in the Euclidean case the analogue of $K$ is a compact operator if the boundary is smooth, thus $T$ is invertible, by the Fredholm alternative. Then, we have that $\mathcal{D}(T^{-1}(g))$ is harmonic and assumes the boundary datum. Hence the Schauder estimates follow automatically by the H\"{o}lder estimates for $T$. On the other hand, when the boundary $\partial \Omega$ is Lipschitz the operator $K$ is singular as well as in the sub-Riemannian case.
However, Verchota in \cite{MR769382} inverted the operator $T$ between $L^2$ classes on the Lipschitz boundary in a Euclidean domain. He showed that the $L^2$ norm of $K$ is small compared to $\tfrac{1}{2}$, extending the previous result by Fabes, Jodeit, and Rivi\'ere \cite{MR501367} for $C^1$ domains in ${\mathbb{R}}^n$. The technique developed in \cite{MR769382} was influenced by the previous seminal papers by Calder\'on \cite{MR466568} and Coifman, McIntosh, and Meyer \cite{MR672839} which established the $L^p$-boundedness of the Riesz transform and the double layer potential in this Lipschitz Euclidean setting. All these results are very well explained and (some of them) extended in the book \cite{KC94} by Kenig. It is important to keep in mind that in this Euclidean setting, the double layer potential is singular since the domain is rough. Several extensions of the boundary layer technique have been developed over the last years in a variety of settings (see e.g. \cite{kim} and references therein for recent developments). In the present situation, as mentioned above, the situation is more dramatic since the double layer potential is singular \emph{even} on smooth domains. Fortunately in the present work we do not need to develop an $L^2$ theory for singular integrals in the sub-Riemannian setting as Orponen and Villa did in \cite{2020arXiv200608293O}; instead we need to prove that $K$ has small $C^{2,\alpha}$ norm with respect to $\frac{1}{2}$ in order to use the continuity method developed in \cite{MR769382} (see also \cite[page 56]{KC94}). Crucial to our strategy is a reflection argument, special to the sub-Riemannian setting, which we believe will prove useful for other problems when the lack of commutativity is critical. We now describe our strategy in the case of the upper half space $\{ x_1>0\}$ of the first Heisenberg group ${\mathbb{H}}^1$ with coordinates $(x_1,x_2,x_3)$ in ${\mathbb{H}}^1$.
The boundary of $\{ x_1>0\}$ is given by the intrinsic plane $\Pi=\{x_1=0\}$, so called in the literature since it does not contain characteristic points. In this case the singular operator $K$ is an operator on $\Pi$ with convolution kernel $k$ given by \eqref{eq:kk}. In order to prove that $K$ has small $C^{2,\alpha}$ norm with respect to $\frac{1}{2}$ we use a surprising reflection technique in the Heisenberg group. Indeed the main obstacle in applying the reflection technique in the Heisenberg group is the fact that if $u$ satisfies an equation, its reflection $u(-x_1, x_2, x_3)$ does not satisfy the same equation, because of the non-commutation properties of $X_1$ and $X_2$. As a consequence, while the increments appearing in the kernel along the horizontal directions are the standard ones, $x_1- y_1$ and $ x_2 - y_2 $, in the third increment mixed variables show up: \[ x_3 - y_3 - \frac{1}{2}(y_1 x_{2}-y_{2}x_1). \] However this increment, restricted to the boundary $x_1=y_1=0$, reduces to the standard one, and all variables decouple. This allows us to apply a reflection technique, if we are interested in the limit, when the operators tend to the boundary. Hence we modify the operators on the whole space, removing the mixed term, which is not present in the limit and which prevents the reflection. Hence the symmetry of these new operators on the whole space yields that in the limit the $C^{2,\alpha}$ norms of $\tfrac{1}{2} I+ K$ and $-\tfrac{1}{2} I+ K$ coincide. Finally the continuity method allows us to prove the invertibility of $\tfrac{1}{2} I+ K$; thus the solution of the Dirichlet problem on the half space is given by the harmonic function $\mathcal{D}((\tfrac{1}{2} I+ K)^{-1}g)$. Once we obtain the invertibility of $\tfrac{1}{2} I+ K$ on the flat space the general case for curved domains follows directly.
Indeed flattening the boundary around a non-characteristic point involves a compact operator $K_{\hat R}$ that does not affect the invertibility of $\tfrac{1}{2} I+ K+K_{\hat R}$. Hence the Schauder estimates around a non-characteristic point are a direct consequence of the H\"older estimates for the inverse of $\tfrac{1}{2} I+ K+K_{\hat R}$. Even if the result in the present paper, i.e. the Schauder estimates at the boundary on ${\mathbb{H}}^1$, is known since the works of Jerison and by now, thanks to \cite{2022arXiv221012950B}, in the full generality of any Carnot group, we want to emphasize that it was not known that one could achieve such results via the double layer potential. We believe that our method, being independent of all the previous ones, will prove useful for other problems and provide a new point of view on singular integrals in groups. In order to simplify the exposition, we concentrate in the first five sections only on the case of ${\mathbb{H}}^1$, for which the computations are easier. In the last section{~\ref{sc:GHT} we show that the same technique provides the Schauder estimates in the more general setting of $H$-type groups. We emphasize that for the $C^{2,\alpha}$ estimates of the singular operator $K$ (Theorem \ref{th:Kcont}) in the case of ${\mathbb{H}}^1$ we provide a standard proof based on \cite[Lemma 4.4]{GT} in order to make the exposition self-contained, whereas in the case of the $H$-type group ${\mathbb{G}}$ (Theorem \ref{th:KcontH}) we use a deep result by Nagel and Stein \cite{NagelStein79}}. {To be more precise, on the boundary $\partial \Omega$ the H\"older classes are defined by the non-isotropic Folland-Stein H\"older classes $\Gamma^{2,\alpha}(\partial \Omega)$ introduced by Jerison \cite[Section 4]{Jerison1}, see also \cite{NagelStein79,MR657581}.
Roughly speaking, a function belongs to $\Gamma^{2,\alpha}(\partial \Omega)$ if at each scale $\delta$ we approximate the function by a second order polynomial in local coordinates on $\partial \Omega$ with an error that goes as $\delta^{2+\alpha}$; in the Euclidean setting this idea goes back to Campanato \cite{MR167862}. We show that on the plane $\Pi=\{x_1=0\}$ the class $\Gamma^{2,\alpha}(\Pi)$ coincides with the classical sub-Riemannian H\"older space $C^{2,\alpha}(\Pi)$, meaning that the second order horizontal tangential derivatives and the first order vertical derivatives are H\"older continuous with respect to the induced distance on $\Pi$. The control on the first order vertical derivatives is crucial since on $\Pi$ the H\"ormander rank condition is not verified, unlike in \cite{BCC19}, thus the vertical derivatives are not a priori commutators of vector fields that belong to the distribution. To our knowledge, the equivalence of these two classes of functions was known only when instead of the intrinsic plane $\Pi$ we consider the whole group ${\mathbb{G}}$, see \cite[Theorem 5.3]{MR657581}. } {Finally we point out that the Schauder estimates for general $H$-type groups, obtained in Section \ref{sc:GHT}, are not consequences of the results in \cite{BCC19} (but of course follow from the results in \cite{2022arXiv221012950B}); indeed there are several examples of $H$-type groups, different from ${\mathbb{H}}^1$, that do not satisfy the H\"ormander rank condition on $\Pi$, see for instance Example \ref{eq:nothormander}. As is usual in layer potential methods, one needs to have a rather precise formula for the fundamental solution of the operator to be able to conclude. This is the main issue in our technique to deal with general Carnot groups. } The paper is organized as follows. In Section \ref{sc:pre} the Heisenberg group, the fundamental solution for the sub-Laplacian and the H\"older classes are introduced.
Section \ref{sc:dlp} deals with the double layer potential and its jump formulas. In Section \ref{sc:invert} we provide the invertibility of the double layer potential on the intrinsic plane by the reflection technique. In Section \ref{sc:Schest} we show the local Schauder estimates around a non-characteristic point and the global Schauder estimates for bounded domains without characteristic points. Examples of such domains are constructed in \cite{MR4219401}. { Finally in Section \ref{sc:GHT} we show that all the previous results hold in the more general setting of $H$-type groups.} \section{Preliminaries} \label{sc:pre} The first Heisenberg group ${\mathbb{H}}^1$ is an analytic, simply connected $3$-dimensional Lie group such that its Lie algebra $\mathfrak{g}$ admits a stratification \[ \mathfrak{g}=V^1\oplus V^2, \quad [V^1,V^1]=V^2 \quad \text{and} \quad [V^1,V^{2}]=\{0\}. \] The stratification induces a natural notion of degree of a vector field \[ \deg(X)=j \quad \text{whenever} \quad X \in V^j, \] for $j=1,2$. By \cite[Theorem 2.2.18]{BLU} the first Heisenberg group ${\mathbb{H}}^1$ can be identified with the triple $(\mathbb{R}^3, \circ, \delta_{\lambda})$, where $\circ$ is the polynomial group law given by \[ y\circ x =\Bigg(y_1+ x_1, y_{2}+x_{2}, y_3+x_3 + \frac{1}{2} (y_1 x_{2}-y_{2}x_1)\Bigg), \] for any pair of points $x=(x_1,x_2,x_3)$, $y=(y_1,y_{2},y_3)$ in ${\mathbb{R}}^{3}$ and $\{\delta_{\lambda}\}_{\lambda>0}$ is a family of automorphisms of $({\mathbb{R}}^3, \circ)$ such that \[ \delta_{\lambda} (x_1, x_2, x_3)= ( \lambda x_1, \lambda x_2, \lambda^{2} x_3 ). \] The homogeneous dimension $Q$ is given by \[ Q:= \dim(V^1)+ 2 \dim(V^2)= 4 . \] We call the subspace $V^1$ the horizontal distribution and we choose the basis of left invariant vector fields \[ X_1=\partial_{x_1}- \dfrac{x_2}{2} \partial_{x_3}, \qquad X_2=\partial_{x_2}+ \dfrac{x_1}{2} \partial_{x_3}.
\] We denote by $\nabla_{{\mathbb{H}}}$ the horizontal gradient $ \nabla_{{\mathbb{H}}}= (X_1,X_2) $ and by $\nabla=(\partial_1,\partial_2,\partial_3)$ the standard Euclidean gradient. The sub-Laplacian operator is given by \[ \Delta_{{\mathbb{H}}}= X_1^2+ X_2^2= \divv_{{\mathbb{H}}}(\nabla_{{\mathbb{H}}} ), \] where $\divv_{{\mathbb{H}}}(\phi)= X_1(\phi_1)+X_2(\phi_2)$ for $\phi=\phi_1 X_1+\phi_2 X_2 \in V^{1}$. It is well known (see \cite[Chapter 5]{BLU}) that the sub-Laplacian admits a unique fundamental solution $\hat{\Gamma} \in C^{\infty}({\mathbb{R}}^3 \smallsetminus \{0\})$, $\hat{\Gamma} \in L_{\text{loc}}^1({\mathbb{R}}^3)$, $\hat{\Gamma}(x) \to 0$ when $x$ tends to infinity and such that \[ \int_{{\mathbb{R}}^3} \hat{\Gamma}(x) \, \Delta_{{\mathbb{H}}} \varphi(x) \, dx = -\varphi(0) \quad \forall \varphi \in C^{\infty}({\mathbb{R}}^3). \] \begin{definition} We call Gauge norm on ${\mathbb{H}}^1$ a homogeneous symmetric norm $d$ smooth out of the origin and satisfying \[ \Delta_{{\mathbb{H}}} (d(x)^{2-Q})=0 \quad \forall x \ne 0 . \] \end{definition} Following \cite{MR315267,MR1219650} a Gauge norm in ${\mathbb{H}}^1$ is given by \[ |x|_{{\mathbb{H}}}=((x_1^2+x_2^2)^2+ 16 x_3^2)^{\tfrac{1}{4}}. \] Therefore we have \[ \hat{\Gamma}(x)=(2 \pi)^{-1} |x|_{{\mathbb{H}}}^{2-Q}= \tfrac{1}{2 \pi \Big( \big( x_1^2 + x_2^2\big)^2 + 16 x_3^2 \Big)^{1/2}}. \] Finally we define the fundamental solution $\Gamma(x, y) = \hat \Gamma(y^{-1} \circ x)$ that is given by \begin{equation} \label{eq:fundsol} \Gamma(x, y)= \tfrac{1}{ 2 \pi \Big( \big( (x_1-y_1)^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 - \frac{1}{2} (y_1 x_{2}-y_{2}x_1) \big)^2 \Big)^{1/2}} \end{equation} and the Gauge distance $d(x,y)=|y^{-1} \circ x|_{{\mathbb{H}}}$ for all $x,y \in \mathbb{H}^1$. The Gauge ball centered at $x \in {\mathbb{H}}^1 $ of radius $r>0$ is given by $B_r(x):=\{y \in \mathbb{H}^1 \ : \ d(x,y)< r \}$.
\begin{definition} Let $0<\alpha<1$, $\Omega\subset\mathbb{H}^1$ be an open set and $f: \Omega \rightarrow {\mathbb{R}}$ be a function on $\Omega$. We say that $f \in C^{\alpha}(\Omega)$ if there exists a positive constant $M$ such that for every $x,y$ in $\Omega$ \[ |f(x)-f(y)|<M\ d^{\alpha}(x,y). \] We set \[ \lVert f \rVert_{C^{\alpha}({\Omega})}=\sup_{x\ne y}\dfrac{|f(x)-f(y)|}{d^{\alpha}(x,y)}+ \sup_{x \in \Omega} |f(x)|. \] Iterating this definition, when $k\geq1$ we say that $f \in C^{k,\alpha}(\Omega)$ if $X_i f \in C^{k-1,\alpha}(\Omega)$ for all $i=1,2$. \end{definition} \subsection{Smooth domains, characteristic points and H\"older classes on the boundary} \begin{definition} \label{def:smoothboundary} The set $\Omega$ is called a domain of class $C^{\infty}$ if for each $\xi \in \partial \Omega$ there exist a neighborhood $U_{\xi}$ and a function $\psi_{\xi} \in C^{\infty} (U_{\xi})$ such that \begin{align*} U_{\xi} \cap \Omega &=\{ x \in U_{\xi} \ : \ \psi_{\xi}(x)<0 \}\\ U_{\xi} \cap \partial \Omega&=\{ x \in U_{\xi} \ : \ \psi_{\xi}(x)=0 \}. \end{align*} We say that $\xi$ in $\partial \Omega$ is a \textit{characteristic point} if $\nabla_{\mathbb{H}} \psi_{\xi} (\xi)=0$. \end{definition} Let $\xi, \eta$ be in $\partial \Omega$; we define the induced distance $\hat{d}$ on $\partial \Omega$ by \[ \hat{d}(\xi,\eta):=d(\xi,\eta), \] where $d$ is the Gauge distance in $\mathbb{H}^1$ and for $r>0$ we call $\hat B_r( \xi)$ the induced ball given by \[ \hat B_r( \xi)=B_r(\xi) \cap \partial \Omega, \] where $B_r(\xi)$ is a Gauge ball in ${\mathbb{H}}^1$ centered at $\xi$. \begin{definition} Let $0<\alpha<1$. We say that a continuous function $f$ belongs to $C^{\alpha}(\partial \Omega)$ if there exists a constant $C$ such that \[ \dfrac{|f(\xi)-f(\eta)|}{ \hat{d}(\xi, \eta)^{\alpha}} < C, \] for each $\xi, \eta$ in $\partial \Omega$.
Then the H\"older semi-norm $[f]_{\alpha}$ is defined by \[ [f]_{\alpha}= \sup_{\substack{\xi ,\eta \in \partial \Omega \\ \xi \neq \eta} } \dfrac{|f(\xi)-f(\eta)|}{ \hat{d}(\xi, \eta)^{\alpha}} \] and the H\"older norm is defined by \[ \|f \| _{\alpha}= [f]_{\alpha}+ \sup_{\xi \in \partial \Omega} |f(\xi)|. \] \end{definition} \begin{definition} \label{def:CKalpha} Let $0<\alpha<1$ and $k \in {\mathbb{N}} \cup \{0\}$. We say that a bounded function $f$ belongs to $\Gamma^{k,\alpha}(\partial \Omega)$ if for each $\xi \in \partial \Omega$ and $\delta>0$ there exist a polynomial $P_{\xi} (\eta)$ of degree $k$ in local coordinates on $\partial \Omega$ and a uniform constant $C$ such that \[ |f(\eta)-P_{\xi}(\eta)| \le C \delta^{k+\alpha}, \qquad \hat{d}(\eta,\xi) <\delta. \] Then the H\"older semi-norm $[f]_{k,\alpha}$ is the least possible constant $C$ above plus the supremum of the coefficients of $P_{\xi}$, and the H\"older norm is defined by \[ \|f \| _{\Gamma^{k,\alpha}}=[f]_{k,\alpha}+ \sup_{\xi \in \partial \Omega} |f(\xi)|. \] \end{definition} These classes are the non-isotropic Folland-Stein H\"older classes (see \cite{MR657581} or Section 9 of \cite{NagelStein79}) introduced by Jerison \cite[Section 4]{Jerison1}. \subsection{Polynomials in local coordinates far from the characteristic points} Let $\xi$ in $\partial \Omega$ be a non-characteristic point and $\psi$ be the defining function of the boundary, see Definition \ref{def:smoothboundary}. Then the horizontal normal to $\partial \Omega$ is defined by \[ \nu_h= \dfrac{\nabla_{{\mathbb{H}}} \psi }{|\nabla_{{\mathbb{H}}} \psi|}. \] Then there exists an orthonormal frame $Z, S$ tangent to $\partial \Omega $ such that $\deg(Z)=1$ and $\deg(S)=2$.
Then we consider the exponential map \begin{equation} \label{eq:expexp} \begin{aligned} (x_1,x_2,x_3)=\exp(v_1 \nu_h ) \, \circ \, \exp \left( v_2 Z\right) \, \circ \, \exp \left( v_3 S \right) (\xi). \end{aligned} \end{equation} On the neighborhood $U \subset {\mathbb{H}}^1$ of $\xi$ we consider the local coordinates $v=(v_1,v_2,v_3)$ given by the inverse map $\Xi_{\xi}$ of the exponential map defined in \eqref{eq:expexp}. In the literature, these coordinates are commonly called exponential or canonical coordinates of the second kind, see \cite{Bellaiche}. In these new coordinates $\Xi_{\xi}(U\cap \partial \Omega) \subset \{v_1=0\}$. Then we set $\hat{v}=(v_2,v_3)$ and, for $J=(j_2,j_3)$, \[ \deg(J)=j_2+2 \cdot j_3 \] and \[ \hat{v}^{J}=v_2^{j_2} v_3^{j_3}. \] A polynomial of order $k$ in local coordinates on the boundary is given by \[ P(\hat{v})=\sum_{\deg(J) \le k} a_J \hat{v}^J, \] where $a_J$ are constants. Hence, assuming that $\eta= \Xi_{\xi}^{-1} (\hat{v})$, we set $P_{\xi}(\eta):=P(\hat{v})$ in Definition \ref{def:CKalpha}. \section{Double layer potential} \label{sc:dlp} Let $\Omega$ be a bounded smooth domain of ${\mathbb{H}}^1$. We consider the following Dirichlet problem \begin{equation} \begin{cases} \Delta_{\mathbb{H}} u=0 & \text{in} \quad \Omega \\ u=g & \text{on} \quad \partial \Omega. \end{cases} \label{DP} \end{equation} Let $h_x(y)$ be a harmonic function in $\Omega$ such that \begin{equation}\label{harmonic} h_x (\cdot) |_{\partial \Omega}= \Gamma(\cdot,x)|_{\partial \Omega}, \end{equation} then the Green function $G(x,y)$ is given by \[ G(x,y)=\Gamma(x,y)-h_x(y).
\] Under the assumptions ($\partial \Omega$ smooth, negligible surface measure of the singular set and uniform exterior ball property) by \cite{UL97} a solution of \eqref{DP} is \begin{equation} \label{eq:u} u(x)=\int_{\partial \Omega} g(y) \escpr{\nabla_{{\mathbb{H}}}^{y}G(x,y), \nu(y)} d\sigma(y), \end{equation} where $\nu(y)$ is the unit normal at the point $y \in \partial \Omega$. Following \cite{KC94} we consider the approximation of \eqref{eq:u} given by \begin{equation} \label{eq:w} \mathcal{D}(g)(x)= \int_{\partial \Omega} g(y) \escpr{\nabla_{{\mathbb{H}}}^y \Gamma (x,y), \nu(y)} d\sigma(y), \end{equation} proposed by C.~Neumann in the classical setting. Clearly we have that $\Delta_{{\mathbb{H}}} \mathcal{D}(g)(x)=0$ for each $x \in \Omega$. \subsection{The jump formulas across an intrinsic plane} Let $\Omega=\{x_1>0\} \subset {\mathbb{H}}^1$ and $\partial \Omega =\{x_1=0\}= \Pi$. Then the induced distance $\hat d$ is given by \begin{equation} \hat d(\hat x,\hat y)=((x_2-y_2)^4+ 16 (x_3-y_3)^2)^{\tfrac{1}{4}} \end{equation} for each $\hat x =(x_2,x_3)$ and $\hat y =(y_2,y_3)$ in $\Pi$ and the induced ball is given by \[ \hat B_r(\hat x)=\Big\{ (y_2,y_3) \in \Pi \ : \ \hat d(\hat x,\hat y)<r\Big\}. \] \begin{proposition} \label{prop:K1Kplane} Let $\Omega=\{x_1>0\} \subset {\mathbb{H}}^1$ and $\partial \Omega =\{x_1=0\}= \Pi$.
Then the double layer potential $ \mathcal{D}(g)(x)$ is given by \begin{equation} \label{eq:DgPi} \mathcal{D}(g)(x)= K_1(g)(x) + K(g)(x) \end{equation} for $x\in \Omega$, where $K_1$ and $K$ are operators with kernels respectively $k_1$ and $k$ defined as \begin{equation}\label{k1}k_1(x,\hat y)= \dfrac{1}{\pi} \frac{\big( x_1^2 + (x_2-y_2)^2\big) x_1 } {\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 + \frac{1}{2}y_{2}x_1 \big)^2 \Big)^{3/2}} \end{equation} \begin{equation}\label{k}k(x,\hat y)= -\dfrac{4}{\pi}\frac{ (x_2-y_2)(x_3 - y_3 - \frac{1}{2}y_{2}x_1 )} {\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 + \frac{1}{2} y_{2}x_1 \big)^2 \Big)^{3/2}}, \end{equation} where $\hat y=(y_2,y_3)$ and $(0,\hat y) \in \Pi$. \end{proposition} \begin{proof} By left invariance, an explicit computation shows that the derivative of \eqref{eq:fundsol} with respect to $X^x_1$ is given by \begin{align*} &X^x_{1}(\Gamma(x, y)) = (X_{v_1}\Gamma)(y^{-1} \circ x) =\\ &= -\frac{ \big( (x_1-y_1)^2 + (x_2-y_2)^2\big) (x_1-y_1) - 4 (x_2-y_2)\big(x_3 - y_3 - \frac{1}{2} (y_1 x_{2}-y_{2}x_1)\big)} { \pi \Big( \big( (x_1-y_1)^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 - \frac{1}{2} (y_1 x_{2}-y_{2}x_1)\big)^2 \Big)^{3/2}}. \end{align*} Since $\Gamma$ is symmetric we also have \begin{align*} &X^y_{1} \Gamma(x,y) =\escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x,y), X_1^y}\\ &= -\frac{ \big( (x_1-y_1)^2 + (x_2-y_2)^2\big) (y_1-x_1) - 4 (y_2-x_2)\big(y_3 - x_3 - \frac{1}{2} (x_1 y_{2}-x_{2}y_1)\big)} { \pi \Big( \big( (x_1-y_1)^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 - \frac{1}{2} (y_1 x_{2}-y_{2}x_1)\big)^2 \Big)^{3/2}}.
\end{align*} Evaluating this derivative over the plane $\Pi=\{y_1=0\}$ for $x_1>0$ we get \begin{equation} \label{eq:X1y} \begin{aligned} X^y_{1} \Gamma(x,(0,y_2,y_3)) &= \frac{ \big( x_1^2 + (x_2-y_2)^2\big) x_1 - 4 (x_2-y_2)\big(y_3 - x_3 - \frac{1}{2} x_1 y_{2}\big)} { \pi \Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 + \frac{1}{2} y_{2}x_1\big)^2 \Big)^{3/2}}\\ &=k_1(x,\hat y)+k(x,\hat y) \end{aligned} \end{equation} where $k_1$ and $k$ are defined in \eqref{k1} and \eqref{k}. Integrating \eqref{eq:X1y} over the plane $\Pi$ and assuming $y_1=0$ and $x_1>0$ we get \eqref{eq:DgPi}. \end{proof} \begin{remark} \label{rk:int=1} Notice that for each $r>0$ it holds \begin{equation} \label{eq:intbound1} \int_{\partial B_{r}(x)} \escpr{\nabla_{{\mathbb{H}}} \Gamma(x,y) , \nu(y)} d \sigma(y)=1. \end{equation} Indeed, by the mean value formula for each open subset $O\subset {\mathbb{H}}^1$ such that $x \in O$, for each $r>0$ such that $B_r(x) \subset O$ and for each harmonic function $\psi \in \mathcal{H}(O)$ we have \[ \psi(x)= \int_{\partial B_{r}(x)} \psi(y) \escpr{\nabla_{{\mathbb{H}}} \Gamma(x,y) , \nu(y)} d \sigma(y). \] In particular if we consider $\psi \equiv 1$ in $O$ we obtain \eqref{eq:intbound1}. \end{remark} \begin{lemma}\label{ga} Let $x_0=(0,\hat x_0) \in \Pi$, $R>0$ and $\hat B_R(\hat x_0)= \{\hat y \in \Pi \ : \ \hat d (\hat x_0, \hat y)\leqslant R\} \subset \Pi$. Then the integral \[ \int_{ \hat B_R(\hat x_0)} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma (x,(0, \hat y)) , X_1^y(y)} d \hat y \] is well defined if the first component $x_1$ of $x$ satisfies $x_1>0$, and it tends to $1/2$ as $x\to x_0$. \end{lemma} \begin{proof} Let $\{x^n\}_{n \in {\mathbb{N}}}$ be a sequence of points in $\Omega=\{x_1>0\}$ converging to $x_0$ as $n \to +\infty$ and let $\varepsilon_n>0$ be small enough such that $B(x^n,\varepsilon_n) \subset \Omega$ for each $n \in {\mathbb{N}}$.
Then we consider the bounded domain \[ \Omega_{n}^R= \{x_1 >0\} \cap B_R(x_0) \smallsetminus B(x^n,\varepsilon_n). \] By the divergence theorem for each $n \in {\mathbb{N}} $ we have \begin{equation} \label{eq:2ballbound} \begin{aligned} 0&= \int_{\Omega_{n}^R} \Delta_{{\mathbb{H}}} \Gamma (x^n, y) dy= \int_{\partial \Omega_n^R} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu_h(y)} d\sigma(y)\\ &= \int_{\partial B_R (x_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu (y)} d\sigma(y)\\ & \quad + \int_{\Pi \cap B_R(x_0)} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu (y)} d\sigma(y)\\ &\quad -\int_{\partial B(x^n,\varepsilon_n)} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu(y)} d\sigma(y). \end{aligned} \end{equation} For each $n \in {\mathbb{N}}$ the ball $B(x^n,\varepsilon_n)$ is contained in $\{x_1>0\}$, thus by Remark \ref{rk:int=1} we get \[ \int_{\partial B(x^n,\varepsilon_n)} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu_h(y)} d\sigma(y)=1. \] Noticing that $\Pi \cap B_R(x_0)= \hat B_R( \hat x_0)$ and rearranging terms in \eqref{eq:2ballbound} we get \[ \int_{\hat B_R( \hat x_0)} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu (y)} d\sigma(y)= 1- \int_{\partial B_R (x_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x^n,y), \nu (y)} d\sigma(y). \] Letting $n \to + \infty$, the left hand side of the previous equality converges to \[ 1- \int_{\partial B_R (x_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{H}}}^y \Gamma(x_0,y), \nu (y)} d\sigma(y)=\dfrac{1}{2}, \] since we only consider half of the integral in \eqref{eq:intbound1}. \end{proof} The operator $K_1$ is totally degenerate when restricted to $\Pi$, so that we cannot restrict it to functions defined on $\Pi$; however we can compute the limit from the interior of the set. \begin{proposition}\label{frompositive} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and $x_0$ be a point in $\Pi$.
For $x \in {\mathbb{H}}^1 \smallsetminus \Pi$ we consider \[ K_1(g) (x) = \int_{\Pi} k_1(x,y) g(y) d\sigma(y). \] Then we have \begin{align*} &K_1(g) (x) \to \frac{1}{2} g(x_0) \quad \text{ as } x\to x_0^+,\\ &K_1(g) (x) \to -\frac{1}{2} g(x_0) \quad \text{ as } x\to x_0^-, \end{align*} so that $(K_1)^+=\tfrac{1}{2}\text{Id}$ when restricted to $\Pi$ and $(K_1)^-= - \frac{1}{2} \text{Id}$ when restricted to $\Pi$. \end{proposition} \begin{proof} Let $R>0$ be big enough such that $\text{supp}(g) \subset \hat B_R (\hat x_0)$. Let us assume that $x=(x_1, \hat x)$, $x_1>0$ and \begin{align*} K_1(g) (x) =& \int_{\Pi} k_1(x,y) g(y) d\sigma(y) =\int_{\hat B_R (\hat x_0)} k_1(x,y) (g(y) - g(x))d\sigma(y)\\ & + g(x) \int_{\hat B_R (\hat x_0)} k_1(x,y) d\sigma(y). \end{align*} On one hand we have \begin{align*} \left|\int_{\hat B_R (\hat x_0)} k_1(x,y) (g(y) - g(x)) d\sigma(y) \right|&\leqslant L \int_{\hat B_R (\hat x_0)} k_1(x,y) d(y, x)d\sigma(y)\\ &\leqslant L \int_{\hat B_R (\hat x_0)} \sqrt{x_1} d(y, x)^{-5/2}d\sigma(y) \to 0, \end{align*} as $x \to x_0$, where $L$ is the Lipschitz constant of $g$. On the other hand by Lemma \ref{ga} we have \begin{align*} &g(x) \int_{\hat B_R (\hat x_0)} k_1(x,y) d\sigma(y) =g(x)\int_{\hat B_R (\hat x_0)} (k_1(x,y)+ k(x,y) ) d\sigma(y) +\\ & \, -g(x) \int_{\hat B_R (\hat x_0)} k(x,y) d\sigma(y) \xrightarrow[x \to x_0^+] {} \frac{1}{2} g(x_0)- g(x_0) \int_{\hat B_R (\hat x_0)} k(x_0,y) d\sigma(y)=\frac{1}{2} g(x_0) \end{align*} by symmetry of the kernel $k$ restricted to $\Pi$, see Lemma \ref{lm:sypol2}. Finally when $x_1<0$ the kernel $k_1$ defined in \eqref{k1} has the same sign as $x_1$, then $-k_1$ and $-x_1$ are positive and by Lemma \ref{ga} we have \begin{align*} &-g(x) \int_{\hat B_R (\hat x_0)} -k_1(x,y) d\sigma(y) =-g(x)\int_{\hat B_R (\hat x_0)} (-k_1(x,y)+ k(x,y) ) d\sigma(y) +\\ & \quad -g(x) \int_{\hat B_R (\hat x_0)} k(x,y) d\sigma(y) \xrightarrow[(-x_1,x_2,x_3) \to x_0^+] {} -\frac{1}{2} g(x_0).
\qedhere \end{align*} \end{proof} \begin{definition} \label{def:kk} As $x \to x_0^{\pm}$ the kernel $k(x,\hat y)$ defined in \eqref{k} converges to the convolution kernel \begin{equation} \label{eq:kk} k(\hat x- \hat y )=- \dfrac{4}{\pi} \frac{ (x_2-y_2) \, (x_3 - y_3) } {\Big( \big( x_2-y_2\big)^4 + 16 \big(x_3 - y_3 \big)^2 \Big)^{3/2}}. \end{equation} Thus, if $g$ is a continuous compactly supported function in $\Pi$, the operator $K(g)$ converges to \[ \int_{\Pi} k(\hat x- \hat y ) g(\hat y) d \sigma(y), \] that with an abuse of notation we also denote by $K(g)$. \end{definition} Hence the analogue of \cite[Theorem 4.4]{MR3600064} in this setting is the following \begin{theorem} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and $x_0$ be a point in $\Pi$. Let $\mathcal{D}(g)$ be the double layer potential defined in \eqref{eq:DgPi}, then the limits of $\mathcal{D}(g)(x)$ when $x$ tends to $x_0^+$ for $x \in \{x_1>0\}$ and when $x$ tends to $x_0^-$ for $x \in \{x_1<0\}$ exist. Moreover the limits verify the following relations \begin{align*} &\lim_{ x \to x_0^+} \mathcal{D}(g)(x)= \tfrac{1}{2} g(x_0)+ Kg(x_0) & \text{if} \quad x \in \{x_1>0\}\\ & \lim_{ x \to x_0^-} \mathcal{D}(g)(x)= -\tfrac{1}{2} g(x_0)+ Kg(x_0) & \text{if} \quad x \in \{x_1<0\}, \end{align*} where $K$ is the operator with convolution kernel $k$ defined in \eqref{eq:kk}. \end{theorem} \begin{proof} By Proposition \ref{frompositive} and Definition \ref{def:kk} we obtain \[ \mathcal{D}(g)(x) \to (\tfrac{1}{2} I +K)(g)(x_0) \] in the limit from positive values of $x_1$, while \[ \mathcal{D}(g)(x) \to (- \tfrac{1}{2} I +K)(g)(x_0) \] in the limit from negative values of $x_1$.
\end{proof} \section{Invertibility of the double layer potential on the intrinsic plane} \label{sc:invert} \subsection{The $C^{2,\alpha}$ estimates of $K$} \label{sc:c2alphaestimete} \begin{definition}[Classical H\"older class $C^{1,\alpha}$] Let $r \in {\mathbb{R}}$; we say that a function $g$ defined on the boundary $\Pi_r=\{x=(r, x_2, x_3)\}$ is of class $C^{1, \alpha}(\Pi_r)$ if and only if $\partial_2 g$ is a continuous function and there exists $C>0$ such that \[ |\partial_{2}g(\hat y) - \partial_{2}g(\hat x)| \le C \tilde{d}(\hat x,\hat y)^{\alpha} \] for each $\hat x=(x_2,x_3)$ and $\hat y=(y_2,y_3)$ in $ \Pi_r$ and where \begin{equation} \label{eq:distancetilde} \tilde d(\hat x,\hat y)=((x_2-y_2)^4+ 16 (x_3-y_3)^2)^{\tfrac{1}{4}}. \end{equation} \end{definition} \begin{remark} Notice that the induced distance \[ \hat d(\hat x,\hat y)=((x_2-y_2)^4+ 16 (x_3-y_3 - \tfrac{1}{2} r (x_2-y_2))^2)^{\tfrac{1}{4}} \] on $\Pi_r$, considered in Definition \ref{def:CKalpha}, is different from $\tilde{d}$. They coincide only when $r=0$, i.e. $\Pi_0=\Pi$. \end{remark} In addition, we set \[ \|g\|_{1, \alpha} = \|g\|_{1} +\sup_{\hat x, \hat y \in \Pi_r} \frac{|\partial_{2}g(\hat y) - \partial_{2}g(\hat x)|}{\tilde d(\hat x,\hat y)^{\alpha}} \] where \[ \|g\|_{1}=\sup_{\hat x \in \Pi_r} |g(\hat x)|+ \sup_{\hat x \in \Pi_r} |\partial_2 g(\hat x)|. \] \begin{definition}[Classical H\"older classes $C^{2,\alpha}$] Let $r \in {\mathbb{R}}$; we say that a function $g$ defined on the boundary $\Pi_r=\{x=(r, x_2, x_3)\}$ is of class $C^{2, \alpha}(\Pi_r)$ if and only if $\partial^2_2 g$ and $\partial_3 g$ are continuous functions and there exists $C>0$ such that \[ |\partial^2_{2}g(\hat y) - \partial^2_{2}g(\hat x)| \le C \tilde{d}(\hat x,\hat y)^{\alpha} \] and \[ |\partial_{3}g(\hat y) - \partial_{3}g(\hat x)| \le C \tilde{d}(\hat x,\hat y)^{\alpha} \] for each $\hat x=(x_2,x_3)$ and $\hat y=(y_2,y_3)$ in $\Pi_r$.
In addition, we set $$\|g\|_{2, \alpha} =\|g\|_{2}+ [\partial_{3}g]_{\alpha}+[\partial_{2}^2g]_{\alpha} $$ where $$[\partial_{3}g]_{\alpha}=\sup_{\hat x, \hat y \in \Pi_r} \frac{|\partial_{3}g(\hat y) - \partial_{3}g(\hat x)|}{\tilde d(\hat x,\hat y)^{\alpha}},$$ $$[\partial_{2}^2 g]_{\alpha}=\sup_{\hat x, \hat y \in \Pi_r} \frac{|\partial_{2}^2g(\hat y) - \partial_{2}^2g(\hat x)|}{\tilde d(\hat x,\hat y)^{\alpha}} $$ and $$\|g\|_{2} = \|g\|_{1}+ \sup_{\hat x \in \Pi_r} |\partial_2^2 g(\hat x)|+ \sup_{\hat x \in \Pi_r} |\partial_3 g(\hat x)|.$$ \end{definition} \begin{proposition} \label{Pr:C=Gamma} A function $f$ belongs to $C^{2,\alpha}(\Pi_0)$ if and only if $f$ belongs to $\Gamma^{2,\alpha}(\Pi_0)$, namely for each $\hat x \in \Pi_0$, $\rho>0$ there exists a polynomial $P_{\hat x}(\hat y)=a_{\hat x} + b_{\hat x} v_2+ c_{\hat x} v_2^2+ d_{\hat x} v_3$ with ${\hat v}={\hat y}-{\hat x}$ and $C>0$ such that \begin{equation} \label{eq:CTE} |f({\hat y})-P_{{\hat x}}({\hat y})|<C\rho^{2+\alpha} \end{equation} for each ${\hat y} \in B_{\rho}({\hat x})$ (see Definition \ref{def:CKalpha}). \end{proposition} \begin{proof} Assume that $f \in C^{2,\alpha}(\Pi_0)$. Let $$P_{\hat x} (\hat y)= f(\hat x)+\partial_2 f(\hat x) (y_2-x_2)+\dfrac{ \partial_2^2 f(\hat x)}{2} (y_2-x_2)^2+ \partial_3 f(\hat x) (y_3-x_3).$$ By the Lagrange mean value theorem for the function $t\to f(y_2, x_3+t(y_3-x_3))$ with $t \in [0,1]$ we get \[ f(y_2,y_3)=f(y_2,x_3)+ \partial_3 f(\xi)(y_3-x_3) \] where $ \xi=(y_2,x_3+ \theta (y_3-x_3))$ for $\theta \in (0,1)$. Moreover, by Taylor's formula with Lagrange remainder for the function $t\to f(x_2+t(y_2-x_2), x_3)$ with $t \in [0,1]$ we get \[ f(y_2,x_3)=f(x_2,x_3)+ \partial_2 f(\hat x)(y_2-x_2) + \dfrac{\partial_2^2 f(\eta)}{2}(y_2-x_2)^2 \] where $ \eta=(x_2+\theta (y_2-x_2),x_3)$ for $\theta \in (0,1)$.
Then we get \begin{align*} f(y_2,y_3)&=f(x_2,x_3)+ \partial_2 f(\hat x)(y_2-x_2) + \dfrac{\partial_2^2 f(\eta)}{2}(y_2-x_2)^2+ \partial_3 f(\xi)(y_3-x_3)\\ &=P_{\hat x} (\hat y)+\dfrac{\partial_2^2 f(\eta)-\partial_2^2 f(\hat x)}{2}(y_2-x_2)^2+ (\partial_3 f(\xi)-\partial_3 f(\hat x))(y_3-x_3). \end{align*} Therefore \begin{align*} |f(y_2,y_3)- P_{\hat x} (\hat y)|&\le \dfrac{|\partial_2^2 f(\eta)-\partial_2^2 f(\hat x)|}{2}(y_2-x_2)^2+ |\partial_3 f(\xi)-\partial_3 f(\hat x)| \, |y_3-x_3|\\ &\le C \tilde{d}(\eta,\hat x)^{\alpha} \tilde{d}(\hat y,\hat x)^2+ C\tilde{d}(\xi,\hat x)^{\alpha} \tilde{d}(\hat y,\hat x)^2 \le C \tilde{d}(\hat x, \hat y)^{2+\alpha}. \end{align*} Now, for any fixed $\hat x \in \Pi_0$ and $\rho > 0$, taking $\hat y \in B_{\rho} (\hat x)$, clearly since $ \tilde{d}(\hat x, \hat y)^{2+\alpha} < \rho^{2+\alpha}$ we get \[ |f(\hat y)- P_{\hat x} (\hat y)|< C \rho^{2+\alpha}. \] For the reverse implication we set \[ f_\rho({\hat x})= \frac{f(\delta_{\rho} ({\hat x}))}{\rho^2}, \] where $\delta_\rho(\hat x)=(\rho x_2,\rho^2 x_3)$. Let $\hat x$, $\hat y$ be two points at distance $\rho$ apart; by Remark \ref{rk:midpoint} there exists $\hat \xi$ such that $\tilde{d}({\hat x},\hat \xi),\tilde{d}({\hat y},\hat \xi) < \frac{\sqrt{3}}{2} \rho $. Then after a translation of $-\hat \xi$, we have $B_{\rho/2}=B_{\rho/2} (0) \subset B_{\sqrt{3}\rho}(\hat x), B_{\sqrt{3}\rho} (\hat y) $.
Let \begin{align*} \|P_{\hat x, \rho/2}- P_{\hat y, \rho/2} \|_{L^{\infty}(B_1)}&\le \|f_{\rho/2}-P_{\hat x, \rho/2}\|_{L^{\infty}(B_1)}+ \|f_{\rho/2}- P_{\hat y, \rho/2} \|_{L^{\infty}(B_1)}\\ &=\frac{4}{\rho^2} \sup_{\hat v \in B_{\rho/2}} |f(\hat v)- P_{\hat x}(\hat v)|+ \frac{4}{\rho^2} \sup_{\hat v \in B_{\rho/2}} |f(\hat v)- P_{\hat y}(\hat v)|\\ &\le \frac{4}{\rho^2} \sup_{\hat v \in B_{\sqrt{3}\rho}(\hat x)} |f(\hat v)- P_{\hat x}(\hat v)|+ \frac{4}{\rho^2} \sup_{\hat v \in B_{\sqrt{3} \rho}(\hat y)} |f(\hat v)- P_{\hat y}(\hat v)|\\ &\le 8 (3)^{1+\alpha/2} C \rho^{\alpha}. \end{align*} Notice that \[ (P_{\hat x, \rho/2} - P_{\hat y, \rho/2}) (\hat v)=\dfrac{4}{\rho^2}[ (a_x-a_y)+ (b_x-b_y) \rho v_2 + (c_x- c_y) \rho^2 v_2^2 +(d_x-d_y) \rho^2 v_3]. \] Then by Lemma \ref{lm:abcd} we get \begin{equation} \lambdabel{eq:cdest} \begin{aligned} |a_{\hat x}-a_{\hat y}| \le 2 (3)^{1+\alpha/2} C \rho^{2+\alpha} \quad &\text{and} \quad |b_{\hat x}-b_{\hat y}| \le 4 (3)^{1+\alpha/2} C \rho^{1+\alpha}\\ |c_{\hat x}-c_{\hat y}| \le 4 (3)^{1+\alpha/2} C \rho^{\alpha} \quad &\text{and} \quad |d_{\hat x}-d_{\hat y}| \le 4 (3)^{1+\alpha/2} C \rho^{\alpha}. \end{aligned} \end{equation} By assumption \eqref{eq:CTE} we easily get that $a_{\hat x}=f({\hat x})$, $f$ is continuous, $\partial_2 f({\hat x})=b_{\hat x}$, $\partial_3 f({\hat x})=d_{\hat x}$. Then by \eqref{eq:cdest} we obtain that $\partial_2 f,\partial_3 f$ are continuous and $\partial_3 f$ is $C^{\alpha}$ . Moreover, setting $e_2=(1,0)$ by \eqref{eq:CTE} we have \[ (f({\hat x}+(h+s)e_2)- f({\hat x}+h e_2))-(f({\hat x}+s e_2)-f({\hat x}))=2c_{\hat x} h s+O(s^{2+\alpha})+O(h^{2+\alpha}). \] Then there exists \[ \lim_{h\to 0} \lim_{s\to 0} \frac{1}{h} \left( \frac{f({\hat x}+(h+s)e_2)- f({\hat x}+h e_2)}{s}-\frac{f({\hat x}+s e_2)-f({\hat x})}{s} \right)=2c_{\hat x}. 
\] On the other hand, letting $s\to 0$ in the previous limit we gain that \[ \partial_2^2 f({\hat x})=\lim_{h \to 0 } \frac{\partial_2 f({\hat x}+he_2)-\partial_2 f({\hat x})}{h}=2 c_{\hat x}. \] Finally, by \eqref{eq:cdest} we obtain that $|\partial_2^2 f({\hat x})-\partial_2^2 f({\hat y})|\le 8 (3)^{1+\alpha/2} C \tilde{d}({\hat x},{\hat y})^{\alpha}$. \end{proof} \begin{remark} \lambdabel{rk:midpoint} Given two points ${\hat x},{\hat y} \in \Pi$ such that $\rho=\tilde{d}({\hat x},{\hat y})$ then there exists $\hat \xi=(\frac{x_2+y_2}{2}, \frac{x_3+3y_3}{4} )$ such that $\tilde{d}(\hat \xi,{\hat y})=\frac{\rho}{2} <\frac{\sqrt{3}}{2} \rho $ and $\tilde{d}(\hat \xi,{\hat x}) <\frac{\sqrt{3}}{2} \rho$. Moreover if ${\hat x},{\hat y}$ belongs to $B_R({\hat x}_0)$ for ${\hat x}_0 \in \Pi$ then $\hat \xi$ in $B_{2R}({\hat x}_0)$. \end{remark} \begin{lemma} \lambdabel{lm:abcd} Let $\hat v \in \Pi$ and $P(\hat v)=a+b v_2+c v_2^2+dv_3$. Assume that there exists $C>0$ such that $\|P\|_{L^\infty(B_1)} \le C$, then $|a|\le C$ and $|b|,|c|,|d| \le 2C$ \begin{proof} Setting $v_2=v_3=0$ we have $|a|\le C$. Let $\varepsilon>0$, if $v_2=0$, $v_3=1/(1+\varepsilon)$ we get $|a+ \frac{d}{1+\varepsilon}| \le C$, thus $|d|\le 2C(1+\varepsilon)$, letting $\varepsilon \to 0 $ we get $|d|\le 2C$. Setting $v_2=\pm 1/(1+\varepsilon)$, $v_3=0$ we obtain \[ \left|\frac{b}{1+\varepsilon}+ \frac{c}{(1+\varepsilon)^2}\right| \le 2C, \qquad \left|\frac{b}{1+\varepsilon}-\frac{c}{(1+\varepsilon)^2}\right| \le 2C. \] Then we have \begin{align*} \frac{|b|}{1+\varepsilon}\le \frac{1}{2} \left( \left|\frac{b}{1+\varepsilon}+ \frac{c}{(1+\varepsilon)^2}\right|+ \left|\frac{b}{1+\varepsilon}- \frac{c}{(1+\varepsilon)^2}\right|\right) \le 2 C\\ \frac{|c|}{1+\varepsilon}\le \frac{1}{2} \left( \left|\frac{b}{1+\varepsilon}+ \frac{c}{(1+\varepsilon)^2}\right|+ \left|\frac{b}{1+\varepsilon}- \frac{c}{(1+\varepsilon)^2}\right|\right) \le 2 C. 
\end{align*} Letting $\varepsilon \to 0$ we get the desired inequalities. \end{proof} \end{lemma} \begin{lemma} \label{lm:sypol2} Let $a_i \in {\mathbb{R}}$ for each $i=1,\ldots,4$, $D_{\hat x} \subset \Pi$ be a set axially symmetric with respect to $y_2=x_2$ and $y_3=x_3$ where $\hat x \in \Pi$. Let $k(\hat x- \hat y)$ be the convolution kernel given by \begin{equation} \label{eq:convkernel} k(\hat x- \hat y )= - \dfrac{4}{\pi} \frac{ (x_2-y_2) \, (x_3 - y_3) } {\Big( \big( x_2-y_2\big)^4 + 16 \big(x_3 - y_3 \big)^2 \Big)^{3/2}}, \end{equation} then we have \begin{equation} \label{eq:kp0} \int_{D_{\hat x}} k(\hat x- \hat y) p (\hat x- \hat y) \, d \hat y=0, \end{equation} for each polynomial \[ p (\hat x- \hat y)=a_1+ a_2 (\hat x_2- \hat y_2)+a_3 (\hat x_3- \hat y_3)+ a_4(\hat x_2- \hat y_2)^2 \] of degree less than or equal to $2$. \end{lemma} \begin{proof} Changing the variable $\hat y_2'=\hat x_2-\hat y_2$ and $\hat y_3'=\hat x_3-\hat y_3$ we get that \eqref{eq:kp0} is equivalent to \begin{align*} a_1 \int_{D_0} k(\hat y) d \, \hat y+ a_2 \int_{D_0} k(\hat y) \hat y_2 d \, \hat y +a_3 \int_{D_0} k(\hat y) \hat y_3 \,d \hat y + a_4 \int_{D_0} k(\hat y) \hat y_2^2 \,d \hat y=0, \end{align*} since the kernel $k$ is odd both in $y_2$ and in $y_3$. Here we denote by $D_0$ the translation of $D_{\hat x}$ to the origin. \end{proof} \begin{lemma} \label{lm:coarea} Let $0<\alpha<1$, $\tilde d(\hat x,\hat y)$ be the distance defined in \eqref{eq:distancetilde} and \[ \hat B_r(\hat x)=\Big\{ (y_2,y_3) \in \Pi \ : \ \tilde d(\hat x,\hat y)<r\Big\} \] be the associated metric ball of radius $r$ and center $\hat x=(x_2,x_3)$. Let $k(\hat x- \hat y )$ be the convolution kernel defined in \eqref{eq:convkernel}. Then there exists a constant $C_2$ such that \begin{equation} \label{eq:intonball} \int_{\hat B_r(\hat x)} |k(\hat x- \hat y )| \tilde{d} (\hat x,\hat y)^{j+\alpha} d \hat y\le C_{2} \, r^{j+\alpha}.
\end{equation} When $j \neq 0$ then \eqref{eq:intonball} holds also for $\alpha=0$. \end{lemma} \begin{proof} By Young's inequality there exists a constant $C_1$ such that \[ |x_2-y_2| |x_3-y_3| \le \dfrac{|x_2-y_2|^3}{3} + \dfrac{2 \, |x_3-y_3| ^{\tfrac{3}{2}}}{3} \le C_1 (|x_2-y_2|^4+|x_3-y_3|^2)^{\tfrac{3}{4}}, \] then we get that \[ |k(\hat x- \hat y )| \leqslant C_1 \tilde d (\hat x , \hat y)^{-3}. \] Then we have \begin{equation*} \begin{aligned} \int_{\hat B_r(\hat x)} |k(\hat x- \hat y )| \tilde{d} (\hat x,\hat y)^{j+\alpha} d \hat y &\leqslant C_1 \int_{\hat B_r(\hat x)} \tilde{d} (\hat x,\hat y)^{j+\alpha-3} d \hat y\\ &= C_1 \int_{\hat B_r(0)} \tilde {d} (0,(v_2,v_3))^{j+\alpha-3} d v_2 \, dv_3 \\ &= C_1 \int_0^r s^{j+\alpha-3} \left( \int_{\partial \hat B_s(0)} \dfrac{1}{|\nabla \tilde {d} |} \ d H^{1} \right) ds\\ &= 3 C_1 |\hat B_1(0)| \int_0^r s^{j+\alpha-1} ds= C_2 \ r^{j+\alpha} \end{aligned} \end{equation*} where $|\hat B_1(0)|$ is the $2$-Lebesgue measure of $\hat B_1(0)$ and $H^{1}$ is the $1$-dimensional Hausdorff measure. In the second to last equality we used \begin{equation} \label{eq:surfacemeasureofball0} 3 r^{2}|\hat B_1(0)|=\int_{\partial \hat B_r(0)} \dfrac{1}{|\nabla \tilde {d} |} \ d H^{1}. \end{equation} Indeed by the coarea formula and using the induced dilations $\hat \delta_r (v_2, v_3)=(r \, v_2, r^2 \, v_3)$ we have \[ r^{3}|\hat B_1(0)|=|\hat \delta_r(\hat B_1(0))|= |\hat B_r(0)|=\int_0^r \left( \int_{\partial \hat B_s(0)} \dfrac{1}{|\nabla \tilde {d} |} \ d H^{1} \right) ds. \] Differentiating this last identity with respect to $r$ we obtain \eqref{eq:surfacemeasureofball0}. \end{proof} \begin{theorem} \label{th:Kcont} Let $k$ be the kernel defined in \eqref{eq:kk}, we set \[ K(f)( \hat x)=\int_{\Pi} k(\hat x-\hat y) f(\hat y) d \hat y \] for each $\hat x \in \Pi$.
Assume that $f \in C^{2,\alpha}(\Pi)$ and $f$ is compactly supported in $\Pi$; then there exists a constant $C$ such that \begin{equation} \label{eq:C2ahe} \| K (f) \|_{C^{2,\alpha}} \le C \| f \|_{C^{2,\alpha}}. \end{equation} \end{theorem} \begin{proof} First of all notice that $k(\hat x,\hat y)$ is a convolution kernel, so that we can write $k(\hat x-\hat y)$ and by Lemma \ref{lm:sypol2}, the kernel $k$ satisfies the cancellation condition, thus $K$ is a well-defined singular integral operator. Moreover, by Lemma \ref{lm:sypol2} we have \[ K(f)(\hat x)= \int_{\Pi \smallsetminus \hat B_1(\hat x)} k(\hat x-\hat y) f(\hat y) d \hat y+ \int_{ \hat B_1(\hat x)} k(\hat x-\hat y) (f(\hat y)- f(\hat x)) d \hat y, \] then, letting $B_R$ be a sufficiently large ball that contains the compact support of $f$ and using Lemma \ref{lm:coarea}, we get \begin{align*} |K(f)(\hat x)|&\le \int_{\Pi \smallsetminus \hat B_1(\hat x)} |k(\hat x-\hat y)| |f({\hat y})| d \hat y+ \| \partial_{2}f\|_{\infty} \int_{ \hat B_1(\hat x)} |k(\hat x-\hat y)| \tilde d(\hat x ,\hat y) d \hat y\\ &\le C_1 \int_{\Pi \smallsetminus \hat B_1(\hat x)} |f({\hat y})| d \hat y+ \|f\|_{C^1} \le \tilde{C} \|f\|_{C^1}.
\end{align*} Similarly we have \begin{align*} \partial_{2} K(f)({\hat x})&=\int_{B_R\smallsetminus B_{1}({\hat x})} k(\hat x-\hat y) \partial_2 f(\hat y) d\hat y+ \int_{ B_{1} ({\hat x})} k({\hat x}-\hat y) (\partial_2 f(\hat y) -\partial_2f({\hat x}) ) d\hat y,\\ \partial_{3} K(f)({\hat x})&=\int_{B_R\smallsetminus B_{1}({\hat x})} k(\hat x-\hat y) \partial_3 f(\hat y) d\hat y+ \int_{ B_{1} ({\hat x})} k({\hat x}-\hat y) (\partial_3 f(\hat y) -\partial_3f({\hat x}) ) d\hat y,\\ \partial_{2}^2 K(f)({\hat x})&=\int_{B_R\smallsetminus B_{1}({\hat x})} k(\hat x-\hat y) \partial_2^2 f(\hat y) d\hat y+ \int_{ B_{1} ({\hat x})} k({\hat x}-\hat y) (\partial_2^2 f(\hat y) -\partial_2^2f({\hat x}) ) d\hat y, \end{align*} then there exists a constant $\tilde{C}$ such that \begin{align*} |\partial_3K(f)(\hat x)|&\le \| \partial_3 f\|_{\infty} \int_{B_R \smallsetminus \hat B_1(\hat x)} |k(\hat x-\hat y)| d \hat y+ \| f\|_{C^{2,\alpha}} \int_{ \hat B_1(\hat x)} |k(\hat x-\hat y)| \tilde d(\hat x ,\hat y)^{\alpha} d \hat y\\ &\le \tilde{C} \|f\|_{C^{2,\alpha}}, \end{align*} $|\partial_2K(f)(\hat x)| \le \tilde{C} \|f\|_{C^{2}}$ and $|\partial_2^2 K(f)(\hat x)| \le \tilde{C} \|f\|_{C^{2,\alpha}}$. Therefore there exists a constant $\tilde{C}_2$ such that \begin{equation} \lambdabel{eq:C2e} \|K(f)\|_{C^2} \le \tilde{C}_2 \| f \|_{C^{2,\alpha}}. \end{equation} Let ${\hat x}_0$ be a point in $\Pi$. Fix $\hat x$ and $\hat z$ in $B_R=B_R({\hat x}_0)$ and let $\delta=\tilde{d}(\hat x, \hat z)$. A direct computation shows that $ k({\hat x}-{\hat y})= \frac{\partial}{\partial y_3} \ell ({\hat x}-{\hat y}) $ where \[ \ell ({\hat x}-{\hat y})=\frac{1}{4\pi} \frac{(x_2-y_2)}{\Big( \big( x_2-y_2\big)^4 + 16 \big(x_3 - y_3 \big)^2 \Big)^{1/2}}. 
\] Notice that there exist a constant $C_3$ such that \begin{equation} \lambdabel{eq:dervker} |\ell ({\hat x}-{\hat y})|\le C_3 \tilde{d}({\hat x},{\hat y})^{-1}, \, |\partial_2\ell ({\hat x}-{\hat y})|\le C_3 \tilde{d}({\hat x},{\hat y})^{-2}, \, |\partial_3\ell ({\hat x}-{\hat y})|\le C_3 \tilde{d}({\hat x},{\hat y})^{-2} \end{equation} Following \cite[Lemma 4.4]{GT}, for each ${\hat x}, {\hat z} \in B_R({\hat x}_0)$ we have \[ \partial_2^2 K(f) ({\hat x})= \int_{B_{8R}} k({\hat x}-{\hat y}) (\partial_2^2 f({\hat y})-\partial_2^2 f({\hat x})) d {\hat y} + \partial_2^2 f({\hat x})\int_{\partial B_{8R}} \ell ({\hat x}-{\hat y}) \nu_3 d \sigma({\hat y}), \] and \[ \partial_2^2 K(f) ({\hat z})= \int_{B_{8R}} k({\hat z}-{\hat y}) (\partial_2^2 f({\hat y})-\partial_2^2 f({\hat z})) d {\hat y} + \partial_2^2 f({\hat z})\int_{\partial B_{8R}} \ell ({\hat z}-{\hat y}) \nu_3 d \sigma({\hat y}), \] where $\nu=(\nu_2,\nu_3)$ is the unit normal to $\partial B_{8R}$. Writing $\delta=\tilde{d}(\hat x, \hat z)$ and letting $\hat \xi$ be the point given by Remark \ref{rk:midpoint} we obtain \begin{align*} \partial_2^2 K(f) ({\hat x})-\partial_2^2 K(f) ({\hat z})=& \partial_2^2 f({\hat x})I_1+(\partial_2^2 f({\hat x})-\partial_2^2 f({\hat z}))I_2+I_3 + I_4\\ &+(\partial_2^2 f({\hat z})-\partial_2^2 f({\hat x}))I_5 + I_6, \end{align*} where \begin{align*} I_1&= \int_{\partial B_{8R}} (\ell ({\hat x}-{\hat y}) -\ell ({\hat z}-{\hat y}) ) \nu_3 d \sigma({\hat y})\\ I_2&= \int_{\partial B_{8R}} \ell ({\hat z}-{\hat y}) \nu_3 d \sigma({\hat y})\\ I_3&= \int_{ B_{\sqrt{3} \delta}(\hat \xi)} k({\hat x}-{\hat y}) (\partial_2^2 f({\hat y})-\partial_2^2 f({\hat x})) d {\hat y}\\ I_4&=\int_{ B_{\sqrt{3}\delta}(\hat \xi)} k({\hat z}-{\hat y}) (\partial_2^2 f({\hat z})-\partial_2^2 f({\hat y})) d {\hat y}\\ I_5&=\int_{ B_{8R}\smallsetminus B_{\sqrt{3}\delta}(\hat \xi)} k({\hat x},{\hat y}) d {\hat y}\\ I_6&=\int_{ B_{8R}\smallsetminus B_{\sqrt{3}\delta}(\hat \xi)} (k({\hat x},{\hat y}) - 
k({\hat z},{\hat y}) )(\partial_2^2 f({\hat y})-\partial_2^2 f({\hat z})) ) d {\hat y} \end{align*} Let $\gamma(t)=(r \sqrt{\cos(t)},\frac{r^2 \sin(t)}{4})$ for $t \in (-\frac{\pi}{2}, \frac{\pi}{2})$ be a parametrization of $\partial B_r$, where $\|v\|=\tilde{d}(0,v)$. Then we have \begin{equation} \lambdabel{eq:partialBr} H^1(\partial B_r)=2 \int_{-\frac{\pi}{2}}^{\frac{\pi}{2}} \| \dot{\gamma}(t)\| dt= \frac{r}{2} \int_{-\frac{\pi}{2}}^{\frac{\pi}{2}} \frac{ \sqrt{\sin(t)^4+16 \cos(t)^4}}{\cos(t)} dt= r H^1(\partial B_1). \end{equation} Let $\hat \eta=(x_2+\theta_1 (z_2-x_2),x_3) $ and $\hat \zeta=(z_2,x_3+ \theta_2 (z_3-x_3)) $ for $\theta_1, \theta_2 \in (0,1)$. Then there exist constants $C_4,\ldots,C_9$ independent of $R,\delta$ such that \begin{align*} |I_1| \le& \tilde{d}({\hat x},{\hat z}) \int_{\partial B_{2R}} |\partial_2\ell(\hat \eta-{\hat y})| d \sigma( {\hat y})+ \tilde{d}({\hat x},{\hat z})^2 \int_{\partial B_{2R}} |\partial_3\ell(\hat \zeta-{\hat y})| d \sigma( {\hat y})\\ \le & C_4 \tilde{d}({\hat x},{\hat z})^{\alpha} R^{-\alpha}=C_4 \left(\frac{\delta}{R}\right)^{\alpha} \qquad(\text{by equation} \, \eqref{eq:dervker} \, \text{and} \, \eqref{eq:partialBr} ). 
\\ |I_2| \le &C_4.\\ |I_3| \le & \int_{ B_{\sqrt{3}\delta}(\hat \xi)} |k({\hat x}-{\hat y})| |\partial_2^2 f({\hat y})-\partial_2^2 f({\hat x})| d {\hat y} \le C_1 [\partial_2^2 f]_{\alpha} \int_{ B_{9\delta/2}({\hat x})} |k({\hat x}-{\hat y})|\tilde{d}({\hat x}, {\hat y})^{\alpha} \ dy.\\ &\le C_4 [\partial_2^2 f]_{\alpha} \delta^{\alpha} \qquad \text{(by Lemma \ref{lm:coarea}) }\\ |I_4| \le& C_4 [\partial_2^2 f]_{\alpha} \delta^{\alpha} \qquad \text{as in the estimation of } \ I_3\\ |I_5| =& \left| \int_{\partial (B_{8R}- B_{\sqrt{3}\delta}(\hat \xi))} \ell({\hat x}-{\hat y}) \nu_3 d \sigma({\hat y}) \right|\\ \le& \left| \int_{\partial B_{8R}} \ell({\hat x}-{\hat y}) \nu_3 d \sigma({\hat y}) \right|+\left| \int_{\partial B_{\sqrt{3} \delta}(\hat \xi)} \ell({\hat x}-{\hat y}) \nu_3 d \sigma({\hat y}) \right| \le C_5, \end{align*} thanks to equations \eqref{eq:dervker} and\eqref{eq:partialBr}. Moreover, we have \begin{align*} |I_6| \le & C_6 \delta \int_{ B_{8R}\smallsetminus B_{\sqrt{3}\delta}(\hat \xi)} |\partial_2 k(\hat \eta-{\hat y})| \, |(\partial_2^2 f({\hat y})-\partial_2^2 f({\hat z})) | d {\hat y}\\ & \quad + \delta^2\int_{ B_{8R}\smallsetminus B_{\sqrt{3}\delta}(\hat \xi)} |\partial_3 k(\hat \zeta-{\hat y})| \, |(\partial_2^2 f({\hat y})-\partial_2^2 f({\hat z})) | d {\hat y}\\ \le& C_7 [\partial_2^2 f]_{\alpha} \left( \delta \int_{\tilde{d}({\hat y},\hat \xi)\ge \sqrt{3} \delta} \frac{\tilde{d} ({\hat y},{\hat z})^{\alpha}}{\tilde{d}({\hat y}, \hat \eta)^4} d {\hat y}+ \delta^2 \int_{\tilde{d}({\hat y},\hat \xi)\ge \sqrt{3} \delta} \frac{\tilde{d} ({\hat y},{\hat z})^{\alpha}}{\tilde{d}({\hat y}, \hat \zeta)^5} d {\hat y} \right)\\ \le& C_8 [\partial_2^2 f]_{\alpha} \left( \delta \int_{\tilde{d}({\hat y},\hat \xi)\ge \sqrt{3} \delta} \frac{\tilde{d} ({\hat y},\hat \xi)^{\alpha}}{\tilde{d}({\hat y}, \hat \xi)^4} d {\hat y}+ \delta^2 \int_{\tilde{d}({\hat y},\hat \xi)\ge \sqrt{3} \delta} \frac{\tilde{d} ({\hat y},\hat \xi)^{\alpha}}{\tilde{d}({\hat y}, 
\hat \xi)^5} d {\hat y} \right)\\ &(\text{Since by Remark \ref{rk:midpoint}} \quad \tilde{d}({\hat y},{\hat z}) \le(1+\tfrac{\sqrt{3}}{2}) \tilde{d}({\hat y},\hat \xi), (1- \tfrac{\sqrt{3}}{2}) \tilde{d}(\hat \xi,{\hat y}) \le \tilde{d}(\hat \eta ,{\hat y}) \\ & \qquad \, \text{and} \ (1- \tfrac{\sqrt{3}}{2}) \tilde{d}(\hat \xi,{\hat y}) \le \tilde{d}({\hat y} ,\hat \zeta) )\\ \le& C_9 [\partial_2^2 f]_{\alpha} \left( \delta \int_{\sqrt{3}\delta}^{+\infty} s^{\alpha-2} \ ds+ \delta^2 \int_{\sqrt{3}\delta}^{+\infty} s^{\alpha-3} \ ds \right)= C_9[\partial_2^2 f]_{\alpha} \delta^{\alpha}, \end{align*} where in the last inequality we used the coarea formula and the equation \eqref{eq:surfacemeasureofball0}. Collecting terms we gain \begin{equation} \lambdabel{eq:he22} |\partial_2^2 K(f) ({\hat x})-\partial_2^2 K(f) ({\hat z})| \le C_{10} \left( R^{-\alpha} \|\partial_2^2 f\|_{\infty}+[\partial_2^2 f]_{\alpha} \right) \tilde{d}({\hat x},{\hat z})^{\alpha}. \end{equation} Then choosing ${\hat x}_0={\hat x}$ and $R=1$ we have $B_R({\hat x}_0)=B_1({\hat x})$, then by equations \eqref{eq:he22} we get \begin{equation} \lambdabel{eq:he222} \begin{aligned} \frac{|\partial_2^2 K(f) ({\hat x})-\partial_2^2 K(f) ({\hat z})|}{\tilde{d}({\hat x},{\hat z})^{\alpha}} &\le C_{11} (\|\partial_2^2 f\|_{\infty}+[\partial_2^2 f]_{\alpha})+ 2 \|\partial_2^2 K(f)\|_{\infty}\\ \text{(by eq. \eqref{eq:C2e}) }& \le C_{11} (\|\partial_2^2 f\|_{\infty}+[\partial_2^2 f]_{\alpha})+ \tilde{C}_2 \| f\|_{C^{2,\alpha}}\\ &\le C_{12} \| f\|_{C^{2,\alpha}} \end{aligned} \end{equation} Reasoning in the same way as above, we get \begin{equation} \lambdabel{eq:he3} \frac{|\partial_3 K(f) ({\hat x})-\partial_3 K(f) ({\hat z})|}{\tilde{d}({\hat x},{\hat z})^{\alpha}} \le C_{13} \| f\|_{C^{2,\alpha}} \end{equation} Putting together equations \eqref{eq:C2e}, \eqref{eq:he222} and \eqref{eq:he3} we get the desired estimates \eqref{eq:C2ahe}. 
\end{proof} \subsection{The reflection technique for singular integrals} \begin{definition} Let $g$ be a function on $\Pi$ and $x \in {\mathbb{H}}^1 \smallsetminus \Pi$. We set $$ \tilde K_1(g) (x) = \int_{\Pi} \tilde k_1(x,\hat y) g(\hat y) d\sigma(\hat y), \qquad \tilde K(g) (x) = \int_{\Pi} \tilde k(x,\hat y) g(\hat y) d\sigma(\hat y) $$ where \begin{equation} \label{eq:tidek1} \tilde k_1(x,y)= \dfrac{1}{\pi} \frac{\big( x_1^2 + (x_2-y_2)^2\big) x_1 } {\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 \big)^2 \Big)^{3/2}} \end{equation} and \begin{equation} \label{eq:tidek} \tilde k(x,y)= \dfrac{1}{\pi} \frac{(x_2-y_2)(x_3 - y_3 ) } {\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 \big)^2 \Big)^{3/2}}. \end{equation} \end{definition} \begin{remark} \label{rk:tiledek} Notice that $\tilde k(x,y)$ defined in \eqref{eq:tidek} converges to the convolution kernel $k(\hat x-\hat y)$ defined in \eqref{eq:kk} as one approaches the boundary. \end{remark} \begin{lemma}\label{tildelimit} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and $x_0$ be a point in $\Pi$. For $x \in {\mathbb{H}}^1 \smallsetminus \Pi$ we consider $$ \tilde{K}_1(g) (x) = \int_{\Pi} \tilde{k}_1(x,y) g(y) d\sigma(y), $$ where $\tilde{k}_1$ is defined in \eqref{eq:tidek1}. Then we have \begin{align*} &\tilde{K}_1(g) (x) \to \frac{1}{2} g(x_0) \quad \text{ as } x\to x_0^+,\\ &\tilde{K}_1(g) (x) \to -\frac{1}{2} g(x_0) \quad \text{ as } x\to x_0^-, \end{align*} so that $(K_1)^+=\tfrac{1}{2}\text{Id}$ when restricted to $\Pi$ and $(K_1)^-= - \frac{1}{2} \text{Id}$ when restricted to $\Pi$.
\end{lemma} \begin{proof} By Proposition \ref{frompositive} $K_1 (g) (x)$ converges to $\pm \tfrac{1}{2} g$ and since \[ |K_1 (g) (x)- \tilde{K}_1(g)(x)| \leqslant \sup_{\hat x, \hat y} \left|1- \tfrac{\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 + \frac{1}{2}y_{2}x_1 \big)^2 \Big)^{3/2}}{\Big( \big( x_1^2 + (x_2-y_2)^2\big)^2 + 16 \big(x_3 - y_3 \big)^2 \Big)^{3/2}} \right| K_1(g)(x) \] we have that $K_1 (g) (x)- \tilde{K}_1(g)(x)$ goes to zero when $x_1$ tends to $0$. Then also $\tilde{K}_1(g)(x)$ converges to $\tfrac{1}{2} g$ when $x_1\to0^+$ and $\tilde{K}_1(g)(x)$ converges to $-\tfrac{1}{2} g$ when $x_1\to0^-$. \end{proof} Given $r \in {\mathbb{R}}$, let us denote $\Pi_r=\{x=(r, x_2, x_3)\}$. We consider the $C^{2, \alpha}(\Pi_r)$ norm with respect to the distance $\tilde{d}$ as we did in Section \ref{sc:c2alphaestimete}. This choice allows us to completely decouple variables and we have \begin{proposition}\label{IKnorm} Let $\Pi=\{ x_1=0\}$ and $K$ the singular operator defined by the kernel $k$, see \eqref{k}. Then we have $$||(-\frac{1}{2}I + K)(g)||_{C^{2, \alpha}(\Pi)} = ||(\frac{1}{2}I + K)(g)||_{C^{2, \alpha}(\Pi)}.$$ \end{proposition} \begin{proof} Since the $C^{2,\alpha}$ norms on $\Pi_r$ with respect to the distance $\tilde{d}$ are independent of $r$, we have $$||(\tilde K_1 + \tilde K)(g)(- \cdot, \cdot, \cdot)||_{C^{2, \alpha}(\Pi_{-r})} = ||(\tilde K_1 + \tilde K)(g)||_{C^{2, \alpha}(\Pi_r)}.$$ Letting $r$ tend to $0$, and applying Lemma \ref{tildelimit} and Remark \ref{rk:tiledek} we get the thesis. \end{proof} \subsection{The method of continuity} \label{sc:methodcontinuity} Given $t \in [0,1]$ and $K$ the singular operator with kernel $k$ defined in \eqref{k}, we set \[ T_t=\frac{1}{2} I + t K. \] Notice that $T_1=\tfrac{1}{2}I + K$. Let us consider the set \[ A=\{ t \in [0,1] \ : \ T_t \ \text{is invertible on} \ C^{2, \alpha}(\Pi)\}. \] First of all we notice that $A \ne \emptyset$ since $T_0=\frac{1}{2}I$ is invertible.
By Theorem \ref{th:Kcont} we have that $T_t: C^{2, \alpha}(\Pi) \to C^{2, \alpha}(\Pi)$ is continuous, namely there exists a constant $C$ such that \begin{equation} \lambdabel{eq:upperTt} \| T_t(g)\|_{C^{2, \alpha}(\Pi)} \le C \|g\|_{C^{2, \alpha}(\Pi)}. \end{equation} \begin{proposition} \lambdabel{pr:contmeth} With the previous notations, it holds that \begin{equation} \lambdabel{eq:lowerTt} \|g\|_{C^{2, \alpha}(\Pi)} \le 2 \| T_t(g)\|_{C^{2, \alpha}(\Pi)} \end{equation} \end{proposition} \begin{proof} Clearly we have \[ g=\left(\frac{1}{2}I + tK\right) g + \left(\frac{1}{2}I - tK\right)g. \] Then by Proposition \ref{IKnorm} we gain \begin{align*} \|g \|_{C^{2,\alpha}(\Pi)} &\le \| (\tfrac{1}{2}I + t K) g\|_{C^{2,\alpha}(\Pi)} + \| (\tfrac{1}{2}I - tK) g\|_{C^{2,\alpha}(\Pi)}\\ &= \| (\tfrac{1}{2}I + t K) g\|_{C^{2,\alpha}(\Pi)} + \| (-\tfrac{1}{2}I + tK) g\|_{C^{2,\alpha}(\Pi)}\\ &= 2\| (\tfrac{1}{2}I + t K) g\|_{C^{2,\alpha}(\Pi)}. \qedhere \end{align*} \end{proof} By the estimates \eqref{eq:upperTt}, \eqref{eq:lowerTt} and the contraction mapping principle it follows that $A$ is both open and closed. Hence $A=[0,1]$ and $T_1=\tfrac{1}{2} I + K $ is invertible from $C^{2,\alpha}(\Pi)$ to $C^{2,\alpha}(\Pi)$. \section{The Poisson kernel and Schauder estimates} \lambdabel{sc:Schest} In this section we will show the Schauder estimates. First of all we consider the flat case as follows \begin{theorem} \lambdabel{c:schauderGroups} Let $\Omegaega \subset {\mathbb{H}}^1$ be a bounded domain such that $\Omegaega \subset \{x_1>0\}$. Let $\bar x\in \partial \Omegaega $ and assume that there exists an open neighborhood $V$ of $\bar x $ such that $ V \cap \partial \Omegaega \subset \{x_1=0\}.$ Assume that $f \in C^\alpha(\bar \Omegaega)$ and $g \in \Gammamma^{2, \alpha} _0(\partial \Omegaega \cap V)$ and $0<\alpha<1$. 
Denote $u$ the unique solution to $$\Delta_{\mathbb{H}} u=f\; \text{in}\ \Omegaega, \quad u= g \text{ on }\, \partial \Omegaega.$$ Then \begin{equation} \lambdabel{stime} \|u\|_{C^{2, \alpha}(\bar \Omegaega)} \leqslant C (\|g\|_{ \Gammamma^{2, \alpha} (\partial \Omegaega)} + \|f\|_{C^\alpha(\bar \Omegaega)}).\end{equation} \end{theorem} \begin{proof} Let $\mathcal{D}$ be the double layer potential on $\Pi$ defined in \eqref{eq:DgPi} and $K$ be the operator with convolution kernel $k(\hat x-\hat y)$ defined in \eqref{eq:kk}. Therefore we define $$\mathcal{P}_0(g)(x) = \mathcal{D} ( (\tfrac{1}{2}I+K)^{-1}g) (x)=\int_{\Pi} ((\tfrac{1}{2}I+K)^{-1}g)(\hat y) (k_1(x,\hat y)+ k(x,\hat y)) d\hat y.$$ Then $u=\mathcal{P}_0(g)$ satisfies \begin{equation} \begin{cases} \Delta_{\mathbb{H}} u=0 & \text{in} \quad \Omegaega \\ u=g & \text{on} \quad \{x_1=0\}, \end{cases} \lambdabel{P} \end{equation} since $\Omegaega \subset \{x_1>0\}$. For each $g \in \Gammamma^{2, \alpha} _0(\partial \Omegaega \cap V)$ we set $$K_{\partial \Omegaega} (g)=I(g)- \mathcal{P}_0(g).$$ If we choose $V_0 $ such that $supp (g) \subset \subset V_0\subset \subset V$, then $K_{\partial \Omegaega} (g)(x) = 0 $ for every $x \in \partial \Omegaega \cap V$. On the other side $g(x)=0$ for $ x \in \partial \Omegaega \smallsetminus V_0$, thus we have \begin{align*} K_{\partial \Omegaega} (g)( x) &= \mathcal{P}_0(g) (x)= \mathcal{D} ( (\tfrac{1}{2}I+K)^{-1}g) (x)\\ &=\int_{\Pi} ((\tfrac{1}{2}I+K)^{-1}g)(\hat y) (k_1(x,\hat y)+ k(x,\hat y)) d\hat y. \end{align*} then the kernel defining $K_{\partial \Omegaega}$ has no singularities when $ x \in\partial \Omegaega \smallsetminus \{x_1=0\}$. As a consequence $K_{\partial \Omegaega}$ is a compact operator, and $I-K_{\partial \Omegaega}$ can be explicitly inverted. 
Then we set \begin{equation} \lambdabel{eq:Pg} \mathcal{P}(g) = \mathcal{P}_0((I-K_{\partial \Omegaega})^{-1} g)= \mathcal{D} ( (\tfrac{1}{2}I+K)^{-1}(I-K_{\partial \Omegaega})^{-1}g) \end{equation} then $\mathcal{P}(g)$ is a Poisson kernel such that $u=\mathcal{P}(g)$ satisfies \begin{equation} \begin{cases} \Delta_{\mathbb{H}} u=0 & \text{in} \quad \Omegaega \\ u=g & \text{on} \quad \partial \Omegaega. \end{cases} \lambdabel{P} \end{equation} Since $\mathcal{D}$ is continuous from $C^{2,\alpha}(\Pi)$ into $C^{2,\alpha}(\{x_1>0\})$ (see for instance \cite[Main Lemma 13.12]{GreinerStein} or \cite{NagelStein79}), $(\tfrac{1}{2}I+K)^{-1}$ is continuous from $C^{2,\alpha}(\Pi)$ into $C^{2,\alpha}(\Pi)$ thanks to Section \ref{sc:methodcontinuity}, {$C^{2,\alpha}(\Pi)$ coincides with $\Gammamma^{2,\alpha}(\Pi)$ by Proposition \ref{Pr:C=Gamma} and $(I-K_{\partial \Omegaega})^{-1}$ is continuous from $\Gammamma^{2,\alpha}(\partial \Omegaega)$ into $\Gammamma^{2,\alpha}(\Pi)$} we get $u=\mathcal{P}(g)$ defined in \eqref{eq:Pg} verifies \[ \| u\|_{C^{2,\alpha}(\Omegaega)}\le C \|g\|_{\Gammamma^{2,\alpha}(\partial \Omegaega)}. \] On the other hand by the interior estimates (see for instance \cite{MR1135924}) we have that the solution $v$ of \begin{equation} \begin{cases} \Delta_{\mathbb{H}} v=f & \text{in} \quad \Omegaega \\ v=0 & \text{on} \quad \partial \Omegaega. \end{cases} \lambdabel{L} \end{equation} verifies \[ \| v\|_{C^{2,\alpha}(\Omegaega)}\le \tilde{C} \|f\|_{C^{\alpha}( \Omegaega)}. \] Hence considering the function $u+v$ instead of $u$ we get the thesis. \end{proof} Therefore we have solved the problem assuming that the boundary is locally a plane. Now we have to flatten the boundary and extend the result to general boundaries. 
\begin{theorem} \label{c:schauder2} Let $\Omega \subset {\mathbb{H}}^1$ be a bounded domain and let $u$ be the unique solution to $$\Delta_{\mathbb{H}} u=f\; \text{in}\ \Omega, \quad u= g \text{ on }\, \partial \Omega, $$ where $f \in C^\alpha(\bar \Omega)$ and $g \in \Gamma^{2, \alpha} (\partial \Omega)$ and $0<\alpha<1$. Let $\bar x\in \partial\Omega $ be a non-characteristic point, $V\subset {\mathbb{H}}^1$ be an open neighborhood of $\bar x $ without characteristic points and $\phi\in C^\infty_0(V)$ be a bump function equal to $1$ in a neighborhood $V_0 \subset \subset V$ of $\bar x$. Then we have $\phi u \in C^{2, \alpha}(\bar \Omega\cap V)$ and \begin{equation} \label{stime} \|\phi u\|_{C^{2, \alpha}(\bar \Omega\cap V)} \leqslant C (\|g\|_{\Gamma^{2, \alpha} (\partial \Omega)} + \|f\|_{C^\alpha(\bar \Omega)}).\end{equation} \end{theorem} \begin{proof} Let us denote by $\Omega$ a smooth, open bounded set in ${\mathbb{H}}^1$ and let $0\in \partial \Omega$ be a non characteristic point. The boundary of $\Omega$ can be identified in a neighborhood $V$ with the graph of a regular function $w$, defined on a neighborhood $\hat V=V\cap {\mathbb{R}}^{2}$ of $0$: $$\partial \Omega \cap V= \{(w(\hat s), \hat s): \hat s\in \hat V \}.$$ We can as well assume that $w(0) = 0$, $\nabla w(0)=0$. This implies that \begin{equation}\label{tuttoqui} w(\hat s) = O(|\hat s|^2) \end{equation} as $\hat s \to 0$. On the set $V$ the function $\Xi(s_1, \hat s) = (s_1 - w(\hat s), \hat s) $ is a diffeomorphism.
It sends $\partial \Omega\cap V$ to a subset of the plane $\{x_1 =0\}$: $$\Xi(\partial \Omega \cap V) =\{(x_1, \hat x): x_1 =0\}= \Pi_{\Xi}.$$ Moreover, we have \begin{equation}\label{ohscusa}\Delta_\Xi = d\Xi(\Delta_{{\mathbb{H}}^1}), \end{equation} with fundamental solution $$ \Gamma_\Xi(x) = \Gamma(x_1 + w(\hat x), \hat x).$$ For $x_1$ small enough we have $$ \Gamma_\Xi(x_1, \hat x) = \Gamma(x_1+w(\hat x), \hat x) = \Gamma(x_1, \hat x) + R(x_1, \hat x), $$ where $$ R(x_1,\hat x)=w(\hat x) \nabla \Gamma( x_1 +t w(\hat x), \hat x) $$ for some $t \in (0,1)$. Furthermore we have that $$X_{1,\Xi}^x=d\Xi(X_1^x)=\partial_{x_1}-\tfrac{x_2}{2} \partial_{x_3} w(\hat x) \partial_{x_1}-\tfrac{x_2}{2} \partial_{x_3}.$$ Notice that $\Gamma$ is a rational function that goes as $d^{-Q+2}$, its first derivatives go as $d^{-Q+1}$ and its second derivatives go as $d^{-Q}$. On the other hand the function $w(\hat x)$ has a 0 of order 2, thus $w(\hat x)$ goes as $d^{2}$. Then we have \[ X_{1,\Xi}^y \Gamma_\Xi(0, \hat y)=X_1^y \Gamma (0,\hat y)+ \hat R(0,\hat y), \] where \[ \hat R(0,\hat y)=X_1^y R(0,\hat y)-\tfrac{y_2}{2} \partial_{y_3} w(\hat y) \partial_{y_1} R(0,\hat y) -\tfrac{y_2}{2} \partial_{y_3} w(\hat y) \partial_{y_1} \Gamma(0,\hat y) \] that goes as \[ |\hat R(0,\hat x)| \le \hat d^{-Q+2}(0,\hat x), \] where $\hat d$ is the induced distance. Therefore the operator $K_{\hat R}$ with kernel $\hat R$ is compact since the homogeneous dimension of the boundary is $Q-1$. Therefore also thanks to the left-invariance of the distance and of the fundamental solution we get that the double layer potential \[ \mathcal{D}(\phi g)(x)= \int _{\Pi_{\Xi}} X_{1,\Xi}^y \Gamma_\Xi(x, \hat y) ( \phi g)(\hat y) d \hat y= K_1(\phi g)(x)+K(\phi g)(x)+K_{\hat R}(\phi g)(x) \] converges to $(\frac{1}{2} I +K + K_{\hat R})(\phi g)(x_0) $ in the limit $x \to x_0^+$ from positive values of $x_1$.
In the previous equation $K_1$ and $K$ are the operators defined in Proposition \ref{prop:K1Kplane}, $x_0=(0,\hat x_0) \in \hat V$. With an abuse of notation we denote in the same way the function $\phi g$ compactly supported in $V \cap \partial \Omega$ and the function $\phi g \circ \Xi^{-1}$ compactly supported on $\Pi_{\Xi}$. Since $\frac{1}{2} I +K$ is invertible, $$(\frac{1}{2} I +K + K_{\hat R})(x_0) =(\frac{1}{2} I +K )(I + (\frac{1}{2} I +K )^{-1}K_{\hat R})(x_0). $$ Since $K_{\hat R}$ is compact and $(\frac{1}{2} I +K )^{-1}$ is bounded, then $(\frac{1}{2} I +K )^{-1}K_{\hat R}$ is compact, so that $(I + (\frac{1}{2} I +K )^{-1}K_{\hat R})$ is invertible, thus $\frac{1}{2} I +K + K_{\hat R}$ is invertible. Therefore we define $$\mathcal{P}(\phi g)(x) = \mathcal{D} ( (\tfrac{1}{2}I+K+K_{\hat R})^{-1} \phi g) (x).$$ Then $u=\mathcal{P}(\phi g)$ satisfies \begin{equation} \begin{cases} \Delta_{\Xi} u=0 & \text{in} \quad \{x_1>0\} \\ u=\phi g & \text{on} \quad \{x_1=0\}. \end{cases} \label{PP} \end{equation} In particular, since $\phi=1$ on $V_0$ we have that $\phi u=\phi \mathcal{P}(\phi g)$ solves \begin{equation*} \begin{cases} \Delta_{\Xi} (\phi u) =0 & \text{in} \quad \Xi^{-1}(V_0 \cap \Omega) \\ \phi u=\phi g & \text{on} \quad \Xi^{-1}(V_0 \cap \partial \Omega), \end{cases} \end{equation*} thus changing variables and noticing that $\phi=1$ on $V_0$ we gain that $\mathcal{P}(\phi g)\circ \Xi$ solves \begin{equation} \begin{cases} \Delta_{{\mathbb{H}}^1}u=0 & \text{in} \quad V_0 \cap \Omega \\ u= g & \text{on} \quad V_0 \cap \partial \Omega.
\end{cases} \label{PP2} \end{equation} Since $\mathcal{D}$ is continuous from $C^{2,\alpha}(\Pi_{\Xi})$ into $C^{2,\alpha}(\{x_1>0\})$ (see for example \cite[Main Lemma 13.12]{GreinerStein} or \cite{NagelStein79}), $(\tfrac{1}{2}I+K+K_{\hat R})^{-1}$ is continuous from $C^{2,\alpha}(\Pi_{\Xi})$ into $C^{2,\alpha}(\Pi_{\Xi})$, { $C^{2,\alpha}(\Pi_{\Xi})$ coincides with $\Gamma^{2,\alpha}(\Pi_{\Xi})$ by Proposition \ref{Pr:C=Gamma} } and $\Xi$ is a smooth diffeomorphism, we get that $\phi u=\phi \mathcal{P}(\phi g)\circ \Xi$ verifies \[ \| \phi u\|_{C^{2,\alpha}(V \cap \bar{\Omega})}\le C \|\phi g\|_{\Gamma^{2,\alpha}(\partial \Omega)} \le C \|g\|_{\Gamma^{2,\alpha}(\partial \Omega)}. \] On the other hand by the interior estimates (see for instance \cite{MR1135924}) we have that the solution $v$ of \begin{equation} \begin{cases} \Delta_{\mathbb{H}} v=f & \text{in} \quad \Omega \\ v=0 & \text{on} \quad \partial \Omega. \end{cases} \label{L} \end{equation} verifies \[ \| v\|_{C^{2,\alpha}(\bar{\Omega})} \le \tilde{C} \|f\|_{C^{\alpha}( \bar{\Omega})}. \] Hence considering the function $u+v$ instead of $u$ we get the thesis. \end{proof} \begin{corollary} \label{c:schauder3} Let $\Omega \subset {\mathbb{H}}^1$ be a bounded domain without characteristic points and let $u$ be the unique solution to $$\Delta_{\mathbb{H}} u=f\; \text{in}\ \Omega, \quad u= g \text{ on }\, \partial \Omega, $$ where $f \in C^\alpha(\bar \Omega)$, $g \in \Gamma^{2, \alpha} (\partial \Omega)$ and $0<\alpha<1$. We have \begin{equation} \label{stime2} \|u\|_{C^{2, \alpha}(\bar \Omega)} \leqslant C (\|g\|_{\Gamma^{2, \alpha} (\partial \Omega)} + \|f\|_{C^\alpha(\bar \Omega)}).\end{equation} \end{corollary} \begin{proof} We can cover the boundary by a finite number of balls $\{B_i\}_{i=1,\ldots,N}$ and an associated partition of unity $\phi_1, \ldots,\phi_N$.
Then on each ball $B_i$ we have that $\phi_i g$ is compactly supported in $B_i$, then by Theorem \ref{c:schauder2} we get \[ \| \phi_i u\|_{C^{2,\alpha}(B_i \cap \bar{\Omega})} \le C \|g\|_{\Gamma^{2,\alpha}(\partial \Omega)}. \] Since we have \[ \| u \|_{C^{2,\alpha} (\bar{\Omega})}= \| u \|_{C^{2,\alpha} (\Omega \smallsetminus \bigcup_{i=1}^N B_i )} + \| \sum_{i=1}^N \phi_i u\|_{C^{2,\alpha}(\bar{\Omega})}, \] we estimate the first term by means of the interior estimates and the second term as follows \[ \| \sum_{i=1}^N \phi_i u\|_{C^{2,\alpha}(\bar{\Omega})}\le \sum_{i=1}^N \| \phi_i u\|_{C^{2,\alpha}(B_i \cap \bar{\Omega})} \le N C \|g\|_{\Gamma^{2,\alpha}(\partial \Omega)}. \] Hence we get the result. \end{proof} \section{Generalization to Heisenberg-type groups} \label{sc:GHT} \begin{definition} A Heisenberg-type algebra is a finite-dimensional real Lie algebra $\mathfrak{g}$ which can be endowed with an inner product $\escpr{\cdot,\cdot}$ such that \[ [\frak{z}^{\perp},\frak{z}^{\perp}]= \frak{z}, \] where $\frak{z}$ is the center of $\mathfrak{g}$ and moreover, for every fixed $z \in \frak{z}$ the map \[ J_z: \frak{z}^{\perp} \to \frak{z}^{\perp} \] defined by \[ \escpr{J_z(v),w}=\escpr{z,[v,w]} \quad \forall v,w \in \frak{z}^{\perp} \] is an orthogonal map whenever $\escpr{z,z}=1$. \end{definition} A Heisenberg-type group $\mathbb{G}$ ($H$-type group, in short) is a connected and simply connected Lie group whose Lie algebra is an $H$-type algebra. Let $n,m \in {\mathbb{N}}$, $m\ge2$ and $n\ge 1$. Following \cite[Chapter 18]{BLU} a prototype of $H$-type group $({\mathbb{R}}^{m+n},\delta_{\lambda}, \circ )$ is given by ${\mathbb{R}}^{m+n}$ equipped with the group law \[ (x,t) \circ (y,\tau)=\left( \begin{array}{cc} x_k+ y_k & k=1,\ldots,m\\ t_k+\tau_k +\frac{1}{2}\escpr{A^{(k)} x,y } & k=1,\ldots,n \end{array} \right) \] and the dilation $\delta_{\lambda}(x,t)=(\lambda x, \lambda^2 t)$.
Here $A^{(k)}$ is a skew-symmetric orthogonal matrix, such that, \begin{equation} \label{eq:hyA} A^{(k)} A^{(\ell)}+ A^{(\ell)} A^{(k)}=0, \end{equation} for $k=1,\ldots,n$ with $k \ne \ell$. By \cite[Theorem 18.2.1]{BLU} any $H$-type group is naturally isomorphic to a prototype $H$-type group. Therefore we use the notation $\mathbb{G}$ for the prototype $H$-type group $({\mathbb{R}}^{m+n},\delta_{\lambda}, \circ )$ associated to a $H$-type group. A family of left invariant vector fields that agree with $\tfrac{\partial}{\partial x_j}$ for $j=1,\ldots,m$ at the origin is given by \[ X_j=\frac{\partial}{\partial x_j}+ \frac{1}{2} \sum_{k=1}^n \left( \sum_{i=1}^m a_{j,i}^{k} x_i \right) \frac{\partial}{\partial t_k}. \] Setting $m= \dim(\mathfrak{z}^{\perp})$ and $n=\dim(\mathfrak{z})$ we have that $\{ X_1,\ldots, X_m \}$ is a basis of the horizontal distribution $\mathfrak{z}^{\perp}$. Then we have \[ [X_i,X_j]= \sum_{k=1}^n a^{k}_{j,i} \frac{\partial}{\partial t_k} \] and setting $Z_k= \frac{\partial}{\partial t_k}$ for $k=1,\ldots,n$ we get that $Z_1,\ldots,Z_n$ is an orthonormal basis of $\mathfrak{z}$. The homogeneous dimension $Q$ is given by $Q=m+2n$. We denote by $\nabla_{\mathbb{G}}$ the horizontal gradient $ \nabla_{\mathbb{G}}= (X_1,\ldots,X_m) $ and by $\nabla$ the standard Euclidean gradient. The sub-Laplacian operator is given by \[ \Delta_{\mathbb{G}}= \sum_{i=1}^{m} X_i^2= \divv_{\mathbb{G}}(\nabla_{\mathbb{G}} ), \] where $\divv_{\mathbb{G}}(\phi)= X_1(\phi_1)+\ldots+ X_m(\phi_m)$ for $\phi=\phi_1 X_1+\ldots+ \phi_m X_m \in \mathfrak{z}^{\perp}$.
It is well known (see \cite[Chapter 5]{BLU}) that the sub-Laplacian admits a unique fundamental solution $\hat{\Gamma} \in C^{\infty}({\mathbb{R}}^{m+n} \smallsetminus \{0\})$, $\hat{\Gamma} \in L_{\text{loc}}^1({\mathbb{R}}^{m+n})$, $\hat{\Gamma}(x,t) \to 0$ when $\xi=(x,t)$ tends to infinity and such that \[ \int_{{\mathbb{R}}^{m+n}} \hat{\Gamma}(x,t) \, \Delta_{\mathbb{G}} \varphi(x,t) \, dx \ dt = -\varphi(0) \quad \forall \varphi \in C^{\infty}_0({\mathbb{R}}^{m+n}). \] \begin{definition} We call Gauge norm on $\mathbb{G}$ a homogeneous symmetric norm $d$ smooth out of the origin and satisfying \[ \Delta_{\mathbb{G}} (d(x,t)^{2-Q})=0 \quad \forall (x,t) \ne (0,0) . \] \end{definition} Following \cite{BLU} a Gauge norm in $\mathbb{G}$ is given by \[ |\xi|_{{\mathbb{G}}}=\left( | x |^4+ 16 |t|^2\right)^{\tfrac{1}{4}}, \] where $\xi=(x,t)$. Therefore there exists a positive constant $C_Q$ such that \[ \hat{\Gamma}(\xi)=C_Q \, |\xi|_{{\mathbb{G}}}^{2-Q}= \tfrac{C_Q}{ \Big( |x|^4 + 16 |t|^2 \Big)^{\frac{(Q-2)}{4}}}. \] Strictly speaking $| \cdot|_{{\mathbb{G}}}$ and $\hat \Gamma$ are defined on the algebra $\mathfrak{g}$ and $d(\xi,\eta)=|\eta^{-1}\circ \xi|_{{\mathbb{G}}}$ on the group $\mathbb{G}$. Indeed, for every couple of points $\xi=(x,t)$, $\eta=(y,\tau)$ in ${\mathbb{R}}^{m+n}$ there exist unique coefficients $v=(v_1,\ldots,v_m)$ and $z=(z_1,\ldots,z_n)$ such that $$\xi = \exp\left(\sum_{i=1}^m v_i X_i + \sum_{k=1}^n z_k Z_k \right) (\eta).$$ We call these coefficients $(v,z) = \text{Log}_{\eta}(\xi)$ and a straightforward computation shows that $ \text{Log}_{\eta}(\xi)= \eta^{-1} \circ \xi$.
Finally we define the fundamental solution $\Gamma(\xi, \eta)$ on $\mathbb{G}$ as $\hat \Gamma(\text{Log}_{\eta}(\xi))= \hat \Gamma(\eta^{-1} \circ \xi)$, that is given by \begin{equation} \label{eq:fundsolHT} \Gamma(\xi, \eta)= C_Q \Big( \big| x-y \big|^4 + 16 \sum_{k=1}^n \big(t_k - \tau_k - \frac{1}{2} \escpr{A^{(k)}y,x} \big)^2 \Big)^{\frac{(2-Q)}{4}}. \end{equation} \begin{example} \label{eq:nothormander} Let us consider the $H$-type group given by ${\mathbb{R}}^{5}$ with the following vector fields \[ X_1=\frac{\partial }{\partial x_1} - \frac{x_2}{2} \frac{\partial }{\partial t_1}, \quad X_2=\frac{\partial }{\partial x_2} +\frac{x_1}{2} \frac{\partial }{\partial t_1}, \quad X_3=\frac{\partial }{\partial x_3} +x_1 \frac{\partial }{\partial t_2}, \] that generate the horizontal distribution $\mathfrak{z}^{\perp}$, and $Z_1=\tfrac{\partial }{\partial t_1}$, $ Z_2=\frac{\partial }{\partial t_2}$. When we consider $\Pi=\{x_1=0\}$ we obtain that $\mathfrak{z}^{\perp} \cap \Pi$, that is generated by $\tfrac{\partial }{\partial x_2}$ and $\tfrac{\partial }{\partial x_3}$, does not satisfy the H\"{o}rmander condition. In particular this is a Carnot group, different from ${\mathbb{H}}^1$, that does not satisfy the structure condition $(1.5)$ in \cite{BCC19}. \end{example} \begin{proposition} \label{prop:K1KplaneHT} Let $\Omega=\{(x,t) \in {\mathbb{R}}^{m+n} \ : \ x_1>0\} \subset \mathbb{G}$ and $\partial \Omega =\{x_1=0\}= \Pi$.
Then the double layer potential $ \mathcal{D}(g)(\xi)$ is given by \begin{equation} \label{eq:DgPiHT} \mathcal{D}(g)(\xi)= K_1(g)(\xi) + K(g)(\xi) \end{equation} for $\xi \in \Omega$, where $K_1$ and $K$ are operators with kernels respectively $k_1$ and $k$ defined as \begin{equation}\label{k1H}k_1(\xi ,\hat \eta)=C_Q (Q-2) \dfrac{ |x-(0,\hat y)|^2 x_1 } { \Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ A^{(k)}x,(0,\hat y)} \big)^2 \Big)^{\frac{Q+2}{4}}} \end{equation} \begin{equation}\label{kH}k(\xi,\hat \eta)=C_Q (2-Q) \dfrac{ 4 \sum_{i=2}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{A^{(k)}x,(0,\hat y)} \big) a_{1,i}^k (y_i-x_i)} { \Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ A^{(k)}x,(0,\hat y)} \big)^2 \Big)^{\frac{Q+2}{4}}}, \end{equation} where $\hat y=(y_2,\ldots, y_m)$ and $\hat{\eta}=(0,\hat y,\tau) \in \Pi$. \end{proposition} \begin{proof} First of all we have \[ X_1 \hat \Gamma (v,z)= C_Q (2-Q) \dfrac{ |v|^2 v_1 + 4 \sum_{i=1}^m \sum_{k=1}^n a_{1,i}^k v_i z_k} { \Big( | v |^4 + 16 |z|^2 \Big)^{\frac{Q+2}{4}}}. \] Then, by left invariance, an explicit computation shows that the derivative of \eqref{eq:fundsolHT} with respect to $X^{\xi}_1$ is given by \begin{align*} &X^{\xi}_{1}(\Gamma(\xi, \eta)) = (X_{v_1} \hat \Gamma)(\eta^{-1} \circ \xi) =\\ &= C_Q (2-Q) \dfrac{ |x-y|^2 (x_1-y_1) + 4 \sum_{i=1}^m \sum_{k=1}^n \big(t_k - \tau_k - \frac{1}{2} \escpr{A^{(k)}y,x} \big) a_{1,i}^k (x_i-y_i)} { \Big( \big| x-y \big|^4 + 16 \sum_{k=1}^n \big(t_k - \tau_k - \frac{1}{2} \escpr{A^{(k)}y,x} \big)^2 \Big)^{\frac{Q+2}{4}}} \end{align*} Since $\Gamma$ is symmetric we also have \begin{align*} &X^{\eta}_{1} \Gamma(\xi,\eta) =\escpr{\nabla_{{\mathbb{G}}}^{\eta} \Gamma(\xi,\eta), X_1^{\eta}}\\ &= C_Q (2-Q) \dfrac{ |x-y|^2 (y_1-x_1) + 4 \sum_{i=1}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{A^{(k)}x,y} \big) a_{1,i}^k (y_i-x_i)} { \Big(
\big| x-y \big|^4 + 16 \sum_{k=1}^n \big(t_k - \tau_k - \frac{1}{2} \escpr{A^{(k)}y,x} \big)^2 \Big)^{\frac{Q+2}{4}}}\\ &= C_Q (2-Q) \dfrac{ |x-y|^2 (y_1-x_1) + 4 \sum_{i=1}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{A^{(k)}x,y} \big) a_{1,i}^k (y_i-x_i)} { \Big( \big| x-y \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ A^{(k)}x,y} \big)^2 \Big)^{\frac{Q+2}{4}}}\\ \end{align*} Evaluating this derivative over the plane $\Pi=\{y_1=0\}$ for $x_1>0$ and noticing that $a_{1,1}^k=0$ we get \begin{align*} X^{\eta}_{1} \Gamma(\xi,\eta) &= C_Q (2-Q) \dfrac{ - |x- (0,\hat y)|^2 x_1 + 4 \sum_{i=2}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{A^{(k)}x,(0,\hat y)} \big) a_{1,i}^k (y_i-x_i)} { \Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ A^{(k)}x,(0,\hat y)} \big)^2 \Big)^{\frac{Q+2}{4}}}\\ &=k_1(\xi,\hat \eta)+k(\xi,\hat \eta) \end{align*} \end{proof} \begin{remark} \label{rk:int=1} Notice that for each $r>0$ it holds \begin{equation} \label{eq:intbound1HT} \int_{\partial B_{r}(\xi)} \escpr{\nabla_{{\mathbb{G}}} \Gamma(\xi,\eta) , \nu(\eta)} d \sigma(\eta)=1. \end{equation} Indeed, by the mean value formula for each open subset $O\subset {\mathbb{G}}$ such that $\xi \in O$, for each $r>0$ such that $B_r(\xi) \subset O$ and for each harmonic function $\psi \in \mathcal{H}(O)$ we have \[ \psi(\xi)= \int_{\partial B_{r}(\xi)} \psi(\eta) \escpr{\nabla_{{\mathbb{G}}} \Gamma(\xi,\eta) , \nu(\eta)} d \sigma(\eta). \] In particular if we consider $\psi \equiv1$ in $O$ we obtain \eqref{eq:intbound1HT}. \end{remark} Let $\Omega=\{x_1>0\} \subset {\mathbb{G}}$ and $\partial \Omega =\{x_1=0\}= \Pi$.
Then the induced distance $ \hat d$ is given by \begin{equation} \hat d(\hat \xi,\hat \eta)=|(0,\hat \eta)^{-1} \circ (0,\hat \xi) |_{{\mathbb{G}}} \end{equation} for each $\hat \xi =(0,\hat x,t)$ and $\hat \eta =(0,\hat y,\tau)$ in $\Pi$ and the induced ball is given by \[ \hat B_r(\hat \xi)=\Big\{ \hat \eta \in \Pi \ : \ \hat d(\hat \xi,\hat \eta)<r\Big\}. \] \begin{lemma}\label{gaHT} Let $\xi_0=(0,\hat x_0,t_0) \in \Pi$, $R>0$ and $\hat B_R(\hat \xi_0)= \{\hat \eta \in \Pi \ : \ \hat d (\hat \xi_0, \hat \eta)\leqslant R\} \subset \Pi$. Then the integral $$ \int_{ \hat B_R(\hat \xi_0)} \escpr{\nabla_{{\mathbb{G}}}^{\eta} \Gamma (\xi,(0, \hat \eta)) , X_1^{\eta}(\hat{\eta})} d \hat \eta $$ is well defined if the first component $x_1$ of $\xi$ satisfies $x_1>0$ and tends to $1/2$ as $\xi \to \xi_0$. \end{lemma} \begin{proof} Let $\{\xi^n\}_{n \in {\mathbb{N}}}$ be a sequence of points in $\Omega=\{x_1>0\}$ converging to $\xi_0$ as $n \to +\infty$ and $\varepsilon_n>0$ small enough such that $B(\xi^n,\varepsilon_n) \subset \Omega$ for each $n \in {\mathbb{N}}$. Then we consider the bounded domain $$\Omega_{n}^R= \{x_1 >0\} \cap B_R(\xi_0) \smallsetminus B(\xi^n,\varepsilon_n).$$ By the divergence theorem for each $n \in {\mathbb{N}} $ we have \begin{equation} \label{eq:2ballboundHT} \begin{aligned} 0&= \int_{\Omega_{n}^R} \Delta_{{\mathbb{G}}} \Gamma (\xi^n, \eta) d\eta= \int_{\partial \Omega_n^R} \escpr{\nabla_{{\mathbb{G}}}^{\eta} \Gamma(\xi_n,\eta), \nu(\eta)} d\sigma(\eta)\\ &= \int_{\partial B_R (\xi_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{G}}}^\eta \Gamma(\xi_n,\eta), \nu (\eta)} d\sigma(\eta)\\ & \quad + \int_{\Pi \cap B_R(\xi_0)} \escpr{\nabla_{{\mathbb{G}}}^\eta \Gamma(\xi_n,\eta), \nu (\eta)} d\sigma(\eta)\\ &\quad -\int_{\partial B(\xi^n,\varepsilon_n)} \escpr{\nabla_{{\mathbb{G}}}^\eta \Gamma(\xi_n,\eta), \nu(\eta)} d\sigma(\eta).
\end{aligned} \end{equation} For each $n \in {\mathbb{N}}$ the ball $B(\xi^n,\varepsilon_n)$ is contained in $\{x_1>0\}$ thus by Remark \ref{rk:int=1} we get \[ \int_{\partial B(\xi^n,\varepsilon_n)} \escpr{\nabla_{{\mathbb{G}}}^y \Gamma(\xi_n,\eta), \nu_h(\eta)} d\sigma(\eta)=1. \] Noticing that $\Pi \cap B_R(\xi_0)= \hat B_R( \hat \xi_0)$ and rearranging terms in \eqref{eq:2ballboundHT} we get \[ \int_{\hat B_R( \hat \xi_0)} \escpr{\nabla_{{\mathbb{G}}}^{\eta} \Gamma(\xi_n,\eta), \nu (\eta)} d\sigma(\eta)= 1- \int_{\partial B_R (\xi_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{G}}}^\eta \Gamma(\xi_n,\eta), \nu (\eta)} d\sigma(\eta). \] Letting $n \to + \infty$ the left hand side of the previous equality converges to \[ 1- \int_{\partial B_R (\xi_0) \cap \{x_1 >0\}} \escpr{\nabla_{{\mathbb{G}}}^\eta \Gamma(\xi_0,\eta), \nu (\eta)} d\sigma(\eta)=\dfrac{1}{2}, \] since we only consider half of the integral in equation \eqref{eq:intbound1HT}. \end{proof} The operator $K_1$ is totally degenerate when restricted to $\Pi$, so that we can not restrict it to functions defined on $\Pi$; however we can compute the limit from the interior of the set. \begin{proposition}\label{frompositiveH} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and $\xi_0$ be a point in $\Pi$. For $\xi \in {\mathbb{G}} \smallsetminus \Pi$ we consider $$ K_1(g) (\xi) = \int_{\Pi} k_1(\xi,\eta) g(\eta) d\sigma(\eta). $$ Then we have \begin{align*} &K_1(g) (\xi) \to \frac{1}{2} g(\xi_0) \quad \text{ as } \xi \to \xi_0^+,\\ &K_1(g) (\xi) \to -\frac{1}{2} g(\xi_0) \quad \text{ as } \xi \to \xi_0^-, \end{align*} so that $(K_1)^+=\tfrac{1}{2}\text{Id}$ when restricted to $\Pi$ and $(K_1)^-= - \frac{1}{2} \text{Id}$ when restricted to $\Pi$. \end{proposition} \begin{proof} Let $R>0$ big enough such that $\text{supp}(g) \subset \hat B_R (\hat \xi_0)$.
Let us assume that $\xi=(x_1, \hat x, t)$, $x_1>0$ and \begin{align*} K_1(g) (\xi) =& \int_{\Pi} k_1(\xi,\eta) g(\eta) d\sigma(\eta) =\int_{\hat B_R (\hat \xi_0)} k_1(\xi,\eta) (g(\eta) - g(\xi))d\sigma(\eta)\\ & + g(\xi) \int_{\hat B_R (\hat \xi_0)} k_1(\xi,\eta) d\sigma(\eta). \end{align*} On one hand we have \begin{align*} \left|\int_{\hat B_R (\hat \xi_0)} k_1(\xi,\eta) (g(\eta) - g(\xi)) d\sigma(\eta) \right|&\leqslant L \int_{\hat B_R (\hat \xi_0)} k_1(\xi,\eta) d(\xi,\eta)d\sigma(\eta)\\ &\leqslant L \int_{\hat B_R (\hat \xi_0)} \sqrt{x_1} d(\eta, \xi)^{-Q+1 +\frac{1}{2}}d\sigma(\eta) \to 0, \end{align*} as $ \xi \to \xi_0$, where $L$ is the Lipschitz constant of $g$. On the other hand by Lemma \ref{gaHT} we have \begin{align*} &g(\xi) \int_{\hat B_R (\hat \xi_0)} k_1(\xi,\eta) d\sigma(\eta) =g(\xi)\int_{\hat B_R (\hat \xi_0)} (k_1(\xi,\eta)+ k(\xi,\eta) ) d\sigma(\eta) +\\ & \, -g(\xi) \int_{\hat B_R (\hat \xi_0)} k(\xi,\eta) d\sigma(\eta) \xrightarrow[\xi \to \xi_0^+] {} \frac{1}{2} g(\xi_0)- g(\xi_0) \int_{\hat B_R (\hat \xi_0)} k(\xi_0,\eta) d\sigma(\eta)=\frac{1}{2} g(\xi_0) \end{align*} by symmetry of the kernel $k$ restricted to $\Pi$, see Lemma \ref{lm:sypol2H}. Finally when $x_1<0$ the kernel $k_1$ defined in \eqref{k1H} has the same sign as $x_1$, then $-k_1$ and $-x_1$ are positive and by Lemma \ref{gaHT} we have \begin{align*} &-g(\xi) \int_{\hat B_R (\hat \xi_0)} -k_1(\xi,\eta) d\sigma(\eta) =-g(\xi)\int_{\hat B_R (\hat \xi_0)} (-k_1(\xi,\eta)+ k(\xi,\eta) ) d\sigma(\eta) +\\ & \quad -g(\xi) \int_{\hat B_R (\hat \xi_0)} k(\xi,\eta) d\sigma(\eta) \xrightarrow[(-x_1,\hat x,t) \to \xi_0^+] {} -\frac{1}{2} g(\xi_0).
\qedhere \end{align*} \end{proof} \begin{definition} \label{def:kkH} As $\xi \to \xi_0^{\pm}$ the kernel $k(\xi,\hat \eta)$ defined in \eqref{kH} converges to the kernel \begin{equation} \label{eq:kkH} k(\hat \xi, \hat \eta )=C_Q (2-Q) \dfrac{ 4 \sum_{i=2}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{\hat{A}^{(k)} \hat x,\hat y} \big) a_{1,i}^k (y_i-x_i)} { \Big( \big| \hat x- \hat y \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ \hat A^{(k)} \hat x,\hat y} \big)^2 \Big)^{\frac{Q+2}{4}}}, \end{equation} where $\hat A^{(k)}=(a_{ij}^k)_{i,j=2,\ldots,m}$. Notice that $k(\hat \xi, \hat \eta )= \hat k ((\hat v,z))$ where $(0,\hat v, z)=\text{Log}_{(0,\hat \xi)}((0,\hat \eta))$, $\hat v=(v_2,\ldots,v_m)$ and \begin{equation} \label{eq:kkHO} \hat k((\hat v, z))= C_Q (2-Q) \dfrac{ 4 \sum_{i=2}^m \sum_{k=1}^n a_{1,i}^k v_i z_k} { \Big( | \hat v |^4 + 16 |z|^2 \Big)^{\frac{Q+2}{4}} }. \end{equation} Thus, if $g$ is a continuous compactly supported function in $\Pi$ the operator $K(g)$ converges to \[ \int_{\Pi} k(\hat \xi, \hat \eta ) g(\hat \eta) d \sigma(\eta), \] which with an abuse of notation we also denote by $K(g)$. \end{definition} Hence the analogue of \cite[Theorem 4.4]{MR3600064} in this setting is the following \begin{theorem} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and $\xi_0$ be a point in $\Pi$. Let $\mathcal{D}(g)$ be the double layer potential defined in \eqref{eq:DgPiHT}, then the limits of $\mathcal{D}(g)(\xi)$ when $\xi$ tends to $\xi_0^+$ for $\xi \in \{x_1>0\}$ and when $\xi$ tends to $\xi_0^-$ for $\xi \in \{x_1<0\}$ exist.
Moreover the limits verify the following relations \begin{align*} &\lim_{ \xi \to \xi_0^+} \mathcal{D}(g)(\xi)= \tfrac{1}{2} g(\xi_0)+ Kg(\xi_0) & \text{if} \quad \xi \in \{x_1>0\}\\ & \lim_{ \xi \to \xi_0^-} \mathcal{D}(g)(\xi)= -\tfrac{1}{2} g(\xi_0)+ Kg(\xi_0) & \text{if} \quad \xi \in \{x_1<0\}, \end{align*} where $K$ is the operator with convolution kernel $k$ defined in \eqref{eq:kkH}. \end{theorem} \begin{proof} By Proposition \ref{frompositiveH} and Definition \ref{def:kkH} we obtain $$\mathcal{D}(g)(\xi) \to (\frac{1}{2} I +K)(g)(\xi_0) $$ in the limit from positive values of $x_1$, while $$\mathcal{D}(g)(\xi) \to (- \frac{1}{2} I +K)(g)(\xi_0) $$ in the limit from negative values of $x_1$. \end{proof} \subsection{Invertibility of the double layer potential on the intrinsic plane} \label{sc:invertH} \subsection{The $C^{2,\alpha}$ estimates of $K$} \label{sc:c2alphaestimeteH} Let $r\in {\mathbb{R}}$ and $\Pi_r=\{\xi=(r, \hat{x},t)\}$. Let $\hat \xi=(\hat x, t), \hat \eta=(\hat y, \tau) \in \Pi_r$ and $\hat{v}=(v_2,\ldots,v_m)$ and $z=(z_1,\ldots,z_n)$ such that $(0,\hat v,z) = \text{Log}_{(0,\hat \eta)}((0,\hat \xi))$. We set $\hat X_j= X_j \big|_{x_1=0}$. On $\Pi_r$ we consider the distance \begin{equation} \label{eq:distancetildeH} \tilde{d}(\hat \xi,\hat \eta)= |(0,\hat \eta)^{-1}\circ(0,\hat \xi) |_{{\mathbb{G}}}=\left( | \hat v |^4+ 16 |z|^2\right)^{\tfrac{1}{4}} \end{equation} instead of $\hat d$ on $\Pi_r$. Notice that $\tilde d$ coincides with $\hat d$ on $\Pi_r$ if and only if $r=0$. \begin{definition}[Classical H\"older classes $C^{2,\alpha}$] Let $\hat{X}^2 g (\hat \xi)$ be the \emph{horizontal tangential Hessian} given by \[ \hat{X}^2 g(\hat \xi)_{i,j}=\frac{\hat X_i \hat X_j g(\hat \xi )+\hat X_j \hat X_i g(\hat \xi)}{2}, \qquad i,j=2,\ldots,m.
\] Let $r \in {\mathbb{R}}$, we say that a function $g$ defined on the boundary $\Pi_r=\{x=(r, {\hat x}, t)\}$ is of class $C^{2, \alpha}(\Pi_r)$ if and only if $\hat X_{i} \hat X_j g$ for $i=2,\ldots,m$ and $Z_k g$ for $k=1,\ldots,n$ are continuous functions and there exists $C>0$ such that \[ |\hat{X}^2 g({\hat \eta})_{i,j} - \hat{X}^2 g(\hat \xi)_{i,j} | \le C \tilde{d}(\hat \eta,\hat \xi)^{\alpha} \] for $i,j=2,\ldots,m$ and \[ |Z_k g(\hat \eta) - Z_k g(\hat \xi)| \le C \tilde{d}(\hat \xi,\hat \eta)^{\alpha}, \] for $k=1,\ldots,n$ and for each $\hat \xi, \hat \eta$ in $ \Pi_r$. In addition, we set $$\|g\|_{2, \alpha} =\|g\|_{2}+ \max_{i,j=2,\ldots,m}[(\hat{X}^2 g)_{i,j}]_{\alpha}+\max_{k=1,\ldots,n}[Z_k g]_{\alpha} $$ where $$[Z_k g]_{\alpha}=\sup_{\hat \xi, \hat \eta \in \Pi_r} \frac{|Z_k g(\hat \eta) - Z_k g(\hat \xi)| }{\tilde d(\hat \xi,\hat \eta)^{\alpha}},$$ $$[(\hat{X}^2 g)_{i,j}]_{\alpha}=\sup_{\hat \xi, \hat \eta \in \Pi_r} \frac{|\hat{X}^2 g({\hat \eta})_{i,j} - \hat{X}^2 g(\hat \xi)_{i,j}| }{\tilde d(\hat \xi,\hat \eta)^{\alpha}} $$ and $$\|g\|_{2} = \|g\|_{\infty}+ \max_{i=2,\ldots,m} \sup_{\hat \xi \in \Pi_r}| \hat X_i g(\hat \xi)| + \max_{i,j=2,\ldots,m} \sup_{\hat \xi \in \Pi_r} |\hat X^2 g(\hat \xi)_{i,j}|+ \max_{k=1,\ldots,n} \sup_{\hat \xi \in \Pi_r}| Z_k g(\hat \xi)|.$$ \end{definition} \begin{proposition} \label{prop:Gamma=C2H} A function $f$ belongs to $C^{2,\alpha}(\Pi_0)$ if and only if $f$ belongs to $\Gamma^{2,\alpha}(\Pi_0)$, namely for each $\hat \xi \in \Pi_0$, $\rho>0$ there exists a polynomial $P_{\hat \xi}(\hat \eta)=a_{\hat \xi} + b_{\hat \xi} \cdot \hat v+ \hat v^T C_{\hat \xi} \hat v+ d_{\hat \xi} \cdot z$ with $(0,\hat v,z) = \text{Log}_{(0,\hat \eta)}((0,\hat \xi))$ and $C>0$ such that \begin{equation} \label{eq:CTEH} |f({\hat \eta})-P_{\hat \xi}({\hat \eta})|<C\rho^{2+\alpha} \end{equation} for each ${\hat \eta} \in B_{\rho}(\hat \xi)$ (see Definition \ref{def:CKalpha}).
\end{proposition} \begin{proof} Assume that $f \in C^{2,\alpha}(\Pi_0)$. Let \begin{align*} P_{\hat \xi} ({\hat \eta})&= f(\hat \xi)+\hat X f(\hat \xi) \cdot ({\hat y}-{\hat x})+\tfrac{1}{2} ({\hat y}-{\hat x})^T \, \hat{X}^2 f(\hat \xi)\, \cdot ({\hat y}-{\hat x}) \\ & \quad +\sum_{k=1}^n Z_k f(\hat \xi) (\tau_k-t_k-\tfrac{1}{2} \escpr{\hat A^{(k)} \hat x, \hat y}), \end{align*} where $\hat X=(\hat X_2,\ldots,\hat X_m)$, $Z=(Z_1,\ldots,Z_n)$ and \[ \hat{X}^2 f(\hat \xi)_{i,j}=\frac{\hat X_i \hat X_j f(\hat \xi )+\hat X_j \hat X_i f(\hat \xi)}{2}, \qquad i,j=2,\ldots,m. \] By Taylor's formula with Lagrange remainder for the function $s\to f(\gamma(s))$ with $\dot{\gamma}(s)=\sum_{i=2}^m v_i \hat X_i $, with $\hat v=({\hat y}-{\hat x})$, $s \in [0,1]$, $\gamma(0)=({\hat x},t)$, $\gamma(1)=({\hat y}, \bar{t})$ we get \[ f({\hat y},\bar{t})=f({\hat x},t)+ \hat X f(\hat \xi)\cdot({\hat y}-{\hat x}) + \frac{1}{2} ({\hat y}-{\hat x})^T\hat X^2 f(\hat \mu) \cdot ({\hat y}-{\hat x}) \] where $ \hat \mu=\gamma(\theta)$ for $\theta \in (0,1)$ and $\bar{t}_k=t_k+\frac{1}{2} \escpr{\hat A^{(k)} {\hat x}, {\hat y}}$. Moreover, by the Lagrange mean value theorem for the function $s\to f({\hat y}, \bar{t}+s(\tau-\bar{t}))$ with $s \in [0,1]$ we get \[ f({\hat y}, \tau)=f({\hat y},\bar{t})+ Z f( \hat \zeta) \cdot (\tau-\bar{t}) \] where $ \hat \zeta=({\hat y}, \bar{t}+ \theta (\tau-\bar{t}))$ for $\theta \in (0,1)$. Then we get \begin{align*} f({\hat y}, \tau)&=f({\hat x},t)+ \hat X f(\hat \xi)\cdot({\hat y}-{\hat x}) + \tfrac{1}{2} ({\hat y}-{\hat x})^T\hat X^2 f(\hat \mu) \cdot ({\hat y}-{\hat x})\\ & \quad+ \sum_{k=1}^n Z_k f( \hat \zeta) (\tau_k-t_k-\tfrac{1}{2} \escpr{\hat A^{(k)}\hat x, \hat y})\\ &=P_{\hat \xi} ({\hat \eta})+\tfrac{1}{2}({\hat y}-{\hat x})^T [\hat X^2 f(\hat \mu)-\hat X^2 f(\hat \xi)]({\hat y}-{\hat x}) \\ & \quad + \sum_{k=1}^n [Z_k f(\hat \zeta)-Z_k f(\hat \xi)](\tau_k-t_k-\tfrac{1}{2} \escpr{\hat A^{(k)}\hat x, \hat y}).
\end{align*} Therefore \begin{align*} |f({\hat \eta})- P_{\hat \xi} ({\hat \eta})|&\le \sup_{i,j=2,\ldots,m } |\hat X^2 f(\hat \mu)_{i,j}- \hat X^2 f(\hat \xi)_{i,j}| |\hat v|^2+ \sup_{k=1,\ldots,n}|Z_k f(\hat \zeta)-Z_k f(\hat \xi)| \, |z|\\ &\le C \tilde{d}(\hat \mu,\hat \xi)^{\alpha} \tilde{d}({\hat \eta},\hat \xi)^2+ C\tilde{d}(\hat \xi,\hat \zeta)^{\alpha} \tilde{d}({\hat \eta},\hat \xi)^2 \le \tilde{C} \tilde{d}({\hat \eta}, \hat \xi)^{2+\alpha}. \end{align*} Now, for any fixed $\hat \xi \in \Pi_0$ and $\rho > 0$, taking ${\hat \eta} \in B_{\rho} (\hat \xi)$, clearly since $ \tilde{d}(\hat \xi, {\hat \eta})^{2+\alpha} < \rho^{2+\alpha}$ we get \[ |f({\hat \eta})- P_{\hat \xi} ({\hat \eta})|< C \rho^{2+\alpha}. \] For the reverse implication we set \[ f_\rho(\hat \xi)= \frac{f(\delta_{\rho} (\hat \xi))}{\rho^2}, \] where $\delta_\rho(\hat \xi)=(\rho {\hat x} ,\rho^2 t)$. Let $\hat \xi$, ${\hat \eta}$ be two points at distance $\rho$ apart, by Remark \ref{rk:midpointH} there exists $\hat \zeta$ such that $\tilde{d}(\hat \xi,\hat \zeta),\tilde{d}({\hat \eta},\hat \zeta) < \frac{\sqrt{3}}{2} \rho $. Then after a translation by the group law of $-\hat \zeta$, we have $B_{\rho/2}=B_{\rho/2} (0) \subset B_{\sqrt{3}\rho}(\hat \xi), B_{\sqrt{3}\rho} ({\hat \eta}) $. Let \begin{align*} &\|P_{\hat \xi, \rho/2}- P_{{\hat \eta}, \rho/2} \|_{L^{\infty}(B_1)}\\ &\le \|f_{\rho/2}-P_{\hat \xi, \rho/2}\|_{L^{\infty}(B_1)}+ \|f_{\rho/2}- P_{{\hat \eta}, \rho/2} \|_{L^{\infty}(B_1)}\\ &=\frac{4}{\rho^2} \sup_{(\hat v,z) \in B_{\rho/2}} |f(\hat v,z)- P_{\hat \xi}(\hat v,z)|+ \frac{4}{\rho^2} \sup_{(\hat v,z) \in B_{\rho/2}} |f(\hat v,z)- P_{{\hat \eta}}(\hat v,z)|\\ &\le \frac{4}{\rho^2} \sup_{(\hat v,z) \in B_{\sqrt{3}\rho}(\hat \xi)} |f(\hat v,z)- P_{\hat \xi}(\hat v,z)|+ \frac{4}{\rho^2} \sup_{(\hat v,z) \in B_{\sqrt{3} \rho}({\hat \eta})} |f(\hat v,z)- P_{{\hat \eta}}(\hat v,z)|\\ &\le 8 (3)^{1+\alpha/2} C \rho^{\alpha}.
\end{align*} Notice that \[ (P_{\hat \xi, \rho/2} - P_{{\hat \eta}, \rho/2}) (\hat v,z)=\dfrac{4}{\rho^2}[ a_{\hat \xi} - a_{\hat \eta}+ (b_{\hat \xi}- b_{\hat \eta}) \cdot \rho \hat v+ \rho^2 \hat v^T (C_{\hat \xi}- C_{\hat \eta}) \hat v+(d_{\hat \xi}-d_{\hat \eta}) \cdot \rho^2 z]. \] Then by Lemma \ref{lm:abCdH} we get \begin{equation} \label{eq:cdestH} \begin{aligned} |a_{\hat \xi}-a_{\hat \eta}| \le 2 (3)^{1+\alpha/2} M \rho^{2+\alpha} \quad &\text{and} \quad |b_{\hat \xi}-b_{\hat \eta}| \le 8 (3)^{1+\alpha/2} \sqrt{m}M \rho^{1+\alpha}\\ \|C_{\hat \xi}-C_{\hat \eta} \| \le 4 (3)^{1+\alpha/2} M \rho^{\alpha} \quad &\text{and} \quad |d_{\hat \xi}-d_{\hat \eta}| \le 4 (3)^{1+\alpha/2} M \rho^{\alpha}. \end{aligned} \end{equation} By assumption \eqref{eq:CTEH} we easily get that $a_{\hat \xi}=f(\hat \xi)$, $f$ is continuous, $\hat X f(\hat \xi)=b_{\hat \xi}$, $Z f(\hat \xi)=d_{\hat \xi}$. Then by \eqref{eq:cdestH} we obtain that $\hat X f, Z f$ are continuous and $Z f$ is $C^{\alpha}$. Moreover, since by the Baker-Campbell-Hausdorff formula, see \cite[Theorem 15.1.1]{BLU}, we have \[ \exp(s \hat X_i)(\exp(h \hat X_j)(\hat \xi))=\exp \left(s \hat X_i+h \hat X_j +\tfrac{sh}{2} [\hat X_j, \hat X_i] + O(s^2h)+ O(h^2 s)\right) (\hat \xi), \] thanks to \eqref{eq:CTEH} a straightforward computation shows \begin{align*} &\big(f(\exp(s \hat X_i)(\exp(h \hat X_j)(\hat \xi)) )- f(\exp(h \hat X_j)(\hat \xi))\big)-\big(f(\exp(s \hat X_i)(\hat \xi))-f(\hat \xi)\big)\\ &=\left(2(C_{\hat \xi})_{i,j} - \sum_{k=1}^n \frac{a_{i,j}^k}{2} Z_k f(\hat \xi) \right)h s+O(s^{2+\alpha})+O(h^{2+\alpha}). \end{align*} Then there exists \begin{align*} &\lim_{h\to 0} \lim_{s\to 0} \frac{1}{h} \left( \frac{f(\exp(s \hat X_i)(\exp(h \hat X_j)(\hat \xi)) )- f(\exp(h \hat X_j)(\hat \xi))}{s}-\frac{f(\exp(s \hat X_i)(\hat \xi))-f(\hat \xi)}{s} \right)\\ &=2(C_{\hat \xi})_{i,j} - \sum_{k=1}^n \frac{a_{i,j}^k}{2} Z_k f(\hat \xi).
\end{align*} On the other hand, letting $s\to 0$ in the previous limit we gain that \[ \hat X_j \hat X_i f(\hat \xi)=\lim_{h \to 0 } \frac{\hat X_i f(\exp(h \hat X_j)(\hat \xi))-\hat X_i f(\hat \xi)}{h}=2(C_{\hat \xi})_{i,j} - \sum_{k=1}^n \frac{a_{i,j}^k}{2} Z_k f(\hat \xi). \] Therefore since $\sum_{k=1}^n a_{i,j}^k Z_k=[\hat X_i, \hat X_j]$ we obtain \[ \hat{X}^2 f(\hat \xi)_{i,j}=\frac{\hat X_i \hat X_j f(\hat \xi )+\hat X_j \hat X_i f(\hat \xi)}{2}=2(C_{\hat \xi})_{i,j}. \] Finally, by \eqref{eq:cdestH} we obtain that $|\hat{X}^2 f(\hat \xi)_{i,j}-\hat{X}^2 f({\hat \eta})_{i,j}|\le 8 (3)^{1+\alpha/2} C \tilde{d}(\hat \xi,{\hat \eta})^{\alpha}$, for $i,j=2,\ldots,m$. \end{proof} \begin{remark} \label{rk:midpointH} Given two points $\hat \xi,{\hat \eta} \in \Pi$ such that $\rho=\tilde{d}(\hat \xi,{\hat \eta})$ then there exists $\hat \zeta=(\frac{{\hat x}+{\hat y}}{2}, \frac{t+3\tau}{4}-\frac{1}{8}\escpr{\hat A {\hat x},{\hat y}} )$ such that $\tilde{d}(\hat \zeta,{\hat \eta})=\frac{\rho}{2} <\frac{\sqrt{3}}{2} \rho $ and $\tilde{d}(\hat \zeta,\hat \xi) <\frac{\sqrt{3}}{2} \rho$. Moreover if $\hat \xi,{\hat \eta}$ belong to $B_R(\hat \xi_0)$ for $\hat \xi_0 \in \Pi$ then $\hat \zeta$ is in $B_{2R}(\hat \xi_0)$. \end{remark} \begin{lemma} \label{lm:abCdH} Let $\hat v \in \Pi$ and $P(\hat v,z)=a + b \cdot \hat v+ \hat v^T C \hat v+ d \cdot z$, where $C$ is a symmetric matrix. Assume that there exists $M>0$ such that $\|P\|_{L^\infty(B_1)} \le M$, then $|a|\le M$ and $\|C\|,|d| \le 2M$, $|b| \le 2\sqrt{m} M$. \begin{proof} Setting $\hat v=0$, $z=0$ we have $|a|\le M$. Let $\varepsilon>0$, if $\hat v=0$, $z=\tfrac{d}{|d|(1+\varepsilon)}$ we get $|a+ \frac{|d|}{1+\varepsilon}| \le M$, thus $|d|\le 2M(1+\varepsilon)$, letting $\varepsilon \to 0 $ we get $|d|\le 2M$. Let $\hat v^1,\ldots,\hat v^m$ be an orthonormal basis of eigenvectors of $C$ with eigenvalues $\lambda_1,\ldots,\lambda_m$.
Then for each $i$, setting $\hat v=\pm \tfrac{\hat v^i}{(1+\varepsilon)}$, $z=0$, we obtain \[ \left|\frac{b \cdot \hat v^i}{1+\varepsilon}+ \frac{\lambdambda_i}{(1+\varepsilon)^2}\right| \le 2M, \qquad \left|\frac{b \cdot \hat v^i}{1+\varepsilon}-\frac{\lambdambda_i}{(1+\varepsilon)^2}\right| \le 2M. \] Then we have \begin{align*} \frac{|b \cdot \hat v^i|}{1+\varepsilon}\le \frac{1}{2} \left( \left|\frac{b \cdot \hat v^i}{1+\varepsilon}+ \frac{\lambdambda_i}{(1+\varepsilon)^2}\right|+ \left|\frac{b \cdot \hat v^i}{1+\varepsilon}-\frac{\lambdambda_i}{(1+\varepsilon)^2}\right| \right) \le 2 M\\ \frac{|\lambdambda_i|}{(1+\varepsilon)^2}\le \frac{1}{2} \left( \left|\frac{b \cdot \hat v^i}{1+\varepsilon}+ \frac{\lambdambda_i}{(1+\varepsilon)^2}\right|+ \left|\frac{b \cdot \hat v^i}{1+\varepsilon}-\frac{\lambdambda_i}{(1+\varepsilon)^2}\right| \right) \le 2 M. \end{align*} Letting $\varepsilon \to 0$ we get $\|C\|=\max_{i=1,\ldots,m}|\lambdambda_{i}| \le 2M$ and $|b|\le 2\sqrt{m}M$. \end{proof} \end{lemma} \begin{lemma} \lambdabel{lm:sypol2H} Let $D_{0} \subset \Pi=\{v_1=0\}$ be a set axially symmetric with respect to $v_i=0$ for $i=2,\ldots,m$ and $z_k=0$ for $k=1,\ldots,n$. Let \[ D_{\xi}=\exp(D_0)=\{\exp(\sum_{i=2}^m v_i \hat X_i + \sum_{k=1}^n z_k Z_k) (\xi) \ : \ (\hat v, z) \in D_0 \} \] and let $p$ be a polynomial of degree less than or equal to $2$, \[ p ((\hat v,z))=a_0+ \sum_{i=2}^{m} a_i v_i +\sum_{k=1}^n b_k z_k+\sum_{i,j=2}^{m} c_{i,j} v_j v_i. \] Let $k$ be the kernel given by \eqref{eq:kkH}; then we have \begin{equation} \lambdabel{eq:kp0H} \int_{D_{\hat \xi}} k(\hat \xi, \hat \eta) p (\text{Log}_{\xi}( \eta)) \, d \hat \eta=0, \end{equation} where $\xi=(0, \hat \xi)$, $\eta=(0,\hat \eta)$ and $\text{Log}_{\xi}(\eta)=(0, -\hat \xi) \circ (0, \hat \eta)$. \end{lemma} \begin{proof} Changing the variable $(\hat v, z)=\Phi(\eta)=\text{Log}_{\xi}(\eta)$ we have $\det(d\Phi)=1$.
Therefore we gain that the left hand side of \eqref{eq:kp0H} is equivalent to \begin{equation} \lambdabel{eq:intproofoddlemma} \int_{D_{0}} \hat k(\hat v, z) p (\hat v, z) \, d \hat v dz \end{equation} Since the kernel \[ \hat k((\hat v, z))= C_Q (2-Q) \dfrac{ 4 \sum_{i=2}^m \sum_{k=1}^n a_{1,i}^k v_i z_k} { \Big( | \hat v |^4 + 16 |z|^2 \Big)^{\frac{Q+2}{4}} } \] is symmetric both in $v_i$ for $i=2\ldots,m$ and in $z_k$ for $k=1,\ldots,n$ the product between $\hat k$ and $p$ is still odd in some variables. Then the integral \eqref{eq:intproofoddlemma} vanishes on the axial symmetric domain $D_0$. \end{proof} \begin{theorem} \lambdabel{th:KcontH} Let $k$ be the kernel defined in \eqref{eq:kkH}, we set \[ K(f)( \hat \xi)=\int_{\Pi} k(\hat \xi, \hat \eta) f(\hat \eta) d \hat \eta \] for each $\hat \xi \in \Pi$. Assume that $f \in C^{2,\alpha}(\Pi)$ and $f$ compactly supported in $\Pi$ then there exists a constant $C$ such that \begin{equation} \lambdabel{eq:C2aestimateH} \| K (f) \|_{C^{2,\alpha}(\Pi)} \le C \| f \|_{C^{2,\alpha}(\Pi)} \end{equation} \end{theorem} \begin{proof} First of all we notice that $\Pi$ with the law induced by ${\mathbb{G}}$ is an homogenous group of homogeneous dimension $Q-1$. Setting $(0,\hat v, z)=\text{Log}_{(0,{\hat x}i)}(0,{\hat \eta})$ we have that $\hat k (\hat v, z)$ defined in \eqref{eq:kkHO} is $C^{\infty}(\Pi \smallsetminus {\{0\}})$, homogeneous of degree $1-Q$ and thanks to Lemma \ref{lm:sypol2H} defines a singular integral on $\Pi$. Following \cite[Section 3]{Jerison1} or \cite[p. 32]{NagelStein79} there exists a linear map $L_{{\hat x}i}$ such that $L_{{\hat x}i}({\hat x}i- {\hat \eta})=(0,{\hat \eta})^{-1} \circ (0,{\hat x}i)$. 
Denote $\tilde{L}_{{\hat x}i}= (L_{{\hat x}i}^{-1})^T$ then $K$ is realized as a pseudo-differential operator with symbol $a({\hat x}i, \zeta)=\mathcal{F}(\hat k)(\tilde{L}_{{\hat x}i}(\zeta))$ where $\mathcal{F}$ denotes the Fourier transform and $\hat k$ does not refer to the Fourier transform, but to definition \eqref{eq:kkHO}. Since $\hat k$ is of class $1-Q$, by \cite[Theorem 1, p. 9]{NagelStein79} we have $\mathcal{F}(\hat k)$ is of class $0$. Therefore the symbol $a$ belongs to the class $\mathcal{S}_{\tilde d}^0$, see \cite[p. 56]{NagelStein79}. Then by \cite[Theorem 13, p. 83]{NagelStein79} we get that $a({\hat x}i, D)$ (that coincides with $K$) is bounded from $\Gammamma^{2,\alpha}(\Pi)$ to $\Gammamma^{2,\alpha}(\Pi)$. Hence, by Proposition \ref{prop:Gamma=C2H} we obtain the desired estimates \eqref{eq:C2aestimateH} in $C^{2,\alpha}(\Pi)$. \end{proof} \subsection{The reflection technique} \begin{definition} Let $g$ be a function in $\Pi$ and $x \in {\mathbb{H}}^1 \smallsetminus \Pi$. 
We set $$ \tilde K_1(g) (\xi) = \int_{\Pi} \tilde k_1(\xi,{\hat \eta}) g({\hat \eta}) d\sigma({\hat \eta}), \qquad \tilde K(g) (\xi) = \int_{\Pi} \tilde k(\xi,{\hat \eta}) g({\hat \eta}) d\sigma({\hat \eta}) $$ where \begin{equation} \lambdabel{eq:tidek1H} \tilde k_1(\xi,{\hat \eta})= C_Q (Q-2) \dfrac{ |x-(0,\hat y)|^2 x_1 } { \Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ \hat A^{(k)}{\hat x},{\hat y})} \big)^2 \Big)^{\frac{Q+2}{4}}} \end{equation} and \begin{equation} \lambdabel{eq:tidekH} \tilde k(\xi,{\hat \eta})= C_Q (2-Q) \dfrac{ 4 \sum_{i=2}^m \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{\hat A^{(k)} {\hat x}, \hat y } \big) a_{1,i}^k (y_i-x_i)} { \Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ \hat A^{(k)} {\hat x}, \hat y} \big)^2 \Big)^{\frac{Q+2}{4}}}, \end{equation} \end{definition} \begin{remark} \lambdabel{rk:tiledekH} Notice that $\tilde k(\xi,{\hat \eta})$ defined in \eqref{eq:tidekH} converges to the convolution kernel $k({\hat x}i,{\hat \eta})$ defined in \eqref{eq:kkH} as one approaches the boundary. \end{remark} \begin{lemma}\lambdabel{tildelimitH} Let $g$ be a Lipschitz compactly supported function in $\Pi$ and ${\hat x}i_0$ be a point in $\Pi$. For $x \in {\mathbb{H}}^1 \smallsetminus \Pi$ we consider $$ \tilde{K}_1(g) (\xi) = \int_{\Pi} \tilde{k}_1(\xi,{\hat \eta}) g({\hat \eta}) d\sigma({\hat \eta}), $$ where $\tilde{k}_1$ is defined in \eqref{eq:tidek1H}. Then we have \begin{align*} &\tilde{K}_1(g) (\xi) \to \frac{1}{2} g({\hat x}i_0) \quad \text{ as } \xi \to {\hat x}i_0^+,\\ &\tilde{K}_1(g) (\xi) \to -\frac{1}{2} g({\hat x}i_0) \quad \text{ as } \xi \to {\hat x}i_0^-, \end{align*} so that $(K_1)^+=\tfrac{1}{2}\text{Id}$ while restricted to $\Pi$ and $(K_1)^-= - \frac{1}{2} \text{Id}$ while restricted to $\Pi$. 
\end{lemma} \begin{proof} By Proposition \ref{frompositiveH} $K_1 (g) (\xi)$ converges to $\pm \tfrac{1}{2} g$ and since \begin{align*} &|K_1 (g) (\xi)- \tilde{K}_1(g)(\xi)| \\ &\leqslant \sup_{{\hat x}i, {\hat \eta}} \left|1- \tfrac{\Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ A^{(k)}x,(0,\hat y)} \big)^2 \Big)^{\frac{Q+2}{4}}}{\Big( \big| x-(0,\hat y) \big|^4 + 16 \sum_{k=1}^n \big(\tau_k - t_k - \frac{1}{2} \escpr{ \hat A^{(k)}{\hat x},{\hat y})} \big)^2 \Big)^{\frac{Q+2}{4}}} \right| K_1(g)(\xi) \end{align*} we have $K_1 (g) (\xi)- \tilde{K}_1(g)(\xi)$ goes to zero when $x_1$ tends to $0$. Then also $\tilde{K}_1(g)(\xi)$ converges to $\tfrac{1}{2} g$ when $x_1\to0^+$ and $\tilde{K}_1(g)(\xi)$ converges to $-\tfrac{1}{2} g$ when $x_1\to0^-$ . \end{proof} Given $r \in {\mathbb{R}}$, let us denote $\Pi_r=\{\xi=(r, {\hat x}, t)\}$. We consider the $C^{2, \alpha}(\Pi_r)$ norm with respect to the distance $\tilde{d}$ as we did in Section \ref{sc:c2alphaestimeteH}. This choice allows us to completely decouple variables and we have \begin{proposition}\lambdabel{IKnormH} Let $\Pi=\{ x_1=0\}$ and $K$ the singular operator defined by the kernel $k$, see \eqref{kH}. Then we have $$||(-\frac{1}{2}I + K)(g)||_{C^{2, \alpha}(\Pi)} = ||(\frac{1}{2}I + K)(g)||_{C^{2, \alpha}(\Pi)}.$$ \end{proposition} \begin{proof} Since the $C^{2,\alpha}$ norm on $\Pi_r$ with respect to the distance $\tilde{d}$ are independent on $r$, we have $$||(\tilde K_1 + \tilde K)(g)(- \cdot, \cdot, \cdot)||_{C^{2, \alpha}(\Pi_{-r})} = ||(\tilde K_1 + \tilde K)(g)||_{C^{2, \alpha}(\Pi_r)}.$$ Letting $r$ to 0, and applying Lemma \ref{tildelimitH} and Remark \ref{rk:tiledekH} we get the thesis. \end{proof} All the results in Section \ref{sc:methodcontinuity} follow in the same way and we obtain that $\tfrac{1}{2} I + K $ is invertible from $C^{2,\alpha}(\Pi)$ to $C^{2,\alpha}(\Pi)$. 
\subsection{The Poisson kernel and Schauder estimates} Once we have the invertibility of $\tfrac{1}{2} I + K $, we consider the Poisson kernel \eqref{eq:Pg}, and the analogue of Theorem \ref{c:schauderGroups} for a bounded domain $\Omegaega \subset {\mathbb{G}}$ follows in the same way, clearly replacing $\Delta_{{\mathbb{H}}^1}$ with $\Delta_{{\mathbb{G}}}$. The definition of a smooth domain is the same as in Definition \ref{def:smoothboundary}, and we say that $\xi$ in $\partial \Omegaega$ is a \textit{characteristic point} if $\nabla_{{\mathbb{G}}} \psi (\xi)=0$, where $\psi$ is the defining function of the boundary defined in Definition \ref{def:smoothboundary}. \begin{theorem} \lambdabel{c:schauder2H} Let $\Omegaega \subset {\mathbb{G}}$ be a smooth bounded domain and let $u$ be the unique solution to $$\Delta_{{\mathbb{G}}} u=f\; \text{in}\ \Omegaega, \quad u= g \text{ on }\, \partial \Omegaega, $$ where $f \in C^\alpha(\bar \Omegaega)$, $g \in \Gammamma^{2, \alpha} (\partial \Omegaega)$ and $0<\alpha<1$. Let $\bar \xi \in \partial\Omegaega $ be a non-characteristic point, $V\subset {\mathbb{G}}$ be an open neighborhood of $\bar \xi $ without characteristic points and $\phi\in C^\infty_0(V)$ be a bump function equal to $1$ in a neighborhood $V_0 \subset \subset V$ of $\bar \xi$. Then we have $\phi u \in C^{2, \alpha}(\bar \Omegaega\cap V)$ and \begin{equation} \lambdabel{stimeH} \|\phi u\|_{C^{2, \alpha}(\bar \Omegaega\cap V)} \leqslant C (\|g\|_{ \Gammamma^{2, \alpha} (\partial \Omegaega)} + \|f\|_{C^\alpha(\bar \Omegaega)}).\end{equation} \begin{proof} Let us denote by $\Omegaega$ a smooth, open bounded set in ${\mathbb{G}}$ and let $0\in \partial \Omegaega$ be a non-characteristic point.
The boundary of $\Omegaega$ can be identified in a neighborhood $V$ with the graph of a regular function $w$, defined on a neighborhood $\hat V=V\cap {\mathbb{R}}^{m+n-1}$ of $0$: $$\partial \Omegaega \cap V= \{(w(\hat s), \hat s): \hat s\in \hat V \}.$$ We can as well assume that $w(0) = 0$, $\nabla w=0$. This implies that \begin{equation}\lambdabel{tuttoqui} w(\hat s) = O(|\hat s|^2) \end{equation} as $\hat s \to 0$. On the set $V$ the function $\Xi(s_1, \hat s) = (s_1 - w(\hat s), \hat s) $ is a diffeomorphism. It sends $\partial \Omegaega\cap V$ to a subset of the plane $\{x_1 =0\}$: $$\Xi(\partial \Omegaega \cap V) =\{(x_1, {\hat x}i): x_1 =0\}= \Pi_{\Xi}.$$ Moreover, we have \begin{equation}\lambdabel{ohscusa}\Delta_\Xi = d\Xi(\Delta_{{\mathbb{H}}^1}), \end{equation} with fundamental solution $$ \Gammamma_\Xi(\xi) = \Gammamma(x_1 + w({\hat x}i), {\hat x}i).$$ For $x_1$ small enough we have $$ \Gammamma_\Xi(x_1, {\hat x}i) = \Gammamma(x_1+w({\hat x}i), {\hat x}i) = \Gammamma(x_1, {\hat x}i) + R(x_1, {\hat x}i), $$ where $$ R(x_1,{\hat x}i)=w({\hat x}i) \nabla \Gammamma( x_1 +\sigma w({\hat x}i), {\hat x}i) $$ for some $\sigma \in (0,1)$. Furthermore we have that $$X_{1,\Xi}^{\xi}=d\Xi(X_1^{\xi})=X_1^{\xi}-\frac{1}{2}\sum_{k=1}^n a_{1,i}^k x_i \partial_{t_k} w({\hat x}i) \partial_{x_1}.$$ Notice that $\Gammamma$ is a rational function that goes as $d^{-Q+2}$, its first derivatives go as $d^{-Q+1}$ and its second derivatives go as $d^{-Q}$. On the other side the function $w({\hat x}i)$ has a 0 of order 2 thus $w({\hat x}i)$ goes as $d^{2}$. 
Then we have \[ X_{1,\Xi}^{\eta} \Gammamma_\Xi(0, {\hat \eta})=X_1^{\eta} \Gammamma (0,{\hat \eta})+ \hat R(0,{\hat \eta}), \] where \[ \hat R(0,{\hat \eta})=X_1^{\eta} R(0,{\hat \eta})-\frac{1}{2}\big( \sum_{k=1}^n a_{1,i}^k y_i \partial_{t_k} w({\hat \eta}) \big) \partial_{y_1} R(0,{\hat \eta}) -\frac{1}{2}\big( \sum_{k=1}^n a_{1,i}^k y_i \partial_{t_k} w({\hat \eta}) \big)\partial_{y_1} \Gammamma(0,\hat y) \] that goes as \[ |\hat R(0,{\hat x}i)| \le \hat d^{-Q+2}(0,{\hat x}i), \] where $\hat d$ is the induce distance. Therefore the operator $K_{\hat R}$ with kernel $\hat R$ is compact since the homogeneous dimension of the boundary is $Q-1$. Finally, the proof ends following the same steps of the proof of Theorem \ref{c:schauder2} and using Proposition \ref{prop:K1KplaneHT} instead of Proposition \ref{prop:K1Kplane}. \end{proof} \end{theorem} Finally, the analogous of Corollary \ref{c:schauder3} for a bounded domain $\Omegaega \subset {\mathbb{G}}$ without characteristic points follows in the same way, clearly replacing $\Delta_{{\mathbb{H}}^1}$ with $\Delta_{{\mathbb{G}}}$ . \end{document}
\begin{document} \title{Improving the bound for maximum degree on Murty-Simon Conjecture} \begin{abstract} A graph is said to be diameter-$k$-critical if its diameter is $k$ and removal of any of its edges increases its diameter. A beautiful conjecture by Murty and Simon says that every diameter-2-critical graph of order $n$ has at most $\lfloor n^2/4\rfloor$ edges and equality holds only for $K_{\lceil n/2 \rceil,\lfloor n/2 \rfloor }$. Haynes et al. proved that the conjecture is true for $\Delta\geq 0.7n$. They also proved that for $n>2000$, if $\Delta \geq 0.6789n$ then the conjecture is true. We will improve this bound by showing that the conjecture is true for every $n$ if $\Delta\geq 0.6756n$. \end{abstract} \section{Introduction} Throughout this paper we assume that $G$ is a simple graph. Our notation is the same as \cite{survey}; let $G=(V,E)$ be a graph with vertex set $V$ of order $n$ and edge set $E$ of size $m$. For a vertex $u \in V$ we denote the set of its neighbors in $G$ by $N_G(u)$. Also we denote $N_G(u) \cup \{u\}$ by $N_G[u]$. The maximum and minimum degrees of $G$ will be denoted by $\Delta$ and $\delta$, respectively. The distance $d_G(u,v)$ between two vertices $u$ and $v$ of $G$ is the length of the shortest path between them. The \emph{diameter} of $G$, ($diam(G)$), is the maximum distance among all pairs of vertices in $G$. We say a graph $G$ is \emph{diameter-$k$-critical} if its diameter is $k$ and removal of any of its edges increases its diameter. Based on a conjecture proposed by Murty and Simon \cite{conj}, there is an upper bound on the number of edges in a diameter-2-critical graph. \begin{cnj} Let $G$ be a diameter-2-critical graph. Then $m \leq \lfloor n^2/4 \rfloor$ and equality holds only for $K_{\lceil n/2 \rceil,\lfloor n/2 \rfloor }$. \end{cnj} Several authors have conducted studies on the conjecture, proving results close to the original one; however, no complete proof has been provided yet.
Plesnık \cite{plensik} showed that $m < \frac{3n(n - 1)}{8}$. Moreover, Caccetta and Haggkvist \cite{conj} proved $m < 0.27n^2$. Fan \cite{fan} also proved the fact that for $n\leq24$ and for $n=26$ we have $m \leq {[\frac{n^2}{4}]}$. For $n=25$, he achieved $m < \frac{n^2}{4} + \frac{(n^2 - 16.2n + 56)}{320} < 0.2532n^2$. Another proof was presented by Xu \cite{Xu} in 1984, which was found out to have a small error. Afterwards, Furedi \cite{furedi} provided a considerable result showing that the original conjecture is true for large $n$, that is, for $n > n_0$ where $n_0$ is a tower of 2s of height about $10^{14}$. This result is highly significant though not applicable to those graphs we are currently working with. \section{Total Domination} Domination number and Total domination number are parameters of graphs which are studied, respectively, in \cite{Haynes98a,advanced_domination_book} and \cite{total_domination_book}. Assume $G=(V,E)$ is a simple graph. Let $X$ and $Y$ be subsets of $V$; We say that $X$ dominates $Y$, written $X \succ Y$, if and only if every element of $Y-X$ has a neighbor in $X$. Similarly, we say that $X$ totally dominates $Y$, written $X \succ_t Y$ if and only if every element of $Y$ has a neighbor in $X$. If $X$ dominates or totally dominates $V$, we might write, $X \succ G$ or $X \succ_t G$ instead of $X \succ V$ and $X \succ_t V$, respectively. Domination number and total domination number of $G=(V,E)$ are the size of smallest subset of $V$ that, dominates and totally dominates $V$, respectively. A graph $G$ with total domination number of $k$ is called $k_t$-critical, if every graph constructed by adding an edge between any nonadjacent vertices of $G$ has total domination number less than $k$. It is obvious that adding any edge to $k_t$-critical graph $G$ would result a graph which has total domination number of $k-1$ or $k-2$. Assume $G$ is $k_t$-critical graph. 
If for every pair of nonadjacent vertices $\{u,v\}$ of $G$, the total domination number of $G+uv$ is $k-2$, then $G$ is called $k_t$-supercritical. As shown in \cite{hanson03}, there is a close connection between diameter-2-critical graphs and total domination critical graphs: \begin{thm} \emph{(\cite{hanson03})}\label{complement} A graph is diameter-2-critical if and only if its complement is $3_t$-critical or $4_t$-supercritical. \end{thm} By this theorem, in order to prove the Murty-Simon conjecture, it suffices to prove that every graph which is $3_t$-critical or $4_t$-supercritical has at least $\lfloor n(n-2)/4 \rfloor$ edges, where $n$ is the order of the graph. This problem is solved in some cases in \cite{haynes98b,haynes98c,haynes11}: \begin{thm} \emph{(\cite{haynes98b})}\label{4t} A graph $G$ is $4_t$-supercritical if and only if $G$ is the disjoint union of two nontrivial complete graphs. \end{thm} \begin{thm} \emph{(\cite{haynes98c})}\label{diam23} If $G$ is a $3_t$-critical graph, then $2 \le diam(G) \le 3$. \end{thm} \begin{thm} \emph{(\cite{haynes11})} Every $3_t$-critical graph of diameter 3 and order $n$ has size $m \ge n(n-2)/4 $. \end{thm} By these theorems, a proof of the following conjecture will show that the Murty-Simon conjecture is true. \begin{cnj} A $3_t$-critical graph of order $n$ and of diameter 2 has size $m \ge n(n-2)/4$. \end{cnj} More recently Haynes et al. proved the following: \begin{thm}\label{this} \emph{(\cite{haynes14})} Let $G$ be a $3_t$-critical graph of order $n$ and size $m$. Let $\delta = \delta (G)$. Then the following hold:\\ a) If $\delta \geq 0.3n $, then $ m > \lceil n(n-2)/4 \rceil $.\\ b) If $n \ge 2000$ and $\delta \geq 0.321n$, then $ m > \lceil n(n-2)/4 \rceil $. \end{thm} Also G. Fan et al. proved that: \begin{thm}\label{n<25} \emph{(\cite{fan})} The Murty-Simon conjecture is true for every graph with fewer than 25 vertices.
\end{thm} In the next section, in order to improve this bound, we will prove that every simple diameter-2-critical graph of order $n$ and size $m$ satisfies $ m <\lfloor n^2/4 \rfloor $ if $\Delta\geq 0.6756n$. \section{Main Result} In this section we will prove the Murty-Simon conjecture for graphs whose complements are $3_t$-critical, with a weaker restriction on their minimum degree, and improve the result proposed by Haynes et al.\ in \cite{haynes14}. First we recall the following lemma, which was proposed in that paper. \begin{lem} Let $u$ and $v$ be nonadjacent vertices in a $3_t$-critical graph $G$; clearly $\{u,v\} \nsucc G$. Then there exists a vertex $w$, such that $w$ is adjacent to exactly one of $u,v$, say $u$, and $\{u,w\} \succ G-v$. We will call $uw$ a \emph{quasi-edge} associated with $uv$. Further $v$ is the unique vertex not dominated by $\{u,w\}$ in $G$; in this case we call $v$ the \emph{supplement} of $\{u,w\}$. \end{lem} \begin{dfn} Let $G=(V,E)$ be a $3_t$-critical graph. If $S\subseteq V$ then we say that $S$ is a \emph{quasi-clique} if for each nonadjacent pair of vertices of $S$ there exists a quasi-edge associated with that pair, and each quasi-edge associated with that pair contains at least one vertex outside $S$. Edges \emph{associated with} quasi-clique $S$ are the union of the edges with both ends in $S$ and the quasi-edges associated with some pair of nonadjacent vertices of $S$. \end{dfn} \begin{dfn} Let $G=(V,E)$ be a $3_t$-critical graph. Let $A$ and $B$ be two disjoint subsets of $V$. We define $E(G;A,B)$ as the set of all edges $\{a,b\}$ where $a \in A$ and $b \in B$, and $\{a,b\}$ is associated with a nonadjacent pair $\{a,c\}$, where $c$ is in $A$. By Lemma 3.1, we know that every two members of $E(G;A,B)$ are associated with different nonadjacent pairs. \end{dfn} \begin{lem}\label{shimi} Let $G$ be a $3_t$-critical graph.
Let $S \subset V(G)$ and let $S^*=\cap_{s\in S} N(s)$; then the following holds: $$|E(G[S^*])|+|E(G;S^*,V(G)-(S^*\cup S))|\geq \frac{|S^*|^2-2|S^*|}{c}$$ where $c$ is the greatest root of $x^2-4x-4=0$, which is equal to $2+2\sqrt2\approx4.83$. \end{lem} \begin{proof} We apply induction on the size of $S^*$ to prove the lemma. Note that for every pair of nonadjacent vertices in $S^*$ such as $\{u,v\}$, if $\{u,w\}$ is the quasi-edge associated to it, then, since $v$ is adjacent to every vertex of $S$ but not to $w$, we can conclude that $w\not\in S$. Note that when $|S^*| \le 2$, since $\frac{|S^*|^2-2|S^*|}{c} \leq 0$, the inequality is obviously true. Let $v$ be the vertex having minimum degree in $G[S^*]$. We denote the set of neighbors of $v$ in $S^*$ by $A$. Since every vertex in $S^* - (A \cup \{v\})$ is not adjacent to $v$, $S^* - (A \cup \{v\})$ is a quasi-clique. Also $A$ is $\cap_{s\in {S \cup \{v\}}} N(s)$, so $|E(G[A])|+|E(G;A,V(G)-(A\cup S \cup \{v\}))| \geq \frac{|A|^2-2|A|}{c}$. For every pair of nonadjacent vertices $\{x,y\}$, one of them is the supplement of the quasi-edge associated to this pair, so the quasi-edges associated to nonadjacent pairs in $A$ and $S^*-(A\cup\{v\})$ are disjoint. With the statements mentioned above we can conclude that $$|E(G[S^*])|+|E(G;S^*,V(G)-(S^*\cup S))| \geq \frac{|A|^2-2|A|}{c} + {{|S^*| - |A| - 1} \choose 2} + |A|.$$ The right side of the inequality is a function of $|A|$, which we call $f(|A|)$. One can verify that $$f'(|A|)=\frac{(c+2)|A|}{c}+(\frac{5}{2}-\frac{2}{c})-|S^*|$$ so $f'(|A|)$ is negative whenever $0 \le |A| \le \frac {2|S^*|-4} {c}$ and $|S^*| \geq 3$. So it suffices to prove that $f(\frac {2|S^*|-4} {c})\ge \frac{|S^*|^2-2|S^*|}{c}$, which is done by Lemma \ref{appendProoflem}.
On the other hand when $|A| \geq \frac {2|S^*|-4} {c}$ by definition of $A$, we can easily conclude that: $$|E(G[S^*])|\geq\frac{|A||S^*|}{2}\ge \frac{|S^*|^2-2|S^*|}{c}.$$ \end{proof} \begin{lem}\label{B_qc} Let $G=(V,E)$ be a $3_t$-critical graph. If $v \in V$, then $V-N_{G}[v]$ is a quasi-clique. \end{lem} \begin{proof} This lemma is generalized from a lemma in \emph{(\cite{haynes14})}, in which $v$ was assumed as a vertex with minimum degree in $G$. Since the proof was independent of such assumption, the same proof is correct. \iffalse $a$ and $b$ be a pair of nonadjacent vertices of $B$; clearly $\{a,b\} \nsucc G$, since neither $a$ nor $b$ is adjacent to $v$. Hence for each pair of nonadjacent vertices of $B$ there exists a quasi-edge associated with. Let $ay$ be a quasi-edge associated with $\{a,b\}$, if $y \in B$ then neither $a$ nor $y$ is adjacent to $v$. Also by lemma 3.1 we know that $\{a,y\}\succ G-b$. where $b$ is a vertex in B and so it isn't $v$. Hence a contradiction. Also for each pair $\{a,y\}$ where \fi \end{proof} \iffalse In the rest of this paper we will use symbol $v_\delta$ only for some fixed minimum-degree vertex of 3t-domination-critical graph $G$. Also we only use symbols $A$ and $B$ to referring to, respectively, $N_{(v_\delta)}$ and $V(G)-N_{[v_\delta]}$. \fi Now, we present the main result of this paper: \begin{thm}\label{mainTh} Suppose that $c=2+\sqrt2$, and $a$ is the smallest root of the equation $(2c+4)x^2-4cx+c=0$, which is equal to $\frac{\sqrt2-\sqrt{2-\sqrt2}}{2}\approx0.32442$. Let $G(V,E)$ be a $3_t$-critical graph of order $n$, size $m$ and minimum degree $\delta$. If $n\ge3$ and $\delta \le an-1$ then, $$m>\lceil{\frac{n(n-2)}{4}}\rceil$$ \end{thm} \begin{proof} First, note that for every positive integer $n$: \begin{itemize} \item if $n$ is even ${n(n-2)}$ is divisible by $4$. \item if $n$ is odd ${n(n-2)+1}$ is divisible by $4$. 
\end{itemize} So it suffices to prove that: $$m>\frac{n(n-2)+1}{4}$$ Let $v \in V(G)$ be a vertex with $\delta$ neighbors and $A=N_G(v)$. Also let $B=V-N_G[v]$; then by Lemma \ref{B_qc}, $B$ is a quasi-clique. Also by Lemma \ref{shimi}, $|E(G[A])|+|E(G;A,B)|\ge \frac{\delta^2-2\delta}{c}.$ $A$ and $B$ are disjoint, so the quasi-edges associated to nonadjacent pairs in $A$ are disjoint from the quasi-edges associated to nonadjacent pairs in $B$, because every quasi-edge has a unique supplement. Therefore, we have: $$m\ge\delta+\frac{\delta^2-2\delta}{c}+{n-1-\delta\choose2}$$ So by Lemma \ref{appendProofTheorem} we have: $$m>\frac{n(n-2)+1}{4}.$$ \end{proof} \begin{thm} For every diameter-2-critical graph $G$ of order $n$ and size $m$, if $\Delta(G)\ge0.6756n$, then $m<\lfloor{\frac{n^2}{4}}\rfloor$. \end{thm} \begin{proof} Since $diam(G)=2$, we have $n\ge3$. Let $\bar{G}$ be the complement of $G$ and assume that the size of $\bar{G}$ is $m'$. Since $m+m'={n\choose2}$, it suffices to prove that: $$m'>\lceil\frac{n(n-2)}{4}\rceil.$$ We have: $$\delta(\bar{G})=n-1-\Delta(G)\le0.3244n-1.$$ Note that by Theorem \ref{complement}, $\bar{G}$ is either $3_t$-critical or $4_t$-supercritical. If $\bar{G}$ is $4_t$-supercritical, then by Theorem \ref{4t}, $\bar{G}$ is the disjoint union of two nontrivial complete graphs and the order of the smaller one is less than $0.3244n-1$, which means $$m'\ge{0.3244n-1\choose2}+{0.6756n+1\choose2}>\lceil\frac{n(n-2)}{4}\rceil.$$ So we may assume that $\bar{G}$ is $3_t$-critical, and this case is handled by Theorem \ref{mainTh}. \end{proof} \appendix \section{\\Proof of Inequalities} \label{App:AppendixA} \begin{lem}\label{appendProofTheorem} Suppose that $c=2+2\sqrt2$, and $a$ is the smaller root of the equation $(2c+4)x^2-4cx+c=0$, which is equal to $\frac{\sqrt2-\sqrt{2-\sqrt2}}{2}\approx0.3244$. \\ If $an-1\ge y\ge0$ and $n\ge3$, then:\\ $$y+{\frac{y^2-2y}{c}} + {n-1-y \choose 2} > \frac{n(n-2)+1}{4} $$ \end{lem} \begin{proof} Let $f(y)= y+{\frac{y^2-2y}{c}} + {n-1-y \choose 2} $.
We have: $$f'(y)=1+\frac{2y-2}{c}-n+y+\frac{3}{2}$$ $$=-n+\frac{5}{2}+\sqrt2y-\frac{2}{c}$$ $$<-n+\frac{5}{2}+\sqrt2(an-1)-\frac{2}{c}<0 $$ This means that $f(y)\ge f(an-1)$. Let $g(n)=f(an-1)-\frac{n(n-2)+1}{4}$. Now it suffices to prove that $g(n)$ is positive for every $n\ge3$. $$g(n)=\frac{1}{4}((-8+7\sqrt2+4\sqrt{4-2\sqrt2}-7\sqrt{2-\sqrt2})n+6\sqrt2-11)$$ So the coefficient of $n$ is positive and $g(3)\approx0.025>0$, so we can conclude that $g(n)$ is positive when $n\ge3$. \end{proof} \begin{lem}\label{appendProoflem} Let $n\ge3$ be a positive integer and $c=2+2\sqrt2$; then $$\frac{(\frac{2n-4}{c})^2-2(\frac{2n-4}{c})}{c}+{n-(\frac{2n-4}{c})-1\choose2}+(\frac{2n-4}{c})\ge\frac{n^2-2n}{c}.$$ \end{lem} \begin{proof} We prove that $f(n)=\frac{(\frac{2n-4}{c})^2-2(\frac{2n-4}{c})}{c}+{n-(\frac{2n-4}{c})-1\choose2}+(\frac{2n-4}{c})-\frac{n^2-2n}{c}$ is positive. $$f(n)=\frac{1}{2}((3\sqrt{2}-4)n+(8-6\sqrt{2}))$$$$=\frac{(3\sqrt{2}-4)}{2}(n-2)>0$$ So $f(n)$ is positive for $n\ge3$. \end{proof} \end{document}
\begin{document} \begin{abstract} We compute the homotopy groups of the $C_2$ fixed points of equivariant topological modular forms at the prime $2$ using the descent spectral sequence. We then show that as a $\TMF$-module, it is isomorphic to the tensor product of $\TMF$ with an explicit finite cell complex. \ifspringer \subclass{55N34 \and 55P91 \and 55T99} \fi \end{abstract} \title{ exorpdfstring{$C_2$} \tableofcontents \section{Introduction} Topological $K$-theory is one of the first examples of generalized cohomology theories. It admits a natural equivariant analogue --- for a compact Hausdorff $G$-space $X$, the group $\KO^0_G(X)$ is the Grothendieck group of $G$-equivariant vector bundles over $X$. In particular, $\KO^0_G(*) = \operatorname{Rep}(G)$ is the representation ring of $G$. As in the case of non-equivariant $K$-theory, this extends to a $G$-equivariant cohomology theory $\KO_G$, and is represented by a genuine $G$-spectrum. We shall call this $G$-spectrum $\KO$, omitting the subscript, as we prefer to think of this as a global equivariant spectrum --- one defined for all compact Lie groups. The $G$-fixed points of this, written $\KO^{\mathcal{B}G}$, are a spectrum analogue of the representation ring, with $\pi_0 \KO^{\mathcal{B}G} = \KO^0_G(*) = \operatorname{Rep}(G)$ (more generally, $\pi_n \KO^{\mathcal{B}G} = \KO^{-n}_G(*)$). These fixed point spectra are readily computable as $\KO$-modules. For example, \[ \KO^{\mathcal{B}C_2} = \KO \vee \KO,\quad \KO^{\mathcal{B} C_3} = \KO \vee \mathrm{KU}. \] This corresponds to the fact that $C_2$ has two real characters, while $C_3$ has a real character plus a complex conjugate pair. If one insists, one can write $\mathrm{KU} = \KO \otimes C\eta$, providing an arguably more explicit description of $\KO^{\mathcal{B} C_3}$ as a $\KO$-module. 
In general, $\KO^{\mathcal{B}G}$ decomposes as a direct sum of copies of $\KO$, $\mathrm{KU}$ and $\mathrm{KSp}$, with the factors determined by the representation theory of $G$ \cite[p.133--134]{segal-equivariant-k-theory}. From the chromatic point of view, the natural object to study after $K$-theory is elliptic cohomology, or its universal version, topological modular forms. Equivariant elliptic cohomology, in various incarnations, has been of interest to many people, include geometric representation theorist and quantum field theorists. Most recently, in \cite{equivariant-tmf}, Gepner and Meier constructed integral equivariant elliptic cohomology and topological modular forms for compact abelian Lie groups, following the outline in \cite{elliptic-survey} and the groundwork in \cite{elliptic-i,elliptic-ii,elliptic-iii}. The introduction in \cite{equivariant-tmf} provides a nice overview of the relevant history, whose efforts we shall not attempt to reproduce. The spectra $\TMF^{\mathcal{B}C_n}$ can be constructed as follows: in \cite{elliptic-ii}, Lurie constructed the universal oriented\footnote{``oriented'' refers to complex orientation of the associated cohomology theory} (spectral) elliptic curve, which we shall denote $p\colon \E \to \M$. Equivariant $\TMF$ is then constructed with the property that \[ \TMF^{\mathcal{B}C_n} = \Gamma(\E[n]; \O_{\E[n]}),\quad \TMF^{\mathcal{B}S^1} = \Gamma(\E; \O_\E), \] where $\E[n]$ are the $n$-torsion points of the elliptic curve. This is to be compared to the homotopy fixed points (with trivial group action), where $\E$ is replaced by the formal group $\hat{\E}$. We are interested in explicit descriptions of these spectra as $\TMF$-modules. Much work was done by Gepner--Meier themselves: in \cite[Theorem 1.1]{equivariant-tmf}, they computed \[ \TMF^{\mathcal{B}S^1} = \TMF \oplus \Sigma \TMF. 
\] This corresponds to the fact that the coherent cohomology of a (classical) elliptic curve is concentrated in degrees $0$ and $1$ by Serre duality. As for finite groups, \cite[Example 9.4]{equivariant-tmf} argues that if $\ell \nmid |G|$ or $\ell > 3$, then $\TMF^{\mathcal{B}G}_{\ell}$ splits as sums of shifts of $\TMF_1(3)$, $\TMF_1(2)$ and $\TMF$. Further, $\TMF_1(3)$ and $\TMF_1(2)$ can themselves be described as the smash product of $\TMF$ with an 8- and 3-cell complex respectively (see \cite[Section 4]{homology-tmf} for details). Thus, we have an explicit description of $\TMF^{\mathcal{B}G}_\ell$ as a $\TMF_\ell$-module. This leaves us with the case where $\ell = 2, 3$ and $\ell \mid |G|$. In this paper, we compute $\TMF^{\mathcal{B}C_2}$ at the prime $2$. \begin{theorem}\label{thm:main} There is a (non-canonical) isomorphism of $2$-completed $\TMF$-modules \[ \TMF^{\mathcal{B}C_2} \cong \TMF \oplus \TMF \oplus \TMF \otimes DL, \] where $DL$ is the spectrum $S^{-8} \cup_{\nu} S^{-4} \cup_{\eta} S^{-2} \cup_2 S^{-1}$, as depicted in \Cref{fig:cell-dl}. \end{theorem} This space $DL$ is so named because its dual $L$ is a split summand of the spectrum $L_0$ defined in \cite[Definition 2.3]{tmf-tate}; in fact, $L_0 = L \oplus S^0$. \begin{figure} \caption{Cell diagram of $DL$} \label{fig:cell-dl} \end{figure} \begin{remark} Despite being a $4$-cell complex, the $\TMF$-module $\TMF \otimes DL$ is really a rank-$2$ $\TMF$-module. We claim that after base change to the flat cover $\TMF_1(3) = v_2^{-1}\mathrm{BP}\langle 2 \rangle$, the module $\TMF_1(3) \otimes DL$ is free of rank $2$. We first observe what happens after base change to $\tmf_1(3) = \mathrm{BP}\langle 2 \rangle$. In $\pi_* \tmf_1(3)$, the elements $\eta$ and $\nu$ are killed. We claim that the cell diagram of $\tmf_1(3) \otimes DL$ as a $\tmf_1(3)$-module is what is given in \Cref{fig:cell-dl-tmf13}. 
\begin{figure} \caption{Cell diagram of $DL$ over $\tmf_1(3)$} \label{fig:cell-dl-tmf13} \end{figure} To see this, recall that $v_n$ is a filtration one element detected by $Q_n$. We can expand \[ Q_1 = \Sq^2 \Sq^1 + \Sq^1 \Sq^2,\quad Q_2 = \Sq^1 \Sq^2 \Sq^4 + \Sq^5 \Sq^2 + \Sq^6 \Sq^1 + \Sq^4 \Sq^2 \Sq^1. \] So $Q_1$ and $Q_2$ send the $(-4)$- and $(-8)$-cell to the top cell respectively. Since there are no other possible attaching maps for degree reasons ($v_n$ is only well-defined up to higher filtration), the cell diagram must be as described. In $\TMF_1(3)$, we further invert $v_2$, and the top and bottom cell cancel out. So we are left with two free cells in degree $-2$ and $-4$. \end{remark} \begin{remark} While the theorem is stated for $\TMF$, the same result holds for any elliptic cohomology theories in the sense of \Cref{section:equivariant}. Indeed, by \cite[Theorem 7.2]{affineness-chromatic}, taking global sections gives an equivalence of $\infty$-categories \[ \Gamma\colon \QCoh(\M) \overset\sim\to \Mod_\TMF. \] Thus, as quasi-coherent sheaves, we have \[ p_* \mathcal{O}_{\E[2]} \cong \mathcal{O}_\M \oplus \mathcal{O}_\M \oplus \mathcal{O}_\M \otimes DL. \] By the universality of $\M$, the same must hold for all other elliptic cohomology theories. \end{remark} \subsection{Outline of proof} To prove the theorem, we begin by computing the homotopy groups of $\TMF^{\mathcal{B}C_2}$. As in the case of $\TMF$, there is a descent spectral sequence computing $\pi_* \TMF^{\mathcal{B} C_2}$ whose $E_2$ page is the coherent cohomology of the $2$-torsion points of the (classical) universal elliptic curve. Upon computing the $E_2$ page for $\TMF^{\mathcal{B}C_2}$, one immediately observes that there are two copies of $\TMF$'s $E_2$ page as direct summands (as one would expect from the answer). 
We can identify these copies as follows: \begin{enumerate} \item Applying $\Gamma$ to the map $p\colon \E[2] \to \M$ induces $1 \colon \Gamma(\M; \O_\M) \to \Gamma(\E[2]; \O_{\E[2]})$. This is split by the identity section. \item Since $\TMF$ is a genuine $C_2$-equivariant cohomology theory, we get a norm map $\TMF_{h C_2} = \TMF \otimes \RP^\infty_+ \to \TMF^{\mathcal{B}C_2}$. Restricting to the bottom cell of $\RP^\infty_+$ gives us a transfer map $\tr\colon \TMF \to \TMF^{\mathcal{B}C_2}$. \end{enumerate} We will explore these further in \Cref{subsection:unit-transfer}. To simplify the calculation, we can quotient out these factors, and rephrase our original theorem as \begin{theorem} There is an isomorphism \[ \overline{\TMF^{\mathcal{B}C_2}} \equiv \TMF^{\mathcal{B}C_2} / (1, \tr) \simeq \TMF \otimes DL. \] \end{theorem} This is proven by computing the homotopy groups of $\overline{\TMF^{\mathcal{B} C_2}}$ via its descent spectral sequence, which is now reasonably sparse, followed by an obstruction theory argument. This implies the original theorem via the observation \begin{lemma} Any cofiber sequence of $\TMF$-modules \[ \TMF \oplus \TMF \to {?} \to \TMF \otimes DL \] splits. \end{lemma} \begin{proof} We have to show that \[ [\TMF \otimes DL, \Sigma \TMF \oplus \Sigma \TMF]_\TMF = 0. \] This is equivalent to showing that $\pi_{-1} \TMF \otimes L = 0$. This follows immediately by running the long exact sequences building $\TMF \otimes L$ from its cells, since $\pi_{-2} \TMF = \pi_{-3} \TMF = \pi_{-5} \TMF = \pi_{-9} \TMF = 0$. \end{proof} \begin{remark} At first I only computed the homotopy groups of $\TMF^{\mathcal{B}C_2}$. The above identification was discovered when I, for somewhat independent reasons, looked into the homotopy groups of $\TMF \otimes L$, and observed that they looked almost the same as that of $\TMF^{\mathcal{B}C_2}$. 
It is, however, $\TMF \otimes DL$ that shows up above; there is a cofiber sequence \[ \TMF \otimes L \to \TMF \otimes DL \to \KO, \] which induces a \emph{short} exact sequence in homotopy groups. Thus, the homotopy groups of $\TMF \otimes DL$ and $\TMF \otimes L$ differ by a single copy of $\pi_* \KO$, which is hard to notice after inverting $\Delta$. On the other hand, the corresponding classes have different Adams--Novikov filtrations, which makes them easy to distinguish in practice. \end{remark} \begin{remark} As part of the proof, we compute the homotopy groups $\pi_* \TMF^{\mathcal{B}C_2}$. To describe the group explicitly, under the decomposition, it remains to specify $\pi_* \TMF \otimes DL = \pi_* \overline{\TMF^{\mathcal{B} C_2}}$. This group is given by the direct sum of the $\ko$-like parts, namely \[ \bigoplus_{k \in \Z} \pi_*\Sigma^{8 + 24k} \ko \oplus \pi_*\Sigma^{16 + 24k} \ko, \] and what is depicted in \Cref{fig:anss-e9-page,fig:anss-e-infty-page}. In these figures, each dot is a copy of $\Z/2$, and the greyed out classes are ones that do not survive the spectral sequence (that is, the homotopy groups are given by the black dots). This part is $192$-periodic via $\Delta^8$-multiplication. \end{remark} \subsection{Overview} In \Cref{section:equivariant} we provide relevant background on equivariant elliptic cohomology. Building upon the results in \cite{equivariant-tmf}, we construct $C_n$-equivariant elliptic cohomology as a functor $\Sp_{C_n}^{\mathrm{op}} \to \QCoh(\E[n])$, which gives us the transfer map $\tr$. We then provide an explicit description of the descent spectral sequence for quasi-coherent sheaves over $\M$. In \Cref{section:e2}, we compute the Hopf algebroid presenting $\E[2]$ and subsequently the $E_2$ page of the descent spectral sequence for $\overline{\TMF^{\mathcal{B}C_2}}$ using the $2$-Bockstein spectral sequence. 
Unfortunately, the coaction involves division in a fairly complex ring, and cocycle manipulations throughout the paper are performed with the aid of \texttt{sage}. In \Cref{section:differentials}, we compute the differentials in the descent spectral sequence. The key input here is the fact that there is a norm map $\TMF_{h C_2} \to \TMF^{\mathcal{B}C_2}$ whose composite all the way down to $\TMF^{h C_2}$ is well-understood in terms of stunted projective spaces. This provides us with a few permanent classes, which combined with the $\TMF$-module structure lets us compute all the differentials. Our calculations will make heavy use of synthetic spectra \cite{synthetic}, whose relation to the Adams spectral sequence is laid out in \cite[Section 9]{manifold-synthetic}. In \Cref{section:ident}, we conclude the story by constructing a map $\TMF \otimes DL \to \overline{\TMF^{\mathcal{B}C_2}}$ via obstruction theory and showing that it is an isomorphism. In \Cref{section:connective}, we use entirely different methods to study properties of a hypothetical connective version of $C_2$-equivariant $\TMF$, which we call $\tmf_{C_2}$ (we put the $C_2$ subscript since we do not purport to describe a global equivariant $\tmf$). We shall show that under reasonable assumptions, $\Delta^{-1} (\tmf_{C_2})^{\mathcal{B}C_2}$ is dual to $\TMF^{\mathcal{B} C_2}$ in the category of $\TMF$-modules. In \Cref{section:stunted}, we prove some basic properties of the norm map that we use in \Cref{section:differentials}. Finally, in \Cref{section:sage}, we include the \texttt{sage} code we used for our cocycle manipulations. \subsection{Conventions} \begin{itemize} \item All categories are $\infty$-categories. \item We use $\otimes$ to denote the smash product of spectra. \item Unless otherwise specified, we work in the category of $\TMF$-modules, and all maps are $\TMF$-module maps. Further, we implicitly complete at the prime $2$. 
\item Our charts follow the same conventions as, say, \cite{tilman-tmf}. In each bidegree, a solid round dot denotes a copy of $\Z/2$. More generally, $n$ concentric circles denotes a copy of $\Z/2^n$. A white square denotes $\Z$. A line of slope $1$ denotes $h_1$ multiplication and a line of slope $\frac{1}{3}$ denotes $h_2$ multiplication. An arrow with a negative slope denotes a differential. Dashed lines denote hidden extensions. In particular, a dashed vertical line is a hidden $2$-extension. We use Adams grading, so that the horizontal axis is $t - s$ and vertical axis is $s$. \item All synthetic spectra will be based on $BP$. We choose our grading conventions so that $\pi_{t - s, s}(\nu X/\tau) = \Ext_{E_* E}^{s, t}(E_*, E_* X)$, i.e.\ $\pi_{x, y}$ shows up at coordinates $(t - s, s) = (x, y)$ in an Adams chart. Under these grading conventions, $\tau$ has bidegree $(0, -1)$. This is not the grading convention used by \cite{synthetic} and \cite{manifold-synthetic}; $\mathbb{S}^{a, b}$ in their grading is $S^{a, b - a}$ in ours. \item To avoid confusing the synthetic analogue functor $\nu$ with the element $\nu$ in the homotopy groups of spheres, we always write the former as $\nu(X)$ with the brackets. \item If $R$ is a (discrete) ring and $\alpha \in R$, we write $O(\alpha)$ for an unspecified element that is $\alpha$-divisible. For example, if $f = g + O(2)$, this means $f$ and $g$ agree mod $2$. \end{itemize} \ifspringer \subsection{Acknowledgements} \fi I would like to thank Robert Burklund for helpful discussions on various homotopy-theoretic calculations, especially regarding the application of synthetic spectra in \Cref{section:differentials}. Further, I benefited from many helpful discussions with Sanath Devalapurkar, Jeremy Hahn, and Lennart Meier regarding equivariant $\TMF$ and equivariant homotopy theory in general. Robert, Lennart and an anonymous referee also provided many helpful comments on an earlier draft. 
Finally, the paper would not have been possible without the support of my advisor, Michael Hopkins, who suggested the problem and provided useful guidance and suggestions throughout. The author was partially supported by NSF grants DMS-1803766 and DMS-1810917 through his advisor. \ifspringer \end{acknowledgements} \fi \section{Equivariant elliptic cohomology}\label{section:equivariant} \subsection{Elliptic cohomology} The starting point of equivariant elliptic cohomology is the notion of an oriented (spectral) elliptic curve, which was introduced by Lurie in \cite[Section 2]{elliptic-i} and \cite[Section 7.2]{elliptic-ii}. We should think of this as a spectral version of an elliptic curve, accompanied by a complex orientation of the associated cohomology theory. Let $X$ be a non-connective spectral Deligne--Mumford stack, and $p\colon E \to X$ an oriented elliptic curve. In \cite[Construction 5.4, Proposition 8.2]{equivariant-tmf}, Gepner--Meier construct an $S^1$-equivariant elliptic cohomology functor \[ \Ell_{S^1}\colon \Sp_{S^1}^{\mathrm{op}} \to \QCoh(E), \] which is a limit-preserving symmetric monoidal functor satisfying \[ \Ell_{S^1} ((S^1 / C_m)_+) = \mathcal{O}_{E[m]}. \] We begin by extending this to a functor on $\Sp_{C_n}$. \begin{lemma} There is an elliptic cohomology functor \[ \Ell_{C_n}^E\colon \Sp_{C_n}^{\mathrm{op}} \to \QCoh(E[n]) \] such that for any $m \mid n$, we have a natural identification \[ \Ell_{C_n}^E(({C_n} / C_m)_+) = \O_{E[m]}, \] where we identify $\O_{E[m]}$ with its direct image in $\QCoh(E[n])$ under the inclusion. Moreover, if $f\colon X' \to X$ is a morphism almost of finite presentation, then \[ f^* \Ell_{C_n}^E(X) = \Ell_{C_n}^{f^* E}(X) \in \QCoh(f^* E[n]). \] \end{lemma} If there is no risk of confusion, we omit the superscript ${}^E$. \begin{proof} Let $\Ind_{C_n}^{S^1}\colon \Sp_{C_n} \to \Sp_{S^1}$ be the induction map, left adjoint to the restriction map. Then $\Ind_{C_n}^{S^1}((C_n / C_m)_+) = (S^1 / C_m)_+$.
Since the restriction map is symmetric monoidal under the smash product, $\Ind_{C_n}^{S^1}$ is oplax monoidal. Thus, the composite \[ \begin{tikzcd} \Ell_{C_n}^*\colon \Sp_{C_n}^{\mathrm{op}} \ar[r, "\Ind_{C_n}^{S^1}"] & \Sp_{S^1}^{\mathrm{op}} \ar[r, "\Ell_{S^1}"] & \QCoh(E) \end{tikzcd}, \] is lax monoidal. Since $S^0$ is a coalgebra in $\Sp_{C_n}$ and every object in $\Sp_{C_n}$ is naturally an $S^0$-comodule, it follows that this functor canonically factors through the category of $\Ell_{C_n}^*(S^0) = \O_{E[n]}$-modules in $\QCoh(E)$, which is equivalent to $\QCoh(E[n])$.\footnote{One has to check that the ring structure on $\O_{E[n]} = \Ell_{C_n}^*(S^0)$ that arises this way is the standard ring structure, which follows from the construction of $\Ell_{S^1}$.} Functoriality in $X$ follows from functoriality in the $S^1$ case as in \cite[Proposition 5.6]{equivariant-tmf}. \end{proof} \begin{remark} Unlike the case of $S^1$, the map $\Ell_{C_n}^E \colon \Sp_{C_n}^{\mathrm{op}} \to \QCoh(E[n])$ is in general not symmetric monoidal. \end{remark} \begin{corollary} There is a $C_n$-spectrum $R$ such that for any $C_n$-spectrum $Z$, we have \[ (R^Z)^{C_n} = \Gamma(E[n], \Ell_{C_n}(Z)). \] \end{corollary} We call this $R$ the $C_n$-spectrum associated to the elliptic curve $E \to X$. For example, when $E \to X$ is the universal elliptic curve, then $R = \TMF$. This follows the argument of \cite[Construction 8.3]{equivariant-tmf}. \begin{proof} By the spectral Yoneda's lemma \cite[Proposition 4.8.2.18]{ha}, the Yoneda embedding $\Sp_{C_n} \to \operatorname{Fun}^R(\Sp_{C_n}^{\mathrm{op}}, \Sp)$ is an equivalence. Since $\Sp_{C_n}$ is presentable, by \cite[Corollary 5.5.2.9(1)]{htt}, a functor $\Sp_{C_n}^{\mathrm{op}} \to \Sp$ is a right adjoint iff its opposite preserves colimits, i.e.\ it preserves limits. Thus, we have to show that the functor $Z \mapsto \Gamma(E[n], \Ell_{C_n}(Z))$ preserves limits as a functor $\Sp_{C_n}^{\mathrm{op}} \to \Sp$. 
\begin{itemize} \item By construction $\Ell_{S^1}\colon \Sp_{S^1}^{\mathrm{op}} \to \QCoh(E)$ preserves limits. \item Since $\Ind_{C_n}^{S^1} \colon \Sp_{C_n} \to \Sp_{S^1}$ is a left adjoint, it preserves colimits, hence its opposite preserves limits. So $\Ell_{C_n}^*\colon \Sp_{C_n}^{\mathrm{op}} \to \QCoh(E)$ preserves limits. \item Since $\QCoh(E[n])$ is the category of $\O_{E[n]}$-modules in $\QCoh(E)$, the forgetful functor $\QCoh(E[n]) \to \QCoh(E)$ creates limits. So $\Ell_{C_n}\colon \Sp_{C_n}^{\mathrm{op}} \to \QCoh(E[n])$ preserves limits. \item Finally, $\Gamma \colon \QCoh(E[n]) \to \Sp$ is a right adjoint and preserves limits. \end{itemize} \end{proof} We are interested in these global sections, which we can write as \[ \Gamma(E[n]; \Ell_{C_n}(Z)) = \Gamma(X; p_* \Ell_{C_n}(Z)). \] By computing $p_* \Ell_{C_n}(S^0)$, this lets us understand the global sections in terms of quasi-coherent sheaves on $X$ itself. This pushforward is fairly nice by virtue of \begin{lemma} The map $[n]\colon E \to E$ is flat, hence so is $p\colon E[n] \to X$. \end{lemma} \begin{proof} To check that $[n]\colon E \to E$ is flat, observe that by \cite[Theorem 2.3.1]{katz-mazur}, the map on underlying (classical) stacks is flat. The condition that $[n]^* \pi_t \O_E = \pi_t \O_E$ as sheaves on the underlying stack is automatic, since $\pi_t \O_E = p^* \pi_t \O_X$ and $p[n] = p$. For the second part, we have a pullback square \[ \begin{tikzcd} E[n] \ar[d, "p"] \ar[r] & E \ar[d, "{[n]}"] \\ X \ar[r] & E \end{tikzcd} \] where the bottom map is the identity section, and flat morphisms are closed under pullbacks. \end{proof} \begin{corollary}[{\cite[Lemma 8.1]{equivariant-tmf}}]\label[corollary]{cor:underlying-torsion} The underlying stack of $E[n]$ is the $n$-torsion points of the underlying stack of $E$.
\end{corollary} \begin{proof} More generally, given a pullback of a flat morphism between non-connective spectral Deligne--Mumford stacks, it is also a pullback on the underlying classical stacks. To see this, since being flat and a pullback is local, we may assume that the stacks are in fact affine, in which case the result is clear. \end{proof} \subsection{The unit and transfer maps}\label{subsection:unit-transfer} There are two natural maps \[ 1, \tr\colon \O_X \to p_* \O_{E[n]}. \] The map $1$ is adjoint to the identity map $p^* \mathcal{O}_X = \mathcal{O}_{E[n]} \to \mathcal{O}_{E[n]}$, and is a map of $\O_X$-algebras. In particular, it is an $\O_X$-module homomorphism that sends $1$ to $1$. If $X$ were affine, then this comes from taking the global sections of $p\colon E[n] \to X$. This map is split by the identity section $X \to E[n]$. The trace map $\tr$ comes from stable equivariant homotopy theory itself. To avoid double subscripts, set $G = C_n$. In the category of $G$-spectra, there are maps \[ G_+ \to S^0 \to G_+ \] whose composition is $\sum_{g \in G} g$. The first map comes from applying $\Sigma^\infty_+$ to the map of unbased $G$-spaces $G \to *$, whereas the second map is the Spanier--Whitehead dual of the first map, using the self-duality of $G_+$. Informally, it sends $1 \mapsto \sum_{g \in G} g$. Now $\Ell_G(G_+) = \Ell_{\{e\}}(S^0) = \O_X$, so we get maps of $\O_{E[n]}$-modules \[ \O_X \overset\tr\to \O_{E[n]} \to \O_X \] whose composite is $n$ (since $G$ acts trivially on $\O_X$). Applying $p_*$, we get a map $\tr\colon \O_X \to p_* \O_{E[n]}$. It will be useful to relate this to the norm map of $C_n$-spectra. Let $R$ be the $C_n$-spectrum associated to $E$. Then unwrapping the definitions, we see that $\tr$ is the $C_n$-fixed points of the map \[ R \otimes G_+ \longrightarrow R \otimes S^0, \] obtained by tensoring up the unique map $G \to *$.
Similarly, the norm map is induced by \[ R \otimes EG_+ \longrightarrow R \otimes S^0, \] using the Adams isomorphism $(R \otimes EG_+)^{\mathcal{B}G} = R_{hG}$. Since $G$ includes into $EG$, the trace map factors as \[ \tr\colon R \longrightarrow R_{h G} \overset{\mathrm{Nm}}\longrightarrow R^{\mathcal{B}G}, \] where the left-hand map is the usual inclusion. Since $G$ acts trivially on the underlying spectrum $R$, we have $R_{h G} = R \otimes BG_+$, and the left-hand map is the inclusion of the bottom cell of $BG_+$. We now define $\overline{p_* \O_{E[n]}}$ by the following cofiber sequence in $\QCoh(X)$: \[ \O_X \oplus \O_X \overset{1 \oplus \tr}\longrightarrow p_*\O_{E[n]} \longrightarrow \overline{p_* \O_{E[n]}}. \] We then write \[ \begin{aligned} R^{\mathcal{B}C_n} &= \Gamma(X; p_* \O_{\E[n]}),\\ \overline{R^{\mathcal{B}C_n}} &= \Gamma(X; \overline{p_* \O_{\E[n]}}). \end{aligned} \] In particular, when $X = \M$ and $R = \TMF$, we have a cofiber sequence \[ \TMF \oplus \TMF \overset{1 \oplus \tr}\longrightarrow \TMF^{\mathcal{B}C_n} \longrightarrow \overline{\TMF^{\mathcal{B}C_n}}. \] In this paper, we are only interested in the case $n = 2$. \subsection{The descent spectral sequence}\label{subsection:dss} Our main computational tool is the descent spectral sequence, which we recall in this section. Let $X$ be any non-connective spectral Deligne--Mumford stack and $\mathcal{F}$ a quasi-coherent sheaf on $X$. Let $U \to X$ be an \'etale cover of $X$. Then the sheaf condition tells us \[ \Gamma(X; \mathcal{F}) = \operatorname{Tot}(\Gamma(U \times_X \cdots \times_X U; \pi^* \mathcal{F})), \] where $U \times_X \cdots \times_X U$ is the \v{C}ech nerve of the cover, and $\pi\colon U \times_X \cdots \times_X U \to X$ is the projection map. 
The descent spectral sequence is the Bousfield--Kan spectral sequence for the totalization, and the $E_2$ page is given by the \v{C}ech cohomology \[ E_2^{s, t} = \check{H}^s(X_{\mathrm{cl}}; \pi_t \mathcal{F}) \] of the underlying classical stack $X_{\mathrm{cl}}$ with respect to the cover $U$. For us, we have $X = \M$, and $U = \M_1(3)$, the spectral enhancement of moduli stack of elliptic curves with a $\Gamma_1(3)$-structure (i.e.\ a choice of $3$-torsion point). By \cite[Theorem 7.2]{affineness-chromatic}, the map \[ \Gamma\colon \QCoh(\M) \to \Mod_\TMF \] is an equivalence of symmetric monoidal categories. Let $i\colon \M_1(3) \to \M$ be the covering map. Then we have a sequence of equivalences \[ \QCoh(\M_1(3)) \!\simeq\! \Mod_{i_* \O_{\M_1(3)}}(\QCoh(\M)) \!\simeq\! \Mod_{\TMF_1(3)}(\Mod_\TMF) \!\simeq\! \Mod_{\TMF_1(3)}, \] where the first equivalence follows from $i$ being affine and \cite[Proposition 2.5.6.1]{sag}\footnote{The statement of \cite[Proposition 2.5.6.1]{sag} refers to spectral Deligne--Mumford stacks, but the proof applies to non-connective ones as well.}. Under this equivalence, the pullback functor $\Mod_{\TMF} \to \Mod_{\TMF_1(3)}$ is given by $\TMF_1(3) \otimes_{\TMF}(-)$. More generally, we find that \[ \QCoh(\M_1(3) \times_\M \cdots \times_\M \M_1(3)) \simeq \Mod_{\TMF_1(3) \otimes_{\TMF} \cdots \otimes_{\TMF} \TMF_1(3)}. \] Thus, we have \[ \Gamma(\M_1(3) \times_\M \cdots \times_\M \M_1(3), \pi^* \mathcal{F}) \simeq \TMF_1(3) \otimes_{\TMF} \cdots \otimes_{\TMF} \Gamma(\M; \mathcal{F}). \] So the descent spectral sequence is also the $\TMF_1(3)$-based Adams spectral sequence in $\Mod_\TMF$. There is a well-known identification \begin{lemma} The $\TMF_1(3)$-based Adams spectral sequence in $\Mod_\TMF$ is the same as the $BP$-based Adams--Novikov spectral sequence in spectra. 
\end{lemma} We only use this result to apply the machinery of synthetic spectra to the descent spectral sequence; the morally correct approach would be to reproduce the theory of synthetic spectra inside $\Mod_\TMF$, but we'd rather not take that up. \begin{proof} Following \cite[Section 1]{relations-ass}, it suffices to show that any $\TMF_1(3)$-resolution of a $\TMF$-module in $\Mod_\TMF$ is also a $BP$-resolution in $\Sp$. To do so, we have to show that every $\TMF_1(3)$-injective module in $\Mod_{\TMF}$ is $BP$-injective in $\Sp$, and every $\TMF_1(3)$-exact sequence in $\Mod_\TMF$ is $BP$-exact in $\Sp$. \begin{enumerate} \item We have to show that $\TMF_1(3) \otimes_\TMF X$ is $BP$-injective in $\Sp$ for any $X \in \Mod_\TMF$. Since $\TMF_1(3)$ is complex orientable, there is a homotopy ring map $MU \to \TMF_1(3)$. Thus, $\TMF_1(3) \otimes_\TMF X$ is a homotopy $MU$-module, hence $MU$-injective, hence $BP$-injective. \item Since $F(\TMF, -)$ is right-adjoint to the forgetful functor $\Mod_\TMF \to \Sp$, by definition of exactness, it suffices to show that if $X$ is $BP$-injective, then $F(\TMF, X)$ is $\TMF_1(3)$-injective. Again we may assume $X = BP \otimes Y$. By \cite[Theorem 1.2]{homology-tmf}, there exists an even spectrum $Z$ such that $\TMF_1(3) = \TMF \otimes Z$. By evenness, $BP$ is a retract of $BP \otimes DZ$. Thus, $F(\TMF, BP \otimes Y)$ is a retract of $F(\TMF, BP \otimes DZ \otimes Y) = F(\TMF \otimes Z, BP \otimes Y) = F(\TMF_1(3), BP \otimes Y)$, which is a $\TMF_1(3)$-module, hence $\TMF_1(3)$-injective. \end{enumerate} \end{proof} For convenience, set \[ A = \pi_* \TMF_1(3),\quad \Gamma = \pi_* (\TMF_1(3) \otimes_\TMF \TMF_1(3)). \] Then $(\Gamma, A)$ is a Hopf algebroid, and for any $\TMF$-module $N = \Gamma(\M; \mathcal{F})$, we have \[ \Ext^s_\Gamma(A, \pi_t (\TMF_1(3) \otimes_\TMF N)) = \Ext^s_\Gamma(A, \pi_t (i^* \mathcal{F})) \Rightarrow \pi_{t - s} N.
\] To perform calculations, it is of course necessary to identify $(\Gamma, A)$ explicitly. From \cite{tmf-level-3}, we have \[ A \equiv \pi_* \TMF_1(3) = \Z_2[a_1, a_3, \Delta^{-1}],\quad \Delta = a_3^3 (a_1^3 - 27 a_3),\quad |a_i| = 2i, \] with associated elliptic curve \[ \E': y^2 z + a_1 xyz + a_3 yz^2 = x^3. \] $\Spec \Gamma$ is the classifying scheme of two curves of the form $\E'$ that are abstractly isomorphic, i.e.\ related by a coordinate transform. Consider the change of coordinates \[ \begin{aligned} x &\mapsto x + rz\\ y &\mapsto y + sx + tz \end{aligned} \] In order to preserve the form of the equation, we need \[ \begin{aligned} 0 &= 3r - s^2 - a_1 s\\ 0 &= s^4 - 6 st + a_1 s^3 - 3 a_1 t - 3 a_3 s\\ 0 &= s^6 - 27 t^2 + 3 a_1 s^5 - 9 a_1 s^2 t + 3 a_1^2 s^4 - 9 a_1^2 st + a_1^3 s^3 - 27 a_3 t \end{aligned} \] So we have $\Gamma = A[s, t]/I$, where $I$ is the ideal generated by the relations above (we have eliminated $r$ entirely). One checks that $\Gamma$ is the free $A$-module on $\{1, s, s^2, s^3, t, st, s^2 t, s^3 t\}$, and these generators exhibits $\TMF_1(3) \otimes_\TMF \TMF_1(3)$ as the sum of $8$ suspended copies of $\TMF_1(3)$. We can read off the structure maps of the Hopf algebroid to be \[ \begin{aligned} \eta_R(a_1) &= a_1 + 2s\\ \eta_R(a_3) &= a_3 + a_1 r + 2t \\ \Delta(s) &= s \otimes 1 + 1 \otimes s\\ \Delta(r) &= r \otimes 1 + 1 \otimes r\\ \Delta(t) &= t \otimes 1 + 1 \otimes t + s \otimes r. \end{aligned} \] This Hopf algebroid (or rather, the connective version without inverting $\Delta$) was studied in detail in \cite{tilman-tmf}, whose computations and names we will use significantly. \section{The \texorpdfstring{$E_2$}{E2} page of the DSS}\label{section:e2} \subsection{Computing the comodule} Let $q\colon \E' \to \M_1(3)$ be the canonical elliptic curve over $\M_1(3)$, so that we have a pullback diagram \[ \begin{tikzcd} \E' \ar[d, "q"] \ar[r, "j"] & \E \ar[d, "p"]\\ \M_1(3) \ar[r, "i"] & \M. 
\end{tikzcd} \] Then we have \[ \TMF_1(3) \otimes_\TMF \TMF^{\mathcal{B}C_2} = \Gamma(i^* p_* \mathcal{O}_{\E[2]}) = \Gamma(q_* \mathcal{O}_{\E'[2]}) = \TMF_1(3)^{\mathcal{B}C_2}, \] and similarly with the bar version. In this section, we compute $\pi_* \TMF_1(3)^{\mathcal{B}C_2}$ as a $\Gamma$-comodule, and then quotient out the image of $1$ and $\tr$. By \Cref{cor:underlying-torsion}, $\pi_* \TMF_1(3)^{\mathcal{B}C_2}$ is given by (the global sections of) the classical scheme of $2$-torsion points of $\E'$. The na\"ive way to compute $\E'[2]$ is to write down the duplication formula for $\E'$ and compute its kernel. However, the duplication formula is unwieldy. Instead, we write down the inversion map $i\colon \E' \to \E'$ and compute the equalizer with the identity map. The inversion map is induced by the map of projective spaces \[ \begin{aligned} \P^2 &\to \P^2\\ [x:y:z] &\mapsto [x:-y - a_1 x - a_3 z: z] \end{aligned} \] (for now, $z$ denotes the projective coordinate; we shall soon rename it $z'$ and reuse $z$ to mean $-\frac{x}{y}$). The equalizer of $i$ with the identity is then cut out by the equations \[ \begin{aligned} x(2y + a_1 x + a_3 z) &= 0\\ z(2y + a_1 x + a_3 z) &= 0 \end{aligned} \] Now observe that the $2$-torsion points are contained in the affine chart $y = 1$. Indeed, if $y = 0$, then the equation defining $\E'$ tells us $x = 0$. So the unique point on the curve when $y = 0$ is $[0:0:1]$. But this doesn't satisfy the last equation above since $a_3$ is invertible. Therefore, we work in the $y = 1$ chart. Following standard conventions, we redefine \[ z = -\frac{x}{y},\quad w = -\frac{z'}{y}, \] where $z'$ is the old $z$. In the new coordinate system, the $2$-torsion points are cut out by the equations \[ \begin{aligned} z^3 - w + a_1 zw + a_3 w^2 &= 0\\ 2z - a_1 z^2 - a_3 zw &= 0\\ 2w - a_1 zw - a_3 w^2 &= 0. \end{aligned} \] Adding the first and last equation gives \[ z^3 + w = 0. \] Eliminating $w$, we find that \[ \E'[2] = \Spec A[z]/(2z - a_1 z^2 + a_3 z^4).
\] In other words, \[ \pi_* \TMF_1(3)^{\mathcal{B}C_2} = A[z]/(2z - a_1 z^2 + a_3 z^4),\quad |z| = -2. \] Since $a_3$ is invertible, this is a free $A$-module of rank $4$. The $\Gamma$-coaction on $\pi_* \TMF_1(3)^{\mathcal{B}C_2}$ comes directly from the construction of $\Gamma$ itself; it is given by \[ z = -\frac{x}{y} \mapsto -\frac{x + rz'}{y + sx + tz'} = \frac{-\frac{x}{y} - r \frac{z'}{y}}{1 + s \cdot \frac{x}{y} + t \cdot \frac{z'}{y}} = \frac{z + rw}{1 - sz - tw} =\frac{z - r z^3}{1 - sz + tz^3}. \] \begin{theorem}\label{thm:tr-value} The map $\tr\colon \TMF_1(3) \to \TMF_1(3)^{\mathcal{B}C_2}$ sends $1$ to $2 - a_1 z + a_3 z^3$. In particular, by naturality, $2 - a_1 z + a_3 z^3$ is a permanent cocycle. \end{theorem} The argument is similar to \cite[Satz 4]{dieck72} (see also \cite[Remark 6.15]{hkr}). \begin{proof} This map is a map of $\TMF_1(3)^{\mathcal{B}C_2}$-modules. Since $z$ acts trivially on $\pi_* \TMF_1(3)$, this means $z \tr 1 = 0$. Since $A[z]$ is a UFD, we know that $\tr 1$ must be a multiple of $2 - a_1 z + a_3 z^3$. Moreover, since it is equal to $2$ after modding out by $z$, the multiple must be $1$. \end{proof} So after taking the cofiber by $1$ and $\tr$, we get $\pi_* \overline{\TMF_1(3)^{\mathcal{B}C_2}} = A\{z, z^2\}$, and the $E_2$ page of the descent spectral sequence for $\pi_* \overline{\TMF^{\mathcal{B}C_2}}$ is given by $\Ext_\Gamma(A, A\{z, z^2\})$. \subsection{Computing the cohomology mod \texorpdfstring{$2$}{2}} While the coaction itself is fairly complicated, there is a major simplification after we reduce mod $2$. By computer calculation (\Cref{section:sage}), we find that: \begin{lemma}\label[lemma]{lemma:computer-action} Let \[ b_1 = a_3 z^2,\quad b_5 = a_3^2 z; \quad |b_i| = 2i.
\] Then $A\{z, z^2\} = A\{b_1, b_5\}$, and there is a short exact sequence of comodules \[ 0 \to A\{b_1\}/2 \to A \{b_1, b_5\}/2 \to A\{b_5\}/2 \to 0, \] where both ends are cofree on the indicated generator, inducing a long exact sequence in $\Ext$. More precisely, the class $b_1 \in A\{z, z^2\} / 2$ is invariant, while \[ \psi(b_5) - [1] b_5 = [r^2] b_1. \] Thus, the connecting map of the long exact sequence in $\Ext$ is given by $b_5 \mapsto [r^2] b_1$. \end{lemma} For reference, we display the cohomology of $A / 2$ in \Cref{fig:cohomology-a-mod-2}, as computed by \cite{tilman-tmf}. This chart is read as follows: \begin{itemize} \item Each dot represents a copy of $\F_2$; $h_1$-multiplication and $h_2$-multiplication are denoted by lines of slope $1$ and $1/3$ respectively. \item $[r^2]$ represents the class $x$ in bidegree $(7, 1)$. This class is uniquely characterized by the fact that $a_1^2$ kills this class, coming from the cobar differential \[ \d(a_3^2) = [a_1^2 r^2]. \] \item The long dotted lines denote the extension $h_1^4 = a_1^4 \Delta^{-1} g$. \item The classes fading out continue $a_1$-periodically, and each ``period'' consists of an infinite $h_1$ tower. \end{itemize} In \Cref{fig:ext-connecting}, we put two copies of this next to each other and draw the connecting differential. The resulting cohomology is in \Cref{fig:ext-mod-2}. The hidden extensions follow from a ``multiplication by $\sqrt{\Delta}$'' operator, which we shall next explain.
% Shorthand sseqpages commands for drawing multiplication lines in the charts.
% Each adds a new class at an offset from the last class and connects them:
%   \htwo  -- h_2-multiplication: offset (+3, +1), line styled [htwo]
%   \hone  -- h_1-multiplication: offset (+1, +1), line styled [hone]
%   \hzero -- h_0-multiplication: offset (+0, +1), line styled [hzero]
\DeclareSseqCommand\htwo {} { \class (\lastx + 3, \lasty + 1) \structline[htwo] }
\DeclareSseqCommand\hone {} { \class (\lastx + 1, \lasty + 1) \structline[hone] }
\DeclareSseqCommand\hzero {} { \class (\lastx, \lasty + 1) \structline[hzero] }
% \honedot{n} -- draw n successive h_1-multiplications topped by an arrow
% (an infinite h_1-tower), then restore the previously current class.
\DeclareSseqCommand\honedot {m} { \savestack \foreach \n in {1,...,#1} { \hone } \draw [->](\lastclass0) -- ++(0.5, 0.5); \restorestack }
% \honei -- inverse h_1 step: add the class one to the lower-left and connect.
\DeclareSseqCommand\honei {} { \class (\lastx - 1, \lasty - 1) \structline[hone] }
% One 24-column period of the cohomology of A/2 (cf. \Cref{fig:cohomology-a-mod-2}).
% The argument (#1 > 0) toggles the greyed-out a_1-periodic classes that fade out.
\DeclareSseqGroup\tmfAnssModTwoUnit {m} {
  \class (0, 0);
  \draw [->] (0, 0) -- (0.8, 0.8);
  \htwo \htwo \htwo \honei \honei \htwo \honei
  \ifnum#1>0
    \foreach \n in {2, 4, 6, 8} {
      \SseqParseInt\ten{\n * 10}
      \class [white!\ten!black](\n, 0);
      \draw [->, white!\ten!black] (\n, 0) -- (\n + 0.8, 0.8);
    }
  \fi
  \class (15, 3) \honei \htwo \honei \honei \htwo \htwo
}
% Three horizontal copies of the unit, spaced 24 apart.
\DeclareSseqGroup\tmfAnssModTwoLayer{m} {
  \tmfAnssModTwoUnit(-24, 0){#1}
  \tmfAnssModTwoUnit(0, 0){#1}
  \tmfAnssModTwoUnit(24, 0){#1}
}
% The full mod-2 chart: two layers offset by (-4, 4), together with (when #1 > 0)
% the dashed lines for the extension h_1^4 = a_1^4 Delta^{-1} g.
\DeclareSseqGroup\tmfAnssModTwo {m} {
  \tmfAnssModTwoLayer{#1}
  \tmfAnssModTwoLayer(-4, 4){#1}
  \ifnum#1>0
    \draw [dashed] (24, 0) -- (28, 4);
    \draw [dashed] (-24, 0) -- (-20, 4);
    \draw [dashed] (0, 0) -- (4, 4);
  \fi
}
\begin{figure}
  \caption{Cohomology of $A/2$}
  \label{fig:cohomology-a-mod-2}
\end{figure}
\begin{figure}
  \caption{Connecting maps for $\Ext_\Gamma(A, A\{b_1, b_5\}/2)$}
  \label{fig:ext-connecting}
\end{figure}
% The resulting cohomology after running the connecting map; the red classes
% are the ones coming from the b_5 summand.
\DeclareSseqGroup\TmfCtwoAnssModTwo {} {
  \foreach \m in {0, 24} {
    \class (2 + \m, 0)
    \draw [->] (2 + \m, 0) -- (2.8 + \m, 0.8);
    \htwo \htwo
    \foreach \n in {2, 4, 6, 8} {
      \SseqParseInt\ten{\n * 10}
      \class [white!\ten!black](\n + \m + 2, 0);
      \draw [->, white!\ten!black] (\n + \m + 2, 0) -- (\n + \m + 2.8, 0.8);
    }
  }
  \class(17, 1) \htwo
  \begin{scope}[red]
    \foreach \m in {0, 24} {
      \class (1 + \m, 3)
      \class (3 + \m, 3)
      \honei \honei \htwo \htwo
    }
    \foreach \n in {0, 2, 4, 6, 8} {
      \SseqParseInt\ten{\n * 10}
      \class [white!\ten!red](\n + 14, 0);
      \draw [->, white!\ten!red] (\n + 14, 0) -- (\n + 14.8, 0.8);
    }
    \class (13, 3)
    \class (13, 1)
    \draw [->] (13, 1) -- (13.8, 1.8);
    \htwo \htwo
  \end{scope}
}
\begin{figure}
  \caption{$\Ext_\Gamma(A, A\{b_1, b_5\}/2)$}
  \label{fig:ext-mod-2}
\end{figure}
Originally, we have an action of $A[z]/(2z - a_1 z^2 + a_3 z^4)$ on $A\{z, z^2\} / 2$. Since we have quotiented out by $2$ and $2 - a_1 z + a_3 z^3$, this reduces to an action by $A[z]/(2, a_1 z + a_3 z^3)$. Further, since we are acting on $z$-multiples only, this reduces to an action by $A[z]/(2, a_1 + a_3 z^2)$. In this ring, we have \[ \Delta = a_3^4 + a_3^3 a_1^3 = a_3^4 + a_3^4 a_1^2 z^2 = (a_3^2 (1 + a_1 z))^2. \] One can check via \texttt{sage} that $\sqrt{\Delta} = a_3^2 (1 + a_1 z)$ is invariant in $A[z]/(2, a_1 + a_3 z^2)$, so acts on $\Ext_\Gamma(A, A\{z, z^2\}/ 2)$ (see again \Cref{section:sage}). For example, the surviving class in bidegree $(14, 0)$ is $\sqrt{\Delta} b_1 = a_3^3z^2 - a_3^2 a_1^2 z$. \subsection{\texorpdfstring{$2$}{2}-Bockstein spectral sequence} We now run the $2$-Bockstein spectral sequence, which we will find to degenerate on the $E_2$ page. These Bockstein $d_1$'s resemble the $d_3$'s in the descent spectral sequence quite a bit. Thus, despite the fact that a lot of the differentials can be computed by writing down explicit cocycles, we try our best to argue them formally so that the same argument can be applied to the $d_3$'s. Looking at the chart in \Cref{fig:ext-mod-2}, it is not hard to see what to expect. All differentials have bidegree $(-1, 1)$, and we know that nothing above the zero line survives, since $(\Gamma, A)$ has no rational cohomology. Thus, for example, up to $O(a_1)$, the class in bidegree $(1, 1)$ must be hit by a differential from $b_1$. The main work to do is to make sure nothing exotic happens with the highly $a_1$-divisible classes coming from $\Delta$ division. To begin, recall that in the $2$-Bockstein for $\Ext_\Gamma(A, A)$, we have \[ d_1(a_1) = h_1, \] since $h_1 = [s]$ and $\eta_R(a_1) = a_1 + 2s$.
\begin{lemma}\label[lemma]{lemma:no-h1} There are no non-zero classes of the form $h_1^2 a$ on the $E_2$ page. Further, any permanent class of this form equals $d_1(h_1 a_1 a)$. \end{lemma} \begin{proof} If $d_1(h_1^2 a) \not= 0$, then it doesn't survive. Otherwise, consider $d_1(a)$. This must be $h_1^2$ torsion, so it is an $h_2$ multiple. Then $h_1 d_1(a) = 0$. So $d_1(h_1 a_1 a) = h_1^2 a$. \end{proof} In general, we let $x_{t - s, s}$ denote a class in the corresponding bidegree that generates the bidegree after modding out by $a_1$- and $h_1$-multiples, if this makes sense. This class is well-defined up to $a_1$- and $h_1$-multiples. \begin{lemma} $d_1(b_1) = x_{1, 1}$ and $d_1(\sqrt{\Delta} b_1) = \sqrt{\Delta} x_{1, 1}$. \end{lemma} Note that since $x_{1, 1}$ is only well-defined up to $a_1$ multiples, this is equivalent to saying that $d_1(b_1)$ and $d_1(\sqrt{\Delta}) b_1$ are not $a_1$ divisible. Since $x_{1, 1}$ is not well-defined, neither is $\sqrt{\Delta} x_{1, 1}$, and we are not claiming that there is a single choice of $x_{1, 1}$ for which both equations hold. \begin{proof} First observe that there is a choice of $x_{1, 1}$ that is permanent. Indeed, for any choice of $x_{1, 1}$, the class $d_1(x_{1, 1})$ must be $h_1^2$-divisible, so it must be hit by a $d_1$ from an $a_1$-multiple by \Cref{lemma:no-h1}, which we can add to $x_{1, 1}$, so that it survives the $d_1$. From the $E_2$ page onwards, the target bidegree of the differential is $0$ by \Cref{lemma:no-h1} again. Now $h_1^2 x_{1, 1}$ must be hit by a $d_1$, and the source can only be $h_1^2 b_1 + O(a_1)$, since $h_2 x_{1, 1}$ is permanent and other classes are highly $a_1$-divisible. So $d_1(b_1)$ must hit a version of $x_{1, 1}$. The case of $\sqrt{\Delta} b_1$ is analogous. \end{proof} \begin{corollary} There is a hidden extension $a_1 x_{1, 1} = h_1 b_1 + O(a_1^2)$. \end{corollary} \begin{proof} The bidegree $(3, 1)$ is generated by $h_1 b_1$ and $a_1^2$ multiples. 
So $a_1 x_{1, 1}$ is either $h_1 b_1 + O(a_1^2)$ or $O(a_1^2)$. But $d_1(a_1 x_{1, 1}) = h_1 x_{1, 1}$ is not an $a_1^2$-multiple, so $a_1 x_{1, 1}$ cannot be $O(a_1^2)$. So it must be $h_1 b_1 + O(a_1^2)$. \end{proof} \begin{lemma}\label[lemma]{lemma:juggle} $d_1(a_1 b_1) = d_1(a_1 \sqrt{\Delta} b_1) = 0$. \end{lemma} \begin{proof} Note that $b_1$ and $\sqrt{\Delta} b_1$ generate the $0$-line under $a_1$ and $\Delta^{\pm}$, and $d_1(a_1 b_1)$ and $d_1(a_1 \sqrt{\Delta} b_1)$ must be in the submodule generated by these and $h_1$. We first show that the values of the differentials must be $a_1^2$-divisible. Indeed, we cannot have $d_1(a_1 b_1) = h_1 b_1 + O(a_1^2)$, because applying $d_1$ again would imply that \[ 0 = h_1 x_{1, 1} + O(a_1^2), \] a contradiction. The argument for $d_1(a_1 \sqrt{\Delta} b_1)$ is similar. Set $\Delta = 1$, and let $x = a_1 b_1$, $y = a_1 \sqrt{\Delta} b_1$. Then we can write \[ d_1 \begin{pmatrix}x\\y \end{pmatrix} = h_1 M \begin{pmatrix}x\\y \end{pmatrix} \] for some matrix $M$ of odd polynomials in $a_1$. Applying $d_1$ again gives us the equation \[ M^2 = \frac{1}{a_1} M. \] In other words, we have $M = a_1 M^2$. Iterating this equation shows that $M$ is infinitely $a_1$-divisible, so it must be trivial. \end{proof} \begin{corollary}\label[corollary]{cor:cross} $a_1 d_1(b_1) = h_1 b_1$, or equivalently, $a_1 x_{1, 1} = h_1 b_1 + O(a_1^2)$. \end{corollary} \begin{remark} We can in fact write down explicit lifts of $a_1 b_1$ and $a_1 \sqrt{\Delta} b_1$, namely \[ a_1 b_1 + 2 a_3 z,\quad a_1 \sqrt{\Delta} b_1 + 2a_3^3 z, \] whose coboundary vanishes mod $2^2$. However, the proofs above will be used for $d_3$'s in the descent spectral sequence too, and we cannot write down explicit cocycles for that. \end{remark} With $\Delta^{\pm}$ and $g$ periodicity, this gives all $d_1$'s. No classes are left in positive $s$ so we are done. 
The resulting $E_2$ page of the descent spectral sequence of $\overline{\TMF^{\mathcal{B}C_2}}$ has a fairly regular pattern, which we exhibit in \Cref{fig:anss-e2-page}. The names are intentionally left off; they can be found in \Cref{fig:anss-e3-page}. \begin{sseqdata}[name = anss e3, small, x range={0}{30}, y range={0}{7}] \foreach \x in {0, 12, 24} { \class(\x + 1, 1) \honedot{5}; \htwo \htwo \class(\x - 3, 5) \honedot{2}; \htwo \htwo \class[rectangle, fill=white] (\x + 4, 0); \honedot{3} \begin{scope}[white!40!black] \class[rectangle, fill=white] (\x + 8, 0); \honedot{3} \end{scope} \begin{scope}[white!80!black] \class[rectangle, fill=white] (\x + 12, 0); \honedot{3} \end{scope} \class (\x, 4) \honedot{2} } \d3(4, 0) \d3(5, 1) \d3(6, 2) \d3(7, 3, 2) \d[white!80!blue]3(12, 0) \d3(13, 1, 2) \d3(14, 2, 2) \d3(15, 3, 2) \d3(4 + 24, 0)(27, 3, 2) \d3(5 + 24, 1) \d3(6 + 24, 2) \d3(24, 4) \d3(20, 0)(19, 3, 2) \structline[dashed, bend left=30, page=3, source anchor=70, target anchor=200] (19, 3, 1)(22, 6) \end{sseqdata} \begin{figure} \caption{$E_2$ page of ANSS} \label{fig:anss-e2-page} \end{figure} \section{Differentials in the DSS}\label{section:differentials} We have now computed the $E_2$ page of the descent spectral sequence of $\overline{\TMF^{\mathcal{B}C_2}}$. The goal of this section is to compute the differentials. The main difficulty in computing the descent spectral sequence differentials is translational invariance --- the $E_2$ page is $\Delta$-invariant, but the $E_\infty$ page will only be $\Delta^8$-invariant. If we had a connective version, then the leftmost class must be permanent since there is nothing to hit. Since we do not, we need external means of determining that certain classes are permanent. Once we do so, we can use standard techniques in homotopy theory to compute the remaining differentials. We begin by computing the $d_3$'s, where most of the hard work lies. We depict the end result in \Cref{fig:anss-e3-page} for reference. 
\begin{figure} \caption{$E_3$ page of the descent spectral sequence} \label{fig:anss-e3-page} \end{figure} To compute the $d_3$'s, we have to show that $x_{1, 1}$ is permanent by explicitly constructing a homotopy class $t$, while $\sqrt{\Delta} x_{1, 1}$ supports a $d_3$. The rest then follows formally using $\eta^4 = 0$. Along the way, we will find a hidden $\nu$-extension from $h_2^2 \sqrt{\Delta} t$ to $h_1 gt$, which will be useful later on. For the purposes of computing $d_3$, it is convenient to have a ``multiplication by $v_1^2$'' operation, obtained by lifting $a_1^2$-multiplication on the mod $2$ cohomology. This is well-defined up to $O(2)$, which is fine because the targets of all differentials are $2$-torsion. Then $c_4 = v_1^4$ and $c_6 = v_1^6$, which lets us deduce that $d_3(v_1^2) = h_1^3$. \begin{theorem}\label{thm:x11-permanent} There is a choice of $x_{1, 1}$ that survives and has order $2$. We call this class $t$. \end{theorem} In fact, all choices survive and have order $2$, but we will only get to see this after computing the spectral sequence fully. \begin{proof} We define $t$ to be the composition \[ t\colon \Sigma \TMF \hookrightarrow \TMF \otimes \RP^\infty_+ = \TMF_{h C_2} \overset{\mathrm{Nm}}\longrightarrow \TMF^{\mathcal{B}C_2} \to \TMF^{\mathcal{B}C_2} / (1, \tr), \] where the first map is the inclusion of the $1$-cell. We claim that this has ANSS filtration $1$ and is non-$v_1^4$-divisible on the $E_2$ page (note that there are no Adams filtration 0 elements in odd degrees). Then it must be detected by a choice of $x_{1, 1}$. To do so, consider the composite \begin{multline*} t'\colon \Sigma \TMF \overset{t}\to \TMF^{\mathcal{B}C_2}/(1, \tr) \\ \to \TMF^{h C_2}/(1, \tr) = \TMF^{\RP^\infty_+}/(1, \tr) \to \TMF^{\RP^2_+}/(1, \tr), \end{multline*} where we use $\tr$ to refer to the composite $\Sigma \TMF \to \TMF^{\RP^\infty_+} \to \TMF^{\RP^2_+}$ as well. It suffices to prove the same properties for $t'$. 
The key fact from equivariant homotopy theory we use is the following: let $X$ be a genuine $C_2$-spectrum whose $C_2$ action on the underlying spectrum $\iota X$ is trivial. Then the cofiber of the composition \[ \iota X \otimes \RP^n_+ \to \iota X \otimes \RP^\infty_+ = X_{h C_2} \overset{\mathrm{Nm}}\to X^{h C_2} = (\iota X)^{\RP^\infty_+} \to (\iota X)^{\RP^m_+} \] is $\Sigma \iota X \otimes P_{-m - 1}^n$, where $P_{-m - 1}^n$ is the stunted projective space. This is well-known, but we are unable to find a reference, so we prove this in \Cref{section:stunted}. Take $n = 1$, so that $\RP^n_+ = S^0 \vee S^1$. The cell diagram of $\Sigma P_{-3}^1$ is given by \noindent where as usual the attaching maps of degree $1, 2, 4$ are $2, \eta, \nu$ respectively. In this diagram, we think of each cell as a $\TMF$-module cell, i.e.\ a copy of $\TMF$. We can read off all the information we need from this diagram. We start with $\TMF^{\RP^2_+}$, which is the bottom three cells in the diagram. We first understand what happens when we mod out $1$ and $\tr$. Recall that $1$ is the global sections of the projection map $\E[2] \to \M$, which is split by the identity section $\M \to \E[2]$. The global sections of the identity section is the inclusion of the fixed points $\TMF^{\mathcal{B}C_2} \to \TMF^{h C_2} \to \TMF$ by construction of equivariant TMF. That is, $1$ is a section of the projection $\TMF^{\RP^2_+} \to \TMF$ onto the $0$-cell. Thus, quotienting out by $1$ kills off the $0$-cell, and we are left with the bottom two cells. By construction, $\tr$ is the attaching map of the $1$-cell. Thus, further quotienting by $\tr$ adds the $1$-cell, and $\TMF^{\RP^2_+}/(1, \tr)$ is the question mark complex, i.e.\ the subcomplex consisting of the $(-2)$-, $(-1)$- and $1$-cell. Finally, $t'$ is the attaching map of the $2$-cell. It must factor through the bottom cell since $\pi_2 \TMF / \eta = 0$, and the diagram tells us this map is $\nu$ on the bottom cell, as desired. 
\end{proof} \begin{corollary}\label[corollary]{cor:d3x40} There is a choice of $x_{4, 0}$ with $d_3(x_{4, 0}) = h_1^2 t$. \end{corollary} \begin{proof} Since $\eta^4 = 0$, we know that $h_1^4 t$ must be hit by a differential. The only possible source is $h_1^2 x_{4, 0}$. So we are done. \end{proof} Our next goal is to show that $\sqrt{\Delta} t$ is \emph{not} permanent, and instead supports a $d_3$. We can think of this as a $d_3$ on the hypothetical $\sqrt{\Delta}$ (which, if existed, must support a $d_3$ since $\sqrt{\Delta}^2$ supports a non-$2$-divisible $d_5$). The proof is somewhat roundabout. Since $t$ is $2$-torsion, we get a map $\Sigma \TMF/2 \to \overline{\TMF^{\mathcal{B}C_2}}$ picking out $t$. The homotopy groups of $\tmf / 2$ up to the $20$\textsuperscript{th} stem are depicted in \Cref{fig:anss-tmf-mod-2}. We name these classes as follows --- if $y \in \pi_* \TMF$ is $2$-torsion, we let $\tilde{y} \in \pi_* \TMF/2$ be the class that is $y$ on the top cell. This is well-defined up to an element in the image of $\pi_* \TMF$. In particular, we are interested in the following classes: \begin{enumerate} \item $\kappa \in \pi_{14} \TMF$ is well-defined, while $\kappabar \in \pi_{20} \TMF$ is well-defined mod $2$. \item $\tilde{\nu^2} \in \pi_7 \TMF /2 \cong \Z/2$ is the unique non-zero element in this degree. \item $\tilde{\kappa} \in \pi_{15} \TMF/2$ is well-defined up to $\eta \kappa$. Thus, $\nu \tilde{\kappa}$ is well-defined. \end{enumerate} \DeclareSseqGroup\zigzag {} { \class(0, 0) \savestack \hone \hone \class(2, 0) \structline[dashed] \lastclass1 \hone \hone \restorestack } \begin{figure} \caption{$E_\infty$ page of the ANSS of $\TMF/2$} \label{fig:anss-tmf-mod-2} \end{figure} \begin{lemma} In $\pi_* \TMF/2$, we have \[ \eta \kappabar = \nu^2 \tilde{\kappa} + \kappa \tilde{\nu^2}. \] \end{lemma} \begin{proof} We start with the Adams spectral sequence for $\pi_* \tmf$, which is depicted in \Cref{fig:tmf-ass-e2}. 
This may be computed by the May spectral sequence or a computer. The only possible $d_2$'s in this range are the ones we have drawn, and any of the differentials implies all others by the Leibniz rule. We can get these via the fact that $v_1^4\nu = 0$, for example. From this, Moss' convergence theorem \cite{moss} tells us \[ \eta\kappabar = \langle \kappa, 2, \nu^2\rangle \in \pi_* \TMF \] with no indeterminacy. By definition, the right-hand side is given by the composite\footnote{In this diagram, the spectrum in the middle is (a shift of) $\TMF/2$ and the ones at the end are $\TMF$. The map on the left is any map such that if you project onto the top cell of $\TMF / 2$, then the map is $\kappa$ (``$\kappa$ on the top cell''); the map on the right is any map such that the restriction to the bottom cell of $\TMF / 2$ is $\nu^2$ (``$\nu^2$ on the bottom cell''). The composite of any two such choices gives an element in the Toda bracket, and vice versa.} Let $\overline{\nu^2}\colon \Sigma^6 \TMF / 2 \to \TMF / 2$ be the map that first projects to the top $\TMF$-cell, and then maps via $\tilde{\nu^2}$. Then $\nu^2 - \overline{\nu^2}$ maps trivially to the top cell, so factors through the bottom cell. This factorization is a valid choice of ``$\nu^2$ on the bottom cell''. So \[ \eta \kappabar = (\nu^2 - \overline{\nu^2}) \tilde{\kappa} = \nu^2 \tilde{\kappa} - \overline{\nu^2} \tilde{\kappa}. \] Finally, note that $\overline{\nu^2} \tilde{\kappa} = \tilde{\nu^2} \kappa$, and that everything is $2$-torsion, so we can drop the signs. \end{proof} \begin{figure} \caption{$E_2$ page of Adams spectral sequence for $\tmf$} \label{fig:tmf-ass-e2} \end{figure} \begin{corollary}\label[corollary]{cor:hidden-ext} $h_2 \sqrt{\Delta} t$ represents $\tilde{\kappa} t$, and in particular is permanent. Further, there is a hidden $\nu$ extension from $h_2^2 \sqrt{\Delta} t$ to $h_1 g t$. 
\end{corollary} \begin{proof} The class $t$ gives a map $\Sigma \TMF / 2 \to \overline{\TMF^{\mathcal{B}C_2}}$. Thus, the previous lemma gives \[ \nu^2 \tilde{\kappa} t = \eta \kappabar t + \tilde{\nu^2} \kappa t \in \pi_* \overline{\TMF^{\mathcal{B}C_2}} \] We know that $\tilde{\nu^2} t \in \pi_8 \overline{\TMF^{\mathcal{B}C_2}}$ has very high ANSS filtration (at least 7) because there is nothing in lower degrees. So \[ \nu^2 \tilde{\kappa} t = \eta \kappabar t + \text{higher filtration}. \] $\eta \kappabar t$ is represented by $h_1 g t$, so $\tilde{\kappa} t$ must be detected by a permanent class with filtration at most $4$, which must be $h_2 \sqrt{\Delta} t$ (this can alternatively follow from calculating the products in $\Ext$, but we spare ourselves the trouble). \end{proof} Our next result uses the machinery of synthetic spectra, which ``categorifies'' the Adams spectral sequence. They were originally introduced in \cite{synthetic}, but \cite[Section 9]{manifold-synthetic} gives a more computationally-oriented introduction to the subject. Specifically, the precise relationship between synthetic spectra and the Adams spectral sequence is laid out in \cite[Theorem 9.19]{manifold-synthetic}. Further, \cite[Appendix A.2]{manifold-synthetic} gives some helpful example calculations. Note however that we use a different grading convention. Our grading corresponds directly to the $x$ and $y$ coordinates in the Adams chart. The exact conversion is that $\mathbb{S}^{a, b}$ in their grading is $S^{a, b - a}$ in ours. If $z$ is a cocycle in the $E_2$ page, we let $[z]$ denote any element of $\pi_*$ that is represented by $z$. \begin{corollary}\label[corollary]{cor:hidden-nu} There is a hidden $\nu$ extension from $\Delta^k h_2^2 \sqrt{\Delta} t$ to $\Delta^k h_1 g t$ for every $k$. That is, if $\Delta^k h_2^2 \sqrt{\Delta} t$ is permanent, then $\nu [\Delta^k h_2^2 \sqrt{\Delta} t]$ is detected by $\Delta^k h_1 g t$. 
\end{corollary} We use $h_2$ to denote multiplication on the $E_2$ page, and $\nu$ to denote multiplication on homotopy groups. We insist on distinguishing these since $[h_2^2 \sqrt{\Delta} t]$ is not $\nu^2$-divisible in the homotopy groups. \begin{proof} We work in $BP$-synthetic spectra, and identify $\overline{\TMF^{\mathcal{B}C_2}}$ with its synthetic analogue $\nu (\overline{\TMF^{\mathcal{B}C_2}})$. In synthetic homotopy groups, the previous result can be written as $\nu^2 \tilde{\kappa} t = \tau^2 \eta \kappabar t$.\footnote{To see this, we know that $\nu^2 \tilde{\kappa} t$ is equal to $\eta \kappabar t$ after inverting $\tau$, and $\tau^2$ is the right number of copies of $\tau$ to put the right-hand side in the right bidegree, since the product jumps by two filtrations. We also have to check that there are no $\tau$-torsion classes. $\tau$-torsion classes are generated by classes that are hit by differentials, and a class hit by a $d_k$ is killed by $\tau^{k - 1}$. Thus, the $\tau$-torsion terms in bidegree $(22, 4)$ are classes hit by differentials from $(23, 2)$ or below, of which there are none.} Suppose $\Delta^k h_2^2 \sqrt{\Delta} t$ is permanent. Let $\alpha \in \pi_{19 + 24k, 3} \overline{\TMF^{\mathcal{B}C_2}}$ be a class whose image in $\overline{\TMF^{\mathcal{B}C_2}}/\tau$ is $\Delta^k h_2^2 \sqrt{\Delta} t$. Consider its image in $\overline{\TMF^{\mathcal{B}C_2}}/\tau^3$. Since $\Delta$ survives to the $E_5$-page, we know that $\Delta$ lifts to $\pi_{24, 0} \TMF/\tau^3$ (uniquely, since it is on the $0$-line). Since $\tilde{\kappa} t$ represents $h_2 \sqrt{\Delta} t$, we can write\footnote{To multiply, we need to know that $C\tau^n$ is a ring. To see this, note that the natural $t$-structure of \cite[Proposition 2.16]{synthetic} is compatible with the symmetric monoidal structure by \cite[Proposition 2.29]{synthetic}. 
Further, the proof of \cite[Proposition 4.29]{synthetic} shows that $C\tau^n$ is the $(n - 1)$-truncation of the unit, so it has a natural $\mathbb{E}_\infty$-ring structure.} \[ \alpha = \Delta^k \nu \tilde{\kappa} t \in \pi_{19 + 24k, 3} \overline{\TMF^{\mathcal{B}C_2}}/\tau^3. \] So we know that \[ \nu \alpha = \Delta^k \nu^2 \tilde{\kappa} t = \tau^2 \Delta^k \eta \kappabar t \in \pi_{22 + 24k, 4} \overline{\TMF^{\mathcal{B}C_2}}/\tau^3. \] So in $\pi_{*, *} \overline{\TMF^{\mathcal{B}C_2}}$, we know that $\nu \alpha = \tau^2 [\Delta^k h_1 gt] + O(\tau^3)$. \end{proof} \begin{lemma}\label[lemma]{lemma:sqrt-delta-t} $\sqrt{\Delta} t$ does not survive to the $E_4$ page. \end{lemma} Note that if $\sqrt{\Delta} t$ survived to the $E_\infty$ page and is $2$-torsion, then \Cref{cor:hidden-nu} implies $\eta \kappabar t$ is $\nu^3$-divisible. However, $\nu^3$ is $\eta^2$-divisible in $\pi_* \TMF / 2$, and there is no candidate for the $\eta^2$ division of $\eta \kappabar t$ --- the classes on the $0$-line are $\kappabar$-torsion but $\eta \kappabar t$ is not. The proof runs this argument in synthetic spectra to get the stronger claim that it doesn't survive to $E_4$. \begin{proof} We again work in synthetic spectra. If $\sqrt{\Delta} t$ survived to the $E_4$ page, then it lifts to a class in $\pi_{13, 1}(\overline{\TMF^{\mathcal{B}C_2}} / \tau^3)$, which we shall call $\sqrt{\Delta} t$ again. Then \[ \nu^3 \sqrt{\Delta} t = \tau^2 \eta \kappabar t \not= 0 \in \pi_{22, 4} \overline{\TMF^{\mathcal{B} C_2}} / \tau^3. \] Now note that $2 \sqrt{\Delta} t = 0 \in \pi_{13, 1} \overline{\TMF^{\mathcal{B}C_2}} / \tau^3$, since it is true mod $\tau$ (i.e.\ on the $E_2$ page), and there are no $\tau$ multiples in the bidegree. So we get a map of synthetic spectra $\Sigma^{13, 1} \TMF / 2 \to \overline{\TMF^{\mathcal{B}C_2}} / \tau^3$ picking out $\sqrt{\Delta} t$. 
From \Cref{fig:anss-tmf-mod-2}, we can read that $\nu^3 = \eta^2 \tilde{\nu^2} \in \pi_{9, 3}\TMF / 2$, noting that there are no $\tau$-divisible classes in that bidegree. Thus, $\nu^3 \sqrt{\Delta} t = \eta^2 \tilde{\nu^2}\sqrt{\Delta} t$. However, $\tilde{\nu^2} \sqrt{\Delta} t \in \pi_{20, 2}\overline{\TMF^{\mathcal{B}C_2}} / \tau^3 = 0$, which is a contradiction ($\tau^2 h_1^4 = 0$, so the $h_1$ towers cannot contribute). \end{proof} \begin{corollary} $d_3(\sqrt{\Delta} t) = x_{12, 4}$. \end{corollary} \begin{proof} This is equivalent to saying that $d_3(\sqrt{\Delta} t)$ is not $h_1$-divisible. If it were, then we can write \[ d_3(\sqrt{\Delta} t) = h_1^4 y \] for some $y$. Then $h_1^4 d_3(y) = d_3(h_1^4 y) = 0$, so $d_3(y) = 0$ since $h_1^4$ is injective on the target bidegree. Then $d_3(h_1 v_1^2 y) = h_1^4 y$, and so $d_3(\sqrt{\Delta}t + h_1 v_1^2 y) = 0$. But the argument of \Cref{lemma:sqrt-delta-t} applies equally well to $\sqrt{\Delta} t + h_1 v_1^2 y$. So we get a contradiction. \end{proof} \begin{corollary} $d_3$ vanishes on any class in bidegree $(8k, 0)$. \end{corollary} \begin{proof} By the computation of the $E_2$ page, we can pick $x_{4, 0}$ and $x_{16, 0}$ to be such that they generate the $0$-line under $v_1^2$ and $\Delta^{\pm}$. Let $x_{8, 0} = v_1^2 x_{4, 0}$. Then $x_{8, 0}$ and $x_{16, 0}$ generate the classes in bidegree $(8k, 0)$ under $v_1^4$ and $\Delta^{\pm}$, and it suffices to show that $d_3(x_{8, 0}) = d_3(x_{16, 0}) = 0$. The strategy is to repeat the argument of \Cref{lemma:juggle} with $(d_1, h_1)$ replaced by $(d_3, h_1^3)$. To do so, we need to check that $d_3(x_{8, 0})$ and $d_3(x_{16, 0})$ are $O(v_1^4)$. We can compute that \[ d_3(x_{8, 0}) = d_3(v_1^2 x_{4, 0}) = h_1^3 x_{4, 0} + v_1^2 h_1^2 t + O(v_1^4). \] But $h_1^3 x_{4, 0} = v_1^2 h_1^2 t + O(v_1^4)$ by \Cref{cor:cross} (the corollary shows that the remainder term is $O(a_1^3)$, but anything on the $E_2$ page that is $a_1^3$-divisible is also $a_1^4$-divisible). 
So this is $O(v_1^4)$. To see that $d_3(x_{16, 0}) = O(v_1^4)$, suppose instead that $d_3(x_{16, 0}) = h_1^2 \sqrt{\Delta} t + O(v_1^4)$. Then applying $d_3$ again gives $0 = h_1^2 x_{12, 4} + O(v_1^4)$, which is a contradiction since $h_1^2 x_{12, 4}$ is not $v_1^4$-divisible. \end{proof} This concludes the calculation of the $d_3$ differentials. The $E_5$ page (with the $\ko$-like patterns omitted\footnote{It is easy to check that there cannot be differentials from the $\ko$-like classes since the possible targets are non-$g$-torsion.}) is shown in \Cref{fig:anss-e5-page}. The differentials come from applying the Leibniz rule with $d_5(\Delta) = h_2 g$. We then have $d_7$'s in \Cref{fig:anss-e7-page} that are forced by the hidden $\nu$ extensions. Indeed, we have shown that the targets of the depicted differentials are $\nu$ times classes that are zero, hence must be hit by a differential, and the only possible sources are the ones we have drawn. A more careful argument using synthetic spectra can directly prove that these specific $d_7$'s must occur. The $E_9$ page is then depicted in \Cref{fig:anss-e9-page}, which is still $\Delta^2$-invariant. We will show that the greyed out classes do not survive, while the black ones do. Afterwards, all the remaining differentials are long differentials that kill off high $\kappabar$ powers. These are shown in \Cref{fig:anss-long-d}, and the $E_\infty$ page is shown in \Cref{fig:anss-e-infty-page}. In the rest of the section, we shall show that the long differentials that occur are indeed what we indicated. We then conclude the calculation using that $\Delta^{\pm 8}$ is permanent. 
\begin{sseqdata}[large, name = tmf anss, x range={0}{100}, y range={0}{20}] \foreach \delta in {0,...,5} { \foreach \g in {0,...,6} { \hook(\delta * 24 - 4 * \g, 4 * \g); \htwoedge(\delta * 24 + 12 - 4 * \g, 4 * \g); } \ifnum\delta=0 \classoptions["t"](\delta * 24 + 1, 1) \else\ifnum\delta=1 \classoptions["\Delta t"](\delta * 24 + 1, 1) \else \classoptions["\Delta^{\delta} t"](\delta * 24 + 1, 1) \fi \fi } \foreach \delta in {0,...,5} { \foreach \g in {0,...,6} { \IfExistsT(\delta * 24 + 22 - 4 * \g, 6 + 4 * \g) { \structline [dashed, bend left=30] (\delta * 24 + 19 - 4 * \g, 3 + 4 * \g)(\delta * 24 + 22 - 4 * \g, 6 + 4 * \g) } } } \foreach \delta in {0,...,3} { \foreach \g in {0,...,6} { \DrawIfValidDifferential5(\delta * 48 + 25 - 28 * \g, 1 + 4 * \g); \DrawIfValidDifferential5(\delta * 48 + 28 - 28 * \g, 2 + 4 * \g); \DrawIfValidDifferential5(\delta * 48 + 40 - 28 * \g, 2 + 4 * \g); \DrawIfValidDifferential7(\delta * 48 + 43 - 28 * \g, 3 + 4 * \g); } } \end{sseqdata} \begin{figure} \caption{$E_5$ page of the descent spectral sequence} \label{fig:anss-e5-page} \end{figure} \begin{figure} \caption{$E_7$ page of the descent spectral sequence} \label{fig:anss-e7-page} \end{figure} \begin{figure} \caption{$E_9$ page of the descent spectral sequence} \label{fig:anss-e9-page} \end{figure} \newcommand\LONGDMAXN{9} \NewSseqGroup \longd {} { \hook \htwoedge(12, 0) \class(21, 5) \class(22, 6) \structline[hone] \structline [dashed, bend left=30] (19, 3)(22, 6) \foreach \n in {0, ..., \LONGDMAXN} { \class(26 + \n * 20, 2 + \n * 4) \class(31 + \n * 20, 3 + \n * 4) \class(36 + \n * 20, 6 + \n * 4) \class(41 + \n * 20, 9 + \n * 4) } } \begin{sseqdata}[large, name = tmf anss long, x range={88}{188}, y range={0}{40}, class label handler = { \def\result{\scalebox{0.65}{$#1$}} }] \foreach \x in {0, ..., 4} { \longd(\x * 48, 0) \SseqParseInt\dp{2 * \x} \ifnum\x=0 \def\Delta^{\dp}{} \else \def\Delta^{\dp}{\Delta^{\dp}} \fi \classoptions["\Delta^{\dp} t"](\x * 48 + 1, 1) \foreach \n 
in {0, ..., \LONGDMAXN} { \SseqParseInt\wpow{3 + 4 * \n} \classoptions["w^{\wpow} \Delta^{\dp} t"](\x * 48 + 16 + \n * 20, 2 + \n * 4) \SseqParseInt\wpow{4 + 4 * \n} \classoptions["w^{\wpow} \Delta^{\dp} t"](\x * 48 + 21 + \n * 20, 5 + \n * 4) \SseqParseInt\wpow{5 + 4 * \n} \classoptions["w^{\wpow} \Delta^{\dp} t"](\x * 48 + 26 + \n * 20, 2 + \n * 4) \SseqParseInt\wpow{6 + 4 * \n} \classoptions["w^{\wpow} \Delta^{\dp} t"](\x * 48 + 31 + \n * 20, 3 + \n * 4) } } \foreach \m in {0, 1} { \d17(97 + \m * 48, 1) \d17(112 + \m * 48, 2) \d17(117 + \m * 48, 5) \foreach \n in {0, ..., \LONGDMAXN} { \DrawIfValidDifferential23(122 + \n * 20 + \m * 48, 2 + \n * 4) \DrawIfValidDifferential19(127 + \n * 20 + \m * 48, 3 + \n * 4) \DrawIfValidDifferential17(132 + \n * 20 + \m * 48, 6 + \n * 4) \DrawIfValidDifferential17(137 + \n * 20 + \m * 48, 9 + \n * 4) } } \end{sseqdata} \begin{figure} \caption{Remaining long differentials} \label{fig:anss-long-d} \end{figure} \begin{figure} \caption{The $E_\infty$ page of the descent spectral sequence} \label{fig:anss-e-infty-page} \end{figure} We start with the observation that \begin{lemma} There are no classes above the $s = 24$-line that survive. \end{lemma} \begin{proof} On the $E_4$ page, multiplication by $g$ is injective and surjective above the $3$-line. If $x$ is an element above the $24$-line, then we can write $x = g^6 y$ for some $y$. If $x$ does not support any differential, then neither does $y$, by injectivity of $g$-multiplication. So $y$ is permanent, and the class representing $x$ is $\kappabar^6$-divisible. But $\kappabar^6 = 0$ in $\tmf$. \end{proof} The hardest part is to show that $\Delta^2 t$ is permanent. The difficulty here is again translational invariance. Our starting piece of knowledge is that $t$ is permanent, and we want to somehow deduce that $\Delta^2 t$ is permanent too. 
However, we must not allow ourselves to repeat the argument, using that $\Delta^2 t$ is permanent to deduce that $\Delta^4 t$ is, because it is not. The key property we can make use of is the fact that the class $t$ extends to a map from $\TMF \otimes \RP^\infty$. Our job would be easy if $\Delta^2$ of the bottom cell is permanent in $\TMF \otimes \RP^\infty$, but that's not true. However, we can get by with the following version: \begin{lemma} The class $t\colon \Sigma \TMF \to \overline{\TMF^{\mathcal{B}C_2}}$ extends to a map from $\TMF \otimes L$. \end{lemma} Recall that $L$ is the dual of $DL$, as in the statement of the main theorem (\Cref{thm:main}). Its cell diagram is depicted in \Cref{fig:cell-l}. \begin{proof} First of all, it extends to $\Sigma \TMF / 2$ since $2 t = 0$. The obstruction to extending to the $4$-cell is $\langle \eta, 2, t\rangle$. Since $t$ comes from restricting the norm map $\TMF_{h C_2} \to \TMF^{\mathcal{B}C_2}$, we know it extends to a map $\RP^4 \to \overline{\TMF^{\mathcal{B}C_2}}$. Let $y \in \pi_* \overline{\TMF^{\mathcal{B}C_2}}$ be the image of the $3$-cell. Then the cell structure of $\RP^4$ (\Cref{fig:cell-l}) tells us \[ \langle \eta, 2, t \rangle = 2y. \] But all possible images of $y$ are $2$-torsion. So $\langle \eta, 2, t \rangle = 0$. Finally, the obstruction to extending to all of $L$ is $\langle \nu, \eta, 2, t \rangle$, which is defined since $\langle \nu, \eta, 2\rangle = 0$ with no indeterminacy. However, the only possible class is a $\nu$-multiple, hence is in the indeterminacy. So $0 \in \langle \nu, \eta, 2, t \rangle$, and we can extend to $L$. \end{proof} \begin{figure} \caption{Cell diagrams of $L$ and $\RP^4$} \label{fig:cell-l} \end{figure} \begin{remark} \emph{A posteriori}, we expect such a map to exist. We know that $\overline{\TMF^{\mathcal{B}C_2}} = \TMF \otimes DL$, and this is the map $\TMF \otimes L \to \TMF \otimes DL$ whose cofiber is $\KO$. \end{remark} Let $w_k$ be the $k$-cell of $L$. 
\begin{theorem} In the Adams--Novikov spectral sequence of $\TMF \otimes L$, the class $\Delta^2 w_1$ survives and has order $2$. \end{theorem} \begin{proof} It suffices to prove this for $\tmf \otimes L$ instead. We do not know of a way to compute the $E_2$ page of the Adams--Novikov spectral sequence for $\tmf \otimes L$, as the attaching map of the $4$-cell is filtration $0$ but non-injective in homology, so there is no long exact sequence. To remedy this problem, we use a modified Adams spectral sequence via the technology of synthetic spectra. First observe that $2\colon \tmf \to \tmf$ is in fact injective in $BP$-homology, since $BP \otimes \tmf$ is non-torsion (see e.g.\ \cite[Corollary 5.2]{homology-tmf}). So $\nu (\tmf / 2) = \nu (\tmf) / 2$. We next construct synthetic $\nu(\tmf)$-modules $\widetilde{Q}, \widetilde{L}$ by the cofiber sequences \[ \begin{tikzcd}[row sep=tiny] \Sigma^{3, 0}\tmf \ar[r, "{[\eta w_2]}"] & \nu(\Sigma \tmf / 2) \ar[r] & \widetilde{Q}\\ \Sigma^{7, 0}\tmf \ar[r, "{[\nu w_4]}"] & \widetilde{Q} \ar[r] & \widetilde{L} \end{tikzcd} \] where $[\eta w_2]$ and $[\nu w_4]$ are the attaching maps of the $4$- and $8$-cell respectively. They are characterized by the fact that the projection onto the top cell of their targets are $\eta$ and $\nu$ respectively. The universal property of the cofiber gives us natural comparison maps $\widetilde{Q} \to \nu (\tmf \otimes Q)$ and $\widetilde{L} \to \nu (\tmf \otimes L)$. For example, the second map is obtained from the first via \[ \begin{tikzcd} \Sigma^{7, 0} \tmf \ar[r, "{[\nu w_4]}"] \ar[d, equals] & \tilde{Q} \ar[r] \ar[d] & \widetilde{L} \ar[d, dashed]\\ \Sigma^{7, 0} \tmf = \nu(\Sigma^7 \tmf) \ar[r, "{\nu([\nu w_4])}"] & \nu (\tmf \otimes Q) \ar[r] & \nu (\tmf \otimes L). \end{tikzcd} \] Here the top row is a cofiber sequence in the category of synthetic spectra, and the bottom row is $\nu$ applied to a cofiber sequence in the category of spectra. 
The only thing to check is that the left-hand square commutes, which is true since every map $S^{k, 0} \to \nu Z$ is uniquely of the form $\nu f$; there are no $\tau$-torsion classes in this bidegree since these would have to be hit by a differential from below the $0$-line. Given this, it suffices to show that $\Delta^2 w_1$ survives in $\widetilde{L}$. To understand $\widetilde{L}$, we start with $\Sigma \tmf / 2$, whose ANSS was computed by \cite{tmf-rp2} and is shown in \Cref{fig:anss-c2-full} (with $\ko$-like terms omitted as usual). The important feature is that the differential on $\Delta^2 w_1$ hits $g^2 x w_1$ --- we have to show that this class vanishes in $\widetilde{L}$. The synthetic cofiber sequence $\Sigma^{3, 0} \tmf \to \nu (\Sigma \tmf / 2) \to \widetilde{Q}$ tells us the $E_2$ page of the ANSS for $\tilde{Q}$ sits in a long exact sequence between that of $\Sigma^{3, 0} \tmf$ and $\nu (\Sigma \tmf / 2)$. This is displayed in \Cref{fig:anss-q-full}, where the depicted differentials are the connecting map. The crucial claim in this diagram is that there is a hidden extension $\eta [\nu w_4] = x w_1$ on the $E_2$ page. Then since $\widetilde{L}$ is obtained by killing $[\nu w_4]$, we know that $gxw_1 = 0$ in the $E_2$ page of $\tilde{L}$. Since there are no higher filtration terms, $\Delta^2 w_1$ must survive. To see this hidden extension, note that $x \in \pi_* \tmf / 2$ detects $\nu^2$ on the top cell. If we quotient out the bottom cell in $\tilde{Q}$, then we can write the class of interest as \[ \eta [\nu w_4] = \eta \langle \nu, \eta, w_2 \rangle = \langle \eta, \nu, \eta\rangle w_2 = \nu^2 w_2, \] as desired. In this equation, $w_2$ is the homotopy class of the $2$-cell of $\tilde{Q}/w_1$, which is now an actual element since we killed off the bottom cell. Finally, it is straightforward to check that there are no classes above $\Delta^2 w_1$, so it must have order $2$. 
\end{proof} \DeclareSseqGroup\tmfAnssModTwoUnitPrime {} { \class (0, 0) \savestack \hone \hone \class (2, 0) \structline[dashed] \hone \hone \restorestack \htwo \htwo \htwo \honei \honei \htwo \honei \structline[dashed](9, 3) \class (15, 3) \honei \htwo \honei \honei \structline[dashed](15, 3) \htwo \htwo } \DeclareSseqGroup\tmfAnssZero {} { \class[rectangle, fill=white](0, 0) \hone \hone \hone \class[circlen=2](3, 1) \structline[dashed] \structline(0, 0) \htwo \htwo \honei } \DeclareSseqGroup\tmfAnssHigh {} { \class(-5, -1) \honei \htwo \class[circlen = 3](0, 0) \structline \hone \hone \hone \class[circlen=2](3, 1) \structline[dashed] \structline(0, 0) \htwo \htwo \honei } \begin{figure} \caption{Adams--Novikov spectral sequence for $\tmf / 2$} \label{fig:anss-c2-full} \end{figure} \begin{figure} \caption{Adams--Novikov spectral sequence for $\widetilde{Q} \label{fig:anss-q-full} \end{figure} \begin{corollary} The class $\Delta^2 t$ in the descent spectral sequence of $\overline{\TMF^{\mathcal{B}C_2}}$ is permanent and has order $2$. \end{corollary} \begin{proof} We previously constructed a map $\TMF \otimes L \to \overline{\TMF^{\mathcal{B}C_2}}$ where the bottom cell hits $t$. Applying $\nu$ to this, we get a map $\tilde{t}\colon \nu(\TMF \otimes L) \to \nu (\overline{\TMF^{\mathcal{B}C_2}})$ where the bottom cell hits $\tau t$.\footnote{We use $t$ to denote the lift of the permanent class $x_{1, 1} \in \pi_{1, 1} \nu(\overline{\TMF^{\mathcal{B}C_2}}) / \tau$ to $\pi_{1, 1} \nu(\overline{\TMF^{\mathcal{B}C_2}})$. Since the bottom cell hits $\pi_{1, 0} \nu(\overline{\TMF^{\mathcal{B} C_2}})$ and is equal to $t$ after $\tau$-inversion, it hits $\tau t$.} Now consider $\tilde{t}(\Delta^2 w_1)$. This is a permanent class, and since $\Delta^2 \in \TMF / \tau^2$, after modding out by $\tau^2$, we know that it must hit $\tau \Delta^2 t$. So $\tilde{t}(\Delta^2 w_1)$ is detected by $\Delta^2 t$. \end{proof} For the rest of the section, let $z = t$ or $\Delta^2 t$. 
It remains to consider the ``$w$-chains'' starting from $z$. There is a partially defined multiplication-by-$w$ operation on the $E_9$ page, where $w$ increases stem by $5$. To formally define this, we set \[ w^3 z = h_2 \sqrt{\Delta} z, \quad w^5 z = \Delta h_1 z,\quad w^6 z = \Delta h_2^2 z,\quad w^{k + 4} z = g w^k z. \] \begin{corollary} The $w$ chain starting from $z$ is permanent. \end{corollary} \begin{proof} The argument of \Cref{cor:hidden-ext} shows that $w^3 z$ is permanent and represents $\tilde{\kappa} z$ (for $\Delta^2 t$, run the same argument but start with the map $\Sigma^{49} \TMF / 2 \to \overline{\TMF^{\mathcal{B}C_2}}$ detecting $\Delta^2 t$, which we now know to be permanent and $2$-torsion). Since $w^5 = [h_1 \Delta]$ is permanent, we know that $w^5 z$ is also permanent. This leaves the $w^6$ terms, before we can conclude by $g = w^4$-periodicity. The crucial observation is that $\nu^3 z = 0$, and then Moss' convergence theorem \cite{moss} tells us $w^6 z$ detects $\langle \kappabar, \nu^3, z \rangle$ and is permanent. To see that $\nu^3 z = 0$, it suffices to show that $\nu^3 w_1 = \nu^3 \Delta^2 w_1 = 0$ in $\tilde{L}$. We have previously seen that in $\tilde{Q}$, we have $\nu^3 w_1 = \eta^2 x w_1 = \eta^3 [\nu w_4]$. Since $[\nu w_4]$ is killed in $\tilde{L}$, so is $\nu^3 w_1$. Since $\Delta^2$ exists on the $E_2$ page, we know that $\nu^3 \Delta^2 w_1 = 0$ on the Adams--Novikov $E_2$ page of $\tilde{L}$, and there are no higher filtration classes, so the product must be zero. \end{proof} \begin{corollary} There is a differential $d_?(\Delta^4 w^k z) = w^{k + 19} z$ for all $k \geq 3$ and $k = 0$. \end{corollary} Here the length of the differential depends on the value of $k \pmod 4$, which can be read off the charts. The precise values are, however, unimportant. \begin{proof} For $k \geq 8$, this follows from $\kappabar^6 = 0$, since these are the only classes that can hit them. For smaller $k$, this follows from $g$-division. 
\end{proof} \section{Identification of the last factor}\label{section:ident} To identify $\overline{\TMF^{\mathcal{B}C_2}} \cong \TMF \otimes DL$, we map $DL$ in by obstruction theory, and show it is an isomorphism after base change to $\TMF_1(3)$. To do so, we need to understand the $\TMF_1(3)$-homology of $DL$. \begin{lemma} We can choose classes $y_{-8}, y_{-4}, y_{-2} \in \pi_* \TMF_1(3) \otimes DL$ such that \begin{enumerate} \item $y_{-k} \in \pi_{-k} \TMF_1(3) \otimes DL$; \item $y_{-8}$ is the bottom cell of $DL$; \item $\{y_{-4}, y_{-2}\}$ generates $\pi_* \TMF_1(3) \otimes DL$ as a free $\pi_* \TMF_1(3)$-module; and \item \[ \begin{aligned} y_{-8} &= v_2^{-1} (a_1 y_{-4} + 2 y_{-2}) + O(2^2),\\ d(y_{-4}) &\equiv \psi(y_{-4}) - [1] y_{-4} = [r] y_{-8} + O(2^2), \end{aligned} \] where $v_2 = a_1^3 - 27 a_3$. \end{enumerate} \end{lemma} The choice of $y_{-4}$ and $y_{-2}$ is pretty much arbitrary. Other choices will result in slightly different formulas. These are chosen to simplify the ensuing calculation. \begin{remark} Note that $[r]$ is the class that represents $h_2$, not $s^2$; the latter is not a cocycle since $2 \not= 0$. \end{remark} \begin{proof} We carefully construct $\tmf \otimes DL$ in the category of $\tmf$-modules. We start with the bottom cell and attach $y_{-4}$ to kill $[r] y_{-8}$. The class $y_{-4}$ is only well-defined up to multiples of $y_{-8}$, which in this case is integral multiples of $a_1^2 y_{-8}$. The coboundary of $a_1^2 y_{-8}$ is $[12r] y_{-8}$, and so \[ \psi(y_{-4}) = y_{-4} + [k r] y_{-8}, \] where $k \equiv 1 \pmod 4$. The next cell will kill of the cocycle \[ \{h_1 y_{-4}\} = [s] y_{-4} - [k(a_1 r + t)] y_{-8} - ? ([s]a_1^2 - [12t]) y_{-8}. \] Here $?$ is either $1$ or $0$, noting that twice the class is a coboundary. There is exactly one choice of $?$ for which this cocycle is permanent, since there is a $d_3([s] a_1^2 - [12t]) = h_1^4$. 
On the other hand, we know that $y_{-4}$ is not entirely well-defined, and we can absorb the term into $y_{-4}$ (and redefine $k$). We choose $y_{-4}$ so that $? = 1$. We now set \[ \psi(y_{-2}) = y_{-2} + \{h_1 y_{-4}\}. \] and then the class \[ 2 y_{-2} + a_1 y_{-4} - k a_3 y_{-8} - (a_1^3 - 12 a_3) y_{-8}. \] is a cocycle, which the top cell kills off. So we get the relation \[ 2 y_{-2} + a_1 y_{-4} - (a_1^3 - 27 a_3) y_{-8} + O(2^2), \] as desired. \end{proof} We now construct a map $f\colon \TMF \otimes DL \to \overline{\TMF^{\mathcal{B}C_2}}$. This is constructed via obstruction theory. The relevant homotopy groups are in the range $[-8, 0]$, which are depicted in \Cref{fig:homotopy-neg}. In this range, all the homotopy groups come from the $\ko$-like patterns. \begin{figure} \caption{Homotopy groups of $\overline{\TMF^{\mathcal{B} \label{fig:homotopy-neg} \end{figure} In general, let $z_{-k}$ be the images of $y_{-k}$ under $f$ (after base change to $\TMF_1(3)$). In constructing $f$, the first step is to pick the image of the bottom cell, i.e.\ the value of $z_{-8}$. This lives in $\pi_{-8}(\overline{\TMF^{\mathcal{B}C_2}})$, which is the direct sum of infinitely many copies of $\Z$. Choosing $z_{-8}$ requires a bit of care, but once we have chosen it, we can always extend it to a full map $f$. Indeed, the obstructions are \[ \nu x_{-8, 0},\quad \langle \eta, \nu, x_{-8, 0}\rangle,\quad \langle 2, \eta, \nu, x_{-8, 0} \rangle, \] which all vanish because \[ \pi_{-5} \overline{\TMF^{\mathcal{B}C_2}} = \pi_{-3} \overline{\TMF^{\mathcal{B}C_2}} = \pi_{-2} \overline{\TMF^{\mathcal{B}C_2}} = 0. \] While the extension always exists, it is not unique. Specifically, the extension to the second cell is not unique. This results in $z_{-4}$ being well-defined up to a permanent class in $\pi_{-4} \overline{\TMF^{\mathcal{B}C_2}}$, which is again infinitely many copies of $\Z$. 
After extending to the second cell, there is a unique way to extend all the way to $DL$, because $\pi_{-2} \overline{\TMF^{\mathcal{B}C_2}} = \pi_{-1} \overline{\TMF^{\mathcal{B}C_2}} = 0$. We are now ready to choose $z_{-8}$. We definitely don't want this to be $v_1^4$-divisible, but this only defines $z_{-8}$ up to $v_1^4$-multiples. After some experimentation, we settle on the following choice: \begin{lemma}\label[lemma]{lemma:computer-z-8} In the $E_2$ page of the Adams--Novikov spectral sequence of $\overline{\TMF^{\mathcal{B}C_2}}$, there is a permanent cocycle of the form \[ z_{-8} = v_2^{-1}[a_1(z^2 - a_3^{-1} a_1^2 z) + 2z] + O(2^2) \] This will be our choice of $z_{-8}$. \end{lemma} This is a lift of $\Delta^{-1} a_1 x_{14, 0}$. \begin{proof} Computer calculation (see \Cref{section:sage}) verifies that this is a cocycle mod $2^2$. In the bidegree $(-8, 0)$, there are no $2$-Bockstein differentials and DSS differentials, so any cocycle mod $2^n$ would lift to a permanent cocycle. \end{proof} Given the relation between the $y_{-k}$, we know there must be some $g$ such that \[ \begin{aligned} z_{-4} &= z^2 - a_3^{-1} a_1^2 z + 2g + O(2^2)\\ z_{-2} &= z + a_1 g + O(2). \end{aligned} \] The indeterminacy tells us $2g$ is well-defined up to a permanent class. \begin{lemma}\label[lemma]{lemma:computer-massey} There is a choice of $f$ such that $g = 0 \bmod 2$. \end{lemma} \begin{proof} In $\TMF_1(3) \otimes DL$, we have a cobar differential \[ d(y_{-4}) = [r] y_{-8} + O(2^2). \] So we find that \[ d(z_{-4}) = [r] z_{-8} + O(2^2). \] On the other hand, by computer calculation, we find that (see \Cref{section:sage}) \[ d(z^2 - a_1^2 a_3^{-1} z) = [r] z_{-8} + O(2^2). \] Comparing the two, we must have \[ d(2g) = O(2^2). \] So $g$ is a cocycle mod $2$. Since there are no $2$-Bocksteins, $g$ lifts to an actual cocycle $\tilde{g}$, and $2\tilde{g}$ is permanent. So we can subtract $2\tilde{g}$ from $z_{-4}$ so that $g$ is now $0 \bmod 2$. 
\end{proof} \begin{corollary} This choice of $f$ is an equivalence, and hence $\overline{\TMF^{\mathcal{B}C_2}} \cong \TMF \otimes DL$. \end{corollary} \begin{proof} It suffices to show that $f$ is an equivalence after base change to $\TMF_1(3)$. Moreover, since we $2$-complete, we can further reduce mod $2$. Both surjectivity and injectivity in $\pi_*$ are easy linear algebra. \end{proof} \section{Further questions}\label{section:further-questions} \begin{itemize} \item Is there a ``geometric'' description of the piece $\TMF \otimes DL$, similar to the representation-theoretic description of $\KO^{\mathcal{B}G}$? Can such a description streamline the calculations in the paper? (e.g.\ avoid the need of computers and ``explain'' the formula for $z_{-8}$) In particular, the equation $-v_3 y_{-8} + a_1 y_{-4} + 2 y_{-2}$ in $\TMF_1(3) \otimes DL$ looks remarkably similar to the defining equation $a_3 z^4 - a_1 z^2 + 2 z$ in $\TMF_1(3)^{\mathcal{B}C_2}$. \item The initial calculation of the Hopf algebroid is made possible by the fact that the inversion map of a Weierstrass elliptic curve has a simple formula. For $C_3$, we can take the equalizer of the inversion map and the duplication map $[2]$, but the latter is unwieldy. \end{itemize} \appendix \section{Connective \texorpdfstring{$C_2$}{C2}-equivariant \texorpdfstring{$\tmf$}{tmf}}\label{section:connective} At the prime $2$, one prized property of $\tmf$ is \[ H_* \tmf = \A \square_{A(2)} \F_2, \] which lets us identify the $E_2$-page of its Adams spectral sequence as $\Ext_{\A(2)}(\F_2, \F_2)$. It is natural to expect $C_2$-equivariant $\tmf$ to have a similar properties, and there have been attempts to construct $C_2$-equivariant $\tmf$ along these lines (e.g.\ \cite{ricka-mmf}). The goal of this section is not to construct $C_2$-equivariant $\tmf$, but to deduce properties of any such construction based on its homology. 
\begin{theorem} Let $\tmf_{C_2} \in \Sp_{C_2}$ be a spectrum such that \[ (\HF_2)_\star \tmf_{C_2} = \aesq{2} \Mt \] and the $C_2$ action on the underlying spectrum $\iota \tmf_{C_2}$ is trivial. Then \[ H_* (\tmf_{C_2})^{C_2} \cong H_*(\tmf \oplus \tmf \oplus \tmf \otimes L) \] as a Steenrod comodule. If $(\tmf_{C_2})^{C_2}$ has a $\tmf$-module structure, then we have an isomorphism of 2-completed $\tmf$-modules. \end{theorem} Note that it is $L$ that appears here, not $DL$, so $\Delta^{-1} (\tmf_{C_2})^{C_2}$ and $\TMF^{C_2}$ are duals as $\TMF$-modules (after completion). \begin{remark} We would expect that the underlying spectrum of $\tmf_{C_2}$ is $\tmf$ with the trivial action, and that there is a natural ring map $\tmf \to (\tmf_{C_2})^{C_2}$, giving the target a $\tmf$-module structure. The proof uses weaker (albeit less natural) assumptions, which is what we state in the theorem. \end{remark} Since all we know about this hypothetical $\tmf_{C_2}$ is its homology, it will be cleaner to work with a general $C_2$-spectrum whose homology is $\aesq{n} \Mt$ for some $n$, specializing to $n = 2$ only at the very end. Nevertheless, we shall shortly see that such a spectrum can only exist for $n \leq 2$, as in the non-equivariant case. \subsection*{Conventions} \begin{itemize} \item For consistency with existing literature, we use $(-)^{C_2}$ to denote the genuine fixed points of a genuine $C_2$-spectrum, instead of the previous $(-)^{\mathcal{B}C_2}$. \item We work with $\RO(C_2)$-graded homotopy groups throughout. We have an explicit identification $\RO(C_2) = \Z[\sigma]/(\sigma^2 - 1)$, and we shall write $p + q\sigma = (p + q, q)$ --- the first degree is the total degree of the representation and the second degree is the number of $\sigma$'s, also called the \emph{weight}. We use $\star$ to denote an $\RO(C_2)$-grading and $*$ for an integer grading. 
\item We let $\rho \colon S^0 \to S^\sigma$ be the natural inclusion of the representation spheres (more commonly known as $a$). Then $\rho \in \pi_{-1, -1}(S^0)$. \item We write $\iota\colon \Sp_{C_2} \to \Sp$ for the underlying spectrum functor. \end{itemize} \subsection{\texorpdfstring{$C_2$}{C2}-equivariant homotopy theory} Let $X$ be a $C_2$-spectrum. The main difficulty in analyzing $H_* X^{C_2}$ is that taking categorical fixed points is not symmetric monoidal, so there is no \emph{a priori} relation between $H\F_2 \otimes X^{C_2}$ and $(\HF_2 \otimes X)^{C_2}$. To understand $X^{C_2}$, we consider two other functors $\Sp_{C_2} \to \Sp$ that \emph{are} symmetric monoidal, namely the geometric fixed points $X^{\Phi C_2}$ and the underlying spectrum $\iota X$. The goal of this section is to understand the effects of these constructions on $\RO(C_2)$-graded \emph{homotopy} groups. The homotopy groups of the categorical fixed points are easy to compute. Essentially by definition, we have \[ \pi_* X^{C_2} = \pi_{*, 0} X. \] The underlying spectrum and geometric fixed points are obtained by performing certain constructions in $\Sp_{C_2}$ and then applying categorical fixed points. In the $C_2$-equivariant world, the geometric fixed points admit a particularly simple description, since $\rho^{-1}S^0$ is a model of $\tilde{E}C_2$: \begin{lemma}\label[lemma]{lemma:geometric-homotopy} The geometric fixed points is given by \[ X^{\Phi C_2} = (\rho^{-1} X)^{C_2}, \] and we can write its homotopy groups as \[ \pi_* X^{\Phi C_2} = \pi_{*, 0} (\rho^{-1} X) = \pi_\star X / (\rho - 1), \] where in the latter formulation, an element of bidegree $(s, w)$ in $\pi_\star X$ gives an element of degree $s - w$ in $\pi_* X^{\Phi C_2}$. \end{lemma} The underlying spectrum is slightly more involved. It can be expressed as \[ \iota X = ((C_2)_+ \otimes X)^{C_2} = F((C_2)_+, X)^{C_2}, \] since $(C_2)_+$ is self-dual. 
To compute its homotopy groups, we use the cofiber sequence \[ (C_2)_+ \to S^0 \overset\rho\to S^\sigma. \] We can either apply $F(-, X)$ or $(-) \otimes X$ to this, and get \begin{lemma}\label[lemma]{lemma:underlying-les} There are long exact sequences \[ \cdots \longrightarrow \pi_{s + 1, w + 1} X \overset\rho\longrightarrow \pi_{s, w} X \overset{\mathrm{res}}\longrightarrow \pi_{s, w} (C_2)_+ \otimes X \longrightarrow \pi_{s, w + 1} X \overset\rho\longrightarrow \cdots. \] and \[ \cdots \overset{\rho}\longrightarrow \pi_{s, w - 1} X \longrightarrow \pi_{s, w} (C_2)_+ \otimes X \overset{\mathrm{tr}}\longrightarrow \pi_{s, w} X \overset\rho\longrightarrow \pi_{s - 1, w - 1} X \longrightarrow \cdots . \] Moreover, $\mathrm{res} \circ \mathrm{tr}$ is $1 + \gamma$ on $\pi_* \iota X = \pi_{*, 0} (C_2)_+ \otimes X$, where $\gamma$ is the generator of $C_2$. \end{lemma} \begin{corollary}\label[corollary]{cor:underlying-inj} Suppose $\rho$ is injective on $\pi_{*, 0} X$ and $\pi_{*, 1} X$. Then \[ \pi_* \iota X = (\pi_\star X / \rho)_{*, 0} \] with $\gamma = -1$. \end{corollary} \begin{proof} We first apply the first sequence with $w = 0$. By assumption, the last $\rho$ is injective, hence has trivial kernel. So $\mathrm{res}$ is surjective with kernel given by $\rho$-multiples. Now consider the second sequence with $w = 0$. Since the second $\rho$ is injective, it has trivial kernel, so $\mathrm{tr}$ has zero image. Hence $1 + \gamma = \mathrm{res} \circ \mathrm{tr} = 0$. \end{proof} \subsection{The \texorpdfstring{$C_2$}{C2}-equivariant Steenrod algebra} We recall the computation of the $C_2$-equivariant Steenrod algebra. \begin{lemma}[{\cite[Proposition 6.2]{hu-kriz}}] \[ \Mt \equiv \pi_\star \HF_2 = \F_2[\tau, \rho] \oplus \left\{\frac{\gamma}{\rho^k \tau^\ell}\mid k, \ell \geq 1\right\} \] with the obvious ring structure. The degrees of the classes are \[ |\tau| = (0, -1),\quad |\rho| = (-1, -1),\quad |\gamma| = (1, 0). 
\] \end{lemma} The terms involving $\gamma$ are known as the ``negative cone''. They will not play a role in the story (though we will have to verify that they indeed do not play a role). \begin{lemma}[{\cite[Theorem 6.41]{hu-kriz}}] \[ \A^{C_2} \equiv \pi_\star (\HF_2 \otimes \HF_2) = \Mt[\tau_0, \tau_1, \ldots, \xi_1, \xi_2 \ldots]/(\tau_i^2 = \rho \tau_{i + 1} + \bar\tau \xi_{i + 1}), \] where $\bar\tau = \tau + \rho \tau_0$ and \[ |\tau_n| = (2^{n + 1} - 1, 2^n - 1),\quad |\xi_n| = (2^{n + 1} - 2, 2^n - 1). \] We have \[ \eta_R(\rho) = \rho,\quad \eta_R(\tau) = \bar{\tau} = \tau + \rho \tau_0 \] and \[ \Delta \xi_k = \sum \xi_{k - i}^{2^i} \otimes \xi_i,\quad \Delta \tau_k = \sum_{i = 0}^k \xi_{k - i}^{2^i} \otimes \tau_i + \tau_k \otimes 1. \] \end{lemma} One observes that \Cref{cor:underlying-inj} applies to $\Mt$ and $\A^{C_2}$, so we can read off the homotopy groups of the underlying spectra of $\HF_2$ and $\HF_2 \otimes \HF_2$. This tells us $\iota \HF_2 = H\F_2$ and $\pi_* \iota (\HF_2 \otimes \HF_2) \cong \A$, as expected. The identification of the latter with $\A$ is canonical for the following unimaginative reason: \begin{lemma}\label[lemma]{lemma:steenrod-auto} Let $\zeta_1, \zeta_2, \ldots \in \A$ be such that $|\zeta_i| = 2^i - 1$ and \[ \Delta \zeta_k = \sum \zeta_{k - i}^{2^i} \otimes \zeta_i. \] If $\zeta_1 \not= 0$, then $\zeta_i = \xi_i$. \end{lemma} \begin{proof} We prove this by induction on $i$. It is clear for $i = 1$, since there is a unique element of degree $1$. Then if it is true for $k < i$, then $\zeta_i - \xi_i$ is primitive. But the only primitive elements of $\A$ are of the form $\xi_1^{2^k}$, which is not in the bidegree. So this is zero, and $\zeta_i = \xi_i$. \end{proof} \subsection{The homology of geometric fixed points} Since $(-)^{\Phi C_2}$ is symmetric monoidal, in order to deduce $H_* X^{\Phi C_2}$ from $(\HF_2)_\star X$, it suffices to understand $\HF_2^{\Phi C_2}$. 
\begin{lemma} We have $\HF_2^{\Phi C_2} = H\F_2 [\tau]$ as an $\mathbb{E}_1$-ring, where $|\tau| = 1$. \end{lemma} Truncation gives us an $\mathbb{E}_\infty$ map $\HF_2^{\Phi C_2} \to H\F_2$, which we can understand as quotienting out $\tau$. \begin{proof} The inclusion of the categorical fixed points gives $\HF_2^{\Phi C_2}$ the structure of an $\mathbb{E}_\infty$-$H\F_2$-algebra. Since $H\F_2[\tau]$ is the free $\mathbb{E}_1$-ring on one generator over $H\F_2$, it suffices to observe that $\pi_* \HF_2^{\Phi C_2} = \F_2[\tau]$ by \Cref{lemma:geometric-homotopy}. \end{proof} \begin{corollary} If $X \in \Sp_{C_2}$, then \[ H_* X^{\Phi C_2} \cong ((\HF_2)_\star X)/(\rho - 1, \tau). \] If $X$ is a homotopy ring, then this is an isomorphism of rings. \end{corollary} We are, of course, not only interested in the homology groups as groups, but as Steenrod modules. This involves understanding the comparison $\HF_2^{\Phi C_2} \otimes \HF_2^{\Phi C_2} \to H\F_2 \otimes H\F_2$ given by squaring the projection $\HF_2^{\Phi C_2} \to H\F_2$. \begin{corollary} The map $\pi_* (\HF_2^{\Phi C_2} \otimes\HF_2^{\Phi C_2}) \to \pi_* (H\F_2 \otimes H\F_2)$ sends $\xi_i \mapsto \xi_i$ and kills $\tau_0, \tau$. \end{corollary} \begin{proof} After setting $\rho = 1$, we can express $\tau_{i + 1}$ in terms of $\tau_i^2$ and $\xi_{i + 1}$, so $\A^{C_2}/(\rho - 1)$ is generated polynomially over $\F_2[\tau]$ by $\tau_0, \xi_1, \xi_2, \ldots$ with no relations. We know that $\eta_L (\tau)$ and $\eta_R (\tau)$ get mapped to zero, and these are $\tau$ and $\tau + \tau_0$ respectively. So $\tau$ and $\tau_0$ get killed. As a spectrum, $\HF_2^{\Phi C_2}$ splits as a sum of $H\F_2$'s, we know that the map is surjective, and thus $\xi_1$ must hit $\xi_1$ since it is the only element left in the degree. Finally, the images of the $\xi_i$ satisfy the conditions of \Cref{lemma:steenrod-auto}, so must be equal to $\xi_i$. 
\end{proof} \begin{corollary} If $X \in \Sp_{C_2}$, then the coaction on $H_* X^{\Phi C_2}$ is given by taking the $C_2$-equivariant coaction \[ (\HF_2)_\star X \to \A^{C_2} \otimes_{\Mt} (\HF_2)_\star X \] and quotienting out by $(\rho - 1, \tau)$ on the left and $(\rho - 1, \tau \otimes 1, \tau_0 \otimes 1)$ on the right. \end{corollary} \subsection{Spectra whose homology is \texorpdfstring{$\aesq{n} \Mt$}{}} We now introduce the standard quotient coalgebras of $\A^{C_2}$: \begin{definition} We define \[ \A^{C_2}(n) = \A^{C_2}/(\xi_i^{2^{n - i + 1}}, \tau_{n + 1}, \tau_{n + 2}, \ldots). \] If $n < 0$, we set $\A^{C_2}(n) = \Mt$. Then we have \[ \aesq{n} \Mt = \Mt[\bxi_1^{2^n}, \ldots, \bxi_n^2, \bxi_{n + 1}, \bxi_{n + 2}, \ldots, \bar{\tau}_{n + 1}, \bar{\tau}_{n + 2}, \ldots]. \] \end{definition} As usual, the bar denotes the Hopf algebroid antipode. \begin{definition} We let $\Y_n \in \Sp_{C_2}$ be any spectrum such that \[ (\HF_2)_\star \Y_n = \A^{C_2} \square_{\A^{C_2}(n)} \Mt \] and the $C_2$ action on the underlying spectrum is trivial. If $n \geq m$, a map $f\colon \Y_n \to \Y_m$ is admissible if it induces the natural injection in homology. \end{definition} It is helpful to note that \begin{lemma} There is a unique admissible map $\Y_n \to \HF_2$. \end{lemma} Since we understand the situation of $\HF_2$ completely, we get to learn about $\Y_n$ through comparison. \begin{proof} Since $H_\star \Y_n$ is free over $\Mt$, the universal coefficients theorem tells us such maps are classified by $\Mt$-module maps $H_\star \Y_n \to \Mt$, which in turn is in bijection with $\A^{C_2}$-comodule maps $H_\star \Y_n \to \A^{C_2}$. \end{proof} \begin{lemma} $H_* \iota \Y_n = \A \square_{\A(n)} \F_2$ with trivial $C_2$ action as a Steenrod comodule, and admissible maps induce the natural inclusion. \end{lemma} This implies that $\Y_n$ cannot exist for $n > 2$. \begin{proof} Since $\iota$ is symmetric monoidal, we have $H_* \iota \Y_n = \pi_* \iota (\HF_2 \otimes \Y_n)$. 
Observe that in $(\HF_2)_\star \Y_n$, the $\rho$-torsion term of smallest weight is $\frac{\gamma}{\rho \tau}$ with weight $2$. So $\rho$ is injective on $(\HF_2)_{*, 0} \Y_n$ and $(\HF_2)_{*, 1} \Y_n$, and \Cref{cor:underlying-inj} applies. This tells us $H_* \iota \Y_n = ((\HF_2)_\star \Y_n / \rho)_{*, 0}$. It is then easy to check that the ring map $\A \square_{\A(n)} \F_2 \to H_* \iota \Y_n$ sending $\bxi_i^k$ to $(\tau^{2^{i - 1} - 1}\bar{\tau}_{i - 1})^k$ is an isomorphism. Since the ring is $2$-torsion, we have $\gamma = -1 = 1$. The comodule structure follows from a similar calculation. \end{proof} We can similarly compute \begin{lemma} \[ H_* \Y_n^{\Phi C_2} = \F_2[\bar{\tau}_{n + 1}, \bxi_1^{2^n}, \ldots, \bxi_n^2, \bxi_{n + 1}, \ldots] \] with the coactions of the $\bxi_i$ as usual and \[ \psi (\bar{\tau}_{n + 1}) = 1 \otimes \bar{\tau}_{n + 1}. \] An admissible map $Y_n \to Y_m$ acts as the ``identity'' on the $\bxi_i^{2^n}$'s and sends $\bar{\tau}_{n + 1}$ to $\bar{\tau}_{m + 1}^{2^{n - m}}$. \end{lemma} Our goal is to understand the homology of the categorical fixed points $\Y_n^{C_2}$. To do so, we make use of the commutative diagram \[ \begin{tikzcd} (\Y_n)_{h C_2} \ar[d, equals] \ar[r] & \Y_n^{C_2} \ar[r] \ar[d] & \Y_n^{\Phi C_2} \ar[d] \ar[r] & \Sigma (\Y_n)_{h C_2} \ar[d, equals]\\ (\Y_n)_{h C_2} \ar[r] & \iota \Y_n \ar[r] & \widetilde{\Y_n} \ar[r] & \Sigma (\Y_n)_{h C_2}, \end{tikzcd} \] where $\widetilde{Y_n}$ is defined to be the cofiber of $(Y_n)_{h C_2} \to \iota Y_n$. To compute $H_* \Y_n^{C_2}$, we have to understand the effect of $\Y_n^{\Phi C_2} \to \Sigma (\Y_n)_{h C_2}$ on homology. This factors through $\widetilde{\Y_n}$, and the strategy is to understand each factor separately. 
By \Cref{thm:stunted}, we can identify the homology of the sequence \[ \iota \Y_n \to \widetilde{\Y_n} \to \Sigma (\Y_n)_{h C_2} \] with the natural maps \[ \A \square_{\A(n)} \F_2 \to \A \square_{\A(n)} H_* \Sigma \RP^\infty_{-1} \to \A \square_{\A(n)} H_* \Sigma \RP^\infty, \] which is a short exact sequence. As for the map $\Y_n^{\Phi C_2} \to \widetilde{\Y_n}$, we know it is an equivalence in the case $n = -1$. In particular, it is injective in homology. Since $H_* \Y_n^{\Phi C_2}$ and $H_* \widetilde{\Y_n}$ inject into the $n = -1$ counterparts, we know that $H_* \Y_n^{\Phi C_2} \to H_* \widetilde{\Y_n}$ is injective as well. It remains to compute the image. At this moment it is slightly more convenient to consider cohomology. We follow some constructions from \cite{lin-ext}. For any $N$, we have a map $\Sigma \RP^\infty_{-N} \to \Sigma \RP^\infty_{-1}$ which presents $H^* \Sigma \RP^\infty_{-1}$ as a submodule of $H^* \Sigma \RP^\infty_{-N}$. Let $M_{n, N}^* \subseteq H^* \Sigma \RP^\infty_{-N}$ be the $\A(n)^*$-submodule generated under $\A(n)^*$ by elements of negative degree, and let $L_n^* = M_{n, N}^* \cap H^* \Sigma \RP^\infty_{-1}$ for sufficiently large $N$ (where it no longer depends on $N$). Let $G_n^* = H^* \Sigma \RP^\infty_{-1} / L_n^*$. We then have a short exact sequence of $\A(n)$-comodules. \[ 0 \to G_n \to H_* \Sigma \RP^\infty_{-1} \to L_n \to 0. \] \begin{lemma} $L_n^*$ is trivial in degree $0$. \end{lemma} \begin{proof} In the notation of \cite[Page 1]{lin-ext}, we need to know that $x^{-1}$ is not in the image of the $\Sq^k$'s. We have \[ \Sq^k x^{-k - 1} = \frac{(-k - 1)(-k - 2) \cdots (-2k)}{1 \cdot 2 \cdot\cdots \cdot k} x^{-1} = (-1)^{?} \binom{2k}{k} x^{-1} = 0. \] \end{proof} \begin{lemma} There are no non-zero $\A$-comodule maps $H_* \Y_n^{\Phi C_2} \to \A \square_{\A(n)} L_n$. 
\end{lemma} \begin{proof} By adjunction, this is equivalent to showing that there are no non-zero $\A(n)$-comodule maps $H_* \Y_n^{\Phi C_2} \to L_n$, or equivalently, no $\A(n)^*$-module maps $L_n^* \to H^* \Y_n^{\Phi C_2}$. Now note that in $L_n^*$ all elements have positive degree. Moreover, since $\A(n)^*$ is generated by elements of degree at most $2^n$, the module $L_n^*$ is generated as an $\A(n)^*$-module by elements of degree less than $2^n - 1$. On the other hand, the elements of lowest degree in $H_* \Y_n^{\Phi C_2}$ are in degree $0$ and $2^n$ (given by $1$ and $\bxi_1^{2^n}$). Thus, any $\A(n)^*$-module map $L_n \to H^* \Y_n^{\Phi C_2}$ must kill the generators, hence must be zero. \end{proof} \begin{corollary} The map $H_* \Y_n^{\Phi C_2} \to H_* \widetilde{\Y_n}$ factors through $\A \square_{\A(n)} G_n$. \end{corollary} \begin{corollary} $H_* \Y_n^{\Phi C_2} \to \A \square_{\A(n)} G_n$ is an isomorphism. \end{corollary} \begin{proof} Since $H_* \Y_n^{\Phi C_2} \to H_* \widetilde{\Y_n}$ is injective, so must this map. By \cite[Lemma 1.3]{lin-ext} both sides are abstractly isomorphic as graded groups. Since they are finite dimensional in each degree, it must be an isomorphism. \end{proof} Since $H_* \widetilde{\Y_n} \to H_* \Sigma (\Y_n)_{h C_2}$ is surjective in homology with kernel $\A \square_{\A(n)} \F_2$, we learn that: \begin{corollary} There is a short exact sequence of comodules \[ 0 \to \A \square_{\A(n)} L_n[-1] \to H_* \Y_n^{C_2} \to \A \square_{\A(n)} \F_2 \to 0. \] \end{corollary} These $L_n$ can be explicitly computed. \begin{example} $L_0[-1] = 0$. $L_1[-1] = \F_2$. $L_2[-1] = \F_2 \oplus H_* L$. \end{example} Specializing to the $n = 2$ case, \begin{lemma} When $n = 2$, the short exact sequence \[ 0 \to \A \square_{\A(2)} L_2[-1] \to H_* \Y_2^{C_2} \to \A \square_{\A(2)} \F_2 \to 0. \] splits non-canonically. 
\end{lemma} \begin{proof} It is classified by an element in \[ \Ext_{\A}^{1, 0} (\A \square_{\A(2)} \F_2, \A \square_{\A(2)} L_2[-1]) = \Ext_{\A(2)} (\A \square_{\A(2)} \F_2, L_2[-1]). \] Thus, we have to equivalently show that any short exact sequence of $\A(2)^*$-modules \[ 0 \to \A^*/\!/\A(2)^* \to Z \to L_2[-1]^* \to 0 \] splits. Let $x_i \in L_2[-1]^*$ be the unique element in degree $i$, if exists. Let $z_0 \in Z$ be any lift of $x_0 \in L_2[-1]^*$ and $z_1$ the unique lift of $x_1$. Then we can attempt to produce a splitting by mapping $x_0$ to $z_0$ and $x_1$ to $z_1$. Since the lowest non-zero degrees of $\A^*/\!/\A(2)^*$ are $0, 8, 12$, the only obstruction is if we cannot map $x_8$ to $\Sq^4 \Sq^2 \Sq^1 z_1$. This occurs if and only if $\Sq^4 \Sq^4 \Sq^2 \Sq^1 z_1 \not= 0$. But $\Sq^4 \Sq^4 \Sq^2 \Sq^1 = \Sq^7 \Sq^3 \Sq^1$, and $\Sq^3 \Sq^1 z_1$ has degree $5$, where $Z$ is trivial. So this cannot happen. \end{proof} \begin{corollary} We have \[ H_* \Y_2^{C_2} = \A \square_{\A(2)} (\F_2 \oplus \F_2 \oplus H_* L). \] \end{corollary} \begin{corollary} If $\Y_2^{C_2}$ admits the structure of a $\tmf$-module, then upon 2-completion, we have an isomorphism \[ \Y_2^{C_2} = \tmf \oplus \tmf \oplus \tmf \otimes L. \] \end{corollary} \begin{proof} The Adams spectral sequence has two $h_0$-towers at $0$, and this lets us map in two copies of $\tmf$. After quotienting them out we are left with $\Ext_{\A(2)}(\F_2, H_* L)$, which is shown in \Cref{figure:ext-l}. \begin{figure} \caption{$\Ext_{\A(2)} \label{figure:ext-l} \end{figure} Let $w_1$ be the class in degree $1$. To map in $L$, we need to show that \[ 2w_1 = 0, \quad \langle \eta, 2, w_1\rangle = 0,\quad \langle \nu, \eta, 2, w_1\rangle = 0. \] These obstructions live in $\pi_1, \pi_3$ and $\pi_7$ respectively. The only non-trivial case is the last case, but the only possible class lives in the indeterminacy. 
So we get a map $L \to \Y_2^{C_2}$, which lifts to a map $\tmf \otimes L \to \Y_2^{C_2}$ via the $\tmf$-module structure.
The restriction of $X$ to $S((n + 1) \sigma)$ is trivial since the following diagram commutes \[ \begin{tikzcd} S((n + 1)\sigma) \ar[r] \ar[d] & S(\infty \sigma) = * \ar[d]\\ \RP^n \ar[r] & \RP^\infty, \end{tikzcd} \] so the colimit over $S((n + 1) \sigma)$ is simply given by tensoring with $S((n + 1) \sigma)_+$. To prove this formally, we apply \cite[Corollary 4.2.3.10]{htt}, where we choose $\mathcal{J} = BC_2$, $K = \RP^n$ and $F$ to be the universal cover $S((n + 1)\sigma) \to \RP^n$ with $\mathcal{J}$ acting via deck transformations. The second equivalence is the Adams isomorphism (see e.g.\ \cite[Theorem 8.4]{equivariant-lectures}). \end{proof} We recall some basic properties of stunted projective spaces. Let $\phi(n)$ be the number of integers $j$ congruent to $0, 1, 2$ or $4$ mod $8$ such that $0 < j \leq n$. \begin{theorem}[{\cite[Theorem V.2.14]{h-infinity}}] There is a (unique) collection of spectra $\{P_m^n\}_{n \geq m}$ with the property that \begin{enumerate} \item If $m > 0$, then $P^n_m = \Sigma^\infty \RP^n / \RP^{m - 1}$ using the standard inclusion. \item If $r \equiv n \pmod {2^{\phi(k)}}$, then $P_n^{n + k} \cong \Sigma^{n - r} P^{r + k}_r$. \end{enumerate} Further, we have \begin{enumerate} \setcounter{enumi}{2} \item If we restrict the $BC_2$ action on $S^{\sigma}$ to $\RP^m \subseteq \RP^\infty = BC_2$, then \[ \colim_{\RP^m} S^{n\sigma} \simeq P_n^{n + m} \] \item $D P_m^n \simeq \Sigma P_{-n-1}^{-m-1}$. \end{enumerate} \end{theorem} \begin{corollary} There is a natural isomorphism \[ \lim_{\RP^m} S^{n \sigma} \simeq \Sigma P_{n - m - 1}^{n - 1}. \] \end{corollary} \begin{proof} \[ \lim_{\RP^m} S^{n \sigma} \simeq D \colim_{\RP^m} S^{-n \sigma} \simeq D P_{-n}^{m - n} \simeq \Sigma P_{n - m - 1}^{n - 1}. \] \end{proof} \begin{corollary}\label[corollary]{cor:sns} Let $X$ be a spectrum with trivial $C_2$ action. Then \[ (S^{n \sigma} \otimes X)^{h C_2} \simeq \lim_N (\Sigma P^{n - 1}_{-N} \otimes X). 
\] \end{corollary} \begin{proof} We write $X$ as a filtered colimit of finite spectra, $X = \colim_\alpha X^{(\alpha)}$. Then we have \[ \begin{aligned} (S^{n \sigma} \otimes X)^{h C_2} &= \left(S^{n \sigma} \otimes \colim_\alpha X^{(\alpha)}\right)^{h C_2} \\ &= \left(\colim_\alpha \left(S^{n \sigma} \otimes X^{(\alpha)}\right)\right)^{h C_2} \\ &= \lim_{\RP^\infty} \colim_\alpha \left(S^{n \sigma} \otimes X^{(\alpha)}\right) \\ &= \lim_m \lim_{\RP^m} \colim_\alpha \left(S^{n \sigma} \otimes X^{(\alpha)}\right) \\ &= \lim_m \colim_\alpha \lim_{\RP^m} \left(S^{n \sigma} \otimes X^{(\alpha)}\right) \\ &= \lim_m \colim_\alpha \left(\Sigma P_{n - m - 1}^{n - 1} \otimes X^{(\alpha)}\right) \\ &= \lim_m \left(\Sigma P_{n - m - 1}^{n - 1} \otimes X\right), \end{aligned} \] where we use that finite limits commute with filtered colimits and tensoring with \emph{finite} spectra. \end{proof} We now recall the construction of the norm map. Let $X$ be a genuine $C_2$-spectrum. By \Cref{lemma:adams}, we have an isomorphism \[ \colim_{\RP^n} X = (S((n + 1)\sigma)_+ \otimes X)^{C_2}. \] Taking the colimit as $n \to \infty$ gives \[ X_{hC_2} = (S(\infty \sigma)_+ \otimes X)^{C_2}, \] where $S(\infty \sigma) = \colim_n S(n \sigma)$. The norm map $X_{h C_2} \to X^{C_2}$ is then induced by the projection $S(\infty \sigma)_+ \to S^0$. By replacing $X$ with the cofree version $X^{(EC_2)_+}$, we obtain the $X^{h C_2}$-valued version of the norm map. \begin{lemma} Let $X$ be a genuine $C_2$-spectrum whose $C_2$ action on the underlying spectrum $\iota X$ is trivial. Then the cofiber of the composition \[ \iota X \otimes \RP^n_+ \to \iota X \otimes \RP^\infty_+ = X_{h C_2} \overset{\mathrm{Nm}}\to X^{h C_2} = (\iota X)^{\RP^\infty_+} \] is $\lim_N (\iota X \otimes \Sigma P_{-N}^n)$. \end{lemma} \begin{proof} This composition is obtained by taking the fixed points of \[ S((n + 1) \sigma)_+ \otimes X^{(EC_2)_{+}} \to X^{(EC_2)_+}. 
\] The cofiber of this map is $S^{(n+1) \sigma} \otimes X^{(EC_2)_+} \cong (S^{(n + 1)\sigma} \otimes X)^{(EC_2)_+}$. So this follows from \Cref{cor:sns}. \end{proof} We can now prove \Cref{thm:stunted}. \begin{proof}[of \Cref{thm:stunted}] This follows from the commutative diagram \[ \begin{tikzcd} & (\iota X)^{\RP^\infty_{m + 1}} \ar[r, equals] \ar[d] & \lim_N (\Sigma P^{-m - 2}_{-N} \otimes \iota X) \ar[d]\\ \RP^n_+ \otimes \iota X \ar[r] \ar[d, equals] & (\iota X)^{\RP^\infty_+} \ar[r] \ar[d] & \lim_N (\Sigma P^{n}_{-N} \otimes \iota X) \ar[d] \\ \RP^n_+ \otimes \iota X \ar[r] & (\iota X)^{\RP^m_+} \ar[r] & \Sigma P^n_{-m - 1} \otimes \iota X. \end{tikzcd} \] \end{proof} \section{Sage script}\label{section:sage} This appendix contains the sage script used to perform the computer calculations. The actual computations are at the end of the script and the comments indicate the lemmas they prove. \lstset{ belowcaptionskip=1\baselineskip, language=Python, showstringspaces=false, basicstyle=\footnotesize\ttfamily, keywordstyle=\bfseries\color{green!50!black}, commentstyle=\color{purple!40!black}, identifierstyle=\color{blue}, deletekeywords=[2]{reduce}, escapeinside={@}{@}, } \lstinputlisting{public.sage} \ifspringer \else \printbibliography \fi \end{document}
\begin{document} \title{A proof of the McKay-Radziszowski\\ subgraph counting conjecture} \author{ Alexander Engstr\"om\footnote{The author is Miller Research Fellow 2009-2012 at UC Berkeley, and gratefully acknowledges support from the Adolph C. and Mary Sprague Miller Institute for Basic Research in Science.} \\ Department of Mathematics \\ UC Berkeley \\ \texttt{[email protected]} } \date\today \maketitle \abstract{ We prove a theorem on how to count induced subgraphs in neighborhoods of graphs. Then we use it to prove a subgraph counting identity conjectured by McKay and Radziszowski in their work on Ramsey theory. } \section{Introduction} This paper is about counting induced subgraphs in neighborhoods. We prove a general theorem that can be used to turn expressions about enumerations on a normal form: as sums of specific functions. Then we use it to solve a subgraph counting conjecture by McKay and Radziszowski \cite{MR}. The number of induced subgraphs of $G$ isomorphic to $J$ is called $s(J,G)$. The subgraph of $G$ induced by the vertices adjacent to $v$ is $G^{+}_v$, and the one induced by the vertices not adjacent to $v$ is $G^{-}_v$. This is our main theorem. \newline \newline {\bf Theorem \ref{T2}}\emph{ Let $J_1, J_2, \ldots J_k$ and $J'_1, J'_2, \ldots J'_l$ be graphs. Then there is a set $\mathcal{J}$ of graphs with at most \[K=1+\sum_{i=1}^k |V(J_i)| + \sum_{j=1}^l |V(J'_j)| \] vertices, and constants $m_J$ for each $J\in \mathcal{J}$, such that \[ \sum_{v\in V(G)} \prod_{i=1}^k s(J_i,G^-_v) \prod_{j=1}^l s(J'_j,G^+_v) = \sum_{J\in \mathcal{J}} m_Jj(J,G) \] for any graph $G$. } \newline \newline The function $j(J,G)$ is of inclusion/exclusion type, and is defined by \[ j(J,G)=\sum_{\phi} (-1)^{ | (E(\phi(J)) \setminus E(G) | } \] where $\phi$ ranges over the injections from $V(J)$ into $V(G)$. In the last section we use Theorem~\ref{T2} to prove this new subgraph counting identity. 
\newline \newline {\bf Theorem (Conjecture 1 in McKay and Radziszowski \cite{MR})}\emph{ If $G$ is a graph with $n$ vertices, then \[ \sum_{v\in V(G)} (p_1(G_v^+)+p_2(G_v^-)+p_3(G_v^+,G_v^-))=0 \] where \[\begin{array}{rcl} p_1(X)&=& n(n-3)s(K_1,X)-(n^2+2n-6)s(K_1,X)^2+3ns(K_1,X)^3 \\ && -2s(K_1,X)^4+2(n^2+n-8)s(K_2,X)-12s(K_2,X)^2 \\ && -12(n-1)s(K_1,X)s(K_2,X) +12s(K_1,X)^2s(K_2,X) \\ && +72s(C_4,X) +12(n-2)s(K_3,X)+24s(K_{1,3},X) \\ && +24s(P_4,X)+24s(T_{3,1},X) +12(n+2)s(P_3,X) \\ && -24s(K_1,X)s(P_3,X)+32s(T_{3,2},X), \\ \end{array}\] \[\begin{array}{rcl} p_2(Y)&=&4s(K_2,Y)^2-12s(K_{1,3},Y)-8s(C_4,Y)-8s(T_{3,1},Y) \\&& -24s(T_{3,2},Y)+2(n-8)s(P_3,Y), \end{array}\] and \[\begin{array}{rcl} p_3(X,Y)&=&4s(K_1,X)s(P_3,Y)-2(n-2)s(K_1,X)s(K_2,Y)\\ && +4s(K_1,X)^2s(K_2,Y). \end{array}\] } The starting point of this paper was Goodman's identity \cite{G}, and in a sense Theorem~\ref{T2} is a generalization of it. There are a few infinite families of easily described subgraph counting identities \cite{MR}, and we provide the first example of one outside them. But there should be an infinite family of "difficult" subgraph counting identities. \begin{conjecture} For every $K\geq 4$ there is a subgraph counting identity \[ \sum_{v\in V(G)} \sum_{a=1}^N m_a(n) \prod_{i=1}^k s(J_{a,i},G^-_v) \prod_{j=1}^l s(J'_{a,j},G^+_v) = 0 \] that is true for every graph $G$ with $n$ vertices. In the identity, \[ \deg m_a(n) + \sum_{i=1}^k |V(J_{a,i})| + \sum_{j=1}^l|V(J'_{a,j})| \leq K \] for all $a$. There is one graph $J_{a,i}$ or $J'_{a,j}$ with $K$ vertices, and the identity is not in one of the easily described families. \end{conjecture} \begin{conjecture} Modulo the easily described families, and the difficult ones for lower $K$, there is only \emph{one} subgraph counting identity for every $K\geq 4$. \end{conjecture} \section{Counting induced subgraphs} All graphs and sets are finite. For graph theory not introduced, see Diestel~\cite{D}. 
The complete graph with vertex set $S$ is called $K_S$, and if $S=\{1,2,\ldots,n\}$ it is called $K_n$. If $J$ itself is a graph, then $K_J$ is defined as the complete graph on the same vertex set as $J$. The graph $T_{n,k}$ is a complete graph $K_n$ extended with an extra vertex that is adjacent to $k$ of the old ones. Recall that the function $s(J,G)$ counts the number of induced subgraphs of $G$ isomorphic with $J$. For any sets $A$ and $B$, the set of injective functions $\phi$ from $A$ to $B$ is \[ {\tt inj}(A,B). \] The number of graph automorphisms of $J$ is denoted $\textrm{Aut}(J)$. Most of our calculations takes place in the polynomial ring \[ R_S=\mathbb{R}[ x_e \mid e\in E(K_S) ] / \langle x_e^2=1 \mid e\in E(K_S) \rangle . \] If $\phi$ is a function defined from the vertex set of a graph, then we use the short $\phi(uv)$ for $\phi(u)\phi(v)$ if $uv$ is an edge. We write $x_{\phi(e)}$ instead of $x_{\phi(uv)}$ or $x_{\phi(u)\phi(v)}$ if $e$ is the edge $uv$. Many times the sign of a $\pm 1$ will depend on the truth of a boolean proposition, so we introduce the notation \[ \pm_P k = \left\{ \begin{array}{rl} k & \textrm{if $P$ is a true statement,} \\ -k & \textrm{if $P$ is a false statement.} \end{array} \right. \] The function $s(J,G)$ enumerates the induced copies of $J$ in $G$. Now we define a polynomial version of it called $\tilde{s}(J,G;x)$. Later on when polynomial versions of functions are introduced, they get the same symbol, but with a tilde on them. \begin{definition} Let $J$ be a graph and $S$ a set, then \[ \tilde{s}(J,G;x) = \frac{ \displaystyle \sum_{\phi\in {\tt inj}(V(J),S)} \,\, \prod_{e\in K_J} \left( 1 \pm_{e\in E(J)}x_{\phi(e)} \right) }{2^{- |E(K_J)|} \textrm{\emph{Aut}}(J) } \] \end{definition} The next definition is used for moving between functions and their polynomial versions, as for $s$ and $\tilde{s}$. \begin{definition} For any graph $G$ and edge $e\in K_G$, let $x^G_e=\pm_{e\in E(G)}1$. 
\end{definition} \begin{proposition} For any graphs $J$ and $G$, $s(J,G)=\tilde{s}(J,V(G),x^G)$. \end{proposition} \begin{proof} The product \[\prod_{e\in K_J} \left( 1 \pm_{e\in E(J)}x_{\phi(e)}^G \right)\] is $2^{|E(K_J)|}$ when $\phi(J)=G[\phi(V(J))]$ and zero otherwise. For every induced copy of $J$ in $G$ there are $\textrm{Aut}(J)$ injections from $V(J)$ to $V(G)$ with $\phi(J)=G[\phi(V(J))]$. \end{proof} The $\tilde{s}$--polynomials, and products and sums of them, are broken down into $\tilde{\jmath}$--polynomials. \begin{definition} Let $J$ be a graph and $S$ a set. The \emph{$\tilde{\jmath}$--polynomial} in $R_S$ is defined as \[ \tilde{\jmath}(J,S;x) = \sum_{\phi \in {\tt inj}(V(J),S)} \,\, \prod_{e\in E(J)} x_{\phi(e)} \] \end{definition} The corresponding $j$--polynomial is now defined implicitly. \begin{definition} Let $J$ and $G$ be graphs, then \[j(J,G)=\tilde{\jmath}(J,V(G);x^G).\] \end{definition} There is an easy way to calculate $j$ without $\tilde{\jmath}$. \begin{proposition} If $J$ and $G$ are graphs, then \[ j(J,G)=\sum_{\phi \in {\tt inj}(V(J),V(G)) } (-1)^{ | E(\phi(J)) \setminus E(G) | } \] \end{proposition} \begin{proof} \[ \tilde{\jmath}(J,S;x^G) = \sum_{\phi \in {\tt inj}(V(J),S)} \,\, \prod_{e\in E(J)} \pm_{\phi(e)\in E(G)}1 \] \end{proof} \begin{theorem}\label{prop:prodTwoJ} Let $J_1$ and $J_2$ be two graphs and $S$ a set. Then \[ \tilde{\jmath}(J_1,S; x)\tilde{\jmath}(J_2,S; x)= \sum_{I\subseteq V(J_1)} \sum_{\lambda \in {\tt inj}(I, V(J_2))} \tilde{\jmath}(J_{I,\lambda},S; x)\] where $J_{I,\lambda}$ is a graph produced like this: Start with the disjoint union of $J_1$ and $J_2$ and then identify $v$ in $J_1$ with $\lambda(v)$ in $J_2$ for all $v\in I$. If there are any double edges after the identifications, then \emph{both} of the edges should be removed. \end{theorem} \begin{proof} We get the monomials in the sum defining $\tilde{\jmath}(J_k,S;x)$ by relabeling the vertices of $J_k$ with elements of $S$. 
Given one monomial from $\tilde{\jmath}(J_1,S;x)$ and one from $\tilde{\jmath}(J_2,S;x)$, the $I\subseteq V(J_1)$ on the right hand side of the equality accounts for the vertices of $J_1$ that are relabeled with an element of $S$ that is also used in the relabeling of $J_2$. Double edges are removed since $x^2_e=1$ in the ring $R_{S}$ where the $\tilde{\jmath}$--polynomials are defined. \end{proof} \begin{corollary} If $J_1$ is a graph on $n_1$ vertices, and $J_2$ is a graph on $n_2$ vertices, then $\tilde{\jmath}(J_1,S;x)\tilde{\jmath}(J_2,S;x)$ is a sum of $\tilde{\jmath}(J,S;x)$ polynomials, where no graph $J$ has more than $n_1+n_2$ vertices. \end{corollary} Graphs $J$ with isolated points used in $\tilde{\jmath}(J,S;x)$ can be reduced. \begin{proposition}\label{propIso} Let $J$ be a graph with an isolated vertex $v$, and $S$ a set. Then \[\tilde{\jmath}(J,S;x)= (|S|-|V(J)|+1)\tilde{\jmath}(J\setminus v,S;x).\] \end{proposition} \begin{proof} The polynomial $\tilde{\jmath}(J,S;x)$ is a sum of monomials indexed by injections from $V(J)$ to $S$. For any injection $\phi$ from $V(J \setminus v)$ to $S$, we can extend it to an injection from $V(J)$ to $S$ in $(|S|-|V(J)|+1)$ ways without changing the monomial in $\tilde{\jmath}(J,S;x)$. \end{proof} The conjecture we prove in this paper is about induced subgraphs inside neighborhoods. And now we have to extend the concept of $\tilde{\jmath}$--polynomials to $\tilde{k}$--polynomials. For any set $S$, let \[ R_S^\circ=R_S\cdot \mathbb{R}[ y_v \mid v \in S ] / \langle y_v^2=1 \mid v\in S \rangle . \] \begin{definition} Let $J$ be a graph, $L$ a subset of $V(J)$, and $S$ a set. The \emph{$\tilde{k}$--polynomial} in $R_S^\circ$ is defined as \[ \tilde{k}(J,L,S;x,y) = \sum_{\phi \in {\tt inj}(V(J),S)} \,\, \prod_{e\in E(J)} x_{\phi(e)} \prod_{u\in L}y_{\phi(u)} \] \end{definition} The substitution of every $y_u$ in $\tilde{k}(J,L,S;x,y)$ with $x_{uw}$ is denoted \[ \tilde{k}(J,L,S;x,y)|_{y_u:=x_{uw}}. 
\] After the substitution we get a polynomial in $R_{S\cup\{w\}}$. \begin{proposition}\label{propKJ} Let $J$ be a graph, $L$ as subset of $V(J)$, and $S$ a set. Then \[ \tilde{\jmath}(J',S;x)=\sum_{s\in S} \tilde{k}(J,L,S \setminus s;x,y)|_{y_u:=x_{us}} \] where $J'$ is the graph $J$ extended by a vertex $w$ with neighborhood $L$. \end{proposition} \begin{proof} \[ \begin{array}{rcl} \tilde{\jmath}(J',S;x)& =& \displaystyle \sum_{\phi\in {\tt inj}(V(J'),S)} \,\, \prod_{e\in E(J')} x_{\phi(e)} \\ & =& \displaystyle \sum_{s\in S}\,\, \sum_{\phi \in {\tt inj}(V(J'\setminus w),S\setminus s)} \,\, \prod_{e\in E(J'\setminus w)} x_{\phi(e)} \prod_{uw \in E(J')} x_{\phi(u)s} \\ & =& \displaystyle \sum_{s\in S}\,\, \sum_{\phi \in {\tt inj}(V(J),S\setminus s)} \,\, \prod_{e\in E(J)} x_{\phi(e)} \bigl. \prod_{u \in L} y_{\phi(u)} \bigr|_{y_u:=x_{us}} \\ & =& \displaystyle \sum_{s\in S} \tilde{k}(J,L,S \setminus s;x,y)|_{y_u:=x_{us}} \end{array} \] \end{proof} Now we define the polynomial equivalent of the $s$--polynomial in the neighborhood case. \begin{definition} For any graph $J$ and set $S$, the \emph{$\tilde{r}$--polynomial} in the ring $R_S^\circ$ is defined by \[ \tilde{r}(J,S;x,y)= \frac{ \displaystyle \sum_{V\subseteq V(J)} \sum_{E\subseteq E(K_J)} (-1)^{| E\setminus E(J)|} \tilde{k}(K_J[E],V,S;x,y) }{2^{-|E(K_J)|-|V(J)|} \textrm{\emph{Aut}}(J)} \] \end{definition} \begin{proposition}\label{propFacPlus} If $J$ and $G$ are graphs, then for any vertex $w$ of $G$, \[ s(J,G_w^+)=\tilde{r}(J,V(G\setminus w);x^G,y)|_{y_u:= x_{uw}^G}. \] \end{proposition} \begin{proof} By definition $s(J,G^+_w)$ equals $\tilde{s}(J,G[N(w)];x^G)$. 
We reformulate the product $2^{|E(K_J)|} \textrm{Aut}(J) ^{-1}\tilde{s}(J,G[N(w)];x^G)$ several times: \[ \sum_{\phi \in {\tt inj}(V(J),N(w))} \prod_{e\in E(K_J)}(1\pm_{e\in E(J)}x_{\phi(e)}) \] \[ \sum_{\phi \in {\tt inj}(V(J),V(G\setminus w))} \biggl( \prod_{v\in V(J)} \frac{ 1+ x^G_{\phi(v)w}}{2} \biggr) \biggl( \prod_{e\in E(K_J)}(1\pm_{e\in E(J)}x_{\phi(e)}) \biggr) \] \[ 2^{-|V(J)|} \makebox[-1cm]{} \sum_{\phi \in {\tt inj}(V(J),V(G\setminus w))} \makebox[-0.15cm]{} \biggl( \sum_{V\subseteq V(J)} \prod_{v\in V} x^G_{\phi(v)w} \makebox[-0.1cm]{} \biggr) \biggl( \sum_{E \subseteq E(K_J) } (-1)^{|E\setminus E(J)|} \prod_{ e \in E} x_{\phi(e)} \makebox[-0.1cm]{} \biggr)\] \[ 2^{-|V(J)|} \makebox[-0.25cm]{} \sum_{V\subseteq V(J)} \sum_{E \subseteq E(K_J) } (-1)^{|E\setminus E(J)|} \makebox[-0.8cm]{} \sum_{\phi \in {\tt inj}(V(J),V(G\setminus w))} \, \prod_{v\in V} x^G_{\phi(v)w} \prod_{ e \in E} x_{\phi(e)} \] \[ 2^{-|V(J)|} \makebox[-0.25cm]{} \sum_{V\subseteq V(J)} \sum_{E \subseteq E(K_J) } (-1)^{|E\setminus E(J)|} \tilde{k} (K_J[E],V,V(G\setminus w);x^G,y)|_{y_u:=x^G_{uw}} \] and get the desired $2^{|E(K_J)|} \textrm{Aut}(J)^{-1}\tilde{r}(J,V(G\setminus w);x^G,y)|_{y_u:= x_{uw}^G}.$ \end{proof} We also need a polynomial equivalent of $s$ for when we are outside the neighborhood. \begin{definition} For any graph $J$ and set $S$, the \emph{$\tilde{q}$--polynomial} in the\linebreak ring $R_S^\circ$ is defined by \[ \tilde{q}(J,S;x,y)= \frac{ \displaystyle \sum_{V\subseteq V(J)} \sum_{E\subseteq E(K_J)} (-1)^{|V|+ |E\setminus E(J)|} \tilde{k}(K_J[E],V,S;x,y) }{2^{-|E(K_J)|-|V(J)|} \textrm{\emph{Aut}}(J)} \] \end{definition} \begin{proposition}\label{propFacMinus} If $J$ and $G$ are graphs, then for any vertex $w$ of $G$, \[s(J,G^-_w)=\tilde{q}(J,V(G\setminus w ), x^G,y)|_{y_u=x^G_{uw}}.\] \end{proposition} \begin{proof} The proof of Proposition~\ref{propFacPlus} goes through with minor modifications. 
The calculation is done for the vertices outside the neighborhood of $w$, so the term \[ \prod_{v\in V(J)} \frac{1+x^G_{\phi(v)w}}{2} \] is changed to \[ \prod_{v\in V(J)} \frac{1-x^G_{\phi(v)w}}{2} \] and that contributes the extra $(-1)^{|V|}$ in the definition of $\tilde{q}$ compared to $\tilde{r}$. \end{proof} \begin{theorem}\label{propMulK} Let $J_1$ and $J_2$ be two graphs, $L_1\subseteq V(J_1), L_2\subseteq V(J_2)$, and $S$ a set. Then $ \tilde{k}(J_1,L_1,S;x,y)\tilde{k}(J_2,L_2,S;x,y)$ equals \[ \sum_{I\subseteq V(J_1)} \,\, \sum_{\lambda \in {\tt inj}(I,V(J_2)) } \tilde{k}(J_{I,\lambda},L_{I,\lambda},S,x,y)\] where $J_{I,\lambda}$ is a graph and $L_{I,\lambda}$ a subset of its vertex set produced like this: Start with the disjoint union of $J_1$ and $J_2$ where the vertices of $J_i$ that are in $L_i$ are colored blue and the other ones red. Then identify $v$ in $J_1$ with $\lambda(v)$ in $J_2$ for all $v\in I$. If $v$ and $\lambda(v)$ had the same color before their identification the identified vertex is red and otherwise blue. If there are any double edges after the identifications then \emph{both} of the edges should be removed. The graph we get is $J_{I,\lambda}$ and the blue vertices form $L_{I,\lambda}$. \end{theorem} \begin{proof} The proof is a straightforward generalization of the proof of Theorem~\ref{prop:prodTwoJ} and is left to the reader. \end{proof} \begin{corollary}\label{cor:MulK} Let $J_1$ and $J_2$ be two graphs, $L_1\subseteq V(J_1), L_2\subseteq V(J_2)$, and $S$ a set. Then $ \tilde{k}(J_1,L_1,S;x,y)\tilde{k}(J_2,L_2,S;x,y)$ equals a sum of\linebreak $ \tilde{k}(J,L,S;x,y)$ polynomials where the graph $J$ has at most $|V(J_1)|+|V(J_2)|$ vertices. \end{corollary} This is our main theorem. \begin{theorem}\label{T2} Let $J_1, J_2, \ldots J_k$ and $J'_1, J'_2, \ldots J'_l$ be graphs. 
Then there is a set $\mathcal{J}$ of graphs with at most \[K=1+\sum_{i=1}^k |V(J_i)| + \sum_{j=1}^l |V(J'_j)| \] vertices, and constants $m_J$ for each $J\in \mathcal{J}$, such that \[ \sum_{v\in V(G)} \prod_{i=1}^k s(J_i,G^-_v) \prod_{j=1}^l s(J'_j,G^+_v) = \sum_{J\in \mathcal{J}} m_Jj(J,G) \] for any graph $G$. \end{theorem} \begin{proof} Using Proposition~\ref{propFacMinus} any $s(J_i,G^{-}_v)$ can be expanded into the form \[ \sum_{V\subseteq V(J_i)} \sum_{E \subseteq E(K_{J_i})} k_{i,V,E} \tilde{k}(K_{J_i}[E],V,V(G\setminus v);x^G,y)|_{y_u:=x^G_{uv}} \] and any $s(J_i',G^{+}_v)$ can be turned into \[ \sum_{V\subseteq V(J_i)} \sum_{E \subseteq E(K_{J_i})} k_{i,V,E}' \tilde{k}(K_{J_i'}[E],V,V(G\setminus v);x^G,y)|_{y_u:=x^G_{uv}} \] by Proposition~\ref{propFacPlus}. Observe that the coefficients $k_{i,V,E}$ and $k'_{j,V,E}$ does \emph{not} depend on $v$ or $G$. By Corollary~\ref{cor:MulK} the product of $\tilde{k}(J,\ldots)$ and $\tilde{k}(J',\ldots)$ is a weighted sum of $\tilde{k}(J'',\ldots)$ polynomials with $|V(J'')| \leq |V(J)|+|V(J')|$. Thus the products \[\prod_{i=1}^k \sum_{V\subseteq V(J_i)} \sum_{E \subseteq E(K_{J_i})} k_{i,V,E} \tilde{k}(K_{J_i}[E],V,V(G\setminus v);x^G,y) \] and \[\prod_{i=1}^l \sum_{V\subseteq V(J_i)} \sum_{E \subseteq E(K_{J_i})} k_{i,V,E}' \tilde{k}(K_{J_i'}[E],V,V(G\setminus v);x^G,y) \] can be rewritten as \[ \sum_{J\in \mathcal{J}'} \sum_{L\in V(J)} m_{J,L}\tilde{k}(J,L,V(G\setminus v);x^G,y) \] where $m_{J,L}$ are independent of $G$ and $v$; and $\mathcal{J}'$ is a set of graphs with at most $K-1$ vertices. By Proposition~\ref{propKJ} \[ \sum_{v\in V(G)} \sum_{J\in \mathcal{J}'} \sum_{L\in V(J)} m_{J,L}\tilde{k}(J,L,V(G\setminus v);x^G,y)|_{y_u=x^G_{uv}} \] can be written as a sum \[\sum_{J\in \mathcal{J}} m_J\tilde{\jmath}(J,V(G);x^G) = \sum_{J\in \mathcal{J}} m_Jj(J,G) \] where $\mathcal{J}$ is a finite set of graphs on at most $K$ vertices. \end{proof} We do not want graphs in $\mathcal{J}$ with isolated vertices. 
\begin{corollary}\label{workie} Let $J_1, J_2, \ldots J_k$ and $J'_1, J'_2, \ldots J'_l$ be graphs. Then there is a finite set $\mathcal{J}$ of graphs without isolated vertices, and polynomials $m_J(n)$ such that \[ |V(J)| + \deg m_J(n) \leq 1+\sum_{i=1}^k |V(J_i)| + \sum_{j=1}^l |V(J'_j)| \] and \[ \sum_{v\in V(G)} \prod_{i=1}^k s(J_i,G^-_v) \prod_{j=1}^l s(J'_j,G^+_v) = \sum_{J\in \mathcal{J}} m_J(n)j(J,G) \] for any graph $G$. \end{corollary} \begin{proof} For every isolated vertex removed with Proposition~\ref{propIso} the degree of the corresponding polynomial increases by at most one. \end{proof} \section{A proof of the McKay-Radziszowski\\ subgraph counting conjecture} In this section we prove the McKay-Radziszowski subgraph counting conjecture stated in the introduction. \begin{proof} In the conjectured equality there are many terms of the type \[ \sum_{v\in V(G)} \prod_{i=1}^k s(J_i,G^-_v) \prod_{j=1}^l s(J'_j,G^+_v). \] Our proof strategy is to expand all of them using Corollary~\ref{workie}, and then show that the terms cancel out. To verify that the terms cancel, given the expansions, is elementary but tedious. We have performed it both by hand and by computer, and will not write down the calculations in this paper. We will however tabulate the different $m_J(n)$ polynomials whose existence is given by Corollary~\ref{workie}. We calculated them by inserting all graphs $G$ with fewer than 10 vertices, and many large random graphs, to get overdetermined linear equations for the coefficients in the $m_J(n)$ polynomials. 
The graphs on less than six vertices with no isolated vertices are \[ \begin{array}{ccc} G_2=K_2 & G_3=P_3 & G_4=K_3\\ G_5=K_{1,3} & G_6=2K_2 & G_7=P_4\\ G_8=T_{3,1} & G_9=C_4 & G_{10}=K_4-K_2 \\ G_{11}=K_4 & G_{12}=K_{1,4} & G_{13}=K_2\cup P_3\\ G_{15}=K_{1,4}+K_2 & G_{16}=P_5 & G_{20}=K_3\cup K_2 \\ G_{23}=T_{4,1} & G_{24}=K_{2,3} & G_{25}=K_5-K_3 \\ G_{27}=C_5 & G_{28}=C_5+K_2 & G_{29}=K_5-P_4 \\ G_{30}=K_5-(P_3\cup K_2) & G_{31}=K_5-P_3 & G_{32}=K_5-2K_2 \\ G_{33}=K_5-K_2 & G_{34}=K_5 \\ \end{array}\] and the rest of them are drawn in Figure~\ref{fig}. \begin{figure} \caption{Some graphs on 5 vertices} \label{fig} \end{figure} To shorten the list we use the notation $n^ {\underline m} = n(n-1)(n-2)\cdots (n-m+1).$ Let $G$ be a graph with $n$ vertices, and let $g_i=j(G_i,G)$ for $i=2,3,\ldots, 34$. To save space, $\sum=\sum_{v\in V(G)}$ in the table. The expansions from Corollary~\ref{workie} are as follows: \newline \newline $2 \sum s(K_1,G_v^+)=n^{\underline 2}+g_{2}$, \newline \newline $2^{2} \sum s(K_1,G_v^+)^2=nn^{\underline 2}+2(n-1)g_{2}+g_{3}$, \newline \newline $2^{3} \sum s(K_1,G_v^+)^3=n^{\underline 2}({n}^{2}+n-2)+(3{n}^{2}-3n-2)g_{2}+3(n-1)g_{3}+g_{5}$, \newline \newline $2^{4} \sum s(K_1,G_v^+)^4=nn^{\underline 2}({n}^{2}+3n-6)+4(n-1)({n}^{2}+n-4)g_{2}+2(3{n}^{2}-3n-4)g_{3}+4(n-1)g_{5}+g_{12}$, \newline \newline $2^{4} \sum s(K_2,G_v^+)=n^{\underline 3}+3(n-2)g_{2}+3g_{3}+g_{4}$, \newline \newline $2^{8} \sum s(K_2,G_v^+)^2=n^{\underline 3}({n}^{2}+n+4)+2n(n-2)(3n-1)g_{2}+2(5{n}^{2}-3n-12)g_{3}+2({n}^{2}+n-4)g_{4}+4ng_{5}+(5n-4)g_{6}+8(n+2)g_{7}+4(n+4)g_{8}+8g_{9}+8g_{10}+g_{12}+6g_{13}+4g_{14}+2g_{15}+4g_{16}+2g_{20}+4g_{21}+g_{26}$, \newline \newline $2^{5} \sum s(K_1,G_v^+)s(K_2,G_v^+)=n^{\underline 3}(n+1)+4n(n-2)g_{2}+(5n-3)g_{3}+(n+1)g_{4}+g_{5}+g_{6}+2g_{7}+g_{8}$, \newline \newline $2^{6} \sum s(K_1,G_v^+)^2 s(K_2,G_v^+)=n^{\underline 
3}({n}^{2}+3n-2)+(n-2)(5{n}^{2}+5n-12)g_{2}+2(4{n}^{2}-3n-3)g_{3}+({n}^{2}+3n-2)g_{4}+2(2n-3)g_{5}+2(n+1)g_{6}+4(n+1)g_{7}+2(n+1)g_{8}+g_{12}+g_{13}+2g_{14}+g_{15}$, \newline \newline $2^{13} \sum s(C_4,G_v^+)=n^{\underline 5}+6(n-2)^{\underline 3}g_{2}+6(n-3)^{\underline 2}g_{3}-2(n-3)^{\underline 2}g_{4}-4(n-4)g_{5}+7(n-4)g_{6}+4(n-4)g_{7}-20(n-4)g_{8}-(n-4)g_{9}-14(n-4)g_{10}-3(n-4)g_{11}-3g_{12}+10g_{13}-4g_{14}-14g_{15}+12g_{16}-12g_{17}+4g_{18}-20g_{19}+2g_{20}+4g_{21}-4g_{22}-4g_{23}+2g_{24}-2g_{25}-g_{26}+4g_{27}+12g_{28}+4g_{29}+10g_{30}+6g_{31}+7g_{32}+6g_{33}+g_{34}$, \newline \newline $3\cdot 2^{7} \sum s(K_3,G_v^+ )=n^{\underline 4}+6(n-2)^{\underline 2}g_{2}+12(n-3)g_{3}+4(n-3)g_{4}+4g_{5}+3g_{6}+12g_{7}+12g_{8}+3g_{9}+6g_{10}+g_{11}$, \newline \newline $3\cdot 2^{11} \sum s(K_{1,3},G_v^+)=n^{\underline 5}+4(n-2)^{\underline 3}g_{2}+6(n-3)^{\underline 2}g_{3}-4(n-3)^{\underline 2}g_{4}+8(n-4)g_{5}-3(n-4)g_{6}-12(n-4)g_{8}+3(n-4)g_{9}-12(n-4)g_{10}-5(n-4)g_{11}+5g_{12}-12g_{13}+12g_{14}+12g_{15}-12g_{16}+24g_{18}+12g_{19}-10g_{20}-24g_{21}-12g_{22}-8g_{23}+10g_{24}+4g_{25}-3g_{26}+12g_{28}+12g_{30}-6g_{31}+3g_{32}-4g_{33}-g_{34}$, \newline \newline $2^{11} \sum s(P_4,G_v^+)=n^{\underline 5}+4(n-2)^{\underline 3}g_{2}+2(n-3)^{\underline 2}g_{3}+(n-4)g_{6}-8(n-4)g_{7}-4(n-4)g_{8}-5(n-4)g_{9}-4(n-4)g_{10}-(n-4)g_{11}+g_{12}-4g_{14}+4g_{15}-4g_{16}-8g_{18}+4g_{19}+2g_{20}+8g_{21}+4g_{22}-2g_{24}+5g_{26}+4g_{28}+8g_{29}-2g_{31}-g_{32}-4g_{33}-g_{34}$, \newline \newline $2^{11} \sum s(T_{3,1},G_v^+)=n^{\underline 5}+6(n-2)^{\underline 3}g_{2}+10(n-3)^{\underline 2}g_{3}+2(n-3)^{\underline 2}g_{4}+4(n-4)g_{5}+3(n-4)g_{6}+4(n-4)g_{7}+4(n-4)g_{8}-(n-4)g_{9}+2(n-4)g_{10}+(n-4)g_{11}+g_{12}-2g_{13}-4g_{14}+2g_{15}-12g_{16}-4g_{17}-12g_{18}+4g_{19}-2g_{20}-12g_{21}-4g_{22}+4g_{23}-2g_{24}+2g_{25}-g_{26}-4g_{27}-12g_{28}+4g_{29}-2g_{30}+10g_{31}+3g_{32}+6g_{33}+g_{34}$, \newline \newline $2^{7} \sum s(P_3,G_v^+)=n^{\underline 4}+4(n-2)^{\underline 
2}g_{2}+4(n-3)g_{3}+g_{6}-4g_{8}-g_{9}-4g_{10}-g_{11}$, \newline \newline $2^{8} \sum s(K_1,G_v^+)s(P_3,G_v^+)=n^{\underline 4}(n+2)+(n-2)^{\underline 2}(5n+4)g_{2}+(n-3)(7n-4)g_{3}+3(n-4)g_{5}+2(n-1)g_{6}+2(n-4)g_{7}-3(n+4)g_{8}-(n+2)g_{9}-4(n+2)g_{10}-(n+2)g_{11}+g_{12}+g_{14}+g_{15}-2g_{16}-2g_{17}-g_{18}-g_{19}-g_{20}-3g_{21}-3g_{22}-g_{23}$, \newline \newline $2^{12} \sum s(T_{3,2},G_v^+)=n^{\underline 5}+8(n-2)^{\underline 3}g_{2}+18(n-3)^{\underline 2}g_{3}+4(n-3)^{\underline 2}g_{4}+8(n-4)g_{5}+9(n-4)g_{6}+24(n-4)g_{7}+12(n-4)g_{8}+3(n-4)g_{9}-(n-4)g_{11}+g_{12}+12g_{13}+12g_{14}+12g_{16}-12g_{19}+2g_{20}-12g_{22}-8g_{23}-2g_{24}-4g_{25}-3g_{26}-12g_{28}-24g_{29}-12g_{30}-18g_{31}-9g_{32}-8g_{33}-g_{34}$, \newline \newline $2^{8} \sum s(K_2,G_v^-)^2=n^{\underline 3}({n}^{2}+n+4)-2(n-2)({n}^{2}-3n+8)g_{2}+2({n}^{2}-7n+4)g_{3}+2({n}^{2}+n-4)g_{4}-4ng_{5}-(3n+4)g_{6}+8(n-2)g_{7}-4(n-4)g_{8}+8g_{9}-8g_{10}+g_{12}-2g_{13}-4g_{14}+2g_{15}+4g_{16}+2g_{20}-4g_{21}+g_{26}$, \newline \newline $3\cdot 2^{11} \sum s(K_{1,3},G_v^-)=n^{\underline 5}-4(n-2)^{\underline 3}g_{2}+6(n-3)^{\underline 2}g_{3}-4(n-3)^{\underline 2}g_{4}-3(n-4)g_{6}+12(n-4)g_{8}+3(n-4)g_{9}-12(n-4)g_{10}+3(n-4)g_{11}-3g_{12}+12g_{13}-12g_{14}+12g_{15}-12g_{16}-12g_{19}-2g_{20}+12g_{22}+2g_{24}+4g_{25}-3g_{26}+12g_{28}-12g_{30}-6g_{31}+3g_{32}+4g_{33}-g_{34}$, \newline \newline $2^{13} \sum s(C_4,G_v^-)=n^{\underline 5}-2(n-2)^{\underline 3}g_{2}-2(n-3)^{\underline 2}g_{3}-2(n-3)^{\underline 2}g_{4}-4(n-4)g_{5}-(n-4)g_{6}+20(n-4)g_{7}-4(n-4)g_{8}-(n-4)g_{9}-6(n-4)g_{10}+5(n-4)g_{11}+5g_{12}-6g_{13}-4g_{14}-6g_{15}-4g_{16}+20g_{17}-12g_{18}-4g_{19}+10g_{20}-12g_{21}-4g_{22}-4g_{23}+10g_{24}-2g_{25}-g_{26}+4g_{27}-4g_{28}+20g_{29}-6g_{30}-2g_{31}-g_{32}-2g_{33}+g_{34}$, \newline \newline $2^{11} \sum s(T_{3,1},G_v^- )=n^{\underline 5}-2(n-2)^{\underline 3}g_{2}+2(n-3)^{\underline 2}g_{3}+2(n-3)^{\underline 
2}g_{4}-4(n-4)g_{5}-5(n-4)g_{6}+4(n-4)g_{7}-4(n-4)g_{8}-(n-4)g_{9}+2(n-4)g_{10}+(n-4)g_{11}+g_{12}+6g_{13}+4g_{14}+2g_{15}+4g_{16}-4g_{17}-4g_{18}-4g_{19}-2g_{20}-4g_{21}+4g_{22}-4g_{23}-2g_{24}+2g_{25}-g_{26}-4g_{27}+4g_{28}+4g_{29}+6g_{30}+2g_{31}-5g_{32}-2g_{33}+g_{34}$, \newline \newline $2^{12} \sum s(T_{3,2},G_v^-)=n^{\underline 5}+2(n-3)^{\underline 2}g_{3}+4(n-3)^{\underline 2}g_{4}-8(n-4)g_{5}-7(n-4)g_{6}+8(n-4)g_{7}-4(n-4)g_{8}+3(n-4)g_{9}-8(n-4)g_{10}-(n-4)g_{11}+g_{12}-4g_{13}-4g_{14}+8g_{15}+12g_{16}+4g_{19}+2g_{20}+4g_{22}+8g_{23}-2g_{24}-4g_{25}-3g_{26}-12g_{28}-8g_{29}+4g_{30}-2g_{31}+7g_{32}-g_{34}$, \newline \newline $2^{7} \sum s(P_3,G_v^-)=n^{\underline 4}-2(n-2)^{\underline 2}g_{2}-g_{6}+4g_{7}-g_{9}-2g_{10}+g_{11}$, \newline \newline $2^{8} \sum s(K_1,G_v^+)s(P_3,G_v^-)=n^{\underline 5}-(n-2)^{\underline 3}g_{2}-3(n-3)^{\underline 2}g_{3}+3(n-4)g_{5}+2(n-4)g_{7}+(n-4)g_{8}-(n-4)g_{9}-2(n-4)g_{10}+(n-4)g_{11}-g_{12}-2g_{13}+3g_{14}-g_{15}+2g_{16}-2g_{17}-g_{18}+g_{19}-g_{20}+3g_{21}-3g_{22}+g_{23}$, \newline \newline $2^{5} \sum s(K_1,G_v^+)s(K_2,G_v^-)=n^{\underline 4}-3(n-3)g_{3}+(n-3)g_{4}+g_{5}+g_{6}-2g_{7}+g_{8}$, \newline \newline $2^{6} \sum s(K_1,G_v^+)^2s(K_2,G_v^-)=n^{\underline 4}(n-2)+(n-2)^{\underline 3}g_{2}-2(n-3)(2n-5)g_{3}+(n-2)^{\underline 2}g_{4}+2g_{5}+2(n-3)g_{6}-4(n-3)g_{7}+2(n-3)g_{8}+g_{12}+g_{13}-2g_{14}+g_{15}$. \end{proof} \end{document}
\begin{document} \twocolumn[ \icmltitle{Learning Infinite Layer Networks Without the Kernel Trick} \begin{icmlauthorlist} \icmlauthor{Roi Livni}{pr} \icmlauthor{Daniel Carmon}{tau} \icmlauthor{Amir Globerson}{tau} \end{icmlauthorlist} \icmlaffiliation{pr}{University of Princeton, Princeton, New Jersey, USA} \icmlaffiliation{tau}{Tel-Aviv University, Tel-Aviv, Israel} \icmlcorrespondingauthor{Roi Livni}{[email protected]} \icmlcorrespondingauthor{Daniel Carmon}{[email protected]} \icmlcorrespondingauthor{Amir Globerson}{[email protected]} \vskip 0.3in ] \printAffiliationsAndNotice{} \begin{abstract} Infinite Layer Networks (ILN) have been proposed as an architecture that mimics neural networks while enjoying some of the advantages of kernel methods. ILN are networks that integrate over infinitely many nodes within a single hidden layer. It has been demonstrated by several authors that the problem of learning ILN can be reduced to the kernel trick, implying that whenever a certain integral can be computed analytically they are efficiently learnable. In this work we give an online algorithm for ILN, which avoids the \emph{kernel trick assumption}. More generally and of independent interest, we show that kernel methods in general can be exploited even when the kernel cannot be efficiently computed but can only be estimated via sampling. We provide a regret analysis for our algorithm, showing that it matches the sample complexity of methods which have access to kernel values. Thus, our method is the first to demonstrate that the kernel trick is not necessary, as such, and random features suffice to obtain comparable performance. \end{abstract} \section{Introduction} With the increasing success of highly non-convex and complex learning architectures such as neural networks, there is an increasing effort to further understand and explain the limits of training such hierarchical structures. 
Recently there have been attempts to draw mathematical insight from kernel methods in order to better understand deep learning, as well as come up with new computationally learnable architectures. One such line of work consists of learning classifiers that are linear functions of a very large or infinite collection of non-linear functions \cite{bach2014breaking,daniely2016toward,cho2009kernel,heinemann2016improper,williams1997computing}. Such models can be interpreted as a neural network with infinitely many nodes in a hidden layer, and we thus refer to them as ``Infinite Layer Networks'' (ILN). They are of course also related to kernel based classifiers, as will be discussed later. A target function in an ILN class will be of the form: \begin{equation}gin{equation}\label{eq:infNet} \mathcal{X}x\to \int \musi(\mathcal{X}x; \mathbf{w}) f(\mathbf{w}) d\mu(\mathbf{w}), \end{equation} Here $\musi$ is some function of the input $\mathcal{X}x$ and parameters $\mathbf{w}$, and $d\mu(\mathbf{w})$ is a prior over the parameter space. For example, $\musi(\mathcal{X}x; \mathbf{w})$ can be a single sigmoidal neuron or a complete convolutional network. The integral can be thought of as an infinite sum over all such possible networks, and $f(\mathbf{w})$ can be thought of as an infinite output weight vector to be trained. A Standard $1$--hidden layer network with a finite set of units can be obtained from the above formalism as follows. First, choose $ \musi(\mathcal{X}x; \mathbf{w}) = \sigma(\mathcal{X}x\cdot\mathbf{w})$ where $\sigma$ is an activation function (e.g., sigmoid or relu). Next, set $d\mu(\mathbf{w})$ to be a discrete measure over a finite set $\mathbf{w}_1,\ldots, \mathbf{w}_d$.\footnote{In $\delta$ function notation $d\mu(\mathbf{w}) = \frac{1}{d}\sum_{i=1}^d \delta(\mathbf{w}-\mathbf{w}_i)d\mathbf{w}$} In this case, the integral results in a network with $d$ hidden units, and the function $f$ is the linear weights of the output layer. 
Namely: \[\mathcal{X}x \to \frac{1}{d} \sum_{i=1}^d f(\mathbf{w}_i) \cdot \sigma(\mathcal{X}x\cdot \mathbf{w}_i).\] The main challenge when training $1$--hidden layer networks is of course to \emph{find} the $\mathbf{w}_1,\ldots,\mathbf{w}_d$ on which we wish to support our distribution. It is known \cite{livni2014computational}, that due to hardness of learning intersection of halfspaces \cite{klivans2006cryptographic,daniely2014average}, $1$--hidden layer neural networks are computationally hard for a wide class of activation functions. Therefore, as the last example illustrates, the choice of $\mu$ is indeed crucial for performance. For a fixed prior $\mu$, the class of ILN functions is highly expressive, since $f$ can be chosen to approximate any 1-hidden layer architecture to arbitrary precision (by setting $f$ to delta functions around the weights of the network, as we did above for $\mu$). However, this expressiveness comes at a cost. As argued in \citet{heinemann2016improper}, ILN will generalize well when there is a large probability mass of $\mathbf{w}$ parameters that attain a small loss. The key observation that makes certain ILN tractable to learn is that \eqref{eq:infNet} is a linear functional in $f$. In that sense it is a linear classifier and enjoys the rich theory and algorithmic toolbox for such classifiers. In particular, one can use the fact that linear classifiers can be learned via the kernel trick in a batch \cite{cortes1995support} as well as online settings \cite{kivinen2004online}. In other words, we can reduce learning ILN to the problem of computing the kernel function between two examples. 
Specifically, the problem reduces to computing integrals of the following form: \begin{eqnarray}\label{eq:kernelTrick} k(\mathcal{X}x_1,\mathcal{X}x_2) &=& \int \musi(\mathcal{X}x_1; \mathbf{w}) \cdot \musi(\mathcal{X}x_2; \mathbf{w}) d\mu(\mathbf{w}) \\ &=& \mathop\mathbb{E}_{\bar{\mathbf{w}}\sim \mu}\left[\musi(\mathcal{X}x_1; \bar{\mathbf{w}})\cdot \musi(\mathcal{X}x_2 ; \bar{\mathbf{w}})\right]. \end{eqnarray} In this work we extend this result to the case where no closed form kernel is available, and thus the kernel trick is not directly applicable. We thus turn our attention to the setting where features (i.e., $\mathbf{w}$ vectors) can be randomly sampled. In this setting, our main result shows that for the squared loss, we can efficiently learn the above class. Moreover, we can surprisingly do this with a computational cost comparable to that of methods that have access to the closed form kernel $k(\mathcal{X}x_1,\mathcal{X}x_2)$. The observation we begin with is that sampling random features (i.e., $\mathbf{w}$ above), leads to an unbiased estimate of the kernel in \eqref{eq:kernelTrick}. Thus, if for example, we ignore complexity issues and can sample infinitely many $\mathbf{w}$'s, it is not surprising that we can avoid the need for exact computation of the kernel. However, our results provide a much stronger and practical result. Given $T$ training samples, the lower bound on achievable accuracy is $O(1/\sqrt{T})$ \citep[see][]{shamir2014sample}. We show that we can in fact achieve this rate, using $\tilde{O}(T^2)$ calls\footnote{We use $\tilde{O}$ notation to suppress logarithmic factors} to the random feature generator. For comparison, note that $O(T^2)$ is the size of the kernel matrix, and is thus likely to be the cost of any algorithm that uses an explicit kernel matrix, where one is available. 
As we discuss later, our approach improves on previous random features based learning \cite{dai2014scalable,rahimi2009weighted} in terms of sample/computational complexity, and expressiveness. \section{Problem Setup} We consider algorithms that learn a mapping from input instances $\mathcal{X}x\in \mathcal{X}$ to labels $y\in \mathcal{Y}$. We focus on the regression case where $\mathcal{Y}$ is the interval $[-1,1]$. Our starting point is a class of feature functions $\action{\mathbf{w}}{\mathcal{X}x}: \Omega\times \mathcal{X} \to \mathbb{R}$, parametrized by vectors $\mathbf{w} \in \Omega$. The functions $\action{\mathbf{w}}{\mathcal{X}x}$ may contain highly complex non-linearities, such as multi-layer networks consisting of convolution and pooling layers. Our only assumption on $\action{\mathbf{w}}{\mathcal{X}x}$ is that for all $\mathbf{w}\in \Omega$ and $\mathcal{X}x\in \mathcal{X}$ it holds that $|\action{\mathbf{w}}{\mathcal{X}x}|<1$. Given a distribution $\mu$ on $\Omega$, we denote by $L_2(\Omega,\mu)$ the class of square integrable functions over $\Omega$. \[L_2(\Omega,\mu) = \left\{f: \int f^2(\mathbf{w})d\mu(\mathbf{w})<\infty\right\}.\] We will use functions $f\in L_2(\Omega,\mu)$ as mixture weights over the class $\Omega$, where each $f$ naturally defines a new regression function from $\mathcal{X}x$ to $\mathbb{R}$ as follows: \begin{equation}\label{eq:identification} \mathcal{X}x\to \int \action{\mathbf{w}}{\mathcal{X}x}f(\mathbf{w})d\mu(\mathbf{w}).\end{equation} Our key algorithmic assumption is that the learner can efficiently sample random $\mathbf{w}$ according to the distribution $\mu$. Denote the time to generate one such sample by $\rho$. In what follows it will be simpler to express the integrals as scalar products. Define the following scalar product on functions $f\in L_2(\Omega,\mu)$. 
\begin{equation} \scalar{f}{g} = \int f(\mathbf{w})g(\mathbf{w})d\mu(\mathbf{w}) \end{equation} We denote the corresponding $\ell_2$ norm by $\norm{f} = \sqrt{\scalar{f}{f}}$. Also, given features $\mathcal{X}x$ denote by $\fwx{\mathcal{X}x}$ the function in $L_2(\Omega,\mu)$ given by $\fwx{\mathcal{X}x}[\mathbf{w}]=\action{\mathbf{w}}{\mathcal{X}x}$. The regression functions we are considering are then of the form $\mathcal{X}x \to \scalar{f}{\fwx{\mathcal{X}x}}$. A subclass of norm bounded elements in $L_2(\Omega,\mu)$ induces a natural subclass of regression functions. Namely, we consider the following class: \[ \mathcal{H}^B_\mu= \left\{\mathcal{X}x\to \scalar{f}{\fwx{\mathcal{X}x}}: \|f\|<B\right\}.\] Our ultimate goal is to output a predictor $f\in L_2(\Omega,\mu)$ that is competitive, in terms of prediction, with the best target function in the class $\mathcal{H}^B_\mu$. We will consider an online setting, and use it to derive generalization bounds via standard online to batch conversion. In our setting, at each round a learner chooses a target function $f_t\in L_2(\Omega,\mu)$ and an adversary then reveals a sample $\mathcal{X}x_t$ and label $y_t$. The learner then incurs a loss of \begin{equation} \ell_t(f_t) = \frac{1}{2}\left(\scalar{f_t}{\fwx{\mathcal{X}x_t}} - y_t\right)^2. \label{eq:loss} \end{equation} The use of squared loss might seem restrictive if one is interested in classification. However, $L_2$ loss is common by now in classification with support vector machines and kernel methods since \citep{suykens1999least, suykens2002weighted}. More recently \citet{zhang2016understanding} showed that when using a large number of features regression achieves performance comparable to the corresponding linear classifiers (see Section 5 therein). The objective of the learner is to minimize her $T$ round regret w.r.t norm bounded elements in $L_2(\Omega,\mu)$. 
Namely: \begin{equation} \sum_{t=1}^T \ell_t(f_t) - \min_{f^*\in \mathcal{H}^B_\mu} \sum_{t=1}^T \ell_t(f^*). \label{eq:regret} \end{equation} In the statistical setting we assume that the sequence $S=\{(\mathcal{X}x_i,y_i)\}_{i=1}^T$ is generated IID according to some unknown distribution $\mathbb{P}$. We then define the expected loss of a predictor as \begin{equation}\label{eq:statistical} L(f)= \mathop\mathbb{E}_{(\mathcal{X}x,y)\sim \mathbb{P}} \left[\frac{1}{2}\left(\scalar{f}{\fwx{\mathcal{X}x}} - y\right)^2\right].\end{equation} \section{Main Results} \label{sec:results} \thmref{thm:mainmain} states our result for the online model. The corresponding result for the statistical setting is given in \corref{cor:mainmain}. We will elaborate on the structure of the Algorithm later, but first provide the main result. { \begin{algorithm}[h] \KwData{$ T,~ B>1, \eta , m$} \KwResult{Weights $\alpha^{(1)},\ldots,\alpha^{(T+1)}\in\mathbb{R}^{T}$. Functions $f_t \in L_2(\Omega,\mu)$ defined as $f_t = \sum_{i=1}^t \alpha_i^{(t)} \fwx{\mathcal{X}x_i}$\;} Initialize $\alpha^{(1)}=\bar{0} \in \mathbb{R}^T$\; \For{$t=1,\ldots, T$}{ Observe $\mathcal{X}x_t,y_t$\; Set $E_t=\mathrm{EST\_SCALAR\_PROD}(\alpha^{(t)},\mathcal{X}x_{1:{t-1}},\mathcal{X}x_t,m)$;\\ \uIf{$|E_t|<16 B $}{ $\alpha^{(t+1)}=\alpha^{(t)}$\; $\alpha^{(t+1)}_t = -\eta (y_t - E_t)$; } \Else{$\alpha^{(t+1)}=\frac{1}{4}\alpha^{(t)}$;} } \caption{The SHRINKING\_GRADIENT algorithm.} \label{alg:main} \end{algorithm} \begin{algorithm}[h] \caption{EST\_SCALAR\_PROD} \KwData{$\alpha$,~$\mathcal{X}x_{1:{t-1}}$,~$\mathcal{X}x$,~$m$} \KwResult{Estimated scalar product $E$ } \If{$\alpha = \bar{0}$}{ Set $E=0$ } \Else{ \For{$k=1,\ldots,m$}{ Sample $i$ from the distribution $q(i) = \frac{|\alpha_i|}{\sum |\alpha_i|}$ \; Sample parameter $\bar{\mathbf{w}}$ from $\mu$. 
Set $E^{(k)} = \mathrm{sgn}(\alpha_i)\es{\mathcal{X}x_i}{\mathcal{X}x}$;\ } Set $E= \frac{\|\alpha\|_1}{m} \sum_{k=1}^m E^{(k)}$\label{alg:spsub} } \end{algorithm} } \begin{theorem}\label{thm:mainmain} Run \algref{alg:main} with parameters $T$, $B\ge 1$, $\eta= \frac{B}{\sqrt{T}}$ and $m = O\left(B^4 T\log \left(BT\right)\right)$. Then: \begin{enumerate} \item For every sequence of squared losses $\ell_1,\ldots, \ell_T$ observed by the algorithm we have for $f_1,\ldots, f_T$: \[\mathop\mathbb{E}\left[\sum_{t=1}^T \ell_t(f_t) - \min_{f^*\in \mathcal{H}^B_\mu} \sum_{t=1}^T \ell_t(f^*) \right]= O(B\sqrt{T})\] \item The run-time of the algorithm is $\tilde{O}\left(\rho B^4 T^2\right)$.\footnote{Ignoring logarithmic factors in $B$ and $T$.} \item For each $t=1\ldots T$ and a new test example $\mathcal{X}x$, we can with probability $\geq 1-\delta$ estimate $\left\langle f_t,\fwx{\mathcal{X}x}\right\rangle$ within accuracy $\epsilon_0$ by running \algref{alg:spsub} with parameters $\alpha^{(t)}$, $\{\mathcal{X}x_i\}_{i=1}^t$, $\mathcal{X}x$ and $m=O(\frac{B^4T}{\epsilon_0^2}\log 1/\delta )$. The resulting running time for a test point is then $O(\rho m)$. \end{enumerate} \end{theorem} We next turn to the statistical setting, where we provide bounds on the expected performance. Following standard online to batch conversion and \thmref{thm:mainmain} we can obtain the following Corollary \citep[e.g., see][]{shalev2011online}: \begin{corollary}[Statistical Setting]\label{cor:mainmain} The following holds for any $\epsilon > 0$. Run Algorithm \ref{alg:main} as in \thmref{thm:mainmain}, with $T= O( \frac{B^2}{\epsilon^2})$. Let $S=\{(x_t,y_t)\}_{t=1}^T$ be an IID sample drawn from some unknown distribution $\mathbb{P}$. Let $f_S= \frac{1}{T} \sum f_t$. 
Then the expected loss satisfies: \[ \mathop\mathbb{E}_{S\sim \mathbb{P}} \left[L(f_S)\right] < \inf_{f^* \in \mathcal{H}^B_\mu} L(f^*)+\epsilon.\] The runtime of the algorithm, as well as estimation time on a test example are as defined in \thmref{thm:mainmain}. \end{corollary} Proofs of the results are provided in \secref{sec:analysis} and the appendix. \section{Related Work} Learning with random features can be traced to the early days of learning \cite{minsky1988perceptrons}, and infinite networks have also been introduced more than 20 years ago \cite{williams1997computing, hornik1993some}. More recent works have considered learning neural nets (also multi-layer) with infinite hidden units using the kernel trick \cite{cho2009kernel, deng2012use, hazan2015steps, heinemann2016improper}. These works take a similar approach to ours but focus on computing the kernel for certain feature classes in order to invoke the kernel trick. Our work in contrast avoids using the kernel trick and applies to any feature class that can be randomly generated. All the above works are part of a broader effort of trying to circumvent hardness in deep learning by mimicking deep nets through kernels \cite{mairal2014convolutional, bouvrie2009invariance, bo2011object, bo2010kernel}, and developing general duality between neural networks and kernels \cite{daniely2016toward}. From a different perspective the relation between random features and kernels has been noted by \citet{rahimi2007random} who show how to represent translation invariant kernels in terms of random features. This idea has been further studied \cite{bach2015equivalence, kar2012random} for other kernels as well. The focus of these works is mainly to allow scaling down of the feature space and representation of the final output classifier. 
\citet{dai2014scalable} focus on tractability of large scale kernel methods, and their proposed {\em doubly stochastic} algorithm can also be used for learning with random features as we have here. In \citet{dai2014scalable} the objective considered is of the regularized form:$\frac{\gamma}{2} \|f\|^2 + R(f)$, with a corresponding sample complexity of $O(1/(\gamma^2\epsilon^2))$ samples needed to achieve $\epsilon$ approximation with respect to the risk of the optimum of the regularized objective. To relate the above results to ours, we begin by emphasizing that the bound in \cite{dai2014scalable} holds for fixed $\gamma$, and refers to optimization of the regularized objective\ignore{\footnote{See there that the generalization bound (with expectation i.e. thm 4, but similar to thm. 6 you need to take sqroot) is of order $O(\frac{Q_1}{\sqrt{t}})$, and $Q_1\in O(Q_0)$ and $Q_0\in O(\theta^2)$ and $\theta \in O(1/\gamma)$ to conclude we obtain a generalization bound of order $O(\frac{1}{t\gamma^4})$.\roi{Please do not erase this foot note even if put in remark}}}. Our objective is to minimize the risk $R(f)$ which is the expected squared loss, for which we need to choose $\gamma = O(\frac{\epsilon}{B^2})$ in order to attain accuracy $\epsilon$ \cite{sridharan2009fast}. Plugging this $\gamma$ into the generalization bound in \citet{dai2014scalable} we obtain that the algorithm in \citet{dai2014scalable} needs $O(\frac{B^4}{\epsilon^4})$ samples to compete with the optimal target function in the $B$-ball. Our algorithm needs $O(\frac{B^2}{\epsilon^2})$ examples which is considerably better. We note that their method does extend to a larger class of losses, whereas our is restricted to the quadratic loss. 
In \citet{rahimi2009weighted}, the authors consider embedding the domain into the feature space $\mathcal{X}x \to \left[\action{\mathbf{w}_1}{\mathcal{X}x},\ldots,\action{\mathbf{w}_m}{\mathcal{X}x}\right]$, where $\mathbf{w}_i$ are IID random variables sampled according to some prior $\mu(\mathbf{w})$. They show that with $O(\frac{B^2 \log 1/\delta}{\epsilon^2})$ random features estimated on $O(\frac{B^2 \log 1/\delta}{\epsilon^2})$ samples they can compete with the class: \begin{equation} {\mathcal{H}^B_\mu}_{\max}= \left\{\mathcal{X}x \to \int \action{\mathbf{w}}{\mathcal{X}x} f(\mathbf{w}) d\mu(\mathbf{w}) ~: ~|f(\mathbf{w})| \le B\right\} \nonumber \end{equation} Our algorithm relates to the mean square error cost function which does not meet the condition in \citet{rahimi2009weighted}, and is hence formally incomparable. Yet we can invoke our algorithm to compete against a larger class of target functions. Our main result shows that \algref{alg:main}, using $\tilde{O}(\frac{B^8}{\epsilon^4})$ estimated features and using $O(\frac{B^2}{\epsilon^2})$ samples will, in expectation, output a predictor that is $\epsilon$ close to the best in $\mathcal{H}^B_\mu$. Note that $|f(\mathbf{w})| <B$ implies $\mathbb{E}_{\mathbf{w}\sim \mu} (f^2(\mathbf{w})) <B^2$. Hence ${\mathcal{H}^B_\mu}_{\max} \subseteq \mathcal{H}^B_\mu$. Note however, that the number of estimated features (as a function of $B$) is worse in our case. Our approach to the problem is to consider learning with a noisy estimate of the kernel. A related setting was studied in \citet{cesa2011online}, where the authors considered learning with kernels when the data is corrupted. Noise in the data and noise in the scalar product estimation are not equivalent when there is non-linearity in the kernel space embedding. There is also extensive research on linear regression with actively chosen attributes \cite{cesa2011efficient,hazan2012linear}. 
The convergence rates and complexity of the algorithms are dimension dependent. It would be interesting to see if their method can be extended from finite set of attributes to a continuum set of attributes. \section{Algorithm}\label{sec:alg} We next turn to present \algref{alg:main}, from which our main result is derived. The algorithm is similar in spirit to Online Gradient Descent (OGD) \cite{Zinkevich03}, but with some important modifications that are necessary for our analysis. We first introduce the problem in the terminology of online convex optimization, as in \citet{Zinkevich03}. At iteration $t$ our algorithm outputs a hypothesis $f_t$. It then receives as feedback $(\mathcal{X}x_t,y_t)$, and suffers a loss $\ell_t(f_t)$ as in \eqref{eq:loss}. The objective of the algorithm is to minimize the regret against a benchmark of $B$-bounded functions, as in \eqref{eq:regret}. A classic approach to the problem is to exploit the OGD algorithm. Its simplest version would be to update $f_{t+1} \to f_t - \eta \nabla_t$ where $\eta$ is a step size, and $\nabla_t$ is the gradient of the loss w.r.t. $f$ at $f_t$. In our case, $\nabla_t$ is given by: \begin{equation} \nabla_t = \left(\scalar{f_t}{\mathcal{X}xtp} - y_t \right) \mathcal{X}xtp \label{eq:exact_gradient} \end{equation} Applying this update would also result in a function $f_t = \sum_{i=1}^t \alpha_i \mathcal{X}xtp$ as we have in \algref{alg:main} (but with different $\alpha_i$ from ours). However, in our setting this update is not applicable since the scalar product $\scalar{f_t}{\mathcal{X}xtp}$ is not available. One alternative is to use a stochastic unbiased estimate of the gradient that we denote by $\bar{\nabla}_t$. This induces an update step $f_{t+1} \to f_t -\eta \bar{\nabla}_t$. 
One can show that OGD with such an estimated gradient enjoys the following upper bound on the regret $\expect{\sum \ell_t(f_t) - \ell_t(f^*)}$ for every $\|f^*\|\le B$ \citep[e.g., see][]{shalev2011online}: \begin{equation}gin{equation}\label{eq:ogd}\frac{B^2}{\eta} + \eta\sum_{i=1}^T\expect{\|\nabla_t\|^2}+ \eta\sum_{i=1}^T\Var{\bar{\nabla}_t} ~,\end{equation} where $\Var{\bar{\nabla}_t} = \expect{\|\bar{\nabla}_t-\nabla_t\|^2}$. We can bound the first two terms using standard techniques applicable for the squared loss \citep[e.g., see][]{zhang2004solving,srebro2010smoothness}. The third term depends on our choice of gradient estimate. There are various choices for such an estimate, and we use a version which facilitates our analysis, as explained below. Assume that at iteration $t$, our function $f_t$ is given by $f_t = \sum_{i=1}^t \alpha^{(t)}_i \mathcal{X}xtp$. We now want to use sampling to obtain an unbiased estimate of $\scalar{f_t}{\mathcal{X}xtp}$. This will be done via a two step sampling procedure, as described in Algorithm \ref{alg:spsub}. First, sample an index $i\in[1,\ldots,t]$ by sampling according to the distribution $q(i) \muropto |\alpha^{(t)}_i|$. Next, for the chosen $i$, sample $\bar{\mathbf{w}}$ according to $\mu$, and use $\musi(\mathcal{X}x;\bar{\mathbf{w}}) \musi(\mathcal{X}x_i;\bar{\mathbf{w}})$ to construct an estimate of $\scalar{\fwx{\mathcal{X}x_i}}{\mathcal{X}xtp}$. The resulting unbiased estimate of $\scalar{\fwx{\mathcal{X}x_i}}{\mathcal{X}xtp}$ is denoted by $E_t$ and given by: \begin{equation} E_t = \frac{\|\alpha^{(t)}\|_1}{m} \sum_{i=1}^m \textrm{sgn}(\alpha^{(t)}_i)\es{\mathcal{X}x_i}{\mathcal{X}x_t} \end{equation} The corresponding unbiased gradient estimate is: \begin{equation} \bar{\nabla}_t= \left(E_t -y_t\right) \mathcal{X}x_t \end{equation} The variance of $\bar{\nabla}$ affects the convergence rate and depends on both $\|\alpha\|_1$ and the number of estimations $m$. 
We wish to maintain $m=O(T)$ estimations per round, while achieving $O(\sqrt{T})$ regret. To effectively regularize $\|\alpha\|_1$, we modify the OGD algorithm so that whenever $E_t$ is larger then $16 B$, we do not perform the usual update. Instead, we perform a shrinking step that divides $\alpha^{(t)}$ (and hence $f_t$) by $4$. Treating $B$ as constant, this guarantees that $\|\alpha\|_1= O(\eta T )$, and in turn $\textrm{Var}(\bar{\nabla}_t) = O(\frac{\eta^2 T^2}{m})$. Setting $\eta = O(1/\sqrt{T})$, we have that $m=O(T)$ estimations are sufficient. The rationale for the shrinkage is that whenever $E_t$ is large, it indicates that $f_t$ is ``far away'' from the $B$-ball, and a shrinkage step, similar to projection, brings $f_t$ closer to the optimal element in the $B$-ball. However, due to stochasticity, the shrinkage step does add a further term to the regret bound that we would need to take care of. \ignore{ \begin{equation}gin{definition} A set $K$ in the unit ball of a linear space is $D$-bounded if for every $\mathcal{X}x_1,\mathcal{X}x_2 \in K$ we have that $|\es{\mathcal{X}x_1}{\mathcal{X}x_2}|<D$ a.s. \end{definition} \begin{equation}gin{theorem}\label{thm:main} Let $K$ be a $D$-bounded set. Run Algorithm \ref{alg:main} with parameters $T$, $B\ge 1$, $\eta :=\frac{B}{2\sqrt{T}}$, and $m=((16B+1) D B)^2T\log \gamma$, where $\gamma = \frac{((16B+1)\eta T+B)^2)}{\eta^2}$. We assume that $\eta<1/8$. Assume that $\mathcal{X}x_t \in K$ and $y_t\in [-1,1]$ for all $t$ and for each $t$ let $f_t= \sum \alpha^{(t)}_i \fwx{\mathcal{X}x_i}$. 
Then: \[\sum_{t=1}^T \ell_t(f_t) - \min_{\|f^*\|\le B}\sum_{t=1}^T \ell_t(f^*) = O\left(B\sqrt{T}\right).\] The number of times the algorithm performs estimation of the scalar product is \[Tm= \tilde{O}((DB^2T)^2).\] Finally, for any $\mathcal{X}x$ the scalar product $\scalar{f_t}{\fwx{\mathcal{X}x}}$ (i.e., the regression function) can be estimated within accuracy $\epsilon_0$ using $O(\frac{D^2B^4 T}{\epsilon_0^2}\log 1/\delta )$ estimations. \end{theorem}} \subsection{Analysis}\label{sec:analysis} In what follows we analyze the regret for \algref{alg:main}, and provide a high level proof of Theorem \ref{thm:mainmain}. The appendix provides the necessary lemmas and a more detailed proof. We begin by modifying the regret bound for OGD in \eqref{eq:ogd} to accommodate for steps that differ from the standard gradient update, such as shrinkage. We use the following notation for the regret at iteration $t$: \begin{equation} R_t(f^*) = \expect{\sum_{t=1}^T \ell_t(f_t)-\ell_t(f^*)} \end{equation} \begin{equation}gin{lemma}\label{lem:core} Let $\ell_1,\ldots,\ell_T$ be an arbitrary sequence of convex loss functions, and let $f_1,\ldots,f_T$ be random vectors, produced by an online algorithm. Assume $\|f_i\|\le B_T$ for all $i\le T$. For each $t$ let $\bar{\nabla}_t$ be an unbiased estimator of $\nabla \ell_t(f_t)$. Denote $\Omegaat{f_t}=f_{t-1} - \eta \bar{\nabla}_{t-1}$ and let \begin{equation}gin{equation}\label{eq:ptvv} P_t(f^*) = \murob{\|f_t-f^*\|> \|\Omegaat{f}_t-f^*\|}.\end{equation} For every $\|f^*\|\le B$ it holds that : \begin{equation}a\label{eq:main} R_t(f^*) &\leq& \frac{B^2}{\eta} +\eta\sum_{t=1}^T\expect{\|\nabla_t\|^2}+\eta\sum_{t=1}^T\Var{\bar{\nabla}_t} + \nonumber \\ && \sum_{t=1}^T \frac{(B_T+B)^2}{\eta}\expect{P_{t}(f^*)} \end{eqnarray} \end{lemma} See \apref{sec:proof_lem_core} for proof of the lemma. As discussed earlier, the first three terms on the RHS are the standard bound for OGD from \eqref{eq:ogd}. 
Note that in the standard OGD it holds that $f_t=\Omegaat{f}_t$, and therefore $P_t(f^*)=0$ and the last term disappears. The third term will be bounded by controlling $\|\alpha\|_1$. The last term $P_{t}(f^*)$ is a penalty that results from updates that stir $f_t$ away from the standard update step $\Omegaat{f}_t$. This will indeed happen for the shrinkage step. The next lemma bounds this term. See \apref{sec:proof_ptbound} for proof. \begin{equation}gin{lemma}\label{thm:ptbound} Run \algref{alg:main} with parameters $T$, $B\ge 1$ and $\eta<1/8$. Let $\bar{\nabla}_t$ be the unbiased estimator of $\nabla \ell_t(f_t)$ of the form $\bar{\nabla}_t = (E_t -y_t) \mathcal{X}xtp$. Denote $\Omegaat{f}_t= f_t-\eta \bar{\nabla}_t$ and define $P_t(f^*)$ as in \eqref{eq:ptvv}. Then: \[P_t(f^*)\le 2\exp\left(-\frac{m}{(3\eta t)^2}\right)\] \end{lemma} The following lemma (see \apref{sec:proof_bound_var} for proof) bounds the second and third terms of \eqref{eq:main}. \begin{equation}gin{lemma} \label{lemma:bound_var} Consider the setting as in \lemref{thm:ptbound}. Then $\Var{\bar{\nabla}_t} \le \frac{((16 B+1)\eta t)^2}{m}$ and $\expect{\|{\nabla}_{t}\|^2} \le 2\expect{\ell_t(f_t)}$. \end{lemma} \muaragraph{Proof of \thmref{thm:mainmain}} Combining Lemmas \ref{lem:core}, \ref{thm:ptbound} and \ref{lemma:bound_var} and rearranging we get: \begin{equation}a\label{eq:almost_main} && (1-2\eta)\expect{R_t(f^*)} \le \frac{B^2}{\eta} + 2\eta \sum_{t=1}^T \ell_t(f^*) + \\ && \eta \frac{ ((16B+1)\eta T)^2 T}{m}+ \frac{(B_T+B)^2}{\eta} \sum_{t=1}^TP_t(f^*) \nonumber \end{eqnarray} To bound the second term in \eqref{eq:almost_main} we note that: \begin{equation}gin{equation}\label{eq:ltbound}\min_{\|f^*\|<B} \sum_{t=1}^T \ell_t(f^*)\le \sum_{t=1}^T \ell_t(0)\le T.\end{equation} We next set $\eta$ and $m$ as in the statement of the theorem. Namely: $\eta =\frac{B}{2\sqrt{T}}$, and $m=((16B+1) B)^2T\log \gamma$, where $\gamma = \max\left(\frac{((16B+1)\eta T+B)^2)}{\eta^2},e\right)$. 
This choice of $m$ implies that $m>((16B+1)\eta T)^2$, and hence the third term in \eqref{eq:almost_main} is upper bounded by $T$. Next we have that $m> (3\eta t)^2 \log \gamma$ for every $t$, and by the bound on $B_T$ we have that $\gamma> \frac{(B+B_T)^2}{\eta^2}$. Taken together with \lemref{thm:ptbound} we have that: \begin{equation}gin{equation}\label{eq:ptogd} \frac{(B_T+B)^2}{\eta} \sum_{t=1}^T P_t(f^*)\le \eta T.\end{equation} The above bounds imply that: \[(1-2\eta)\expect{R_t(f^*)}\le \frac{B^2}{\eta} + 2\eta T +\eta T+\eta T\] Finally by choice of $\eta$, and dividing both sides by $(1-2\eta)$ we obtain the desired result. \section{Experiments \label{sec:exp}} In this section we provide a toy experiment to compare our Shrinking Gradient algorithm to other random feature based methods. In particular, we consider the following three algorithms: {\bf Fixed-Random:} Sample a set of $r$ features $\mathbf{w}_1,\ldots,\mathbf{w}_r$ and evaluate these on all the train and test points. Namely, all $\mathcal{X}x$ points will be evaluated on the same features. This is the standard random features approach proposed in \citet{rahimi2007random,rahimi2009weighted}. {\bf Doubly Stochastic Gradient Descent \cite{dai2014scalable}:} Here each training point $\mathcal{X}x$ {samples} $k$ features $\mathbf{w}_1,\ldots,\mathbf{w}_k$. These features will from that point on be used for evaluating dot products with $\mathcal{X}x$. Thus, different $\mathcal{X}x$ points will use different features. {\bf Shrinking Gradient:} This is the approach proposed here in \secref{sec:results}. Namely, each training point $\mathcal{X}x$ samples $m$ features in order to calculate the dot product with the current regression function. \ignore{ \begin{equation}gin{itemize} \item {\bf Fixed-Random:} Sample a set of $r$ features $\mathbf{w}_1,\ldots,\mathbf{w}_r$ and evaluate these on all the train and test points. Namely, all $\mathcal{X}x$ points will be evaluated on the same features. 
This is the standard random features approach proposed in \citet{rahimi2007random,rahimi2009weighted}. \item {\bf Doubly Stochastic Gradient Descent \cite{dai2014scalable}:} Here each training point $\mathcal{X}x$ {samples} $k$ features $\mathbf{w}_1,\ldots,\mathbf{w}_k$. These features will from that point on be used for evaluating dot products with $\mathcal{X}x$. Thus, different $\mathcal{X}x$ points will use different features. \item {\bf Shrinking Gradient:} This is the approach proposed here in \secref{sec:results}. Namely, each training point $\mathcal{X}x$ samples $m$ features in order to calculate the dot product with the current regression function. \end{itemize} } In comparing the algorithms we choose $r,k,m$ so that the same overall number of features is calculated. For all methods we explored different initial step sizes and schedules for changing the step size. The key question in comparing the three algorithms is how well they use a given budget of random features. To explore this we perform an experiments to simulate the high dimensional feature case. We consider vectors $\mathcal{X}x\in\mathbb{R}^D$, where a random feature $w$ corresponds to a uniform choice of coordinate $w$ in $\mathcal{X}x$. We work in the regime where $D$ is {\em large} in the sense that $D>T$, where $T$ is the size of the training data. Thus random sampling of $T$ features will not reveal all coordinates of $\mathcal{X}x$. The training set is generated as follows. First, a training set $\mathcal{X}x_1,\ldots,\mathcal{X}x_T\in\mathbb{R}^D$ is sampled from a standard Gaussian. We furthermore clip negative values to zero, in order to make the data sparser and more challenging for feature sampling. Next a weight vector $a\in\mathbb{R}^D$ is chosen as a random sparse linear combination of the training points. This is done in order for the true function to be in the corresponding RKHS. Finally, the training set is labeled using $y_i = a\cdot\mathcal{X}x_i$. 
\ignore{ \begin{equation}gin{itemize} \item A training set $\mathcal{X}x_1,\ldots,\mathcal{X}x_T\in\mathbb{R}^D$ is sampled from a standard Gaussian. We furthermore clip negative values to zero, in order to make the data sparser and more challenging for feature sampling. \item A weight vector $a\in\mathbb{R}^D$ is chosen as a random sparse linear combination of the training points. This is done in order for the true function to be in the corresponding RKHS. \item The training set is labeled using $y_i = a\cdot\mathcal{X}x_i$. \end{itemize} } During training we do not assume that the algorithms have access to $\mathcal{X}x$. Rather they can uniformly sample coordinates from it, which mimics our setting of random features. For the experiment we take $D=550,600,\ldots,800$ and $T=200$. All algorithms perform one pass over the data, to emulate the online regret setting. The results shown in Figure~\ref{linear_experiments} show that our method indeed achieves a lower loss while working with the same feature budget. \begin{equation}gin{figure}[th] \begin{equation}gin{center} \centerline{\includegraphics[scale=0.4]{new_plot.pdf}} \caption{Comparison of three random feature methods. See \secref{sec:exp} for details.} \label{linear_experiments} \end{center} \end{figure} \section{Discussion} We presented a new online algorithm that employs kernels implicitly but avoids the kernel trick assumption. Namely, the algorithm can be invoked even when one has access to only estimations of the scalar product. The problem was motivated by kernels resulting from neural nets, but it can of course be applied to any scalar product of the form we described. As an example of an interesting extension, consider a setting where a learner can observe an unbiased estimate of a coordinate in a kernel matrix, or alternatively the scalar product between any two observations. 
Our results imply that in this setting the above rates are applicable, and at least for the square loss, having no access to the true values in the kernel matrix is not necessarily prohibitive during training. The results show that with sample size $T$ we can achieve error of $O(\frac{B}{\sqrt{T}})$. As demonstrated in \citet{shamir2014sample} these rates are optimal, even when the scalar product is computable. To achieve this rate our algorithm needs to perform $\tilde{O}(B^4 T^2)$ scalar product estimations. When the scalar product can be computed, existing kernelized algorithms need to observe a fixed proportion of the kernel matrix, hence they observe order of $\Omega(T^2)$ scalar products. In \citet{cesa2015complexity} it was shown that when the scalar product can be computed exactly, one would need access to at least $\Omega(T)$ entries to the kernel matrix. It is still an open problem whether one has to access $\Omega(T^2)$ entries when the kernel can be computed exactly. However, as we show here, for fixed $B$ even if the kernel can only be estimated $\tilde{O}(T^2)$ estimations are enough. It would be interesting to further investigate and improve the performance of our algorithm in terms of the norm bound $B$. \ignore{ Another point to consider in future research is the scalability of the predictor at test time. We presented a training algorithm that is comparable with standard kernel methods. Our output predictor can be efficiently estimated with $\epsilon$ accuracy and requires $O(\frac{T}{\epsilon^2})$ generated features. Squared loss is often used as a convex surrogate for $0-1$ loss, and for binary classification constant $\epsilon_0$ is enough under appropriate assumptions. 
It is still interesting to find out if methods such as presented in \cite{rahimi2007random,dai2014scalable} may be used to scale down the representation of the predictor.} To summarize, we have shown that the {\em kernel trick} is not strictly necessary in terms of sample complexity. Instead, simply sampling random features via our proposed algorithm results in a similar sample complexity. Recent empirical results by \citet{zhang2016understanding} show that using a large number of random features and regression comes close to the performance of the first successful multilayer CNNs \cite{krizhevsky2012imagenet} on CIFAR-10. Although deep learning architectures still substantially outperform random features, it is conceivable that with the right choice of random features, and scalable learning algorithms like we present here, considerable improvement in performance is possible. \ignore{ \section{Estimation Concentration Bounds} In this section we provide concentration bounds for the estimation procedure in \algref{alg:spsub}. \begin{lemma}\label{lem:spsub} Run \algref{alg:spsub} with $\alpha$, $\{\mathcal{X}x_i\}_{i=1}^T$, $\mathcal{X}x$, and $m$. Let $f=\sum \alpha_i \mathcal{X}xip$. Assume that $|\action{\mathcal{X}x}{\mathbf{w}}|<1$ for all $\mathbf{w}$ and $\mathcal{X}x$. Let $E$ be the output of \algref{alg:spsub}. Then $E$ is an unbiased estimator for $\left<f,\mathcal{X}xp\right>$ and: \begin{equation} \murob{ |E- \langle f,\mathcal{X}xp\rangle| > \epsilon} \le \exp\left(-\frac{m\epsilon^2}{\|\alpha\|_1^2}\right) \end{equation} \end{lemma} \begin{proof} Consider the random variables $\|\alpha\|_1 E^{(k)}$ (where $E^{(k)}$ is as defined in \algref{alg:spsub}) and note that they are IID. One can show that $\expect{\|\alpha\|_1 E^{(k)}}=\sum \alpha_i\expect{\action{\mathcal{X}x_i}{\mathbf{w}}\action{\mathcal{X}x}{\mathbf{w}}} = \langle f,\mathcal{X}xp\rangle$. 
By the bound on $\action{\mathcal{X}x}{\mathbf{w}}$ we have that $\left|\|\alpha\|_1E^{(k)}\right| < \|\alpha\|_1$ with probability $1$. Since $E=\frac{1}{m} \sum E^{(k)}$ the result follows directly from Hoeffding's inequality. \end{proof} Next, we bound the $\alpha^{(t)}$ coefficients and obtain a concentration bound for the estimated dot product $E_t$. \begin{lemma}\label{lem:estimation} The $\alpha^{(t)}$ obtained in \algref{alg:main} satisfies: \[\|\alpha^{(t)}\|_1 \le (16B+1)\eta t .\] As a corollary of this and Lemma \ref{lem:spsub} we have that the function $f_t$ satisfies: \begin{equation} \murob{ |E_t- \langle f_t,\mathcal{X}xtp\rangle| > \epsilon} \le \exp\left(-\frac{\epsilon^2 m}{((16B+1) \eta t)^2}\right) \end{equation} \end{lemma} \begin{proof} We prove the statement by induction. We separate into two cases, depending on whether the shrinkage step was performed or not. If $|E_t|\geq 16B$ the algorithm sets $\alpha^{(t+1)}=\frac{1}{4} \alpha^{(t)}$, and: \[\|\alpha^{(t+1)}\|_1=\frac{1}{4}\|\alpha^{(t)}\|_1 \le (16 B+1)\eta (t+1)\] If $|E_t|< 16B$ the gradient update is performed. Since $|y_t|\leq 1$ we have that $|E_t-y_t |<16B +1$ and: \[ \|\alpha^{(t+1)}\|_1 \le \|\alpha^{(t)}\|_1 +\eta |E_t-y_t |\le (16 B+1)\eta (t+1).\] \end{proof} \input{app_b} \subsection{Proof of Lemma \ref{lemma:bound_var} \label{sec:proof_bound_var}} \ignore{ We begin by deriving \corref{cor:standardTerms} that bounds the first two terms in the regret bound. As discussed, this section follows standard techniques. We begin with an upper bound on $\mathbb{E}(\|\bar{\nabla}_t\|^2)$. \begin{lemma} Consider the setting as in \lemref{thm:ptbound}. 
Then \[\Var{\bar{\nabla}_t} \le \frac{((16 B+1)\eta t)^2}{m},\] and, \[\expect{\|{\nabla}_{t}\|^2} \le 2\expect{\ell_t(f_t)}.\] \end{lemma} \begin{proof} } Begin by noting that since $\|\fwx{\mathcal{X}x}\|<1$, it follows from the definitions of $\nabla,\bar{\nabla}$ that $\Var{\bar{\nabla}_{t}}=\expect{\|\bar{\nabla}_{t}-\nabla_{t}\|^2} $ and therefore \[ \Var{\bar{\nabla}_{t}} \le \expect{\left(E_t-\langle f_t,\mathcal{X}xtp\rangle\right)^2}=\Var{E_t} \] By construction (see \algref{alg:spsub}) we have that: \[\Var{E_t}= \frac{1}{m}\Var{\|\alpha^{(t)}\|_1^2\action{\mathcal{X}x_i}{\mathbf{w}}{\action{\mathcal{X}x_t}{\mathbf{w}}}}\] where the index $i$ is sampled as in \algref{alg:spsub}, and $\action{\mathcal{X}x_i}{\mathbf{w}}{\action{\mathcal{X}x_t}{\mathbf{w}}}$ is bounded by $1$. By \lemref{lem:estimation} we have that \[\Var{E_t} \le \frac{((16B+1)\eta t)^2}{m}.\] This provides the required bound on $\Var{\bar{\nabla}_{t}}$. Additionally, we have that \[ \|\nabla_t\|^2= (\scalar{f_t}{\fwx{\mathcal{X}x_t}}-y_t)^2\|\fwx{\mathcal{X}x_t}\|^2 \le 2\ell_t(f_t) \] and the result follows by taking expectation. \ignore{ We thus have the following corollary that bounds the first three terms in \eqref{eq:main}: \begin{corollary}\label{cor:standardTerms} With the notations and setting of \lemref{thm:ptbound} we have: \begin{eqnarray} \frac{B^2}{\eta} + \eta\sum_{t=1}^T \expect{\|{\nabla}_{t}\|^2} +\eta\sum_{t=1}^T \Var{\bar{\nabla}_{t}} \le && 2\eta \expect{\sum_{t=1}^T \ell_t(f_t)-\ell_t(f^*)} +\frac{B^2}{\eta} \\ && +2\eta \sum_{t=1}^T \ell_t(f^*)+ \eta \sum_{t=1}^T \frac{((16B+1)\eta T)^2}{m} \nonumber \end{eqnarray} \end{corollary} } \ignore{ \subsection{Proof of \thmref{thm:mainmain}} Recall that Lemma \ref{lem:core} and Corollary \ref{cor:standardTerms} assume an upper bound $B_T$ on $\norm{f_t}$. 
We begin by noting that $B_T$ can be bounded as follows, using \lemref{lem:estimation}: \begin{equation} B_T = \max_{t} \|f_t\| \le \max_t \|\alpha^{(t)}\|_1\le (16 B+1 ) \eta T. \end{equation} Plugging \corref{cor:standardTerms} into \eqref{eq:main} we obtain: \begin{equation}\label{eq:almost} (1-2\eta)\expect{\sum_{t=1}^T \ell_t(f_t)-\ell_t(f^*)} \le \frac{B^2}{\eta} + 2\eta \sum_{t=1}^T \ell_t(f^*) +\eta\sum_{t=1}^T \frac{ ((16B+1)\eta T)^2}{m}+ \frac{(B_T+B)^2}{\eta} \sum_{t=1}^TP_t(f^*) \end{equation} To bound the second term we note that: \begin{equation}\label{eq:ltbound}\min_{\|f^*\|<B} \sum_{t=1}^T \ell_t(f^*)\le \sum_{t=1}^T \ell_t(0)\le T.\end{equation} We next set $\eta$ and $m$ as in the statement of the theorem. Namely: $\eta =\frac{B}{2\sqrt{T}}$, and $m=((16B+1) B)^2T\log \gamma$, where $\gamma = \max\left(\frac{((16B+1)\eta T+B)^2}{\eta^2},e\right)$. Our choice of $m$ implies that $m>((16B+1)\eta T)^2$, and hence the third term in \eqref{eq:almost} is bounded as follows: \begin{equation}\label{eq:mbound}\eta \sum_{t=1}^T \frac{((16B+1)\eta T)^2}{m} \le \eta T\end{equation} Next we have that $m> (3\eta t)^2 \log \gamma$ for every $t$, and by the bound on $B_T$ we have that $\gamma> \frac{(B+B_T)^2}{\eta^2}$. Taken together with \lemref{thm:ptbound} we have that: \begin{equation}\label{eq:ptogd} \frac{(B_T+B)^2}{\eta} \sum_{t=1}^T P_t(f^*)\le \eta T.\end{equation} \[(1-2\eta)\expect{\sum_{t=1}^T \ell_t(f_t)-\ell_t(f^*)}\le \frac{B^2}{\eta} + 2\eta T +\eta T+\eta T\] Finally by choice of $\eta$, and dividing both sides by $(1-2\eta)$ we obtain the desired result. It remains to show that we can estimate each $f_t$ in the desired complexity (the result for the averaged $f$ is the same). 
Each $f_t$ has the form $f_t= \sum_{i=1}^T \alpha_i^{(t)}\mathcal{X}x_i$. By \lemref{lem:spsub} and \lemref{lem:estimation}, running \algref{alg:spsub} $m$ iterations will lead to a random variable $E$ such that: \[\murob{|E-\left<f_t,\mathcal{X}x\right>| > \epsilon}\le \exp\left(-\frac{\epsilon^2 m}{((16B+1)B\sqrt{T})^2}\right).\] We obtain that order of $O(\frac{B^4 T}{\epsilon^2}\log 1/\delta)$ estimations are enough. } \textbf{Acknowledgements} The authors would like to thank Tomer Koren for helpful discussions. Roi Livni was supported by funding from Eric and Wendy Schmidt Fund for Strategic Innovation. This work was supported by the Blavatnik Computer Science Research Fund, the Intel Collaborative Research Institute for Computational Intelligence (ICRI-CI), and an ISF Centers of Excellence grant. \small \normalsize \end{document}
\begin{document} \title{An approach to nonsolvable base change and descent} \author{Jayce R.~Getz} \address{Department of Mathematics and Statistics\\ McGill University\\ Montreal, QC, H3A 2K6} \email{[email protected]} \subjclass[2000]{Primary 11F70} \begin{abstract} We present a collection of conjectural trace identities and explain why they are equivalent to base change and descent of automorphic representations of $\mathrm{GL}_n(\mathbb{A}_F)$ along nonsolvable extensions (under some simplifying hypotheses). The case $n=2$ is treated in more detail and applications towards the Artin conjecture for icosahedral Galois representations are given. \end{abstract} \maketitle \tableofcontents \section{Introduction} Let $F$ be a number field and let $v$ be a nonarchimedean place of $F$. By the local Langlands correspondence, now a theorem due to Harris and Taylor building on work of Henniart, there is a bijection \begin{align} \label{loc-Langl} \left(\varphi_v:W_{F_v}' \to \mathrm{GL}_n(\mathbb{C})\right) \longmapsto \pi(\varphi_v) \end{align} between equivalence classes of Frobenius semisimple representations $\varphi_v$ of the local Weil-Deligne group $W_{F_v}'$ and isomorphism classes of irreducible admissible representations of $\mathrm{GL}_n(F_v)$. The bijection is characterized uniquely by certain compatibilities involving $\varepsilon$-factors and $L$-functions which are stated precisely in \cite{HT} (see also \cite{PreuveHenn}). The corresponding statement for $v$ archimedean was proven some time ago by Langlands \cite{LanglandsArch}. We write $\varphi_v(\pi_v)$ for any representation attached to $\pi_v$ and call it the \textbf{Langlands parameter} or \textbf{$L$-parameter} of $\pi_v$; it is unique up to equivalence of representations. Now let $E/F$ be an extension of number fields, let $v$ be a place of $F$ and let $w|v$ be a place of $E$. 
We say that an admissible irreducible representation $\Pi_w$ of $\mathrm{GL}_n(E_w)$ is a \textbf{base change} of $\pi_v$ and write $\pi_{vEw}:=\Pi_w$ if \[ \varphi(\pi_v)|_{W_{E_w}'}\cong \varphi(\Pi_w). \] In this case we also say that $\Pi_w$ \textbf{descends} to $\pi_v$. We say that an isobaric\footnote{For generalities on isobaric automorphic representations see \cite{LanglEinM} and \cite{JSII}.} automorphic representation $\Pi$ of $\mathrm{GL}_n(\mathbb{A}_E)$ is a \textbf{base change} (resp.~\textbf{weak base change}) of an isobaric automorphic representation $\pi$ of $\mathrm{GL}_n(\mathbb{A}_F)$ if $\Pi_w=\pi_{vE}$ for all (resp.~almost all) places $v$ of $F$ and all places $w|v$ of $E$. If $\Pi$ is a (weak) base change of $\pi$, then we also say $\Pi$ descends (weakly) to $\pi$. We write $\pi_E$ for a weak base change of $\pi$, if it exists; it is uniquely determined up to isomorphism by the strong multiplicity one theorem \cite[Theorem 4.4]{JSII}. If $\Pi$ is a weak base change of $\pi$, we say that the base change is compatible at a place $v$ of $F$ if $\Pi_w$ is a base change of $\pi_v$ for all $w|v$. If $E/F$ is a prime degree cyclic extension, then the work of Langlands \cite{Langlands} for $n=2$ and Arthur-Clozel \cite{AC} for $n$ arbitrary implies that a base change always exists. The fibers and image of the base change are also described in these works. Given that any finite degree Galois extension $E/F$ contains a family of subextensions $E=E_0 \geq E_1 \geq \cdots \geq E_n=F$ where $E_i/E_{i+1}$ is Galois with simple Galois group, to complete the theory of base change it is necessary to understand base change and descent with respect to Galois extensions with simple nonabelian Galois group. In this paper we introduce a family of conjectural trace identities that are essentially equivalent to proving base change and descent in this setting. 
The (conjectural) trace identity is based on combining two fundamental paradigms pioneered by Langlands, the second still in its infancy: \mathbf Egin{itemize} \item Comparison of trace formulae, and \item Beyond endoscopy. \end{itemize} The point of this paper is to provide some evidence that proving the conjectural trace identities unconditionally is a viable strategy for proving nonsolvable base change. \mathbb{S}ubsection{Test functions} Fix an integer $n \mathfrak{g}eq 1$, a Galois extension of number fields $E/F$, an automorphism $\tau \in \mathrm{Gal}(E/F)$, and a set of places $S_0$ of $E$ containing the infinite places and the places where $E/F$ is ramified. Let $w \not \in S_0$ and let $v$ be the place of $F$ below $w$. Let $$ A=\mathbf Egin{pmatrix} t_{1w} & & \\ & \mathfrak{d}ots & \\ & & t_{nw}\end{pmatrix}{\sf q}uad \textrm{ and } {\sf q}uad A^{\tau}=\mathbf Egin{pmatrix} t_{1w^{\tau}} & & \\ & \mathfrak{d}ots & \\& & t_{nw^{\tau}}\end{pmatrix}. $$ We view these as matrices in ${\sf p}rod_{w|v} \mathrm{GL}_n(\textf{C}C[t_{1w}^{{\sf p}m 1},\cdots,t_{nw}^{{\sf p}m 1}])$ For $j \in \mathbb{Z}_{> 0}$ let $\mathrm{Sym}^j:\mathrm{GL}_n \to \mathrm{GL}_{\binom{n+j-1}{j}}$ be the $j$th symmetric power representation, where $\binom{m}{j}$ is the $m$-choose-$j$ binomial coefficient. For a prime power ideal $\varpi_w^j$ of $\mathcal{O}_{E}$ define a test function \mathbf Egin{align} f(\varpi_w^j):=\mathcal{S}^{-1}(\mathrm{tr}(\mathrm{Sym}^j(A \otimes (A^{\tau})^{-1}))) \in C_c^{\infty}(\mathrm{GL}_n(E \otimes_F F_v)//\mathrm{GL}_n(\mathcal{O}_{E} \otimes_{\mathcal{O}_F}\mathcal{O}_{F_v})) \end{align} where $\mathcal{S}$ is the Satake isomorphism (see \S \ref{ssec-uha}). We denote by $f(\mathcal{O}_{E_w})$ the characteristic function of $\mathrm{GL}_n(\mathcal{O}_E \otimes_{\mathcal{O}_F} \mathcal{O}_{F_v})$ and regard the $f(\varpi_w^j)$ as elements of $C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_E^{\infty})//\mathrm{GL}_n(\widehat{\mathcal{O}}_E))$. 
Define $f(\mathfrak{n}) \in C_c^{\infty}(\mathrm{GL}_{n}(\mathbb{A}_{E}^{\infty})//\mathrm{GL}_{n}(\widehat{\mathcal{O}}_E))$ in general by declaring that $f$ is multiplicative, that is, if $\mathfrak{n}+\mathfrak{m}=\mathcal{O}_E$ we set $$ f(\mathfrak{n}\mathfrak{m}):=f(\mathfrak{n})*f(\mathfrak{m}) $$ where the asterisk denotes convolution in the Hecke algebra. If $\mathfrak{n}$ is coprime to $S_0$, we often view $f(\mathfrak{n})$ as an element of $C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_E^{S_0})//\mathrm{GL}_n(\widehat{\mathcal{O}}_E^{S_0}))$. Assume that $\Pi$ is an isobaric automorphic representation of $\mathrm{GL}_n(\mathbb{A}_E)$ unramified outside of $S_0$. Define $\Pi^{\tau}$ by $\Pi^{\tau}(g):=\Pi(g^{\tau})$. The purpose of defining the operators $f(\mathfrak{m})$ is the following equality: $$ \mathbb{S}um_{\mathfrak{m} \mathbb{S}ubset \mathcal{O}_{E}^{S_0}} \frac{\mathrm{tr}(\Pi^{S_0})(f(\mathfrak{m}))}{|\mathrm{N}_{F/\mathbb{Q}}(\mathfrak{m})|^{s}}=L^{S_0}(s,\Pi \times \Pi^{\tau}) $$ This follows from \eqref{RS-descr} and the fact that, in the notation of loc.~cit., $A(\Pi^{\tau}_w)=A(\Pi_{w^{\tau}})$. Let ${\sf p}hi \in C_c^{\infty}(0,\infty)$ be nonnegative. Thus $\widetilde{{\sf p}hi}(1) >0$, where $$ \widetilde{{\sf p}hi}(s):=\int_{0}^{\infty}{\sf p}hi(s)x^{s-1}dx $$ is the Mellin transform of ${\sf p}hi$. We introduce the following test function, a modification of that considered by Sarnak in \cite{Sarnak}: \mathbf Egin{align} \label{Sig-func} \Sigma^{S_0}(X):=\Sigma_{{\sf p}hi}^{S_0}(X):=\mathbb{S}um_{\mathfrak{m} \mathbb{S}ubset \mathcal{O}_E^{S_0}} {\sf p}hi(X/|\mathrm{N}_{E/\mathbb{Q}}(\mathfrak{m})|)f(\mathfrak{m}). \end{align} \mathbb{S}ubsection{Conjectural trace identities} Assume that $E/F$ is a Galois extension. 
For convenience, let \mathbf Egin{align} \Pi_n(F):&=\{\textrm{Isom.~classes of isobaric automorphic representations of }\mathrm{GL}_n(\mathbb{A}_F)\}\\ \Pi_n^0(F):&=\{\textrm{Isom.~classes of cuspidal automorphic representations of }\mathrm{GL}_n(\mathbb{A}_F)\} \nonumber \\ \Pi_n^{\mathrm{prim}}(E/F):&=\{ \textrm{Isom.~classes of $E$-primitive automorphic representations of }\mathrm{GL}_n(\mathbb{A}_F)\}. \nonumber \end{align} The formal definition of an $E$-primitive automorphic representation is postponed until \S \ref{ssec-primitive}. If we knew Langlands functoriality we could characterize them easily as those representations that are cuspidal and not automorphically induced from an automorphic representation of a subfield $E \mathfrak{g}eq K > F$. We note that there is a natural action of $\mathrm{Gal}(E/F)$ on $\Pi_n(E)$ that preserves $\Pi_n^0(E)$; we write $\Pi_n(E)^{\mathrm{Gal}(E/F)}$ for those representations that are isomorphic to their $\mathrm{Gal}(E/F)$-conjugates and $\Pi_n^0(E)^{\mathrm{Gal}(E/F)}=\Pi_n^0(E) \cap \Pi_n(E)^{\mathrm{Gal}(E/F)}$. Let $S$ be a finite set of places of $F$ including all infinite places and let $S'$, $S_0$ be the set of places of $F'$, $E$ lying above $S$. Assume that $h \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{F'}))$, $\Phi \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_F))$ are transfers of each other in the sense of \S \ref{ssec-transfers} below and that they are unramified outside of $S'$ and $S$, that is, invariant under right and left multiplication by $\mathrm{GL}_n(\widehat{\mathcal{O}}_{F'}^{S'})$ and $\mathrm{GL}_n(\widehat{\mathcal{O}}_F^S)$, respectively. For the purposes of the following theorems, if $G$ is a finite group let $G^{\mathrm{ab}}$ be the maximal abelian quotient of $G$. Assume for the remainder of this subsection that $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple nonabelian group. 
Let $E \mathfrak{g}eq F' \mathfrak{g}eq F$ be a subfield such that $\mathrm{Gal}(E/F')$ is solvable and $H^2(\mathrm{Gal}(E/F'),\textf{C}C^{\times})=0$. Moreover let $\tau \in \mathrm{Gal}(E/F)$ be an element such that $$ \mathrm{Gal}(E/F)=\langle \tau,\mathrm{Gal}(E/F')\rangle. $$ \mathbf Egin{rem} In \S \ref{ssec-upce} we discuss these assumptions, the upshot being that they are no real loss of generality. \end{rem} Our first main theorem is the following: \mathbf Egin{thm} \label{main-thm-1} Consider the following hypotheses: \mathbf Egin{itemize} \item $\mathrm{Gal}(E/F')$ is solvable, $H^2(\mathrm{Gal}(E/F'),\textf{C}C^{\times})=0$ and $[E:F']$ is coprime to $n$. \item For all divisors $m|n$ there is no irreducible nontrivial representation $$ \mathrm{Gal}(E/F) \longrightarrow \mathrm{GL}_m(\textf{C}C), $$ \item The case of Langlands functoriality explicated in Conjectures \ref{conj-1} below is true for $E/F$, and \item The case of Langlands functoriality explicated in Conjecture \ref{conj-solv} is true for $E/F'$. \end{itemize} If these hypotheses are valid and $h$ and $\Phi$ are transfers of each other then the limits \mathbf Egin{align} \label{11} \lim_{X \to \infty}|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1}X^{-1} \mathbb{S}um_{{\sf p}i' \textrm{ $E$-primitive}} \mathrm{tr}({\sf p}i')(h^1b_{E/F'}(\Sigma_{{\sf p}hi}^{S_0}(X))) \end{align} and \mathbf Egin{align} \label{12} \lim_{X \to \infty} X^{-1}\mathbb{S}um_{{\sf p}i} \mathrm{tr}({\sf p}i)( \Phi^1b_{E/F}(\Sigma_{{\sf p}hi}^{S_0}(X))) \end{align} converge absolutely and are equal. Here the first sum is over a set of representatives for the equivalence classes of $E$-primitive cuspidal automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ and the second sum is over a set of representatives for the equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_{F})$. 
\end{thm} Here \mathbf Egin{align*} h^1(g):&=\int_{A_{\mathrm{GL}_{nF'}}}h(ag)da'\\ \Phi^1:&=\int_{A_{\mathrm{GL}_{nF}}}\Phi(ag)da \end{align*} where the $da'$ and $da$ are the Haar measures on $A_{\mathrm{GL}_{nF'}}$ and $A_{\mathrm{GL}_{nF}}$, respectively, used in the definition of the transfer. For the definition of $A_{\mathrm{GL}_{nF'}}$ and $A_{\mathrm{GL}_{nF}}$ we refer to \S \ref{HC-subgroup} and for the definition of $b_{E/F}$ and $b_{E/F'}$ we refer to \eqref{bEF}. \mathbf Egin{remarks} \item If we fix a positive integer $n$, then for all but finitely many finite simple groups $G$ with universal perfect central extensions $\widetilde{G}$ any representation $\widetilde{G} \to \mathrm{GL}_{n}$ will be trivial. This follows from \cite[Theorem 1.1]{LS}, for example (the author does not known if it was known earlier). Thus the second hypothesis in Theorem \ref{main-thm-1} holds for almost all groups (if we fix $n$). In particular, if $n=2$, then the only finite simple nonabelian group admitting a projective representation of degree $2$ is $A_5$ by a well-known theorem of Klein. Thus when $n=2$ and $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple group other than $A_5$ the first hypothesis of Theorem \ref{main-thm-1} holds. \item Conjecture \ref{conj-1} and its analogues conjectures \ref{conj-2}, \ref{conj-32} and \ref{conj-33} below each amount to a statement that certain (conjectural) functorial transfers of automorphic representations exist and have certain properties. To motivate these conjectures, we state and prove the properties of $L$-parameters to which they correspond in propositions \ref{prop-bij-EF'}, \ref{prop-A5-EF} and lemmas \ref{lem-A5-EF}, \ref{lem-A5-EF3} respectively. The facts about $L$-parameters we use are not terribly difficult to prove given basic facts from finite group theory, but they are neither obvious nor well-known, and one of the motivations for this paper is to record them. 
\item Conjecture \ref{conj-solv} is a conjecture characterizing the image and fibers of solvable base change. Rajan \cite{Rajan3} has explained how it can be proved (in principle) using a method of Lapid and Rogawski \cite{LR} together with the work of Arthur and Clozel \cite{AC}. It is a theorem when $n=2$ \cite[Theorem 1]{Rajan3} or when $\mathrm{Gal}(E/F)$ is cyclic of prime degree \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}. \end{remarks} The following weak converse of Theorem \ref{main-thm-1} is true: \mathbf Egin{thm} \label{main-thm-1-conv} Assume Conjecture \ref{conj-transf} on transfers of test functions and assume that $F$ is totally complex. If \eqref{11} and \eqref{12} converge absolutely and are equal for all $h$ unramified outside of $S'$ with transfer $\Phi$ unramified outside of $S$, then every cuspidal automorphic representation $\Pi$ of $\mathrm{GL}_n(\mathbb{A}_E)$ satisfying $\Pi^{\mathbb{S}igma} \cong \Pi$ for all $ \mathbb{S}igma \in \mathrm{Gal}(E/F)$ admits a unique weak descent to $\mathrm{GL}_n(\mathbb{A}_F)$. Conversely, if ${\sf p}i$ is a cuspidal automorphic representation of $\mathrm{GL}_n(\mathbb{A}_F)$ such that \mathbf Egin{align} \label{non-zero-Sig} \lim_{X \to \infty}X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma_{{\sf p}hi}^{S_0}(X))) \neq 0 \end{align} then ${\sf p}i$ admits a weak base change to $\mathrm{GL}_n(\mathbb{A}_E)$. The weak base change of a given cuspidal automorphic representation ${\sf p}i$ of $\mathrm{GL}_n(\mathbb{A}_F)$ is unique. The base change is compatible for the infinite places of $F$ and the finite places $v$ of $F$ where $E/F$ and ${\sf p}i_v$ are unramified or where ${\sf p}i_v$ is a twist of the Steinberg representation by a quasi-character. \end{thm} Here, as usual, $\Pi^{\mathbb{S}igma}:=\Pi \circ \mathbb{S}igma$ is the representation acting on the space of $\Pi$ via $$ \Pi^{\mathbb{S}igma}(g):=\Pi(\mathbb{S}igma(g)). 
$$ \mathbf Egin{rem} Conjecture \ref{conj-transf} is roughly the statement that there are ``enough'' $h$ and $\Phi$ that are transfers of each other. If one assumes that $\Pi$ (resp.~${\sf p}i$) and $E/F$ are everywhere unramified, then one can drop the assumption that Conjecture \ref{conj-transf} is valid. \end{rem} We conjecture that \eqref{non-zero-Sig} is always nonzero: \mathbf Egin{conj} \label{conj-nonzero} Let ${\sf p}i$ be a cuspidal automorphic representation of $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_F)$; then \mathbf Egin{align*} \lim_{X \to \infty}X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma_{{\sf p}hi}^{S_0}(X))) \neq 0 \end{align*} \end{conj} This conjecture is true for all ${\sf p}i$ that admit a base change to an isobaric automorphic representation of $\mathrm{GL}_n(\mathbb{A}_E)$ by an application of Rankin-Selberg theory (compare Proposition \ref{Perron-prop} and \eqref{ord-pole}), however, assuming this would be somewhat circular for our purposes. The author is hopeful that Conjecture \ref{conj-nonzero} can be proven independently of the existence of the base change. Indeed, the Chebatarev density theorem is proven despite the fact that the Artin conjecture is still a conjecture. The smoothed sum in Conjecture \ref{conj-nonzero} is analogous to some sums that can be evaluated using the Chebatarev density theorem; in some sense the Chebatarev density theorem is the case where ${\sf p}i$ is the trivial representation of $\mathrm{GL}_1(\mathbb{A}_F)$. Isolating primitive representations is not a trivial task. For example, the main focus of \cite{Venk} is the isolation of cuspidal representations that are not primitive when $n=2$. Therefore it seems desirable to have a trace identity similar to that of Theorem \ref{main-thm-1} that involves sums over all cuspidal representations. 
This is readily accomplished under additional assumptions on $\mathrm{Gal}(E/F)$ using the following lemma: \mathbf Egin{lem} \label{lem-prim}Let $L/K$ be a Galois extension of number fields. Suppose that there is no proper subgroup $H \leq \mathrm{Gal}(L/K)$ such that $[\mathrm{Gal}(L/K):H]|n$. Then $$ \Pi_n^{\mathrm{prim}}(L/K)=\Pi_n^0(K). $${\sf q}ed \end{lem} The proof is immediate from the definition of $L$-primitive automorphic representations in \S \ref{ssec-primitive}. \mathbb{S}ubsection{Icosahedral extensions} We now consider the case of the smallest simple nonabelian group $A_5$ in more detail. We begin by setting notation for specific subsets of $\Pi^0_n(F)$ and $\Pi^0_n(E)$. Let $E/F$ be a Galois extension, and let $$ \rho:W_F' \longrightarrow {}^L\mathrm{GL}_{nF} $$ be an $L$-parameter trivial on $W_E'$; thus $\rho$ can essentially be identified with the Galois representation $\rho_0:\mathrm{Gal}(E/F) \to \mathrm{GL}_{n}(\textf{C}C)$ obtained by composing $\rho$ with the projection ${}^L \mathrm{GL}_{nF} \to \mathrm{GL}_n(\textf{C}C)$. For every quasi-character $\chi:F^{\times} \backslash \mathbb{A}_F^{\times} \cong (W_{F}')^{\mathrm{ab}} \to \textf{C}C^{\times} $ we can then form the $L$-parameter $$ \rho \otimes \chi:W_{F}' \longrightarrow \mathrm{GL}_n(\textf{C}C). $$ We say that a cuspidal automorphic representation ${\sf p}i$ of $\mathrm{GL}_n(\mathbb{A}_F)$ is \textbf{associated} to $\rho \otimes \chi$ if ${\sf p}i_v$ is the representation attached to the $L$-parameter $(\rho \otimes \chi)_v$: $$ {\sf p}i_v={\sf p}i((\rho \otimes \chi)_v) $$ for almost all places $v$ of $F$ (see \eqref{loc-Langl} above). If ${\sf p}i_v={\sf p}i((\rho \otimes \chi)_v)$ for all places $v$, then we write ${\sf p}i={\sf p}i(\rho \otimes \chi)$. In this case we also say that ${\sf p}i$ and $\rho \otimes \chi$ are \textbf{strongly associated}. 
More generally, if ${\sf p}i$ is a cuspidal automorphic representation of $\mathrm{GL}_n(\mathbb{A}_F)$ such that ${\sf p}i$ is associated to $\rho \otimes \chi$ for some $\chi$ we say that ${\sf p}i$ is of \textbf{$\rho$-type}. If ${\sf p}i$ is associated to $\rho \otimes \chi$ for some $\rho$ and $\chi$ we say that ${\sf p}i$ is of \textbf{Galois type}. Assume for the remainder of this section that $\mathrm{Gal}(E/F) \cong \widetilde{A}_5$, the universal perfect central extension of the alternating group $A_5$ on $5$ letters. One can formulate analogues of theorems \ref{main-thm-1} and \ref{main-thm-1-conv} in this setting. For this purpose, fix an embedding $A_4 \hookrightarrow A_5$, and let $\widetilde{A}_4 \leq \widetilde{A}_5$ be the preimage of $A_4$ under the surjection $ \widetilde{A}_5 \to A_5$. Thus $\widetilde{A}_4$ is a nonsplit double cover of $A_4$. \mathbf Egin{thm} \label{main-thm-2} Let $n=2$, let $F' = E^{\widetilde{A}_4}$, and let $\tau \in \mathrm{Gal}(E/F)$ be any element of order $5$. Let $h \in C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_{F'}))$ be unramified outside of $S'$ and have transfer $\Phi \in C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_F))$ unramified outside of $S$. Assume the case of Langlands functoriality explicated in Conjecture \ref{conj-2} for $E/F$. Then the limits \mathbf Egin{align} \label{A21} 2\lim_{X \to \infty}\left(\frac{d^{3}}{ds^{3}}(\widetilde{{\sf p}hi}(s)X^s)|_{s=1}\right)^{-1}|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um_{{\sf p}i'} \mathrm{tr}({\sf p}i')(h^1b_{E/F'}(\Sigma_{{\sf p}hi}^{S_0}(X))) \end{align} and \mathbf Egin{align} \label{A22} \lim_{X \to \infty} \left(\frac{d^{3}}{ds^{3}}(\widetilde{{\sf p}hi}(s)X^s)|_{s=1}\right)^{-1} \mathbb{S}um_{{\sf p}i } \mathrm{tr}({\sf p}i)(\Phi^1b_{E/F}(\Sigma_{{\sf p}hi}^{S_0}(X))) \end{align} converge absolutely and are equal. 
Similarly, again assuming Conjecture \ref{conj-2} below, the limits \mathbf Egin{align} \label{B21} \lim_{X \to \infty}X^{-1}|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um_{\mathbb{S}ubstack{{\sf p}i' \textrm{ not of $\rho$-type for $\rho$ trivial on $W_E'$}}} \mathrm{tr}({\sf p}i')(h^1b_{E/F'}(\Sigma_{{\sf p}hi}^{S_0}(X))) \end{align} and \mathbf Egin{align} \label{B22} \lim_{X \to \infty} X^{-1} \mathbb{S}um_{\mathbb{S}ubstack{{\sf p}i \textrm{ not of $\rho$-type for $\rho$ trivial on $W_E'$}}} \mathrm{tr}({\sf p}i)(\Phi^1b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))) \end{align} converge absolutely and are equal. In both cases the first sum is over a set of representatives for the equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{2F'}} \backslash \mathrm{GL}_2(\mathbb{A}_{F'})$ and the second sum is over a set of representatives for the equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{2F}} \backslash \mathrm{GL}_2(\mathbb{A}_{F})$. \end{thm} Again, a converse statement is true: \mathbf Egin{thm} \label{main-thm-2-conv} Assume Conjecture \ref{conj-transf} on transfers of test functions, assume that $F$ is totally complex, and assume that the limits \eqref{B21} and \eqref{B22} converge absolutely for all test functions $h$ unramified outside of $S'$ with transfer $\Phi$ unramified outside of $S$. Under these assumptions every cuspidal automorphic representation $\Pi$ of $\mathrm{GL}_2(\mathbb{A}_E)$ that is isomorphic to its $\mathrm{Gal}(E/F)$-conjugates is a weak base change of a unique cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_F)$. Conversely, if ${\sf p}i$ is a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_F)$ such that \mathbf Egin{align*} \lim_{X \to \infty}X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma_{{\sf p}hi}^{S_0}(X))) \neq 0 \end{align*} then ${\sf p}i$ admits a unique weak base change to $\mathrm{GL}_2(\mathbb{A}_F)$. 
If $\pi$ is a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_F)$ that is not of $\rho$-type for $\rho$ trivial on $W_{F}'$, then $\pi_E$ is cuspidal. The base change is compatible at the infinite places of $F$ and the finite places $v$ of $F$ where $E/F$ and $\pi_v$ are unramified or $\pi_v$ is a twist of the Steinberg representation by a quasi-character. \end{thm} \subsection{On the Artin conjecture for icosahedral representations} As in the last subsection we assume that $\mathrm{Gal}(E/F) \cong \widetilde{A}_5$. Fix an embedding $\mathbb{Z}/2 \times \mathbb{Z}/2 \hookrightarrow A_5$ and let $Q \hookrightarrow \widetilde{A}_5$ be the inverse image of $\mathbb{Z}/2 \times \mathbb{Z}/2$ under the quotient $\widetilde{A}_5 \to A_5$. For the purposes of the following theorem, let $S_1$ be a subset of the places of $F$ disjoint from $S$ and let $S'_1$ (resp.~$S_{10}$) be the set of places of $F'$ (resp.~$E$) above $S_1$. Moreover let $h^{S'_1} \in C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_{F'}^{S'_1}))$ and $\Phi^{S_1} \in C_c^{\infty}(\mathrm{GL}_2(\mathbb{A}_F^{S_1}))$ be transfers of each other unramified outside of $S'$ and $S$, respectively, and let $h_{S_1'} \in C_c^{\infty}(\mathrm{GL}_2(F'_{S'_1})//\mathrm{GL}_2(\mathcal{O}_{F'_{S'_1}}))$. \begin{thm} \label{main-thm-3} Consider the following hypotheses: \begin{itemize} \item One has $n=2$ and $F'=E^Q$, and the case of Langlands functoriality explicated in Conjecture \ref{conj-32} is true for $E/F$.
\item One has $n=3$, $F'=E^{\widetilde{A}_4}$, the case of Langlands functoriality explicated in Conjecture \ref{conj-33} is true for $E/F$, and Conjecture \ref{conj-solv} is true for $E/F'$. \end{itemize} Under these assumptions the limits \begin{align} \label{31} 2\lim_{X \to \infty}\left(\frac{d^{n^2-1}}{ds^{n^2-1}}(\widetilde{\phi}(s)X^s)\big|_{s=1}\right)^{-1} \sum_{\pi' } \mathrm{tr}(\pi')((h^{S'_1})^{1}h_{S'_1}b_{E/F'}(\Sigma_{\phi}^{S_0}(X))) \end{align} and \begin{align} \label{32} \lim_{X \to \infty} \left(\frac{d^{n^2-1}}{ds^{n^2-1}}(\widetilde{\phi}(s)X^s)\big|_{s=1}\right)^{-1} \sum_{\pi } \mathrm{tr}(\pi)((\Phi^{S_1})^1b_{F'/F}(h_{S'_1})b_{E/F}(\Sigma_{\phi}^{S_0}(X))) \end{align} converge absolutely and are equal. Here the first sum is over equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ and the second sum is over equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_{F})$. \end{thm} \begin{remarks} \item One can always find $\tau \in \mathrm{Gal}(E/F)$ such that $\langle \tau, \mathrm{Gal}(E/F') \rangle=\mathrm{Gal}(E/F)$ (this follows from Theorem \ref{thm-GK}, for example, or by an elementary argument). \item The fact that this theorem involves more general test functions than those in theorems \ref{main-thm-1} and \ref{main-thm-2} is important for applications to the Artin conjecture (see Theorem \ref{main-thm-3-conv}). \end{remarks} Let $\rho_2:W_F' \to {}^L\mathrm{GL}_{2F}$ be an irreducible $L$-parameter trivial on $W_E'$ (i.e. an irreducible Galois representation $\rho_2:\mathrm{Gal}(E/F) \to \mathrm{GL}_2(\mathbb{C})$).
Its character takes values in $\mathbb{Q}(\sqrt{5})$ and if $\langle \xi \rangle =\mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$ then $\xi \circ \rho_2$ is another irreducible $L$-parameter that is not equivalent to the first (see \S \ref{appendix}). A partial converse of Theorem \ref{main-thm-3} above is the following: \begin{thm} \label{main-thm-3-conv} Assume Conjecture \ref{conj-transf} and that \eqref{31} and \eqref{32} converge and are equal for all test functions as in Theorem \ref{main-thm-3} for $n \in \{2,3\}$. Assume moreover that $F$ is totally complex. Then there is a pair of nonisomorphic cuspidal automorphic representations $\pi_1,\pi_2$ of $\mathrm{GL}_2(\mathbb{A}_F)$ such that $$ \pi_{1} \boxplus \pi_2 \cong \pi((\rho_{2} \oplus \xi \circ \rho_2)). $$ \end{thm} Here the $\boxplus$ denotes the isobaric sum \cite{LanglEinM} \cite{JSII}. \begin{rem} It should be true that, upon reindexing if necessary, $\pi_{1} \cong \pi(\rho_{2})$. However, the author does not know how to prove this at the moment. \end{rem} As a corollary of this theorem and work of Kim and Shahidi, we have the following: \begin{cor} \label{cor-artin-cases} Under the hypotheses of Theorem \ref{main-thm-3-conv}, if $\rho:\mathrm{Gal}(E/F)\to \mathrm{GL}_n(\mathbb{C})$ is an irreducible Galois representation of degree strictly greater than $3$, then there is an automorphic representation $\pi$ of $\mathrm{GL}_n(\mathbb{A}_F)$ such that $\pi=\pi(\rho)$.
\end{cor} The point of the theorems above is that the sums \eqref{11}, \eqref{12} and their analogues in the other theorems can be rewritten in terms of orbital integrals using either the trace formula (compare \cite{LanglBeyond}, \cite{FLN}) or the relative trace formula (specifically the Bruggeman-Kuznetsov formula, compare \cite{Sarnak}, \cite{Venk})\footnote{We note that when applying the relative trace formula the distributions $h \mapsto \mathrm{tr}(\pi')(h^1)$ will be replaced by Bessel distributions defined using Whittaker functionals. Thus the definition of $\Sigma_{\phi}^{S_0}(X)$ has to be modified to be useful in a relative trace formula approach.}. One then can hope to compare these limits of orbital integrals and prove nonsolvable base change and descent. The author is actively working on this comparison. He hopes that the idea of comparing limiting forms of trace formulae that underlies theorems \ref{main-thm-1-conv}, \ref{main-thm-2-conv}, \ref{main-thm-3-conv} will be useful to others working ``beyond endoscopy.'' To end the introduction we outline the sections of this paper. Section \ref{sec-notat} states notation and conventions; it can be safely skipped and later referred to if the reader encounters unfamiliar notation. In \S \ref{sec-tf}, we review unramified base change, introduce a notion of transfer for test functions, and prove the existence of the transfer in certain cases. Section \ref{sec-limit-cusp} introduces the smoothed test functions used in the statement and proof of our main theorems and develops their basic properties using Rankin-Selberg theory. Perhaps the most important result is that the trace of these test functions over the cuspidal spectrum is well-defined and picks out the representations of interest (see Corollary \ref{cor-aut-trace}).
The behavior of $L$-parameters under restriction along an extension of number fields is considered in \S \ref{sec-rest-desc}; this is used to motivate the conjectures appearing in our main theorems above, which are also stated precisely in \S \ref{sec-rest-desc}. Section \ref{sec-proofs} contains the proofs of the theorems stated above and the proof of Corollary \ref{cor-artin-cases}. Finally, in \S \ref{sec-groups} we explain why the group-theoretic assumptions made in theorems \ref{main-thm-1} and \ref{main-thm-2} are essentially no loss of generality. \section{General notation} \label{sec-notat} \subsection{Ad\`eles} The ad\`eles of a number field $F$ will be denoted by $\mathbb{A}_F$. We write $\widehat{\mathcal{O}}_{F}:=\prod_{v \textrm{ finite}}\mathcal{O}_{F_v}$. For a set of places $S$ of $F$ we write $\mathbb{A}_{F,S}:=\mathbb{A}_F \cap \prod_{v \in S}F_v$ and $\mathbb{A}^S_F:=\mathbb{A}_F \cap \prod_{v \not \in S}F_v$. If $S$ is finite we sometimes write $F_S:=\mathbb{A}_{F,S}$. The set of infinite places of $F$ will be denoted by $\infty$. Thus $\mathbb{A}_{\mathbb{Q},\infty}=\mathbb{R}$ and $\mathbb{A}_{\mathbb{Q}}^{\infty}:=\prod_{\substack{p \in \mathbb{Z}_{>0} \\ p \textrm{ prime}}}\mathbb{Q}_p$. For an affine $F$-variety $G$ and a subset $W \leq G(\mathbb{A}_F)$ the notation $W_{S}$ (resp. $W^S$) will denote the projection of $W$ to $G(\mathbb{A}_{F,S})$ (resp. $G(\mathbb{A}^S_F)$). If $W$ is replaced by an element of $G(\mathbb{A}_F)$, or if $G$ is an algebraic group and $W$ is replaced by a character of $G(\mathbb{A}_F)$ or a Haar measure on $G$, the same notation will be in force; e.g. if $\gamma \in G(\mathbb{A}_F)$ then $\gamma_v$ is the projection of $\gamma$ to $G(F_v)$. If $w,v$ are places of $E,F$ with $w|v$ we let $e(E_w/F_v)$ (resp.~$f(E_w/F_v)$) denote the ramification degree (resp.~inertial degree) of $E_w/F_v$.
\subsection{Restriction of scalars} Let $A \to B$ be a morphism of $\mathbb{Z}$-algebras and let $X \to \mathrm{Spec}(B)$ be a $\mathrm{Spec}(B)$-scheme. We denote by $$ \mathrm{R}_{B/A}(X) \to \mathrm{Spec}(A) $$ the Weil restriction of scalars of $X$. We will only use this functor in cases where the representability of $\mathrm{R}_{B/A}(X)$ by a scheme is well-known. If $X \to \mathrm{Spec}(A)$, we often abbreviate $$ \mathrm{R}_{B/A}(X):=\mathrm{R}_{B/A}(X_B). $$ \subsection{Characters} If $G$ is a group we let $G^{\wedge}$ be the group of abelian characters of $G$. Characters are always assumed to be unitary. A general homomorphism $G \to \mathbb{C}^{\times}$ will be called a quasi-character. If $E/F$ is a Galois extension of number fields, we often identify $$ \mathrm{Gal}(E/F)^{\wedge}=F^{\times} \backslash \mathbb{A}_F^{\times}/\mathrm{N}_{E/F}(\mathbb{A}_E^{\times}) $$ using class field theory. \subsection{Harish-Chandra subgroups} \label{HC-subgroup} Let $G$ be a connected reductive group over a number field $F$. We write $A_G \leq Z_G(F \otimes_{\mathbb{Q}} \mathbb{R})$ for the connected component of the real points of the largest $\mathbb{Q}$-split torus in the center of $\mathrm{R}_{F/\mathbb{Q}}G$. Here when we say ``connected component'' we mean in the real topology. Write $X^*$ for the group of $F$-rational characters of $G$ and set $\mathfrak{a}_G:=\mathrm{Hom}(X^*,\mathbb{R})$. There is a morphism \begin{align*} HC_G:G(\mathbb{A}_F) \longrightarrow \mathfrak{a}_G \end{align*} defined by \begin{align} \langle HC_G(x),\chi\rangle =|\log(x^{\chi})| \end{align} for $x \in G(\mathbb{A}_F)$ and $\chi \in X^*$. We write \begin{align} G(\mathbb{A}_F)^1:=\ker(HC_G) \end{align} and refer to it as the Harish-Chandra subgroup of $G(\mathbb{A}_F)$. Note that $G(F) \leq G(\mathbb{A}_F)^1$ and $G(\mathbb{A}_F)$ is the direct product of $A_G$ and $G(\mathbb{A}_F)^1$.
We say that $\pi$ is an \textbf{automorphic representation of $A_{GF} \backslash G(\mathbb{A}_F)$} if it is an automorphic representation of $G(\mathbb{A}_F)$ trivial on $A_{GF}$ (and therefore unitary). \subsection{Local fields} \label{ssec-loc-fields} A uniformizer for a local field equipped with a discrete valuation will be denoted by $\varpi$. If $F$ is a global field and $v$ is a non-archimedian place of $F$ then we will write $\varpi_v$ for a choice of uniformizer of $F_v$. The number of elements in the residue field of $F_v$ will be denoted by $q_v$, and we write $$ | \cdot|_v:F_v \longrightarrow \mathbb{R}_{\geq 0} $$ for the $v$-adic norm, normalized so $|\varpi_v|_v=q_v^{-1}$. For an infinite place $v$, we normalize $$ |a|_v:=\begin{cases} |a| &\textrm{(the usual absolute value) if }v \textrm{ is real}\\ a\overline{a} & \textrm{(the square of the usual absolute value) if }v \textrm{ is complex.} \end{cases} $$ \subsection{Field extensions} \label{ssec-fe} In this paper we will often deal with a tower of field extensions $E \geq F' \geq F$. When in this setting we have attempted to adhere to the following notational scheme: \begin{center} \begin{tabular}{ l | c |c |c | c} & Place & Set of places & Test function & Representation \\ \hline $E$ & $w$ & $S_0$ & $f$ & $\Pi$\\ $F'$ & $v'$ & $S'$ & $h$ & $\pi'$\\ $F$ & $v$ & $S$ & $\Phi$ & $\pi$ \end{tabular} \end{center} Thus, e.g. $w$ will be a place of $E$ above the place $v$ of $F$ and $h$ will denote an element of $C_c^{\infty}(\mathrm{GL}_2(F_{v'}'))$ for some place $v'$ of $F'$. \section{Test functions} \label{sec-tf} In this section we recall basic results on test functions that are used later in the paper. In \S \ref{ssec-uha} we set notation for unramified Hecke algebras and the Satake isomorphism.
In \S \ref{ssec-bcformal} we recall the usual base change map on unramified Hecke algebras and in \S \ref{ssec-transfers} we define a notion of transfer. \mathbb{S}ubsection{Unramified Hecke algebras} \label{ssec-uha} For each positive integer $n$ let $T_n \leq \mathrm{GL}_n$ be the standard diagonal maximal torus and let \mathbf Egin{align} X_*(T_n) \cong \mathbb{Z}^n=\{\lambda:=(\lambda_1, \cdots, \lambda_n)\} \end{align} be the group of rational cocharacters. Let $F_v$ be the completion of a global field $F$ at some non-archimedian place $v$ and let $\varpi_v$ be a uniformizer of $F_v$. We write $$ \mathbf{1}_{\lambda}:=\mathrm{ch}_{\mathrm{GL}_n(\mathcal{O}_{F_v}) \varpi_v^{\lambda} \mathrm{GL}_n(\mathcal{O}_{F_v})} \in C_c^{\infty}(\mathrm{GL}_n(F_v)//\mathrm{GL}_n(\mathcal{O}_{F_v})) $$ for the characteristic function of the double coset $$ \mathrm{GL}_{n}(\mathcal{O}_{F_v})\varpi_v^{\lambda}\mathrm{GL}_n(\mathcal{O}_{F_v}) \in \mathrm{GL}_n(\mathcal{O}_{F_v}) \backslash \mathrm{GL}_n(F_v) /\mathrm{GL}_n(\mathcal{O}_{F_v}). $$ Let $\widehat{T}_n \leq \widehat{\mathrm{GL}}_n$ denote the dual torus in the (complex) connected dual group. We let $$ \mathcal{S}:C_c^{\infty}(\mathrm{GL}_n(F_v)//\mathrm{GL}_n(\mathcal{O}_{F_v})) \longrightarrow \textf{C}C[X^*(\widehat{T}_n)]^{W(T_n,\mathrm{GL}_n)} =\textf{C}C[t_1^{{\sf p}m 1}, \dots,t_n^{{\sf p}m 1}]^{S_n} $$ denote the Satake isomorphism, normalized in the usual manner (see, e.g. \cite[\S 4.1]{Laumon}). Here $W(T_n,\mathrm{GL}_n)$ is the Weyl group of $T_n$ in $\mathrm{GL}_n$; it is well-known that $W(T_n,\mathrm{GL}_n) \cong S_n$, the symmetric group on $n$ letters. Let $E/F$ be a field extension and let $v$ be a finite place of $F$. 
For the purpose of setting notation, we recall that the Satake isomorphism for $\mathrm{R}_{E/F}\mathrm{GL}_n(F_v)$ induces an isomorphism $$ \mathcal{S}:C_c^{\infty}(\mathrm{R}_{E/F}\mathrm{GL}_n(F_v)//\mathrm{R}_{\mathcal{O}_{E}/\mathcal{O}_F}\mathrm{GL}_n(\mathcal{O}_{F_v})) \tilde{\longrightarrow} \otimes_{w|v} \textf{C}C[t_{1w}^{{\sf p}m 1},\dots,t_{nw}^{{\sf p}m 1}]^{S_n}. $$ Here the product is over the places $w$ of $E$ dividing $v$ and the $w$ factor of $$ C_c^{\infty}(\mathrm{R}_{E/F}\mathrm{GL}_n(F_v)//\mathrm{R}_{\mathcal{O}_{E}/\mathcal{O}_F}\mathrm{GL}_n(\mathcal{O}_{F_v})) \cong {\sf p}rod_{w|v}C_c^{\infty}(\mathrm{GL}_n(E_{w})//\mathrm{GL}_n(\mathcal{O}_{E_w})), $$ is sent to the $w$ factor of $\otimes_{w|v} \textf{C}C[t_{1w}^{{\sf p}m 1},\dots,t_{nw}^{{\sf p}m 1}]^{S_n}$. For a place $w|v$, write $$ \mathbf{1}_{\lambda w} \in C_c^{\infty}(\mathrm{R}_{E/F}\mathrm{GL}_n(F_v)//\mathrm{R}_{\mathcal{O}_{E}/\mathcal{O}_F}\mathrm{GL}_n(\mathcal{O}_{F_v})) $$ for the product of $\mathbf{1}_{\lambda} \in C_c^{\infty}(\mathrm{GL}_n(E_{w})//\mathrm{GL}_n(\mathcal{O}_{E_{w}}))$ with $$ {\sf p}rod_{\mathbb{S}ubstack{w'\neq w\\ w' |v}}\mathbf{1}_{(0,\dots,0)w'} \in {\sf p}rod_{\mathbb{S}ubstack{w' \neq w\\w' |v}}C_c^{\infty}\left(\mathrm{GL}_n(E_{w'})//\mathrm{GL}_n(\mathcal{O}_{E_{w'}})\right). $$ Thus $\mathcal{S}(\mathbf{1}_{\lambda w})=p(t_{1w},\dots,t_{nw})$ for some polynomial $p \in \textf{C}C[x_1^{{\sf p}m 1},\dots,x_n^{{\sf p}m 1}]^{S_n}$. \mathbb{S}ubsection{Base change for unramified Hecke algebras} \label{ssec-bcformal} Let $E/F$ be an extension of global fields. 
For any subfield $E \mathfrak{g}eq k \mathfrak{g}eq F$ we have a base change map $$ b_{E/k}:{}^L\mathrm{R}_{k/F}\mathrm{GL}_n \longrightarrow {}^L\mathrm{R}_{E/F}\mathrm{GL}_n; $$ it is given by the diagonal embedding on connected components: $$ ({}^L\mathrm{R}_{k/F}\mathrm{GL}_n)^{\circ} \cong \mathrm{GL}_n(\textf{C}C)^{[k:F]} \longrightarrow ({}^L\mathrm{R}_{E/F}\mathrm{GL}_n)^{\circ} \cong \mathrm{GL}_n(\textf{C}C)^{[E:F]} $$ and the identity on the Weil-Deligne group. Suppose that $E/F$ is unramified at a finite place $v$ of $F$. We recall that via the Satake isomorphism the base change map $b_{E/k}$ defines an algebra homomorphism \mathbf Egin{align} \label{bEF} b_{E/k}:C_c^{\infty}(\mathrm{R}_{E/F}\mathrm{GL}_n(F_v)//\mathrm{R}_{\mathcal{O}_E/\mathcal{O}_F}\mathrm{GL}_n(\mathcal{O}_{F_v})) \longrightarrow C_c^{\infty}(\mathrm{R}_{k/F}\mathrm{GL}_n(F_v)//\mathrm{R}_{\mathcal{O}_k/\mathcal{O}_F}\mathrm{GL}_n(\mathcal{O}_{F_v})). \end{align} In terms of Satake transforms, this map is given explicitly by $$ b_{E/k}\left({\sf p}rod_{w|v}\mathcal{S}(f_w)(t_{1w},\dots,t_{nw})\right)={\sf p}rod_{v'|v}{\sf p}rod_{w|v'}\mathcal{S}(f_w)(t_{1v'}^{i_{v'}},\dots,t_{nv'}^{i_{v'}}) $$ where the product over $v'|v$ is over the places of $k$ dividing $v$ and $i_{v'}$ is the inertial degree of $v'$ in the extension $E/k$ \cite[Chapter 1, \S 4.2]{AC}. It satisfies the obvious compatibility condition $$ b_{E/F}=b_{k/F} \circ b_{E/k}. $$ Let ${\sf p}i_v$ be an irreducible admissible unramified representation of $\mathrm{GL}_n(F_v)$ and let $w$ be a place of $E$ above $v$. There exists an irreducible admissible representation $$ b_{E/F}({\sf p}i_v)={\sf p}i_{vEw} $$ of $\mathrm{GL}_n(E_{w})$, unique up to equivalence of representations, such that \mathbf Egin{align} \label{BC-map} \mathrm{tr}(b_{E/F}({\sf p}i_v)(f))=\mathrm{tr}({\sf p}i_v(b_{E/F}(f))) \end{align} for all $f \in C_c^{\infty}(\mathrm{GL}_n(E_w)//\mathrm{GL}_n(\mathcal{O}_{E_w}))$. 
It is called the \textbf{base change} of ${\sf p}i_v$, and is an unramified irreducible admissible representation of $\mathrm{GL}_n(E_w)$. Explicitly, if ${\sf p}i_v=I(\chi)$ is the irreducible unramified constituent of the unitary induction of an unramified character $\chi:T_n(F_v) \longrightarrow \textf{C}C^{\times}$, then ${\sf p}i_{vEw} \cong I(\chi \circ \mathrm{N}_{E_w/F_v})$, where $$ \mathrm{N}_{E_w/F_v}:T_n(E_w) \longrightarrow T_n(F_v) $$ is the norm map induced by the norm map $\mathrm{N}_{E_w/F_v}:E_w \to F_v$. Here $I(\chi \circ \mathrm{N}_{E_w/F_v})$ is the irreducible unramified constituent of the unitary induction of $\chi \circ \mathrm{N}_{E_w/F_v}$. The fact that \mathbf Egin{align} \label{BC-map2} \mathrm{tr}(b_{E/F}(I(\chi \circ \mathrm{N}_{E_w/F_v})(f))=\mathrm{tr}(I(\chi)(b_{E/F}(f))) \end{align} is readily verified using the well-known formulas for the trace of an (unramified) Hecke operator in $C_c^{\infty}(\mathrm{GL}_n(F_v)//\mathrm{GL}_n(\mathcal{O}_{F_v}))$ acting on a spherical representation in terms of Satake parameters (see \cite[Theorem 7.5.6]{Laumon}). \mathbb{S}ubsection{Transfers} \label{ssec-transfers} Let $E/F$ be a field extension. As indicated at the beginning of this paper, the local Langlands correspondence for $\mathrm{GL}_n$ implies that the local base change transfer exists. Thus any irreducible admissible representation ${\sf p}i_v$ of $\mathrm{GL}_n(F_v)$ admits a base change $\Pi_w={\sf p}i_{vEw}$ to $\mathrm{GL}_n(E_w)$ for any place $w|v$; this representation is uniquely determined up to isomorphism by the requirement that $$ \varphi({\sf p}i_v)|_{W_{E_w}'} \cong \varphi(\Pi_w) $$ where $\varphi(\cdot)$ is the $L$-parameter of $(\cdot)$. \mathbf Egin{rem} There is a representation-theoretic definition, due to Shintani, of a base change of an admissible irreducible representation of $\mathrm{GL}_n(F_v)$ along a cyclic extension \cite[Chapter 1, Definition 6.1]{AC}. 
One can iterate this definition along cyclic subextensions of a general solvable extension of local fields to arrive at a representation-theoretic definition of the base change of an irreducible admissible representation. For unramified representations of non-archimedian local fields, one uses descent \cite[Lemma 7.5.7]{Laumon} to verify that the two definitions are compatible. Similarly, it is easy to see that they are compatible for abelian twists of the Steinberg representation using compatibility of the local Langlands correspondence with twists, the fact that the Steinberg representation has a very simple $L$-parameter (namely the representation of \cite[(4.1.4)]{Tate}) and \cite[Chapter 1, Lemma 6.12]{AC}. However, the author does not know of any reference for their compatibility in general. It probably follows from the compatibility of the local Langlands correspondence with $L$-functions and $\varepsilon$-factors of pairs together with the local results of \cite[Chapter 1, \S 6]{AC}, but we have not attempted to check this. \end{rem} Now assume that $F'$ is a subfield with $E \geq F' \geq F$. Let $S$ be a set of places of $F$ and let $S'$ (resp.~$S_0$) be the set of places of $F'$ (resp.~$E$) lying above places in $S$.
\begin{defn} \label{defn-transf} Two functions $h_{S'} \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{F'S'}))$ and $\Phi_S \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{FS}))$ are said to be \textbf{transfers} of each other if there is a function $f_{S_0} \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{ES_0}))$ such that for all irreducible generic unitary representations $\pi_{S}$ of $\mathrm{GL}_n(F_S)$ one has $$ \prod_{w \in S_0}\mathrm{tr}(\pi_{vEw})(f_w)=\prod_{v' \in S'} \mathrm{tr}(\pi_{vF'v'})(h_{v'})=\prod_{v \in S}\mathrm{tr}(\pi_{v})(\Phi_v). $$ \end{defn} We immediately state one conjecture suggested by this definition: \begin{conj} \label{conj-transf} Let $S$ be a finite set of places of $F$ containing the infinite places. If $\Pi^{\sigma} \cong \Pi$ for all $\sigma \in \mathrm{Gal}(E/F)$, then there exists an $f_{S_0} \in C_c^{\infty}(\mathrm{GL}_n(E_{S_0}))$ and $h_{S'} \in C_c^{\infty}(\mathrm{GL}_n(F'_{S'}))$ that admits a transfer $\Phi_{S} \in C_c^{\infty}(\mathrm{GL}_n(F_S))$ of positive type such that the identity of Definition \ref{defn-transf} holds for all irreducible generic unitary representations $\pi_S$ of $\mathrm{GL}_n(F_S)$ and additionally $$ \mathrm{tr}(\Pi_{S_0})(f_{S_0}) \neq 0. $$ \end{conj} Here we say that $\Phi_S$ is of positive type if $\mathrm{tr}(\pi_S)(\Phi_S) \geq 0$ for all irreducible generic unitary admissible representations $\pi_S$ of $\mathrm{GL}_n(F_S)$. \begin{rem} Understanding which $h_{S'}$ and $\Phi_S$ are transfers of each other seems subtle. One na\"ive guess is that those $\Phi_S$ that are supported on ``norms'' of elements of $\mathrm{GL}_n(E_{S_0})$ should be transfers. However, there does not appear to be a good notion of the norm map from conjugacy classes in $\mathrm{GL}_n(E_{S_0})$ to conjugacy classes in $\mathrm{GL}_n(F_S)$ and this guess makes little sense without a notion of norm.
One also hopes to be able to make use of the theory of cyclic base change, but in that setting one is interested in twisted traces, not traces. For these reasons, the author is somewhat hesitant in stating Conjecture \ref{conj-transf}. The author wouldn't be surprised if the conjectured equality holds only up some transfer factor. \end{rem} In one case the existence of transfers is clear: \mathbf Egin{lem} \label{lem-unr-transf} Assume that $S$ is a set of finite places and that $E/F$ is unramified at all places in $S$. Let $$ f_{S_0} \in C_c^{\infty}(\mathrm{GL}_n(E_{S_0})//\mathrm{GL}_n(\mathcal{O}_{E_{S_0}})). $$ Then $b_{E/F'}(f_{S_0})$ and $b_{E/F}(f_{S_0})$ are transfers of each other. \end{lem} \mathbf Egin{proof} If ${\sf p}i_S$ is unramified then the identity in the definition of transfers follows from the discussion in \S \ref{ssec-bcformal}. If for some $v \in S$ the representation ${\sf p}i_v$ is ramified then ${\sf p}i_{vE}$ and ${\sf p}i_{vF'}$ are both ramified since the extension $E/F$ is unramified and the local Langlands correspondence sends (un)ramified representations to (un)ramified representations. Thus in this case for $f_{S_0}$ as in the lemma one has $$ {\sf p}rod_{w \in S_0}\mathrm{tr}({\sf p}i_{vEw})(f_w)={\sf p}rod_{v' \in S'} \mathrm{tr}({\sf p}i_{vF'v'})(h_{v'})={\sf p}rod_{v \in S}\mathrm{tr}({\sf p}i_{v})(\Phi_v)=0. $$ \end{proof} Another case where transfers exist is the following: \mathbf Egin{lem} \label{lem-archi-transf} Let $v$ be a complex place of $F$ and let $S=\{v\}$. Moreover let $f_{S_0}=\otimes_{w|v}f_w \in C_c^{\infty}(\mathrm{GL}_n(E_{S_0}))$. Then there exist functions $h_{S'} \in C_c^{\infty}(\mathrm{GL}_n(F'_{S'}))$ and $\Phi_v \in C_c^{\infty}(\mathrm{GL}_n(F_v))$ that are transfers of each other such that the identity of Definition \ref{defn-transf} holds. \end{lem} \mathbf Egin{proof} We use descent to prove the lemma. Let $B \leq \mathrm{GL}_n$ be the Borel subgroup of lower triangular matrices. 
We let $B=MN$ be its Levi decomposition; thus $M \leq \mathrm{GL}_n$ is the maximal torus of diagonal matrices. Let ${\sf p}i_v$ be an irreducible unitary generic representation of $\mathrm{GL}_n(F_v)$. It is necessarily isomorphic to the unitary induction $\mathrm{Ind}_{B(F_v)}^{\mathrm{GL}_n(F_v)}(\chi)$ of a quasi-character $\chi:M(F_v) \to \textf{C}C^{\times}$ (where $\chi$ is extended to a representation of $MN(F_v)$ by letting $\chi$ act trivially on $N(F_v)$). Here we have used our assumption that ${\sf p}i_v$ is generic to conclude that the corresponding induced representation is irreducible (see the comments following \cite[Lemma 2.5]{JacquetRS} or \cite{Vogan}). Let $\Phi_v^{(B(F_v))} \in C_c^{\infty}(M(F_v))$ denote the constant term of $\Phi_v$ along $B(F_v)$. Then one has $$ \mathrm{tr}({\sf p}i_v)(\Phi_v)=\mathrm{tr}(\chi)(\Phi_v^{(B(F_v))}) $$ (see \cite[(10.23)]{Knapp}). Now let \mathbf Egin{align*} N_{E/F'}:M(E \otimes_FF_v) &\longrightarrow M(F' \otimes_FF_v)\\ \mathrm{N}_{E/F} M(E \otimes_F F_v) &\longrightarrow M(F_v) \end{align*} be the norm maps. Let $f_{S_0}=\otimes_vf_v \in C_c^{\infty}(\mathrm{GL}_n(E_{S_0}))$. From the comments above, we see that if $h_{S'} =\otimes_{v'|v}h_v\in C_c^{\infty}(\mathrm{GL}_n(F'_{S'}))$ and $\Phi_v \in C_c^{\infty}(\mathrm{GL}_n(F_v))$ are any functions such that \mathbf Egin{align*} \mathrm{N}_{E/F'*}(f^{(B(E_{S_0}))})&=h_{S'}^{(B(F'_{S'}))}\\ \mathrm{N}_{E/F*}(f^{(B(E_{S_0}))})&=\Phi_v^{(B(F_v))} \end{align*} then $h_{S'}$ and $\Phi_v$ are transfers of each other and the identity of Definition \ref{defn-transf} holds. We note that $$ \mathrm{N}_{E/F'*}(f^{(B(E_{S_0}))})_{v'} \in C_c^{\infty}(M(F'_{v'}))^{W(M,\mathrm{GL}_n)} $$ for each $v'|v$ and $$ \mathrm{N}_{E/F*}(f^{(B(E_{S_0}))})_v \in C_c^{\infty}(M(F_v))^{W(M,\mathrm{GL}_n)}. 
$$ Thus to complete the proof it suffices to observe that the map \mathbf Egin{align*} C_c^{\infty}(\mathrm{GL}_n(F_v)) &\longrightarrow C_c^{\infty}(M(F_v))^{W(M,\mathrm{GL}_n)}\\ \Phi_v &\longmapsto \Phi_v^{(B(F_v))} \nonumber \end{align*} is surjective by definition of the constant term \cite[(10.22)]{Knapp} and the Iwasawa decomposition of $\mathrm{GL}_n(F_v)$. \end{proof} Finally we consider transfers of Euler-Poincar\'e functions. Let $v$ be a finite place of $F$ and let $$ f_{EP} \in C_c^{\infty}(\mathrm{SL}_n(F_v)) {\sf q}uad \textrm{ and } {\sf q}uad f'_{EP} \in C_c^{\infty}(\mathrm{SL}_n(F' \otimes_FF_v)) $$ be the Euler-Poincar\'e functions on $\mathrm{SL}_n(F_v)$ and $\mathrm{SL}_n(F' \otimes F_v)$, respectively, defined with respect to the Haar measures on $\mathrm{SL}_n(F_v)$ and $\mathrm{SL}_n(F' \otimes F_v)$ giving $\mathrm{SL}_n(\mathcal{O}_{F_v})$ and $\mathrm{SL}_n(\mathcal{O}_{F'} \otimes_{\mathcal{O}_{F}} \mathcal{O}_{F_v})$ volume one, respectively \cite[\S 2]{KottTama}. Moreover fix functions $\nu \in C_c^{\infty}(\mathrm{GL}_n(F_v)/\mathrm{SL}_n(F_v))$ and $\nu' \in C_c^{\infty}(\mathrm{GL}_n(F' \otimes_FF_v)/\mathrm{SL}_n(F' \otimes F_v))$. We thereby obtain functions $$ \nu f_{EP} \in C_c^{\infty}(\mathrm{GL}_n(F_v)) {\sf q}uad \textrm{ and } {\sf q}uad \nu'f'_{EP} \in C_c^{\infty}(\mathrm{GL}_n(F' \otimes_F F_v)) $$ We refer to any function in $C_c^{\infty}(\mathrm{GL}_n(F_v))$ (resp.~$C_c^{\infty}(\mathrm{GL}_n(F' \otimes_F F_v))$) of the form $\nu f_{EP}$ (resp.~$\nu'f'_{EP}$) as a \textbf{truncated Euler-Poincar\'e function}. For the purposes of stating a lemma on transfers, fix measures on $F_v^{\times}$, $(F' \otimes_F F_{v})^{\times}$, and $(E \otimes_F F_v)^{\times}$ giving their maximal compact open subgroups (e.g. $\mathcal{O}_{F_v}^{\times}$) volume one. These measures induce Haar measures on $F_v^{\times} \cong \mathrm{GL}_n(F_v)/\mathrm{SL}_n(F_v)$, etc. 
Assume for the purposes of the following lemma that $\nu$ and $\nu'$ are chosen so that there is a function $\nu_0 \in C_c^{\infty}(\mathrm{GL}_n(E \otimes_F F_v)/\mathrm{SL}_n(E \otimes_F F_v))$ such that $$ \mathrm{N}_{E\otimes_{F}F_v/F' \otimes_F F_v*}\nu_0=\nu' {\sf q}uad \textrm{ and } {\sf q}uad \mathrm{N}_{E\otimes_{F}F_v/F_v*}\nu_0=\nu. $$ \mathbf Egin{lem} \label{lem-EP} Let $v$ be a finite place of $F$ that is unramified in $E/F$ and let $S=\{v\}$. The Euler Poincar\'e functions $$ (-1)^{r_1}\nu'f'_{EP} {\sf q}uad \textrm{ and } {\sf q}uad (-1)^{r_2} \nu f_{EP} $$ are transfers of each other for some integers $r_1,r_2$ that depend on $v$ and $E/F$. \end{lem} \mathbf Egin{proof} Recall that if $\mathrm{St}_v$ (resp.~$\mathrm{St}_{v'}$, $\mathrm{St}_{w}$) denotes the Steinberg representation of $\mathrm{GL}_n(F_v)$ (resp.~$\mathrm{GL}_n(F'_{v'})$, $\mathrm{GL}_n(E_w)$), then $\mathrm{St}_{vF'v'}=\mathrm{St}_{v'}$ and $\mathrm{St}_{vEw}=\mathrm{St}_w$ for all places $v'|v$ of $F'$ and $w|v$ of $E$ \cite[Chapter 1, Lemma 6.12]{AC}. Let $f_{0EP} \in C_c^{\infty}(\mathrm{SL}_n(E \otimes_FF_v))$ be the Euler-Poincare function constructed with respect to the Haar measure giving measure one to $\mathrm{SL}_n(\mathcal{O}_E \otimes_{\mathcal{O}_F} \mathcal{O}_{F_v})$. We let $f_{S}=\nu_0f_{0EP}$ in the definition of transfer. The statement of the lemma follows from the comments in the first paragraph of this proof and \cite[Theorem 2'(b)]{KottTama}. This theorem of Casselman states in particular that the only generic unitary irreducible admissible representations of a semisimple $p$-adic group that have nonzero trace when applied to an Euler-Poincar\'e function are the Steinberg representations and gives a formula for the trace in this case. We note that in the case at hand one has $q(\mathrm{SL}_{nF_v})=n-1$ in the notation of loc.~cit. 
\end{proof} \mathbb{S}ection{Limiting forms of the cuspidal spectrum} \label{sec-limit-cusp} In the statement of our main theorems we considered limits built out of sums of the trace of a test function acting on the cuspidal spectrum. In this section we prove that these limits converge absolutely. Let $E/F$ be a Galois extension of number fields, let $E \mathfrak{g}eq F' \mathfrak{g}eq F$ be a solvable subfield, and let $\tau \in \mathrm{Gal}(E/F)$. Assume that $S'$ is a finite set of places of $F'$ including the infinite places and that $S_0$ is the set of places of $E$ above them. Let ${\sf p}hi \in C_c^{\infty}(0,\infty)$ and let $$ \widetilde{{\sf p}hi}(s):=\int_{0}^{\infty}x^s{\sf p}hi(x)\frac{dx}{x} $$ be its Mellin transform. The first result of this section is the following proposition: \mathbf Egin{prop} \label{prop-testsums} Let ${\sf p}i'$ be a cuspidal unitary automorphic representation of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ and assume that ${\sf p}i'$ admits a weak base change $\Pi:={\sf p}i_E'$ to $\mathrm{GL}_n(\mathbb{A}_E)$ that satisfies $\Pi_w=({\sf p}i_{v'}')_E$ for all $w|v'$ such that $v' \not \in S'$. Fix $\varepsilon>0$. One has \mathbf Egin{align*} &\mathrm{tr}({\sf p}i'^{S'})(b_{E/F'}\Sigma^{S_0}_{{\sf p}hi}(X))=\mathrm{Res}_{s=1}\left(\widetilde{{\sf p}hi}(s)X^sL(s,\Pi \times \Pi^{\tau\varepsilone S_0})\right)+O_{E,F',n,S_0,\varepsilon}(C_{\Pi \times \Pi^{\tau \varepsilone}}(0)^{\tfrac{1}{2}+\varepsilon}) \end{align*} for all sufficiently large $X \mathfrak{g}eq 1$. Here ``sufficiently large'' depends only on $E,F',n,S_0,\varepsilon$. \end{prop} Here the complex number $C_{\Pi \times \Pi^{\tau \varepsilone}}(s)$ is the analytic conductor of $L(s,\Pi \times \Pi^{\tau \varepsilone})$ normalized as in \S \ref{ssec-analytic-cond} below and the $L$-function is the Rankin-Selberg $L$-function (see, e.g. \cite{Cog1}). 
The point of Proposition \ref{prop-testsums} is the fact that $\lim_{X \to \infty} X^{-1} \mathrm{tr}({\sf p}i^{S'})(\Sigma_{{\sf p}hi}^{S_0}(X)) \neq 0$ if and only if $\mathrm{Hom}_{I}(\Pi, \Pi^{\tau}) \neq 0$ (see \S \ref{ssec-analytic-cond}). This is the keystone of the approach to nonsolvable base change and descent exposed in this paper. We will prove Proposition \ref{prop-testsums} in the following subsection. Let \mathbf Egin{align} \label{cusp-subspace} L^2_0(\mathrm{GL}_n(F')A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})) \leq L^2(\mathrm{GL}_n(F')A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})) \end{align} be the cuspidal subspace. For $h\in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{F'})$ unramified outside of $S'$, let $$ h^1(g):=\int_{A_{\mathrm{GL}_{nF'}}}h(ag)da \in C_c^{\infty}(A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})) $$ and let $R(h^1)$ be the corresponding operator on $L^2(\mathrm{GL}_n(F') A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'}))$. Finally let $R_0(h^1)$ be its restriction to $L^2_0(\mathrm{GL}_n(F') A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'}))$. This restriction is well-known to be of trace class \cite[Theorem 9.1]{Donnely}. The following corollary implies that the limits and sums in \eqref{11}, \eqref{A21}, \eqref{B21}, and \eqref{31} of theorems \ref{main-thm-1}, \ref{main-thm-2} and \ref{main-thm-3} can be switched: \mathbf Egin{cor} \label{cor-aut-trace} One has \mathbf Egin{align*} \mathrm{tr}(R_0(h^1b_{E/F'}(\Sigma^{S'}_{{\sf p}hi}(X))))&+o_{E,F',n,S_0}(X)\\&=\mathbb{S}um_{{\sf p}i'} \mathrm{tr}({\sf p}i')(h^1) \mathrm{Res}_{s=1}\left( \widetilde{{\sf p}hi}(s)X^sL(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0})\right) \end{align*} where the sum is over any subset of the set of equivalence classes of cuspidal automorphic representations ${\sf p}i'$ of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$. 
The sum on the right converges absolutely. \end{cor} \mathbb{S}ubsection{Rankin-Selberg $L$-functions and their analytic conductors} \label{ssec-analytic-cond} For our later use, in this subsection we will consider a setting slightly more general than that relevant for the proof of Proposition \ref{prop-testsums}. Let $n_1,n_2$ be positive integers and let $\Pi_1,\Pi_2$ be isobaric automorphic representations of $A_{\mathrm{GL}_{n_1E}} \backslash \mathrm{GL}_{n_1}(\mathbb{A}_E)$, $A_{\mathrm{GL}_{n_2E}} \backslash \mathrm{GL}_{n_2}(\mathbb{A}_E)$, respectively\footnote{For generalities on isobaric automorphic representations see \cite{LanglEinM} and \cite{JSII}.}. We always assume that $$ \Pi_i\cong \Pi_{i1} \boxplus \cdots \boxplus \Pi_{im_i} $$ for $i \in\{1,2\}$ where the $\Pi_{ij}$ are cuspidal automorphic representations of $A_{\mathrm{GL}_{d_jE}} \backslash \mathrm{GL}_{d_j}(\mathbb{A}_E)$ for some set of integers $d_j$ such that $\mathbb{S}um_{j=1}^{m_i}d_j=n_i$. We let $L(s,\Pi_1 \times \Pi_2)$ be the Rankin-Selberg $L$-function \cite{Cog1}; it satisfies $$ L(s,\Pi_1 \times \Pi_2)={\sf p}rod_{r=1}^{m_1}{\sf p}rod_{s=1}^{m_2} L(s,\Pi_{1r} \times \Pi_{2s}). $$ and is holomorphic in the complex plane apart from possible poles at $s \in \{0,1\}$ \cite[Theorem 4.2]{Cog1}. One sets \mathbf Egin{align} \label{hom-I} \mathrm{Hom}_I(\Pi_1,\Pi_2)=\mathrm{op}lus_{j_1,j_2:\Pi_{1j_1} \cong \Pi_{2j_2}^{\varepsilone}} \textf{C}C. \end{align} Lest the notation mislead the reader, we note that $\Pi_1$ and $\Pi_2$ are irreducible \cite[\S 2]{LanglEinM}, so the space of ``honest'' morphisms of automorphic representations between them is at most one-dimensional. A fundamental result due to Jacquet and Shalika is that \mathbf Egin{align} \label{ord-pole} -\mathrm{ord}_{s=1}L(s,\Pi_1 \times \Pi_2)=\dim(\mathrm{Hom}_I(\Pi_1,\Pi_2)) \end{align} \cite[Theorem 4.2]{Cog1}. 
There is a set of complex numbers $\{\mu_{\Pi_{1wi} \times \Pi_{2wj}}\}_{\substack{1 \leq i \leq n_1\\ 1 \leq j \leq n_2}} \subset \mathbb{C}$, an integer $N_{\Pi_1 \times \Pi_2}$, and a complex number $\epsilon_{\Pi_1 \times \Pi_2}$ such that if we set
\[
\Lambda(s,\Pi_1 \times \Pi_2):=L(s,(\Pi_1 \times \Pi_2)^{\infty})\prod_{w|\infty} \prod_{i=1}^{n_1}\prod_{j=1}^{n_2}\Gamma_{w}(s+\mu_{\Pi_{1wi} \times \Pi_{2wj}})
\]
then
\begin{align} \label{FE}
\Lambda(s,\Pi_1 \times \Pi_2)=\epsilon_{\Pi_1 \times \Pi_2} N_{\Pi_1 \times \Pi_2}^{\tfrac{1}{2}(1-s)}\Lambda(1-s,\Pi_1^{\vee} \times \Pi_2^{\vee}).
\end{align}
Here
\[
\Gamma_{w}(s):=\begin{cases}\pi^{-s/2}\Gamma(s/2) &\textrm{ if }w \textrm{ is real,}\\2(2\pi)^{-s}\Gamma(s) &\textrm{ if }w \textrm{ is complex.} \end{cases}
\]
For a proof of this statement, combine \cite[Proposition 3.5]{Cog1}, the appendix of \cite{JacquetRS} and \cite[Theorem 4.1]{Cog1} (\cite[\S 3]{Tate} is also useful). In these references one can also find the fact that, at least after reindexing, one has
\begin{align} \label{conj}
\overline{\mu_{\Pi_{1wi}^{\vee} \times \Pi_{2wj}^{\vee}}}=\mu_{\Pi_{1wi} \times \Pi_{2wj}}.
\end{align}
One sets
\[
\gamma(\Pi_1 \times \Pi_2,s):=\prod_{w|\infty}\prod_{i=1}^{n_1}\prod_{j=1}^{n_2} \Gamma_{w}(s+\mu_{\Pi_{1wi} \times \Pi_{2wj}}).
\]
Following Iwaniec--Sarnak \cite{IS} (with a slight modification), for $s \in \mathbb{C}$ the \textbf{analytic conductor} is defined to be
\begin{align}
C_{\Pi_1 \times \Pi_2}(s):=N_{\Pi_1 \times \Pi_2} \prod_{w|\infty}\prod_{i=1}^{n_1}\prod_{j=1}^{n_2} \left|\frac{1+\mu_{\Pi_{1wi} \times \Pi_{2wj}}+s}{2\pi}\right|_w.
\end{align}
We recall that $N_{\Pi_1 \times \Pi_2}=\prod_{w \nmid \infty}N_{\Pi_{1w} \times \Pi_{2w}}$ for some integers $N_{\Pi_{1w} \times \Pi_{2w}}$ \cite[Proposition 3.5]{Cog1}.
If $S_0$ is a set of places of $E$ set \mathbf Egin{align*} C_{\Pi_{1S_0} \times \Pi_{2S_0}}(s):&=\left({\sf p}rod_{\textrm{infinite }w \in S_0} {\sf p}rod_{i=1}^{n_1}{\sf p}rod_{j=1}^{n_2}\left|\frac{1+\mu_{\Pi_{1iw} \times \Pi_{2jw}}+s}{2{\sf p}i}\right|_w\right) {\sf p}rod_{\textrm{ finite }w \in S_0} N_{\Pi_{1w} \times \Pi_{2w}}. \end{align*} For the purpose of stating a proposition, let $\lambda(m) \in \textf{C}C$ be the unique complex numbers such that $$ L(s,(\Pi_1 \times \Pi_2)^{S_0})=\mathbb{S}um_{m=1}^{\infty}\frac{\lambda(m)}{m^s} $$ for $\mathrm{Re}(s) \mathfrak{g}g 0$. With all this notation set, we have the following proposition: \mathbf Egin{prop} \label{Perron-prop} Assume that $\Pi_1 \times \Pi_2$ is automorphic of the form $$ \Pi_1 \times \Pi_2 \cong \Pi_1 \boxplus \cdots \boxplus \Pi_m $$ where the $\Pi_i$ are cuspidal automorphic representations of $A_{\mathrm{GL}_{n_iE}} \backslash \mathrm{GL}_{n_i}(\mathbb{A}_E)$. Fix $\varepsilon>0$. For $X \in \mathrm{R}R_{>0}$ sufficiently large one has $$ \mathbb{S}um_{m=1}^{\infty}\lambda(m){\sf p}hi(m/X)=\mathrm{Res}_{s=1}\left( \widetilde{{\sf p}hi}(s)X^sL(s,\Pi_1 \times \Pi_2^{S_0})\right) +O_{E,n,S_0}(C_{\Pi_1 \times \Pi_2}(0)^{\tfrac{1}{2}+\varepsilon}). $$ Here ``sufficiently large'' depends only on $E,n,S_0$. \end{prop} Before beginning the proof of Proposition \ref{Perron-prop} we record one consequence of known bounds towards the Ramanujan-Petersson conjecture. Let $r(E,n) \in \mathrm{R}R_{\mathfrak{g}eq 0}$ be a nonnegative real number such that for all finite places $w$ of $E$ one has \mathbf Egin{align} \label{rp-bound} |L(s,\Pi_{1w} \times \Pi_{2w})| \leq (1-q_w^{-\mathrm{Re}(s)+r(E,n)})^{-n} \end{align} for $\mathrm{Re}(s)>r(E,n)$ and \mathbf Egin{align} \label{rbound} |L(s,\Pi_{1w} \times \Pi_{2w})^{-1}| \leq (1+q_w^{-\mathrm{Re}(s)+r(E,n)})^n. \end{align} for all $s$. 
This real number exists (and can be taken to be independent of the choice of cuspidal automorphic representations $\Pi_1$ and $\Pi_2$ of $A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_E)$) by \cite[Theorem 2]{LRS} in the unramified case and \cite[Proposition 3.3]{MS} in the general case. In particular we may take
\begin{align} \label{LRS-bound}
r(E,n) < \frac{1}{2}-\frac{1}{n^2+1}.
\end{align}
If the Ramanujan--Petersson conjecture were known then we could take $r(E,n)=0$.
\begin{proof}[Proof of Proposition \ref{Perron-prop}]
The proof is a standard application of the inverse Mellin transform entirely analogous to the proof of \cite[Theorem 3.2]{Booker}. We only make a few comments on how to adapt the proof of \cite[Theorem 3.2]{Booker}. First, the assumption of the Ramanujan conjecture in \cite[Theorem 3.2]{Booker} can be replaced by the known bounds toward it that are recorded in \eqref{LRS-bound} above. Second, the bounds on the gamma factors in terms of the analytic conductor are proven in detail in \cite{Moreno}. Finally, we recall that if $L(s,\Pi_1 \times \Pi_2)$ has a pole in the half-plane $\mathrm{Re}(s)>0$ then it is located at $s=1$ and is of order equal to $-\dim \mathrm{Hom}_{I}(\Pi_1,\Pi_2)$ by a result of Jacquet, Piatetskii-Shapiro and Shalika \cite[Theorem 4.2]{Cog1}; this accounts for the main term in the expression above.
\end{proof}
We now prepare for the proof of Proposition \ref{prop-testsums}. If $w$ is a finite place of $E$ and $\Pi_w$ is an unramified representation of $\mathrm{GL}_n(E_w)$ we denote by $A(\Pi_w)$ the Langlands class of $\Pi_w$; it is a semisimple conjugacy class in $({}^L\mathrm{GL}_{nE})^{\circ}=\mathrm{GL}_n(\mathbb{C})$, the neutral component of ${}^L\mathrm{GL}_{nE}$.
For $\mathrm{Re}(s)>1$ we have that
\begin{align} \label{RS-descr}
L(s,(\Pi_1 \times \Pi_2)^{S_0}) = \prod_{w \not \in S_0} \sum_{n \geq 1}\mathrm{tr}(\mathrm{Sym}^n(A(\Pi_{1w}) \otimes A(\Pi_{2w})))q_w^{-ns}
\end{align}
and the sum on the right converges absolutely \cite[Theorem 5.3 and proof of Proposition 2.3]{JS}.
\begin{proof}[Proof of Proposition \ref{prop-testsums}]
Let $v'$ be a place of $F'$ where $\pi'$ is unramified and let $a_1,\dots,a_n$ be the Satake parameters of $\pi'_{v'}$ (i.e.~the eigenvalues of $A(\pi'_{v'})$). We recall that
\[
\mathrm{tr}(\pi'_{v'})(h)=\mathcal{S}(h)(a_1,\dots,a_n)
\]
for all $h \in C_c^{\infty}(\mathrm{GL}_n(F'_{v'})//\mathrm{GL}_n(\mathcal{O}_{F_{v'}'}))$ \cite[Theorem 7.5.6]{Laumon}. This together with Proposition \ref{Perron-prop}, \eqref{RS-descr}, and the description of unramified base change recalled in \S \ref{ssec-bcformal} implies the proposition.
\end{proof}
\subsection{Proof of Corollary \ref{cor-aut-trace}} \label{ssec-cor-trace}
In this subsection our ultimate goal is to prove Corollary \ref{cor-aut-trace}. In order to prove this corollary we first establish the following two lemmas:
\begin{lem} \label{lem-cond}
Let $v'$ be an infinite place of $F'$, let $h_{v'} \in C_c^{\infty}(\mathrm{GL}_n(F_{v'}'))$ and let $N \in \mathbb{Z}$. If $A$ is a countable set of inequivalent irreducible generic admissible representations of $\mathrm{GL}_n(F_{v'}')$, then for $\pi'_{v'} \in A$
\[
\mathrm{tr}(\pi'_{v'})(h_{v'})C_{\pi'_{v'}}(0)^N \to 0 \textrm{ as } C_{\pi'_{v'}}(0) \to \infty.
\]
\end{lem}
\begin{lem} \label{lem-Casimir}
Fix a positive integer $n$.
There is an integer $N>0$ depending on $n$ and a polynomial $P$ of degree $N$ in $n$ variables such that the Casimir eigenvalue of an irreducible generic admissible representation $\pi_v$ of $\mathrm{GL}_n(F_v)$ is bounded by $|P(|\mu_{1\pi_v}|,\dots,|\mu_{n\pi_v}|)|$.
\end{lem}
Thus the trace and Casimir eigenvalue of the $\pi_v$ are controlled by the analytic conductor. This is certainly well-known, but the author was unable to locate these results in the literature. Moreover, the proof of Lemma \ref{lem-cond} is more interesting than one would expect. We begin by recalling some notions that will allow us to use descent. Let $v$ be an archimedean place of $F$; we fix an embedding $\mathbb{R} \hookrightarrow F_v$ (which is an isomorphism if $v$ is real). Let $\mathfrak{h} \leq \mathrm{Res}_{F_v/\mathbb{R}}\mathfrak{gl}_n$ be the Cartan subalgebra of diagonal matrices. For a Lie algebra $\mathfrak{g}$ over $\mathbb{R}$, write $\mathfrak{g}_{\mathbb{C}}:=\mathfrak{g} \otimes_{\mathbb{R}} \mathbb{C}$. Without loss of generality we assume that the set of positive roots of $\mathfrak{h}_{\mathbb{C}}$ inside $\mathrm{Res}_{F_v/\mathbb{R}}\mathfrak{gl}_{n\mathbb{C}}$ is defined using the Borel subgroup $B \leq \mathrm{GL}_n$ of \textbf{lower} triangular matrices (this is to be consistent with \cite{JacquetRS}). Thus standard parabolic subgroups are parabolic subgroups containing $B$. If $Q=MN$ is the Levi decomposition of a standard parabolic subgroup then
\begin{align} \label{Levi-decomp}
M =\prod_jM_j \cong \prod_{j} \mathrm{Res}_{F_v/\mathbb{R}}\mathrm{GL}_{n_j}
\end{align}
where $M_j \cong \mathrm{Res}_{F_v/\mathbb{R}}\mathrm{GL}_{n_j}$ and $\sum_j n_j=n$. If $Q$ is cuspidal then $n_j \in \{1,2\}$ if $v$ is real and all $n_j=1$ if $v$ is complex. We let $\mathfrak{m}:=\mathrm{Lie}(M)$ and $\mathfrak{m}_j:=\mathrm{Lie}(M_j)$.
Moreover we let $\mathfrak{h}_j:=\mathfrak{h} \cap \mathfrak{m}_j$; thus $\mathfrak{h}_j$ is isomorphic to the Cartan subalgebra of diagonal matrices in $\mathrm{R}_{F_v/\mathrm{R}R}\mathrm{GL}_{n_j}$. Let ${\sf p}i_v$ be an irreducible admissible representation of $\mathrm{GL}_n(F_v)$. Thus there is a cuspidal standard parabolic subgroup $Q \leq \mathrm{Res}_{F_v/\mathrm{R}R}\mathrm{GL}_n$ with Levi decomposition $Q=MN$ and an irreducible admissible representation ${\sf p}i_M$ of $M(F_v)$ such that $$ {\sf p}i_v \cong J({\sf p}i_M) $$ where $J({\sf p}i_M)$ is the Langlands quotient of the induced representation $\mathrm{Ind}_{Q(F_v)}^{\mathrm{GL}_n(F_v)}({\sf p}i_M)$ \cite[Theorem 14.92]{Knapp}. Moreover ${\sf p}i_M$ can be taken to be a twist of a discrete series or limit of discrete series. Here we are viewing ${\sf p}i_M$ as a representation of $Q(F_v)$ by letting it act trivially on $N(F_v)$. If ${\sf p}i_v$ is generic, then $\mathrm{Ind}_{Q(F_v)}^{\mathrm{GL}_n(F_v)}({\sf p}i_M)$ is irreducible and hence $$ {\sf p}i_v \cong J({\sf p}i_M) \cong \mathrm{Ind}_{Q(F_v)}^{\mathrm{GL}_n(F_v)}({\sf p}i_M) $$ (see the comments after \cite[Lemma 2.5]{JacquetRS} or \cite{Vogan}). We decompose $$ {\sf p}i_M \cong \otimes_j {\sf p}i_j $$ where each ${\sf p}i_j$ is an admissible irreducible representation of $M_j(F_v)$. We note that, essentially by definition, \mathbf Egin{align} \label{L-prod} L(s,{\sf p}i_v)={\sf p}rod_jL(s,{\sf p}i_j)={\sf p}rod_j{\sf p}rod_{i=1}^{n_j}\Gamma_v(s+\mu_{i{\sf p}i_{j}}) \end{align} (compare \cite[Appendix]{JacquetRS}). We are now in a position to prove Lemma \ref{lem-cond}: \mathbf Egin{proof}[Proof of Lemma \ref{lem-cond}] Let $f \in C_c^{\infty}(\mathrm{GL}_n(F_v))$ and let $f^{(Q)} \in C_c^{\infty}(M(F_v))$ be the constant term of $f$ along $Q$ (see \cite[(10.22)]{Knapp} for notation). Using the natural isomorphism $C_c^{\infty}(M(F_v))={\sf p}rod_{j}C_c^{\infty}(M_j(F_v))$ we decompose $f^{(Q)}={\sf p}rod_jf_j$. 
One then has \mathbf Egin{align} \label{const} \mathrm{tr}({\sf p}i_v)(f)=\mathrm{tr}({\sf p}i_M)(f^{(Q)})={\sf p}rod_j\mathrm{tr}({\sf p}i_j)(f_j) \end{align} (see \cite[(10.23)]{Knapp}). Combining \eqref{const} and \eqref{L-prod}, we see that the lemma will follow if we establish it in the special cases $n \in \{1,2\}$ for $v$ real and $n=1$ for $v$ complex. Moreover when $n=2$ we can assume that ${\sf p}i_v$ is a twist by a quasi-character of a discrete series or limit of discrete series representation. We henceforth place ourselves in this situation. Assume for the moment that $n=1$ and $v$ is real. Then ${\sf p}i_v$ is a quasi-character of $\mathrm{R}R^{\times}$ and hence it is of the form $$ {\sf p}i_v(t)=|t|_v^{u}\mathrm{sgn}^{k}(t) $$ for $t \in \mathrm{R}R^{\times}$ and some $u \in \textf{C}C$ and $k \in \{0,1\}$. In this case we have $\mu_{1{\sf p}i_v}=u+k$ by \cite[Appendix]{JacquetRS}. Similarly, if $n=1$ and $v$ is complex, then ${\sf p}i_v$ is a quasi-character of $\textf{C}C^{\times}$ and hence is of the form $$ {\sf p}i_v(z):=z^{m}(z\overline{z})^{-\tfrac{m}{2}}|z|_v^{u} $$ for $z \in \textf{C}C^{\times}$ and some $m \in \mathbb{Z}$ and $u \in \textf{C}C$. In this case we have $\mu_{1{\sf p}i_v}=\tfrac{m}{2}+u$ by \cite[Appendix]{JacquetRS}. In either case, as a function of $\mu_{1{\sf p}i_v}$ the trace $\mathrm{tr}({\sf p}i_v)(f)$ is easily seen to be rapidly decreasing since the Fourier transform of a compactly supported smooth function on $\mathrm{R}R^{\times}$ or $\textf{C}C^{\times}$ is rapidly decreasing. The lemma follows in these cases. We are left with the case where $n=2$ and $v$ is real; thus ${\sf p}i_v$ is a twist of a discrete series or nondegenerate limit of discrete series representation by a quasi-character. For $m \in \mathbb{Z}$ let $$ \Omega_{m}(z):=z^{m}(z\overline{z})^{-\tfrac{m}{2}} {\sf q}uad \textrm{ and } {\sf q}uad \mathbb{S}igma_m:=\mathrm{Ind}_{\textf{C}C^{\times}}^{W_{F_v}}(\Omega_m). 
$$ The $L$-parameter $\varphi({\sf p}i_v):W_{F_v} \to {}^L\mathrm{GL}_{2F_v}$ attached to ${\sf p}i_v$ is of the form $\mathbb{S}igma_m \otimes \chi$ for some $m \in \mathbb{Z}_{\mathfrak{g}eq 0}$ and some one-dimensional representation $\chi:W_{F_v} \to \mathrm{R}R^{\times}$. The discrete series (or limit of discrete series) representation ${\sf p}i(\mathbb{S}igma_m)$ will be denoted by $D_{m+1}$; it is in the discrete series if and only if $m>0$ (see \cite[Appendix]{JacquetRS}). The representation $D_{m+1}$ is usually referred to as the discrete series of weight $m+1$ if $m>0$ and the limit of discrete series if $m=0$. Recall that any one-dimensional representation of $W_{F_v}$ can be regarded (canonically) as a character $\mathrm{R}R^{\times} \to \mathrm{R}R^{\times}$; this applies in particular to $\chi$. We note that $$ \mathbb{S}igma_m \otimes \mathrm{sgn} \cong \mathbb{S}igma_m $$ since $\mathrm{sgn}$ can be identified with the nontrivial character of $W_{\mathrm{R}R}/W_{\textf{C}C}$ by class field theory. Since ${\sf p}i$ is assumed to be unitary, we assume without loss of generality that $\chi=|\cdot|_v^{it}$ for some real number $t$. With this in mind, the duplication formula implies that we may take $\mu_{1{\sf p}i_v}=\tfrac{m}{2}+it$ and $\mu_{2{\sf p}i_v}=\tfrac{m}{2}+1+it$ (compare \cite[Appendix]{JacquetRS}). We compute the trace $\mathrm{tr}({\sf p}i_v)(f)$ for $f \in C_c^{\infty}(\mathrm{GL}_2(F_v))=C_c^{\infty}(\mathrm{GL}_2(\mathrm{R}R))$. First, define \mathbf Egin{align*} f_t:\mathrm{SL}_2(\mathrm{R}R) &\longrightarrow \textf{C}C\\ g &\longmapsto \int_{\mathrm{R}R^{\times}}|z|_v^{it}\int_{\mathrm{SO}_2(\mathrm{R}R)}f(k^{-1}zgk)dzdk \end{align*} where we normalize the measure $dk$ so that $\mathrm{meas}_{dk}(\mathrm{SO}_2(\mathrm{R}R))=1$ and $dz$ is some choice of Haar measure. Thus $f_t \in C_c^{\infty}(\mathrm{SL}_2(\mathrm{R}R))$. 
One has (with appropriate choices of measures) \mathbf Egin{align*} \mathrm{tr}({\sf p}i_v)(f)=\int_{\mathrm{SL}_2(\mathrm{R}R)} \Theta_{m+1}(g)f_t(g)dg=:\Theta_{m+1}(f_t) \end{align*} where $dg$ is the Haar measure on $\mathrm{SL}_2(\mathrm{R}R)$ giving $\mathrm{SO}_2(\mathrm{R}R)$ measure one and $\Theta_{m+1}$ is the character of $D_{m+1}|_{\mathrm{SL}_2(\mathrm{R}R)}$. By Fourier theory on $C_{\mathrm{GL}_{2}}(\mathrm{R}R) \cong \mathrm{R}R^{\times}$, one sees that to prove the lemma it suffices to prove that as $m \to \infty$ $$ |\Theta_{m+1}(f_t)| |m|^N \to 0 $$ for all $N \in \mathbb{Z}$. For this it suffices to show that for all $f \in C_c^{\infty}(\mathrm{SL}_2(\mathrm{R}R))$ such that $f(kxk^{-1})=f(x)$ one has \mathbf Egin{align} \label{to-show-induct} |\Theta_{m+1}(f)||m|^N \to 0 \end{align} for all $N \in \mathbb{Z}$. This is what we will show. Let $$ M(F_v)=\left\{ a_t: a_t=\mathbf Egin{pmatrix}e^t & 0 \\ 0 & e^{-t} \end{pmatrix},t \in \mathrm{R}R\right\} $$ and $$ T(F_v)=\left\{k_{\theta}: k_{\theta}=\mathbf Egin{pmatrix} \cos \theta & \mathbb{S}in \theta\\ -\mathbb{S}in \theta & \cos \theta \end{pmatrix}, 0 < \theta \leq 2{\sf p}i\right\} $$ By \cite[(11.37)]{Knapp}\footnote{Knapp denotes $M$ by $T$ and $T$ by $B$.} and the discussion below it, for $f \in C_c^{\infty}(\mathrm{SL}_2(\mathrm{R}R))$ satisfying $f(kxk^{-1})=f(x)$ for $k \in \mathrm{SL}_2(\mathrm{R}R)$ we have \mathbf Egin{align} \label{first-formula} m\Theta_{m+1}(f)=&-\frac{1}{2{\sf p}i i} \int_{0}^{2 {\sf p}i}(e^{im\theta}+e^{-im\theta})\frac{d}{d\theta}F^T_f(\theta)d\theta\\& \nonumber +\frac{1}{2}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d}{dt} F^M_f(a_t)dt\\ \nonumber &+\frac{1}{2}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d}{dt}F_f^M(-a_t)dt \end{align} for $m>0$. The $m=0$ term is unimportant for our purposes as we are interested in the behavior as $m \to \infty$. 
Here $dt$ and $d\theta$ are the usual Lebesgue measures and
\begin{align*}
F^T_f(k_{\theta})&=(e^{i\theta}-e^{-i\theta})\int_{\mathrm{GL}_2(F_v)/T(F_v)}f(xk_{\theta}x^{-1})d \dot{x}\\
F^M_f(\pm a_t)&=\pm|e^t-e^{-t}|\int_{\mathrm{GL}_2(F_v)/M(F_v)} f(xa_tx^{-1}) d\dot{x}
\end{align*}
for suitably chosen Haar measures (that are independent of $\pi_v$) \cite[(10.9a-b)]{Knapp}. We note that the functions $F^M_f$ are smooth \cite[Proposition 11.8]{Knapp} and for integers $k \geq 0$ the odd-order derivative $\frac{d^{2k+1}}{d\theta^{2k+1}}F^T_f(\theta)$ is continuous (see the remarks after \cite[Proposition 11.9]{Knapp}). Moreover $F^M_f(a_t)$ vanishes outside a bounded subset of $M(\mathbb{R})$ \cite[Proposition 11.7]{Knapp}. We claim that for $m>0$ and $k \geq 1$ one has
{\allowdisplaybreaks
\begin{align} \label{claim-induct}
m^{2k+1}\Theta_{m+1}(f)=&-\frac{1}{2\pi i^{2k+1}} \int_{0}^{2 \pi}(e^{im \theta}+e^{-im \theta})\frac{d^{2k+1}}{d\theta^{2k+1}}F_f^T(\theta) d\theta\\&+\frac{1}{2}\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k+1}}{dt^{2k+1}}F^M_f(a_t)dt \nonumber \\
&+\frac{1}{2}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|} (\mathrm{sgn}(t))\frac{d^{2k+1}}{dt^{2k+1}}F^M_f(-a_t)dt. \nonumber
\end{align}}
Assuming \eqref{claim-induct}, an application of the Riemann--Lebesgue lemma implies \eqref{to-show-induct} which in turn implies the lemma. Thus proving \eqref{claim-induct} will complete the proof of the lemma. Proceeding by induction, assume \eqref{claim-induct} is true for $k-1>0$.
Applying integration by parts we obtain that \eqref{claim-induct} is equal to {\allowdisplaybreaks\mathbf Egin{align*} &-(mi)^{-1}\left(-\frac{1}{2 {\sf p}i i^{2k-1}}\right)\int_{0}^{2 {\sf p}i}( e^{im\theta}-e^{-im\theta})\frac{d^{2k}}{d\theta^{2k}}F_f^T(\theta)d\theta\\ &-\frac{1}{2m}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d^{2k}}{dt^{2k}} F^M_f(a_t)dt\\ &-\frac{1}{2m}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k}}{dt^{2k}}F_f^M(-a_t)dt\\ &+-\frac{1}{-2m}e^{-m|0^+|}\left( \frac{d^{2k-1}}{dt^{2k-1}}F_f^M(a_0^+) +(-1)^m\frac{d^{2k-1}}{dt^{2k-1}}F_f^M(-a_0^+)\right)\\&+-\frac{1}{-2m}e^{-m|0^-|}\left( \frac{d^{2k-1}}{dt^{2k-1}}F_f^M(a_0^-) +(-1)^m\frac{d^{2k-1}}{dt^{2k-1}}F_f^M(-a_0^-)\right)\\ &=-m^{-1}\left(-\frac{1}{2 {\sf p}i i^{2k}}\right)\int_{0}^{2 {\sf p}i}( e^{im\theta}-e^{-im\theta})\frac{d^{2k}}{d\theta^{2k}}F_f^T(\theta)d\theta\\ &-\frac{1}{2m}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d^{2k}}{dt^{2k}} F^M_f(a_t)dt\\ &-\frac{1}{2m}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k}}{dt^{2k}}F_f^M(-a_t)dt \\&+\frac{1}{m}\left( \frac{d^{2k-1}}{dt^{2k-1}}F_f^M(a_0)+(-1)^m\frac{d^{2k-1}}{dt^{2k-1}}F_f^M(-a_0)\right) \end{align*}} \noindent where the ${\sf p}m$ denote values as $t \to 0^{{\sf p}m}$ (this is purely for emphasis, as $F_f^M$ is smooth). We note that the extra terms occur because of the singularity of the sign function $\mathrm{sgn}(t)$ at $t=0$. Since $F_f^M({\sf p}m a_t)$ is even as a function of $t$, the last terms in the expression above vanish. Thus the quantity above is equal to \mathbf Egin{align*} &-m^{-1}\left(-\frac{1}{2 {\sf p}i i^{2k}}\right)\int_{0}^{2 {\sf p}i}( e^{im\theta}-e^{-im\theta})\frac{d^{2k}}{d\theta^{2k}}F_f^T(\theta)d\theta\\ &-\frac{1}{2m}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d^{2k}}{dt^{2k}} F^M_f(a_t)dt\\ &-\frac{1}{2m}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k}}{dt^{2k}}F_f^M(-a_t)dt. 
\end{align*} Keeping in mind that $\frac{d^{2k}}{d\theta^{2k}}F_f^T(\theta)$ has jump discontinuities at $0$ and ${\sf p}i$ (see the remark after \cite[Proposition 11.9]{Knapp}), we now apply integration by parts again to see that this expression is equal to \mathbf Egin{align*} &m^{-2}\left(-\frac{1}{2 {\sf p}i i^{2k+1}}\right)\int_{0}^{2 {\sf p}i}( e^{im\theta}+e^{-im\theta})\frac{d^{2k+1}}{d\theta^{2k+1}}F_f^T(\theta)d\theta\\ &-m^{-2}\left(-\frac{1}{ 2{\sf p}i i^{2k+1}}\right)(2)\left( \frac{d^{2k}}{d\theta^{2k}}F_f^T(0^-)-(-1)^m\frac{d^{2k}}{d\theta^{2k}}F_f^T({\sf p}i^+)+ (-1)^m\frac{d^{2k}}{d\theta^{2k}}F_f^T({\sf p}i^-)-\frac{d^{2k}}{d\theta^{2k}}F_f^T(0^+)\right)\\ &+\frac{1}{2m^2}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d^{2k+1}}{dt^{2k+1}} F^M_f(a_t)dt\\ &+\frac{1}{2m^2}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k+1}}{dt^{2k+1}}F_f^M(-a_t)dt\\ &+\frac{1}{m^2}\frac{d^{2k}}{dt^{2k}}F_f^T(a_0)+\frac{1}{m^2}(-1)^m\frac{d^{2k}}{dt^{2k}}F_f^T(-a_0). \end{align*} The second and last lines of the expression above cancel by the jump relations \cite[(11.45a), (11.45b)]{Knapp}. Thus the above is equal to \mathbf Egin{align*} &m^{-2}\left(-\frac{1}{2 {\sf p}i i^{2k+1}}\right)\int_{0}^{2 {\sf p}i}( e^{im\theta}+e^{-im\theta})\frac{d^{2k+1}}{d\theta^{2k+1}}F_f^T(\theta)d\theta\\ &+\frac{1}{2m^2}\int_{-\infty}^{\infty} e^{-m|t|}(\mathrm{sgn}(t)) \frac{d^{2k+1}}{dt^{2k+1}} F^M_f(a_t)dt\\ &+\frac{1}{2m^2}(-1)^m\int_{-\infty}^{\infty}e^{-m|t|}(\mathrm{sgn}(t))\frac{d^{2k+1}}{dt^{2k+1}}F_f^M(-a_t)dt \end{align*} which completes the induction, proving \eqref{claim-induct} and hence the lemma. \end{proof} \mathbf Egin{rem} The jump relations which appear in this proof play a role in Langlands' adelization of the trace formula and his hope that it will be amenable to Poisson summation \cite{FLN} \cite{LSing}. 
\end{rem}
For the proof of Lemma \ref{lem-Casimir}, it is convenient to summarize some of the information obtained in the previous lemma in the following table:
\begin{center}
\begin{tabular}{ l | c |c }
$\pi_{v}$& $v$ & $(\mu_{i\pi_{v}})$\\
\hline
$t \mapsto \mathrm{sgn}(t)^k|t|_v^{u}$ & real & $k+u$\\
$D_{m+1} \otimes |\cdot|_v^u$ & real &$(m/2+u,m/2+1+u)$\\
$z \mapsto z^{m}(z\overline{z})^{-\tfrac{m}{2}}|z|_v^{u}$ & complex & $\tfrac{m}{2}+u$
\end{tabular}
\end{center}
We now prove Lemma \ref{lem-Casimir}:
\begin{proof}[Proof of Lemma \ref{lem-Casimir}]
The Harish-Chandra isomorphism \cite[\S VIII.5]{Knapp} factors as
\[
\gamma:Z(\mathrm{Res}_{F_v/\mathbb{R}}\mathfrak{gl}_{n \mathbb{C}}) \tilde{\longrightarrow} Z(\mathfrak{m}_{\mathbb{C}}) \tilde{\longrightarrow} U(\mathfrak{h}_{\mathbb{C}})
\]
where the second map is the Harish-Chandra isomorphism
\[
\gamma_M:Z(\mathfrak{m}_{\mathbb{C}}) \tilde{\longrightarrow} U(\mathfrak{h}_{\mathbb{C}}).
\]
We also have Harish-Chandra isomorphisms
\[
\gamma_j:Z(\mathfrak{m}_{j\mathbb{C}}) \tilde{\longrightarrow} U(\mathfrak{h}_{j\mathbb{C}}).
\]
The infinitesimal character of $\pi_v$ (resp.~$\pi_M$) is of the form $\Lambda(\pi_v) \circ \gamma$ (resp.~$\Lambda(\pi_{M}) \circ \gamma_M$) for some $\Lambda(\pi_v)$ (resp.~$\Lambda(\pi_M)$) in $\mathfrak{h}_{\mathbb{C}}^{\wedge}$ \cite[\S VIII.6]{Knapp}. Similarly the infinitesimal character of $\pi_j$ is of the form $\Lambda(\pi_j) \circ \gamma_j$ for some $\Lambda(\pi_j) \in \mathfrak{h}_j^{\wedge}$. Moreover
\begin{align} \label{factor-Lambda}
\Lambda(\pi_v)=\Lambda(\pi_M)=\sum_j \Lambda(\pi_j)
\end{align}
\cite[Proposition 8.22]{Knapp}.
If $C \in Z(\mathrm{R}_{F_v/\mathrm{R}R}\mathfrak{gl}_{n\textf{C}C})$ is the Casimir operator, the eigenvalue of $C$ acting on the space of ${\sf p}i_v$ is $\Lambda({\sf p}i_v)( \mathfrak{g}amma(C))$. For each $j$ let $\{\Lambda_{j,\alpha}\} \mathbb{S}ubset \mathfrak{h}_{j\textf{C}C}^{\widetilde{\varepsilon}dge}$ be a basis, and write $$ \Lambda({\sf p}i_v):=\mathbb{S}um_{j} \mathbb{S}um_{\alpha} a_{j,\alpha}({\sf p}i_j) \Lambda_{j,\alpha} $$ for some $a_{j,\alpha}({\sf p}i_v) \in \textf{C}C$. We note that $\mathfrak{g}amma(C)$ does not depend on ${\sf p}i_v$. Therefore in order to prove the lemma it suffices to exhibit a basis as above such that the $a_{j,\alpha}({\sf p}i_j)$ are bounded by a polynomial in the $|\mu_{i{\sf p}i_v}|$ for $1 \leq i \leq n$. In view of \eqref{L-prod} and \eqref{factor-Lambda} it follows that in order to prove the lemma it suffices to prove this statement in the special case where $n \in \{1,2\}$ for $v$ real and the special case $n=1$ for $v$ complex. In the $n=1$ cases this comes down to unraveling definitions. For $n=2$ we can assume that ${\sf p}i_v$ is a twist by a quasi-character of a discrete series or limit of discrete series representation. In this case we refer to \cite[Chapter VIII, \S 16, Problems 1]{Knapp}. \end{proof} We now prove Corollary \ref{cor-aut-trace}: \mathbf Egin{proof}[Proof of Corollary \ref{cor-aut-trace}] In view of Proposition \ref{prop-testsums} in order to prove the corollary it suffices to show that the contribution of the terms in Proposition \ref{prop-testsums} that depend on the automorphic representation does not grow too fast when we sum over all automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ fixed by a given compact open subgroup of $\mathrm{GL}_n(\mathbb{A}_{F'}^{\infty})$. 
More precisely, it suffices to show that
\begin{align} \label{obound}
\sum_{\pi'}\mathrm{tr}(\pi')(h)(C_{\pi'_E \times \pi'^{\tau\vee}_E}(0)^{\tfrac{1}{2}+\varepsilon})=o(X)
\end{align}
where the sum is over any subset of the set of equivalence classes of cuspidal automorphic representations $\pi'$ of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_{n}(\mathbb{A}_{F'})$ and $\pi_E'$ is the base change of $\pi'$ to $\mathrm{GL}_n(\mathbb{A}_E)$. The basic properties of cyclic base change (i.e.~the relationship between the $L$-function of an admissible representation and its base change) together with the recollections on local $L$-factors collected in \S \ref{ssec-analytic-cond} above imply that
\begin{align*}
C_{\pi'_{E\infty} \times \pi'^{\vee}_{E\infty}}(s) \leq C_{\pi'_{\infty}}(s)^N
\end{align*}
for sufficiently large $N \geq 0$ depending on $E/F'$ and $n$. Using Lemma \ref{lem-cond} and the Weyl law for cusp forms \cite[Theorem 9.1]{Donnely}, we see that in order to prove \eqref{obound} it suffices to show that the Casimir eigenvalues (and hence the Laplacian eigenvalues) of a cuspidal automorphic representation $\pi'$ contributing to \eqref{obound} are bounded by a polynomial function of the absolute value of the parameters $\mu_{i\pi'_{v'}}$ for archimedean $v'$. This is the content of Lemma \ref{lem-Casimir}.
\end{proof}
\section{Restriction and descent of $L$-parameters}\label{sec-rest-desc}
The goal of this section is to prove some properties of $L$-parameters under restriction along an extension of number fields and then formulate the conjectures in automorphic representation theory to which these properties correspond. Criteria for parameters to descend are given in \S \ref{ssec-desc}.
In \S \ref{ssec-primitive} we define a notion of $E$-primitive parameters and automorphic representations and then use it in \S \ref{ssec-rest-param} to give an explicit description of the fibers and image of the restriction map (see Proposition \ref{prop-bij-EF'}). In \S \ref{ssec-icosa-gp} a complement to Proposition \ref{prop-bij-EF'}, namely Proposition \ref{prop-A5-EF}, is given. More specifically, Proposition \ref{prop-A5-EF} deals with the case of field extensions with Galois group isomorphic to $\widetilde{A}_5$. Propositions \ref{prop-bij-EF'} and \ref{prop-A5-EF} are meant as motivation for conjectures \ref{conj-1} and \ref{conj-2} below respectively; these are the conjectures that appear in the statements of our first two main theorems. Finally, in \S \ref{ssec-artin-conj} we prove lemmas on restriction of $L$-parameters along subfields of a $\widetilde{A}_5$-extension that motivate conjectures \ref{conj-32} and \ref{conj-33}, the conjectures assumed in Theorem \ref{main-thm-3}. \mathbb{S}ubsection{Parameters and base change} \label{ssec-param-bc} In this subsection we recall the base change map (or restriction map) on $L$-parameters which conjecturally corresponds to base change of automorphic representations of $\mathrm{GL}_n$. Let $E/F$ be an extension of number fields, let $W_E$ (resp.~$W_F$) denote the Weil group of $E$ (resp.~$F$) and let $$ W_E'=W_E \times \mathrm{SU}(2) $$ (resp.~$W_F':=W_F \times \mathrm{SU}(2)$) denote the Weil-Deligne group of $E$ (resp.~$F$), where $\mathrm{SU}(2)$ is the real compact Lie group of unitary matrices of determinant one\footnote{There are competing definitions of representations of the Weil-Deligne group that are all equivalent, see \cite[\S 2.1]{GR}. To pass from $\mathrm{SL}_2(\textf{C}C)$ to $\mathrm{SU}(2)$ one uses the unitary trick.}. 
We will be using the notion of an $L$-parameter $$ \varphi:W_F' \longrightarrow{}^L\mathrm{GL}_{nF}=W_{F}' \times \mathrm{GL}_n(\mathbb{C}) $$ extensively\footnote{$L$-parameters are defined in \cite[\S 8]{Borel} where they are called ``admissible homomorphisms.''}. Part of the definition of an $L$-parameter is the stipulation that the induced map $$ W_{F}' \longrightarrow W_F' $$ given by projection onto the first factor of ${}^L\mathrm{GL}_{nF}=W_{F}' \times \mathrm{GL}_n(\mathbb{C})$ is the identity. Thus $\varphi$ is determined by the representation $W_{F}' \to\mathrm{GL}_n(\mathbb{C})$ defined by projection onto the second factor of ${}^L\mathrm{GL}_{nF}$: \begin{align} \label{proj-second} \begin{CD} W_F' @>{\varphi}>> {}^L\mathrm{GL}_{nF}@>>> {}^L\mathrm{GL}_{nF}^{\circ}=\mathrm{GL}_n(\mathbb{C}). \end{CD} \end{align} Thus one can safely think of $L$-parameters as representations $W_{F}' \to \mathrm{GL}_n(\mathbb{C})$ satisfying certain additional properties. We say that an $L$-parameter $\phi:W_F' \to {}^L\mathrm{GL}_{n}$ is irreducible if the representation \eqref{proj-second} is irreducible. For convenience, we denote by \begin{align} \label{L-param} \Phi_n(F):&=\{\textrm{Equivalence classes of }L\textrm{-parameters }\varphi:W_{F}' \to {}^L\mathrm{GL}_{nF}\}\\ \nonumber \Phi_n^0(F):&=\{\textrm{Equivalence classes of irreducible }L\textrm{-parameters }\varphi:W_{F}' \to {}^L\mathrm{GL}_{nF}\}. \end{align} If $E/F$ is Galois then there is a natural action of $\mathrm{Gal}(E/F)$ on the set of $L$-parameters from $W_E'$ given by $$ \phi^{\sigma}(g)=\phi(\sigma^{-1}g\sigma). $$ This induces an action of $\mathrm{Gal}(E/F)$ on $\Phi_n(E)$ which preserves $\Phi_n^0(E)$; we denote the invariants under this action by $\Phi_n(E)^{\mathrm{Gal}(E/F)}$ (resp.~$\Phi_n^0(E)^{\mathrm{Gal}(E/F)}$).
As noted above in \S \ref{ssec-bcformal}, we have a base change $L$-map $$ b_{E/F}:{}^L\mathrm{GL}_{nF} \longrightarrow {}^L\mathrm{R}_{E/F}\mathrm{GL}_{nF} $$ given by the diagonal embedding on connected components and the identity on the $W_F'$-factors. For each $L$-parameter $\varphi:W_F' \to {}^L\mathrm{GL}_{nF}$ the composition $b_{E/F} \circ \varphi :W_F' \to {}^L\mathrm{R}_{E/F}\mathrm{GL}_{nF}$ is another $L$-parameter (compare \cite[\S 15.3]{Borel}). One can view this construction in an equivalent manner as follows: an $L$-parameter $\phi:W_F' \to {}^L\mathrm{R}_{E/F}\mathrm{GL}_{nF}$ can be identified canonically with an $L$-parameter $\phi:W_E' \to {}^L\mathrm{GL}_{nE}$. From this viewpoint, the base change map simply associates to a parameter $\varphi:W_F' \to {}^L\mathrm{GL}_{nF}$ its restriction $b_{E/F}\circ \varphi:=\varphi|_{W_E'}$ (compare \cite[\S 15.3]{Borel}). Thus $b_{E/F}$ induces a map \begin{align} \label{L-restr} b_{E/F}:\Phi_n^0(F) &\longrightarrow \Phi_n(E)\\ \varphi &\longmapsto \varphi|_{W_E'} \nonumber \end{align} which has image in $\Phi_n(E)^{\mathrm{Gal}(E/F)}$ if $E/F$ is Galois. According to Langlands functoriality, there should be a corresponding transfer of $L$-packets of automorphic representations. In fact, since $L$-packets are singletons in the case at hand, we should obtain an honest map from the set of equivalence classes of automorphic representations of $\mathrm{GL}_n(\mathbb{A}_F)$ to the set of equivalence classes of automorphic representations of $\mathrm{R}_{E/F}\mathrm{GL}_n(\mathbb{A}_F)=\mathrm{GL}_n(\mathbb{A}_E)$. Thus we should expect a map \begin{align*} b_{E/F}:\Pi_n(F) &\stackrel{?}{\longrightarrow} \Pi_n(E)\\ \pi &\stackrel{?}{\longmapsto} \pi_E \end{align*} which has image in $\Pi_n(E)^{\mathrm{Gal}(E/F)}$ if $E/F$ is Galois. Moreover this map should share certain properties of \eqref{L-restr}.
Making this precise in general seems to require the introduction of the conjectural Langlands group. Rather than take this route, we will simply prove properties of the restriction map on $L$-parameters (specifically propositions \ref{prop-bij-EF'} and \ref{prop-A5-EF} and lemmas \ref{lem-A5-EF} and \ref{lem-A5-EF3}) and then state the specific conjectures in automorphic representation theory (specifically conjectures \ref{conj-1}, \ref{conj-2}, \ref{conj-32} and \ref{conj-33}) that they suggest. \subsection{Descent of parameters} \label{ssec-desc} Our goal in this subsection is to prove the following lemma: \begin{lem} \label{lem-bc-param} Let $E/F$ be a Galois extension of number fields. If $\phi:W_E' \to {}^L\mathrm{GL}_{nE}$ is an irreducible $L$-parameter and $\phi^{\sigma} \cong \phi$ for all $\sigma \in \mathrm{Gal}(E/F)$, then there is an $L$-parameter $$ \varphi:W_F' \longrightarrow {}^L\mathrm{GL}_{nF} $$ such that $b_{E/F} \circ \varphi \otimes \chi=\phi$, where $\chi:W_E' \longrightarrow {}^L\mathrm{GL}_{1E}$ is a quasi-character invariant under $\mathrm{Gal}(E/F)$. If $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})=0$, then $\chi$ can be taken to be trivial. \end{lem} Before we begin the proof we set a little notation. Let $\varphi$ and $\phi$ be $L$-parameters as above. We let \begin{align} \label{naughts} \varphi_0:W_F' &\longrightarrow ({}^L\mathrm{GL}_{nF})^{\circ}=\mathrm{GL}_n(\mathbb{C})\\ \nonumber \phi_0:W_E' &\longrightarrow ({}^L\mathrm{GL}_{nE})^{\circ}=\mathrm{GL}_n(\mathbb{C}) \end{align} be the homomorphisms defined by composing $\varphi$ (resp.~$\phi$) with the projection ${}^L\mathrm{GL}_{nF} \to ({}^L\mathrm{GL}_{nF})^{\circ}$ (resp.~${}^L\mathrm{GL}_{nE} \to ({}^L\mathrm{GL}_{nE})^{\circ}$).
\begin{proof} By assumption, for every $\sigma \in \mathrm{Gal}(E/F)$ we are given a $c(\sigma) \in \mathrm{GL}_n(\mathbb{C})$ such that $$ \phi_0(\sigma \zeta \sigma^{-1})=c(\sigma)\phi_0(\zeta)c(\sigma)^{-1}. $$ Since $\phi_0$ is irreducible, Schur's lemma implies that $c(\sigma)c(\tau)=\lambda(\sigma,\tau)c(\sigma\tau)$ for some $\lambda(\sigma, \tau) \in \mathbb{C}^{\times}$. In other words, the projective representation $$ P\phi_0:W_E' \longrightarrow ({}^L\mathrm{GL}_{nE})^{\circ} \longrightarrow \mathrm{PGL}_n(\mathbb{C}) $$ obtained by composing $\phi_0$ with the canonical projection can be extended to a (continuous) projective representation $$ \psi:W_F'\longrightarrow \mathrm{PGL}_n(\mathbb{C}). $$ This extension has the property that $\psi(w)$ is semisimple for all $w \in W_F$. By \cite[\S 8]{Rajan2}, there is an $L$-parameter $$ \varphi:W_F' \longrightarrow {}^L\mathrm{GL}_{nF} $$ such that $\varphi$ is a lift of $\psi$. Let $P(b_{E/F}(\varphi)_0)$ denote the composite of $b_{E/F}(\varphi)_0=\varphi_0|_{W_E'}$ with the projection ${}^L\mathrm{GL}_{nE} \to \mathrm{PGL}_n(\mathbb{C})$. We have that $$ P(b_{E/F}(\varphi)_0) \cong P \phi_0. $$ It follows that $b_{E/F}(\varphi)_0 \cong \phi_0 \otimes \chi$ for some character $\chi:W_E' \to \mathbb{C}^{\times}$ invariant under $\mathrm{Gal}(E/F)$. To complete the proof of the lemma, we need to show that if $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})=0$, then any character $\chi:W_E' \to \mathbb{C}^{\times}={}^L\mathrm{GL}_{1F}$ that is invariant under $\mathrm{Gal}(E/F)$ is the restriction of a character of $W_F'$.
Viewing $\mathbb{C}^{\times}$ as a trivial $W_F'$- and $\mathrm{Gal}(\overline{F}/F)$-module, we have an inflation-restriction exact sequence \cite[\S 3]{Rajan2} \begin{align} \label{inf-res} \begin{CD} H^1(W_F',\mathbb{C}^{\times}) @>{\mathrm{res}}>> H^1(W_E',\mathbb{C}^{\times})^{W_F'/W_E'} @>>> H^2(W_F'/W_E',\mathbb{C}^{\times}) \end{CD} \end{align} coming from the Hochschild-Serre spectral sequence. Here $H$ denotes the Moore cohomology groups. We note that for $i \geq 1$ and $G$ discrete the Moore cohomology group $H^i(G,M)$ is equal to the usual continuous group cohomology group \cite[\S 3]{Rajan2}. Since $W_F'/W_E' \cong \mathrm{Gal}(E/F)$, this completes the proof of the lemma. \end{proof} One would like to construct functorial transfers of automorphic representations corresponding to the base change map on $L$-parameters recalled above. The $n=1$ case is trivial, as we now explain: Given a quasi-character $$ \mu:\mathrm{GL}_1(\mathbb{A}_F) \longrightarrow \mathbb{C}^{\times} $$ trivial on $\mathrm{GL}_1(F)$ its base change $b_{E/F}(\mu)$ is given by $$ b_{E/F}(\mu):=\mu \circ \mathrm{N}_{E/F}:\mathrm{GL}_1(\mathbb{A}_E) \longrightarrow \mathbb{C}^{\times} $$ where $\mathrm{N}_{E/F}$ is the norm map. We have the following lemma characterizing the image of the base change: \begin{lem} \label{lem-image-bc} Suppose that $E/F$ is Galois and $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})=0$. If $\eta:\mathrm{GL}_1(\mathbb{A}_E) \to \mathbb{C}^{\times}$ is a quasi-character trivial on $\mathrm{GL}_1(E)$ satisfying $\eta^{\sigma} =\eta$ for all $\sigma \in \mathrm{Gal}(E/F)$ then $\eta=\chi \circ \mathrm{N}_{E/F}$ for some quasi-character $\chi:\mathrm{GL}_1(\mathbb{A}_F) \to \mathbb{C}^{\times}$ trivial on $\mathrm{GL}_1(F)$.
\end{lem} \begin{proof} We have a commutative diagram \begin{align} \label{nice-diag} \begin{CD} E^{\times} \backslash \mathbb{A}_E^{\times} @>{r_E}>> W_E'/W'^c_E\\ @V{\mathrm{N}_{E/F}}VV @VVV\\ F^{\times} \backslash \mathbb{A}_F^{\times} @>{r_F}>> W_F'/W'^c_F \end{CD} \end{align} where $(\cdot)^c$ denotes the closure of the commutator subgroup of $(\cdot)$ and the right homomorphism is induced by the inclusion $W_E' \leq W_F'$ \cite[(1.2.2)]{Tate}. As we proved in Lemma \ref{lem-bc-param}, any quasi-character of $W_E'/W'^{c}_E$ that is invariant under $\mathrm{Gal}(E/F)$ is the restriction of a quasi-character of $W_F'/W'^{c}_F$. Translating this to the left hand side of \eqref{nice-diag}, this implies that any quasi-character of $\mathbb{A}_E^{\times}$ trivial on $E^{\times}$ that is invariant under $\mathrm{Gal}(E/F)$ is the composition of a quasi-character of $\mathbb{A}_F^{\times}$ trivial on $F^{\times}$ with the norm map. \end{proof} \subsection{Primitive parameters and automorphic representations} \label{ssec-primitive} Let $F$ be a number field. It is convenient to introduce the following definition: \begin{defn} \label{defn-induced-L} An $L$-parameter $\varphi:W_F' \to {}^L\mathrm{GL}_{nF}$ is \textbf{$K$-induced} if there is a nontrivial field extension $K/F$ of finite degree and an irreducible $L$-parameter $\phi:W_K' \to {}^L\mathrm{GL}_{nK}$ such that $\varphi \cong \mathrm{Ind}_{W_K'}^{W_F'}\phi$. If $E/F$ is a nontrivial field extension, then an \textbf{$E$-primitive} $L$-parameter $\varphi$ is an irreducible $L$-parameter such that $\varphi$ is not $K$-induced for all subfields $E \geq K > F$. \end{defn} We denote by \begin{align} \Phi^{\mathrm{prim}}_n(E/F):=\{ \textrm{Equiv.~classes of $E$-primitive }L\textrm{-parameters }\varphi:W_F' \to {}^L\mathrm{GL}_{nF}\}. \end{align} Let $k$ be a global or local field and let $K/k$ be an \'etale $k$-algebra.
Let $\overline{k}$ be a choice of algebraic closure of $k$. Write $K=\oplus_iK_i$ where the $K_i$ are finite extension fields of $k$. Let $$ \mathrm{Ind}_{K}^k:{}^L\mathrm{R}_{K/k}\mathrm{GL}_{nk} \to {}^L\mathrm{GL}_{n[K:k]k} $$ be the $L$-map that embeds $({}^L\mathrm{R}_{K/k}\mathrm{GL}_{nk})^{\circ}=\times_i\mathrm{GL}_n(\mathbb{C})^{\mathrm{Hom}_k(K_i,\overline{k})}$ diagonally and sends $W_{k}'$ to the appropriate group of permutation matrices\footnote{$L$-maps are defined in \cite[\S 15.1]{Borel}.}. We recall that $L$-parameters $\phi:W_K' \to {}^L\mathrm{GL}_{nK}$ can be identified canonically with $L$-parameters $\phi:W_k' \to {}^L\mathrm{R}_{K/k}\mathrm{GL}_{nk}$ \cite[Proposition 8.4]{Borel}; under this identification $\mathrm{Ind}_K^k(\phi)=\oplus_i\mathrm{Ind}_{W'_{K_i}}^{W_k'}(\phi)$. Using the local Langlands correspondence, to any irreducible admissible representation $\Pi_v$ of $\mathrm{GL}_n(E \otimes_FF_v)$ we can associate an irreducible admissible representation $\pi_v$ of $\mathrm{GL}_n(F_v)$ by stipulating that if $\phi:W_{F_v}' \to {}^L\mathrm{R}_{E/F}\mathrm{GL}_n$ is the $L$-parameter attached to $\Pi_v$ then $\mathrm{Ind}_E^F \circ \phi$ is the $L$-parameter attached to $\pi_v$. If this is the case then we write $$ \pi_v \cong \mathrm{Ind}_E^F(\Pi_v). $$ \begin{defn} An automorphic representation $\pi$ of $\mathrm{GL}_{n}(\mathbb{A}_F)$ is \textbf{$K$-automorphically induced} if there is a nontrivial finite extension field $K/F$ and an automorphic representation $\Pi$ of $\mathrm{GL}_n(\mathbb{A}_K)=\mathrm{R}_{K/F}\mathrm{GL}_n(\mathbb{A}_F)$ such that $\pi_v \cong \mathrm{Ind}_K^F(\Pi_v)$ for almost all places $v$ of $F$.
If $E/F$ is a nontrivial field extension then an \textbf{$E$-primitive} automorphic representation of $\mathrm{GL}_n(\mathbb{A}_F)$ is a cuspidal automorphic representation of $\mathrm{GL}_{n}(\mathbb{A}_F)$ that is not $K$-automorphically induced for all subfields $E \geq K >F$. \end{defn} For field extensions $E/F$ let \begin{align} \Pi^{\mathrm{prim}}_n(E/F):=\{\textrm{isom.~classes of $E$-primitive automorphic reps.~of }\mathrm{GL}_{n}(\mathbb{A}_F)\}. \end{align} \subsection{Restriction of parameters} \label{ssec-rest-param} In \S \ref{ssec-desc} we discussed descent of parameters along a Galois extension $E/F$; the main result being that if $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})=0$ then $\mathrm{Gal}(E/F)$-invariant irreducible parameters descend. In this subsection we explore certain converse statements involving restrictions of parameters. The main result is Proposition \ref{prop-bij-EF'}. The statement parallel to Proposition \ref{prop-bij-EF'} in the context of automorphic representations is Conjecture \ref{conj-1}, the conjecture that appeared in the statement of Theorem \ref{main-thm-1}. Let $K$ be a subfield with $E \geq K \geq F$. For the remainder of this section, to ease notation we will often write $K$ where more properly we should write $W_K'$, e.g. $$ \varphi|_{K}:=\varphi|_{W_K'}. $$ We begin with the following lemma: \begin{lem} \label{lem-restriction} Let $E/F$ be a Galois extension of number fields such that $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})=0$. Let $\varphi:W_F' \to {}^L\mathrm{GL}_{nF}$ be an irreducible $L$-parameter.
Either there is a subfield $E \geq K \geq F$ and an irreducible $L$-parameter $\phi:W_K' \to {}^L\mathrm{GL}_{mK}$ such that $\varphi \cong \mathrm{Ind}_{K}^{F}\phi$ or there is an $L$-parameter $\varphi_1:W_F' \to {}^L\mathrm{GL}_{mF}$ with $\varphi_1|_{E}$ irreducible and a finite-dimensional irreducible representation $\rho:\mathrm{Gal}(E/F) \to \mathrm{GL}_{d}(\mathbb{C})$ such that $$ \varphi \cong \rho \otimes \varphi_1. $$ Here we view $\rho$ as an $L$-parameter via the quotient map $W_{F}' \to W_{F}'/W_E'=\mathrm{Gal}(E/F)$. \end{lem} We note in particular that in the notation of the lemma one has $m[K:F]=n$ in the former case and $md=n$ in the latter. The extreme cases $m=1$ and $d=1$ occur. For our use in the proof of this lemma and later, we record the following: \begin{lem} \label{lem-basic} Let $H \leq G$ be groups with $H$ normal in $G$ and $[G:H]< \infty$. Moreover let $\varphi:G \to \mathrm{Aut}(V)$ be a finite-dimensional complex representation that is irreducible upon restriction to $H$. Then $$ \mathrm{Ind}_{H}^G(1) \otimes \varphi \cong \mathrm{Ind}_{H}^G(\varphi|_H) $$ and $\rho \otimes \varphi$ is irreducible for any irreducible representation $\rho$ of $G/H$. \end{lem} \begin{proof} The first statement is \cite[\S 3.3, Example 5]{SerreFG}. As a representation of $G$, one has $\mathrm{Ind}_H^G(1) \cong \oplus_{i=1}^n \rho_i^{\oplus \mathrm{deg}(\rho_i)}$, where the sum is over a set of representatives for the irreducible representations of $G/H$. Thus to prove the second statement of the lemma it suffices to show that $$ \mathrm{dim}_{\mathbb{C}}\mathrm{Hom}_{G}(\mathrm{Ind}_{H}^G(1) \otimes \varphi,\mathrm{Ind}_{H}^G(1) \otimes \varphi)=\sum_{i}\mathrm{deg}(\rho_i)^2.
$$ By the first assertion of the lemma and Frobenius reciprocity we have \begin{align*} \mathrm{Hom}_{G}(\mathrm{Ind}_{H}^G(1) \otimes \varphi,\mathrm{Ind}_{H}^G(1) \otimes \varphi) &\cong\mathrm{Hom}_{G}(\mathrm{Ind}_{H}^G(1) \otimes \varphi,\mathrm{Ind}_{H}^G(\varphi|_H))\\&\cong\mathrm{Hom}_{H}(\oplus_{i=1}^n \varphi|_H^{\oplus\mathrm{deg}(\rho_i)^2},\varphi|_H) \end{align*} which has dimension $\sum_{i=1}^n \mathrm{deg}(\rho_i)^2$. \end{proof} \begin{proof}[Proof of Lemma \ref{lem-restriction}] Assume that there does not exist a subfield $E \geq K \geq F$ and an irreducible $L$-parameter $\phi:W_K' \to {}^L\mathrm{GL}_{mK}$ such that $\varphi \cong \mathrm{Ind}_{K}^{F}(\phi)$. Then, by \cite[\S 8.1, Proposition 24]{SerreFG} the restriction $\varphi|_{E}$ is isomorphic to a direct sum of some number of copies of a fixed irreducible $L$-parameter $\phi_1:W_E' \to {}^L\mathrm{GL}_{mE}$. Since $\varphi|_E^{\tau} \cong \varphi|_E$ for $\tau \in W_F'$ (trivially) it follows in particular that $\phi_1$ is $\mathrm{Gal}(E/F)$-invariant and therefore descends to a parameter $\varphi_1:W_F' \to {}^L\mathrm{GL}_{mF}$ by Lemma \ref{lem-bc-param}. By Lemma \ref{lem-basic} one has $$ \mathrm{Ind}_{E}^{F}(1) \otimes \varphi_1 \cong \mathrm{Ind}_{E}^{F}(\phi_1). $$ Applying Frobenius reciprocity we see that $$ 0 \neq \mathrm{Hom}_{E}(\varphi|_{E},\phi_1)=\mathrm{Hom}_{F}(\varphi,\mathrm{Ind}_{E}^{F} (\phi_1))=\mathrm{Hom}_{F}(\varphi,\mathrm{Ind}_{E}^{F}(1) \otimes \varphi_1) $$ which, in view of Lemma \ref{lem-basic}, completes the proof of the lemma. \end{proof} As an example, we have the following corollary: \begin{cor} \label{cor-restriction} Under the assumptions of Lemma \ref{lem-restriction}, if $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple nonabelian group $G$, $n=2$ and $\varphi|_{W'_E}$ is reducible, then $G=A_5$.
\end{cor} \begin{proof} By Lemma \ref{lem-restriction}, if $\varphi|_{E}$ is reducible, then either there is a quadratic extension $K/F$ contained in $E$ such that $\varphi \cong \mathrm{Ind}_{K}^{F}\varphi_1$ for some parameter $\varphi_1:W_K' \to {}^L\mathrm{GL}_{1K}$ or one has a nontrivial representation $\mathrm{Gal}(E/F) \to \mathrm{GL}_2(\mathbb{C})$ (there are no nontrivial one-dimensional representations of $\mathrm{Gal}(E/F)$ since $\mathrm{Gal}(E/F)$ is perfect). In the former case the extension $K/F$ would correspond to an index $2$ subgroup $H \leq \mathrm{Gal}(E/F)$, which would a fortiori be normal. Thus we would have $\mathrm{Gal}(E/F)/H \cong \mathbb{Z}/2$, contradicting the assumption that $\mathrm{Gal}(E/F)$ is perfect. Hence we must be in the latter case. The nontrivial representation $\mathrm{Gal}(E/F) \to \mathrm{GL}_2(\mathbb{C})$ induces a nontrivial projective representation $G \to \mathrm{PGL}_2(\mathbb{C})$ since $\mathrm{Gal}(E/F)$ is perfect. By a well-known theorem of Klein, if $G$ is a finite simple nonabelian group and $G \to \mathrm{PGL}_2(\mathbb{C})$ is a nontrivial projective representation, then $G\cong A_5$. \end{proof} In view of Lemma \ref{lem-restriction}, for each $n$ there are two natural cases to consider, namely the case where there is a nontrivial representation $\mathrm{Gal}(E/F) \to \mathrm{GL}_m(\mathbb{C})$ for some $m|n$ and the case where there is no nontrivial representation $\mathrm{Gal}(E/F) \to \mathrm{GL}_m(\mathbb{C})$ for any $m|n$. We will deal with the former case under the additional assumption that $n=2$ in \S \ref{ssec-icosa-gp} below. In the latter case one obtains a complete description of the fibers and image of base change on primitive parameters as follows: \begin{prop} \label{prop-bij-EF} Let $E/F$ be a Galois extension of number fields such that $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple nonabelian group.
\begin{enumerate} \item If $\varphi_1,\varphi_2 :W_{F}' \to {}^L\mathrm{GL}_{nF}$ are $L$-parameters such that $\varphi_1|_{E}$ and $\varphi_2|_{E}$ are irreducible and isomorphic, then $\varphi_1 \cong \varphi_2$. \item Assume that for all divisors $m|n$ there are no nontrivial irreducible representations $\mathrm{Gal}(E/F) \to \mathrm{GL}_{m}(\mathbb{C})$. Under this assumption, restriction of parameters induces a bijection \begin{align*} b_{E/F}:\Phi_n^{\mathrm{prim}}(E/F) &\tilde{\longrightarrow} \Phi_n^0(E)^{\mathrm{Gal}(E/F)}\\ \varphi &\longmapsto \varphi|_{E}. \end{align*} \end{enumerate} \end{prop} \begin{proof} We first check (1). Suppose that $\varphi_1,\varphi_2:W_{F}' \to {}^L\mathrm{GL}_{nF}$ are two irreducible parameters with isomorphic irreducible restrictions to $W_{E}'$. Then by Frobenius reciprocity and Lemma \ref{lem-basic} we have \begin{align*} 0 \neq \mathrm{Hom}_E(\varphi_1|_E,\varphi_2|_E)&=\mathrm{Hom}_F(\mathrm{Ind}_{E}^F(\varphi_1|_E),\varphi_2)\\&=\oplus_i \mathrm{Hom}_F(\rho_i \otimes \varphi_1,\varphi_2)^{\oplus \mathrm{deg}(\rho_i)} \end{align*} where the sum is over a set of representatives for the irreducible representations of $\mathrm{Gal}(E/F)$. By Lemma \ref{lem-basic}, $\rho_i \otimes \varphi_1$ is irreducible for all $i$, so by considering degrees we must have $\rho_i \otimes \varphi_1 \cong \varphi_2$ where $\rho_i$ is an abelian character of $\mathrm{Gal}(E/F)$. Since $\mathrm{Gal}(E/F)$ is perfect, this $\rho_i$ is necessarily trivial. Moving on to (2), we note that the restriction map from $L$-parameters of $W_F'$ to $L$-parameters of $W_E'$ obviously has image in the set of $\mathrm{Gal}(E/F)$-invariant parameters and under the additional assumption in (2) it has image in the set of irreducible parameters by Lemma \ref{lem-restriction}. In other words we have a well-defined map $$ b_{E/F}:\Phi_n^{\mathrm{prim}}(E/F) \longrightarrow \Phi_n^{0}(E)^{\mathrm{Gal}(E/F)}.
$$ It is injective by (1) and surjective by Lemma \ref{lem-bc-param}, which completes the proof of the proposition. \end{proof} To set up trace identities it is convenient to work with automorphic representations attached to a subfield $F' \leq E$. In view of this we prove the following modification of Proposition \ref{prop-bij-EF}: \begin{prop} \label{prop-bij-EF'} Let $E/F$ be a Galois extension of number fields such that $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple nonabelian group. Assume that for all $m|n$ there are no nontrivial irreducible representations $\mathrm{Gal}(E/F) \to \mathrm{GL}_{m}(\mathbb{C})$. If $E \geq F' \geq F$ is a subfield then the restriction map induces an injection \begin{align} \label{restr-map} b_{F'/F}:\Phi_n^{\mathrm{prim}}(E/F) & \longrightarrow \Phi_n^{\mathrm{prim}}(E/F') \\ \varphi &\longmapsto \varphi|_{{F'}}. \nonumber \end{align} If $\phi':W_{F'}' \to {}^L\mathrm{GL}_{nF'}$ is an $L$-parameter such that $\phi'|_{W_E'}$ is irreducible and $\mathrm{Gal}(E/F)$-invariant then there is a unique character $\chi' \in \mathrm{Gal}(E/F')^{\wedge}$ such that $\phi' \otimes \chi'$ is in the image of the restriction map \eqref{restr-map}. If $\mathrm{Gal}(E/F')$ is solvable of order coprime to $n$ then for any irreducible $L$-parameter $\phi':W_{F'}' \to {}^L\mathrm{GL}_{nF'}$ the restriction $\phi'|_{W_E'}$ is again irreducible. \end{prop} \begin{proof} Note that $\varphi|_{E}=(\varphi|_{F'})|_{E}$. Thus part (2) of Proposition \ref{prop-bij-EF} implies that restriction of $L$-parameters from $W_F'$ to $W_{F'}'$ maps primitive $L$-parameters to primitive $L$-parameters, so \eqref{restr-map} is well-defined. Parts (1) and (2) of Proposition \ref{prop-bij-EF} imply that \eqref{restr-map} is injective.
Now suppose that $\phi':W_{F'}' \to {}^L\mathrm{GL}_{nF'}$ is an $L$-parameter such that $\phi'|_{E}$ is irreducible and $\mathrm{Gal}(E/F)$-invariant. By Lemma \ref{lem-bc-param} the restriction $\phi'|_{{E}}$ descends to an irreducible parameter $\varphi:W_F' \to {}^L\mathrm{GL}_{nF}$. By Frobenius reciprocity and Lemma \ref{lem-basic} we have \begin{align} \mathrm{Hom}_{E}(\varphi|_{E},\phi'|_{E})= \mathrm{Hom}_{{F'}}(\mathrm{Ind}_{E}^{F'}(\varphi|_{E}),\phi') =\oplus_{i}\mathrm{Hom}_{{F'}}(\rho_i \otimes \varphi|_{{F'}},\phi')^{\mathrm{deg}(\rho_i)} \end{align} where the sum is over a set of representatives for the irreducible representations of $\mathrm{Gal}(E/F')$. The first space is one-dimensional and hence so is the last. By Lemma \ref{lem-basic} $\rho_i \otimes \varphi|_{F'}$ is irreducible for all $i$, so by considering dimensions we see that $\rho_i \otimes \varphi|_{F'} \cong \phi'$ for some character $\rho_i$ of $\mathrm{Gal}(E/F')$. This proves the second claim of the proposition. We are left with the final assertion of the proposition. Since $\mathrm{Gal}(E/F')$ is solvable there is a chain of subfields $F'=E_0 \leq \cdots \leq E_n=E$ such that $E_j/E_{j-1}$ is cyclic of prime degree. Using this fact the final assertion follows from Lemma \ref{lem-restriction}. \end{proof} Motivated by Proposition \ref{prop-bij-EF'}, we make the following conjecture, which is an elaboration of a case of Langlands functoriality: \begin{conj} \label{conj-1} Let $E/F$ be a Galois extension of number fields and let $n$ be an integer such that \begin{itemize} \item $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple nonabelian group, and \item For every divisor $m|n$ there are no nontrivial irreducible representations $\mathrm{Gal}(E/F) \to \mathrm{GL}_{m}(\mathbb{C})$. \end{itemize} Let $E \geq F' \geq F$ be a subfield.
Every $E$-primitive automorphic representation $\pi$ of $\mathrm{GL}_n(\mathbb{A}_F)$ admits a unique base change $\pi_{F'}$ to $\mathrm{GL}_n(\mathbb{A}_{F'})$ and a unique base change to $\mathrm{GL}_n(\mathbb{A}_E)$, the first of which is an $E$-primitive automorphic representation. Thus base change induces an injection \begin{align*} b_{F'/F}:\Pi_n^{\mathrm{prim}}(E/F) &\longrightarrow \Pi_n^{\mathrm{prim}}(E/F')\\ \pi &\longmapsto \pi_{F'}. \end{align*} If $\pi'$ is a cuspidal automorphic representation of $\mathrm{GL}_n(\mathbb{A}_{F'})$ such that its base change $\pi'_E$ to $\mathrm{GL}_n(\mathbb{A}_E)$ is cuspidal and $\mathrm{Gal}(E/F)$-invariant then $\pi'_E$ descends to an automorphic representation of $\mathrm{GL}_n(\mathbb{A}_F)$. \end{conj} We also require a conjecture which can be addressed using endoscopic techniques, is discussed at length in \cite{Rajan3}, and is a theorem when $n=2$ \cite[Theorems 1 and 2]{Rajan3} or $\mathrm{Gal}(E/F')$ is cyclic \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}: \begin{conj} \label{conj-solv} Let $E/F'$ be a solvable extension of number fields and let $\Pi$ be a cuspidal automorphic representation of $\mathrm{GL}_n(\mathbb{A}_E)$. If $\Pi$ is $\mathrm{Gal}(E/F')$-invariant, then there is a $\mathrm{Gal}(E/F')$-invariant character $\chi \in (E^{\times} \backslash \mathbb{A}_E^{\times})^{\wedge}$ such that $\Pi \otimes \chi$ descends to $\mathrm{GL}_n(\mathbb{A}_{F'})$. If $H^2(\mathrm{Gal}(E/F'),\mathbb{C}^{\times})=0$, then $\chi$ can be taken to be the trivial character. Conversely, if $\pi'_1$, $\pi_2'$ are cuspidal automorphic representations of $\mathrm{GL}_n(\mathbb{A}_{F'})$ that both base change to a cuspidal automorphic representation $\Pi$ of $\mathrm{GL}_n(\mathbb{A}_E)$, then there is a unique $\chi \in \mathrm{Gal}(E/F')^{\wedge}$ such that $\pi_1' \cong \pi_2' \otimes \chi$.
\end{conj} \subsection{The icosahedral group} \label{ssec-icosa-gp} Assume that $n=2$ and that $$ \mathrm{Gal}(E/F) \cong \mathrm{SL}_2(\mathbb{Z}/5) \cong \widetilde{A}_5, $$ the universal perfect central extension of $A_5$. We fix such an isomorphism for the remainder of this section and view it as an identification: $\mathrm{Gal}(E/F)=\widetilde{A}_5$. In this subsection we describe the image and fibers of the base change map on $L$-parameters in this setting. This description is used as motivation for Conjecture \ref{conj-2} below, the conjecture used in the statement of Theorem \ref{main-thm-2} above. As remarked below Theorem \ref{main-thm-1}, if $n=2$ the case where $\mathrm{Gal}(E/F)$ is the universal perfect central extension of $A_5$ is the only case in which the hypotheses of Proposition \ref{prop-bij-EF} do not hold. Moreover, the conclusion of Proposition \ref{prop-bij-EF} does not hold. Indeed, any irreducible $2$-dimensional representation of $\mathrm{SL}_2(\mathbb{Z}/5)$ induces an irreducible $L$-parameter $\varphi:W_{F}' \to {}^L\mathrm{GL}_{2F}$ such that $\varphi|_{E}$ is the direct sum of two copies of the trivial representation. For our purposes it is more important to find an analogue of Proposition \ref{prop-bij-EF'}. The facts from group theory that we require in this subsection are collected in \S \ref{appendix}. Fix an injection $A_4 \hookrightarrow A_5$ and let $\widetilde{A}_4$ denote the inverse image of $A_4$ under the projection map $\widetilde{A}_5 \to A_5$. It is a double cover of $A_4$. \begin{lem} \label{lem-gen} Let $\tau \in \mathrm{Gal}(E/F)$ be of order $5$. Then $\langle \tau ,\widetilde{A}_4\rangle=\mathrm{Gal}(E/F)$. \end{lem} \begin{proof} By Lagrange's theorem, for any element $\tau \in \mathrm{Gal}(E/F)$ of order $5$ the group $\langle \tau,\widetilde{A}_4 \rangle$ has order divisible by $(5)(24)=120$.
\end{proof} Our analogue of Proposition \ref{prop-bij-EF'} is the following proposition: \begin{prop} \label{prop-A5-EF} Assume that $F'=E^{\widetilde{A}_4}$ and $\tau \in \mathrm{Gal}(E/F)$ is of order $5$. In this case restriction of parameters induces a map \begin{align} \label{restr-map2} b_{F'/F}:\Phi_2^0(F) &\longrightarrow \Phi_2^0(F')\\ \varphi &\longmapsto \varphi|_{{F'}}. \nonumber \end{align} If $\phi':W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ is an $L$-parameter such that $\phi'|_{E}$ is irreducible and $\mathrm{Gal}(E/F)$-invariant then $\phi' \otimes \chi'$ is in the image of the restriction map \eqref{restr-map2} for a unique $\chi' \in \mathrm{Gal}(E/F')^{\wedge}$. If $\phi'|_{E}$ is reducible and $\mathrm{Hom}_{E}(\phi'|_{E},\phi'|_{E}^{\tau}) \neq 0$ then $\phi'|_E$ is the restriction of a parameter $\phi:W_F' \to {}^L\mathrm{GL}_{2F}$. There are exactly two nonisomorphic irreducible $\phi_1,\phi_2 :W_{F}' \to {}^L\mathrm{GL}_{2F}$ such that $\phi|_{E} \cong \phi_{1}|_{E} \cong \phi_{2}|_{E}$. \end{prop} \begin{proof} We first check that an irreducible $L$-parameter $\varphi$ as above restricts to an irreducible $L$-parameter on $W_{F'}'$. Since $\mathrm{SL}_2(\mathbb{Z}/5)$ is perfect, there are no subgroups of $\mathrm{SL}_2(\mathbb{Z}/5)$ of index $2$. Since $\varphi$ has degree $2$ it follows from Lemma \ref{lem-restriction} that $\varphi$ is not induced, and hence $\varphi|_{E}$ is either irreducible or $$ \varphi \cong \chi \otimes \rho $$ for some character $\chi:W_F' \to {}^L\mathrm{GL}_{1F}$ and some irreducible representation $$ \rho:\mathrm{Gal}(E/F) \to \mathrm{GL}_2(\mathbb{C}). $$ In the former case $\varphi|_{F'}$ is also irreducible, and hence we are done. Suppose on the other hand that $\varphi \cong \chi \otimes \rho$. Notice that any irreducible two-dimensional representation of $\mathrm{Gal}(E/F)$ is necessarily faithful.
Indeed, the only nontrivial proper normal subgroup of $\mathrm{Gal}(E/F)$ is its center, and if such a representation were trivial on the center it would descend to a representation of $A_5$, a group that has no irreducible two-dimensional representations. Since $\mathrm{Gal}(E/F')$ is nonabelian $\rho(\mathrm{Gal}(E/F'))$ is nonabelian and it follows that $\rho|_{{F'}}$ is irreducible and hence so is $\varphi|_{F'}$. The second statement of the proposition is proved by the same argument as the analogous statement in Proposition \ref{prop-bij-EF'}. For the last assertion assume that $\phi':W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ is an $L$-parameter such that $\phi'|_{E}$ is reducible. It follows from Lemma \ref{lem-restriction} that there is an element $\sigma_0 \in \mathrm{Gal}(E/F')$ of order dividing $2$ and a character $\chi_0:W_E' \to {}^L\mathrm{GL}_{1E}$ such that $\phi'|_{E} \cong \chi_0 \oplus \chi_0^{\sigma_0}$. Since $\phi'|_E \cong \phi'|_E^{\sigma}$ for all $\sigma \in \mathrm{Gal}(E/F)$, the group $W_{F'}' / W_E'$ acts by conjugation on these two factors and this action defines a homomorphism $\mathrm{Gal}(E/F')\cong W_{F'}'/W_E' \to \mathbb{Z}/2$. Now $\widetilde{A}_4$ has no subgroup of index two, so this implies that the homomorphism $\widetilde{A}_4 \to \mathbb{Z}/2$ just considered is trivial and hence the action of $W_{F'}'/W_E'$ on the pair $\{\chi_0,\chi_0^{\sigma_0}\}$ is trivial. It follows in particular that $\chi_0 \cong \chi_0^{\sigma_0}$ and additionally $\chi_0$ is isomorphic to all of its $\mathrm{Gal}(E/F')$-conjugates. If additionally $\mathrm{Hom}_{E}(\phi'|_{E},\phi'|_{E}^{\tau}) \neq 0$ then $\chi_0$ is fixed under $\mathrm{Gal}(E/F')$ and $\tau$ and hence it is isomorphic to all of its $\mathrm{Gal}(E/F)$-conjugates by Lemma \ref{lem-gen}. Thus $\chi_0$ descends to a character $\chi:W_F' \to {}^L\mathrm{GL}_{1F}$ by Lemma \ref{lem-bc-param}.
Let $\rho_2$ be a rank two irreducible representation of $\mathrm{Gal}(E/F)$ with character $\theta_2$ in the notation of \S \ref{appendix} and let $\langle \xi \rangle =\mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$. Then $$ \rho_2 \otimes \chi \quad \textrm{ and } \quad \xi \circ \rho_2 \otimes \chi $$ are two nonisomorphic $L$-parameters from $W_F'$ with restriction to $W_E'$ isomorphic to $\phi'|_E$. If $\phi:W_F' \to {}^L\mathrm{GL}_{2F}$ is any $L$-parameter with $\phi|_E \cong \phi'|_{E}$, then \begin{align} \label{1-frob} 2=\mathrm{dim}(\mathrm{Hom}_{E}(\chi|_{E},\phi|_{E}))= \mathrm{dim}(\mathrm{Hom}_{{F}}(\mathrm{Ind}_{{E}}^{{F}}(\chi|_E),\phi)). \end{align} Now by Lemma \ref{lem-basic} $$ \mathrm{Ind}_{{E}}^{{F}}(\chi|_{{E}})\cong \mathrm{Ind}_{{E}}^{{F}}(1)\otimes \chi. $$ This combined with \eqref{1-frob} implies that $$ \phi \cong \rho_2 \otimes \chi \textrm{ or }\phi \cong \xi \circ \rho_2 \otimes \chi. $$ \end{proof} Motivated by Proposition \ref{prop-A5-EF} and Proposition \ref{prop-bij-EF} we propose the following conjecture. It is the (conjectural) translation of Proposition \ref{prop-A5-EF} and part (1) of Proposition \ref{prop-bij-EF} into a statement on automorphic representations. \begin{conj} \label{conj-2} In the setting of Proposition \ref{prop-A5-EF} above, each cuspidal automorphic representation $\pi$ of $\mathrm{GL}_2(\mathbb{A}_F)$ admits a unique cuspidal base change to $\mathrm{GL}_2(\mathbb{A}_{F'})$ and a unique base change to an isobaric automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$. If $\pi'$ is a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{F'})$ such that $\pi'_E$ is cuspidal and $\mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E) \neq 0$, then there is a unique cuspidal automorphic representation $\pi$ of $\mathrm{GL}_2(\mathbb{A}_F)$ that has $\pi'_E$ as a base change.
If $\pi'_E$ is not cuspidal and $\mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E) \neq 0$ then there are precisely two isomorphism classes of cuspidal automorphic representations of $\mathrm{GL}_2(\mathbb{A}_F)$ that base change to $\pi'_E$. \end{conj} \begin{rem} In understanding the analogy between Proposition \ref{prop-A5-EF} and Conjecture \ref{conj-2} it is helpful to recall that if $\pi'_E$ is cuspidal and $\mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E)\neq 0$ then $\pi'_E$ is isomorphic to all of its twists under elements of $\langle \mathrm{Gal}(E/F'),\tau\rangle=\mathrm{Gal}(E/F)$. \end{rem} \subsection{Motivating conjectures \ref{conj-32} and \ref{conj-33}} \label{ssec-artin-conj} In this section we prove some lemmas on restriction of $L$-parameters along subfields of an $\widetilde{A}_5$-extension and then state the conjectures (namely conjectures \ref{conj-32} and \ref{conj-33}) that are the translation of these statements to the context of automorphic representations. These conjectures are used in the statement of Theorem \ref{main-thm-3} above. As above, we identify $\mathrm{Gal}(E/F) =\widetilde{A}_5$. Fix an embedding $\mathbb{Z}/2 \times \mathbb{Z}/2 \hookrightarrow A_5$ and let $Q \hookrightarrow \widetilde{A}_5$ be its inverse image under the quotient map $\widetilde{A}_5 \to A_5$; it is isomorphic to the quaternion group. \begin{lem} \label{lem-A5-EF} Let $F'=E^Q$. For all quasi-characters $\chi_0:W_{E}' \to {}^L\mathrm{GL}_{1E}$ invariant under $\mathrm{Gal}(E/F')$ there is an irreducible parameter $\varphi':W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ such that $\varphi'|_E \cong \chi_0 \oplus \chi_0$. The parameter $\varphi'$ is unique up to isomorphism. Let $\varphi:W_F' \to {}^L\mathrm{GL}_{2F}$ be an irreducible $L$-parameter such that $\varphi|_E\cong \chi_0 \oplus \chi_0$ where $\chi_0:W_E' \to {}^L\mathrm{GL}_{1E}$ is $\mathrm{Gal}(E/F)$-invariant.
Then $\varphi|_{F'}$ is irreducible, and there are precisely two distinct equivalence classes of $L$-parameters in $\Phi^0_2(F)$ that restrict to $\varphi|_{F'}$. Conversely, if $\varphi': W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ is an irreducible parameter such that $\varphi'|_E \cong \chi_0 \oplus \chi_0$ for some quasi-character $\chi_0:W_{E}' \to {}^L\mathrm{GL}_{1E}$ invariant under $\mathrm{Gal}(E/F)$, then $\varphi'$ extends to an $L$-parameter on $W_F'$. \end{lem} \begin{proof} One can twist by $\chi_0^{-1}$ and its extension to $W_F'$ to reduce the lemma to the case where $\chi_0$ is trivial (recall that a $\mathrm{Gal}(E/F')$ (resp.~$\mathrm{Gal}(E/F)$)-invariant quasi-character descends by Lemma \ref{lem-bc-param} and the fact that both of these groups have trivial Schur multiplier). In this case the lemma follows immediately from the character tables included in \S \ref{appendix} below (see Lemma \ref{lem-Q} in particular). \end{proof} The following is the conjectural translation of this statement (via Langlands functoriality) into the language of automorphic representations: \begin{conj} \label{conj-32} Let $F'=E^Q$. Let $\pi$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_F)$ with base change $\chi_0 \boxplus \chi_0$ to an isobaric automorphic representation of $\mathrm{GL}_2(\mathbb{A}_E)$. Then $\pi$ admits a base change $\pi_{F'}$ to $\mathrm{GL}_2(\mathbb{A}_{F'})$ that is cuspidal. There are precisely two distinct isomorphism classes of cuspidal automorphic representations of $\mathrm{GL}_2(\mathbb{A}_{F})$ that base change to $\pi_{F'}$. Conversely, if $\pi'$ is a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{F'})$ such that $\pi'_E \cong \chi_0 \boxplus \chi_0$ where $\chi_0$ is $\mathrm{Gal}(E/F)$-invariant, then $\pi'$ descends to a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{F})$.
\end{conj} The situation for $n=3$ is similar: \begin{lem} \label{lem-A5-EF3} Let $F'=E^{\widetilde{A}_4}$. Let $\chi_0:W_{E}' \to {}^L\mathrm{GL}_{1E}$ be a character invariant under $\mathrm{Gal}(E/F')$. There is an irreducible parameter $\varphi':W_{F'}' \to {}^L\mathrm{GL}_{3F'}$ such that $\varphi'|_{W_E'} \cong \chi_0^{\oplus 3}$, unique up to isomorphism. Let $\varphi:W_F' \to {}^L\mathrm{GL}_{3F}$ be an irreducible $L$-parameter such that $\varphi|_E\cong \chi_0^{\oplus 3}$ where $$ \chi_0:W_E' \to {}^L\mathrm{GL}_{1E} $$ is $\mathrm{Gal}(E/F)$-invariant. Then $\varphi|_{F'}$ is irreducible, and there are precisely two inequivalent isomorphism classes of $L$-parameters in $\Phi^0_3(F)$ that restrict to the isomorphism class of $\varphi|_{F'}$. Conversely, if $\varphi': W_{F'}' \to {}^L\mathrm{GL}_{3F'}$ is an irreducible parameter such that $\varphi'|_E \cong \chi_0^{\oplus 3}$ for some quasi-character $\chi_0:W_{E}' \to {}^L\mathrm{GL}_{1E}$ invariant under $\mathrm{Gal}(E/F)$, then $\varphi'$ extends to an $L$-parameter on $W_F'$. \end{lem} \begin{proof}One can twist by $\chi_0^{-1}$ and its extension to $W_F'$ to reduce the lemma to the case where $\chi_0$ is trivial (recall that a $\mathrm{Gal}(E/F')$ (resp.~$\mathrm{Gal}(E/F)$)-invariant quasi-character descends by Lemma \ref{lem-bc-param} and the fact that both of these groups have trivial Schur multiplier). In this case the lemma follows immediately from the character tables included in \S \ref{appendix} (see Lemma \ref{lem-tetra-reps} in particular). \end{proof} The corresponding conjecture is the following: \begin{conj} \label{conj-33} Let $F'=E^{\widetilde{A}_4}$. Let $\pi$ be a cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_F)$ with base change $\chi_0^{\boxplus 3}$ to an isobaric automorphic representation of $\mathrm{GL}_3(\mathbb{A}_E)$.
Then $\pi$ admits a base change $\pi_{F'}$ to $\mathrm{GL}_3(\mathbb{A}_{F'})$ that is cuspidal. There are precisely two nonisomorphic cuspidal automorphic representations of $\mathrm{GL}_3(\mathbb{A}_{F})$ that base change to $\pi_{F'}$. Conversely, if $\pi'$ is a cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_{F'})$ such that $\pi'_E \cong \chi_0^{\boxplus 3}$ where $\chi_0$ is $\mathrm{Gal}(E/F)$-invariant, then $\pi'$ descends to a cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_{F})$. \end{conj} \subsection{Appendix: The representations of some binary groups} \label{appendix} In \S \ref{ssec-icosa-gp} and \S \ref{ssec-artin-conj} we studied the problem of base change along an extension $E/F$ where $\mathrm{Gal}(E/F)$ was isomorphic to the binary icosahedral group, that is, the universal perfect central extension $\widetilde{A}_5$ of the alternating group $A_5$ on $5$ letters. Fix an embedding $A_4 \hookrightarrow A_5$, and let $\widetilde{A}_4$ be the inverse image of $A_4$ under the quotient map $\widetilde{A}_5 \to A_5$. Similarly fix an embedding $\mathbb{Z}/2 \times \mathbb{Z}/2 \hookrightarrow A_5$ and let $Q$ be the inverse image of $\mathbb{Z}/2 \times \mathbb{Z}/2$ under the quotient map $\widetilde{A}_5 \to A_5$. In \S \ref{ssec-icosa-gp} and \S \ref{ssec-artin-conj} we required various properties of the representations of $\widetilde{A}_5$, $\widetilde{A}_4$, and $Q$. We collect these properties in this subsection for ease of reference. We now write down the character table of $\widetilde{A}_5$. For $n \in \{1,2,3,4,6\}$ let $C_n$ be the unique conjugacy class of $\widetilde{A}_5$ consisting of the elements of order $n$. Let $C_5$ and $C_5'$ be the two conjugacy classes of elements of order $5$, and if $g \in C_5$ (resp.~$C_5'$) let $C_{10}$ (resp.~$C_{10}'$) be the conjugacy class of $-g$ (viewed as a matrix in $\mathrm{SL}_2(\mathbb{Z}/5) \cong \widetilde{A}_5$).
The degree of an irreducible representation is given by its subscript. We let $u,v$ be the distinct roots of the polynomial $x^2-x-1$. The following character table is in \cite[\S 7]{Buhler} (see \cite[Proof of Lemma 5.1]{KimIcos} for corrections). \begin{center} \begin{tabular}{ l | c |c |c | c | c | c | c| c |c |} & $C_1$ & $C_2$ & $C_4$ & $C_3$ & $C_6$ & $C_5$ & $C_{10}$ & $C_{5}'$ & $C_{10}'$ \\ \hline $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$\\ $\theta_3$ & $3$ & $3$ & $-1$ & $0$ & $0$ & $u$ & $u$ & $v$ & $v$ \\ $\theta_3'$ & $3$ & $3$ & $-1$ & $0$ & $0$ & $v$ & $v$ & $u$ & $u$ \\ $\theta_4$ & $4$ & $4$ & $0$ & $1$ & $1$ & $-1$ & $-1$ & $-1$ & $-1$\\ $\theta_5$ & $5$ &$5$ & $1$ & $-1$ & $-1$ & $0$ & $0$ & $0$ & $0$\\ $\theta_2$ & $2$ & $-2$ & $0$ & $-1$ & $1$ & $u-1$ & $1-u$ & $v-1$ & $1-v$ \\ $\theta_2'$ & $2$ & $-2$ & $0$ & $-1$ & $1$ & $v-1$ & $1-v$ & $u-1$ & $1-u$ \\ $\theta_4'$ & $4$ & $-4$ & $0$ & $1$ & $-1$ & $-1$ & $1$ & $-1$ & $1$ \\ $\theta_6$ & $6$ & $-6$ & $0$ & $0$ & $0$ & $1$ & $-1$ & $1$ & $-1$ \end{tabular} \end{center} Let $\chi$ be a nontrivial character of $\widetilde{A}_4$. It is of order $3$, as $\widetilde{A}_4^{\mathrm{ab}} \cong \mathbb{Z}/3$. Using the character table above, one proves the following lemma \cite[Lemmas 5.1--5.3]{KimIcos}: \begin{lem} \label{lem-icosa-reps}Let $\langle \xi \rangle =\mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$.
The following is a complete list of irreducible characters of $\widetilde{A}_5$: \begin{enumerate} \item trivial \item $\theta_2$, $\xi \circ \theta_2$ ($2$-dimensional) \item $\mathrm{Sym}^2(\theta_2)$, $\mathrm{Sym}^2(\xi \circ \theta_2)$ ($3$-dimensional) \item $\mathrm{Sym}^3(\theta_2)=\mathrm{Sym}^3(\xi \circ \theta_2)$, $\theta_2 \otimes \xi \circ \theta_2$ ($4$-dimensional) \item $\mathrm{Ind}_{\widetilde{A}_4}^{\widetilde{A}_5}(\chi) = \mathrm{Sym}^4(\theta_2) = \mathrm{Sym}^4(\xi \circ \theta_2)$ ($5$-dimensional) \item $\mathrm{Sym}^2(\theta_2) \otimes \xi \circ \theta_2 =\theta_2 \otimes \mathrm{Sym}^2(\xi \circ \theta_2)=\mathrm{Sym}^5(\theta_2)$ ($6$-dimensional) \end{enumerate} The two characters of each degree $2$, $3$, and $4$ given above are not equivalent. \end{lem} \begin{rem} The fact that $\mathrm{Sym}^4(\theta_2) = \mathrm{Ind}_{\widetilde{A}_4}^{\widetilde{A}_5}(\chi)$ was observed by D.~Ramakrishnan (see \cite{KimIcos}). We point this out because it turns out to be an important fact for the arguments of \S \ref{ssec-trace-to-func2} below. \end{rem} Next we discuss the representations of $\widetilde{A}_4$. Write $t=(123)$ and let $\overline{C}_{t^i}$ be the conjugacy class of $t^i$ in $A_4$ for $i \in \{1,2\}$. The inverse image of $\overline{C}_{t^i}$ is a union of two conjugacy classes $C_{t^i},C_{t^i}'$ for each $i \in \{1,2\}$. We assume that for $c \in C_{t^i}$ one has $|c|=3$ and for $c' \in C_{t^i}'$ one has $|c'|=6$. Write $C_2$ for the conjugacy class of elements of order $2$ and $C_4$ for the conjugacy class of elements of order $4$.
One has the following character table: \begin{center} \begin{tabular}{ l | c |c |c | c | c | c | c } & $C_1$ & $C_2$ & $C_4$ & $C_t$ & $C_t'$& $C_{t^2}$ & $C_{t^2}'$ \\ \hline $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$ & $1$\\ $\psi_1$ & $1$ & $1$ & $1$ & $e^{2 \pi i/3}$ & $e^{2 \pi i/3}$ & $e^{4 \pi i/3}$ & $e^{4 \pi i/3}$ \\ $\psi_1^2$ & $1$& $1$ & $1$ & $e^{4 \pi i/3}$ & $e^{4 \pi i/3}$ & $e^{2 \pi i/3}$ & $e^{2 \pi i/3}$ \\ $\psi_3$ & $3$ & $3$ & $-1$ & $0$ & $0$ & $0$ & $0$\\ $\psi_2$ & $2$ & $-2$ & $0$ & $-1$ & $1$ & $-1$ & $1$\\ $\psi_2 \psi_1$ &$2$ &$-2$ &$0$& $-e^{2 \pi i/3}$& $e^{2 \pi i/3}$& $-e^{4\pi i/3}$& $e^{4 \pi i/3}$\\ $\psi_2\psi_1^2$ &$2$ & $-2$&$0$& $-e^{4 \pi i/3}$ & $e^{4 \pi i/3}$ & $-e^{2 \pi i/3}$ & $e^{2 \pi i/3}$ \end{tabular} \end{center} We make a few comments on the computation of this table. First, the characters that are lifts of characters of $A_4$ are computed in \cite[\S 5.7]{SerreFG}. Second, we note that $\psi_2:=\theta_2|_{\widetilde{A}_4}$ is irreducible. Indeed, the only normal subgroup of $\widetilde{A}_5$ is the center and $\theta_2$ is not the restriction of a character of $A_5$ since there are no rank two characters of $A_5$. Thus any representation with character $\theta_2$ is faithful. Since $\theta_2$ is of degree $2$, if the representation with character $\theta_2|_{\widetilde{A}_4}$ were reducible, it would provide an embedding of $\widetilde{A}_4$ into an abelian group. Since $\widetilde{A}_4$ is nonabelian, this shows that $\theta_2|_{\widetilde{A}_4}$ is irreducible. Its character values therefore follow from the character table for $\widetilde{A}_5$ above. The fact that the characters $\psi_2\psi_1^i$ are distinct for distinct $i \in \{1,2,3\}$ follows by considering determinants.
Using the fact that the sum of the squares of the degrees of the irreducible characters must equal the order of the group, we see that the table is complete. \begin{lem} \label{lem-tetra-reps} Let $\langle \xi \rangle=\mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$. One has $$ \theta_2|_{\widetilde{A}_4}=\xi \circ \theta_2|_{\widetilde{A}_4}=\psi_2. $$ Moreover $$ \theta_3|_{\widetilde{A}_4}=\psi_3. $$ \end{lem} \begin{proof} This follows immediately from the character tables above. \end{proof} Finally we record the character table for the quaternion group $Q$. We present the group as $$ Q= \langle i, j : i^4=1,\, \,i^2=j^2,\, \,i^{-1}ji=j^{-1} \rangle. $$ Denoting by $C_x$ the conjugacy class of an element $x \in Q$, one has the following character table \cite[\S 19.1]{DF}: \begin{center} \begin{tabular}{ l | c |c |c | c | c | } & $C_1$ & $C_{-1}$ & $C_{i}$ & $C_{j}$ & $C_{ij}$ \\ \hline $1$ & $1$ & $1$ & $1$ & $1$ & $1$ \\ $\Theta_1$ & $1$ & $1$ & $-1$ & $1$ & $-1$ \\ $\Theta_1'$ & $1$ & $1$ & $1$ & $-1$ & $-1$ \\ $\Theta_1\Theta_1'$ & $1$ & $1$ & $-1$ & $-1$ & $1$\\ $\Theta_2$ & $2$ & $-2$ & $0$ & $0$ & $0$ \end{tabular} \end{center} We note that as before the subscript indicates the degree of the representation. By examining the character tables of $\widetilde{A}_5$ and $Q$ one immediately deduces the following lemma: \begin{lem} \label{lem-Q} Let $\langle \xi \rangle=\mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$. One has $\theta_2|_{Q}=\xi \circ \theta_2|_Q=\Theta_2$. \qed \end{lem} \section{Proofs of the main theorems} \label{sec-proofs} In this section we prove the theorems stated in the introduction. \subsection{Preparation} The propositions of this subsection will be used in the proof of our main theorems in \S \ref{ssec-func-to-trace} and \S \ref{ssec-trace-to-func} below.
\begin{prop} \label{prop-solv} Let $E/F'$ be a Galois extension with $\mathrm{Gal}(E/F') \cong \widetilde{A}_4$ and let $\pi'$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{F'})$. There are precisely $|\mathrm{Gal}(E/F')^{\mathrm{ab}}|$ non-isomorphic cuspidal automorphic representations of $\mathrm{GL}_2(\mathbb{A}_{F'})$ that have $\pi'_E$ as a base change. \end{prop} \begin{prop} \label{prop-solv3} Let $E/F'$ be a Galois extension with $\mathrm{Gal}(E/F') \cong \widetilde{A}_4$ and let $\pi'$ be a cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_{F'})$. If $\pi'_E\cong \chi_0^{\boxplus 3}$ where $\chi_0$ is a quasi-character invariant under $\mathrm{Gal}(E/F')$, then there is a unique cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_{F'})$ that has $\pi'_E$ as a base change. It is of $\rho_3$-type, where $\rho_3: W_{F'}' \to {}^L\mathrm{GL}_{3F'}$ is a representation trivial on $W_E'$ that has character equal to the unique degree three irreducible character of $\widetilde{A}_4$. \end{prop} These propositions correspond to the first (and easiest) assertions on $L$-parameters in lemmas \ref{lem-A5-EF} and \ref{lem-A5-EF3}, respectively. They will be proven in a moment after some preparation. Let $P \leq \mathrm{GL}_{n}$ be a parabolic subgroup and let $P=MN$ be its Levi decomposition. Suppose that $\Pi_M$ is a cuspidal automorphic representation of $M(E) A_{\mathrm{GL}_{nE}} \backslash M(\mathbb{A}_E)$ and that $$ \Pi=\mathrm{Ind}_{M(\mathbb{A}_E)}^{\mathrm{GL}_{n}(\mathbb{A}_E)}(\Pi_M) $$ is an (irreducible) automorphic representation of $\mathrm{GL}_n(E) A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_E)$. Here $\Pi_M$ is extended to a representation of $P(\mathbb{A}_E)$ by letting the action of $N(\mathbb{A}_E)$ be trivial. We note that $\Pi$ is irreducible and unitary \cite[Chapter 3, \S 4]{AC}.
Write $M=\prod_{i} \mathrm{GL}_{n_i}$ for some set of integers $n_i \geq 1$ and $\Pi_M=\otimes_i\Pi_i$ where the $\Pi_i$ are cuspidal automorphic representations of $\mathrm{GL}_{n_i}(E) \backslash \mathrm{GL}_{n_i}(\mathbb{A}_E)$. \begin{lem} \label{lem-const-term} Suppose that $E/F'$ is a Galois extension of number fields and $\Pi^{\sigma} \cong \Pi$ for all $\sigma \in \mathrm{Gal}(E/F')$. Then $\{\Pi_i\}=\{\Pi_i^{\sigma}\}$ for all $\sigma \in \mathrm{Gal}(E/F')$. \end{lem} \begin{proof} Since $\Pi$ is induced from cuspidal we use the theory of Eisenstein series to view $\Pi$ as a subrepresentation (not just subquotient) of $L^2(\mathrm{GL}_n(E) A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_E))$. Let $V_{\Pi} \leq L^2(\mathrm{GL}_n(E) A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_E))$ be the subspace of $\Pi$-isotypic automorphic forms. Consider the constant term $$ \phi_P(m):=\int_{N(E) \backslash N(\mathbb{A}_E)} \phi(nm)dn. $$ It is an automorphic form on $M(\mathbb{A}_E)$ \cite[Lemma 4]{LanglNotion}. There is a natural action of $\mathrm{Gal}(E/F')$ on $L^2( M(E)A_{\mathrm{GL}_{nE}} \backslash M(\mathbb{A}_E))$. By the normal basis theorem one has $d(n^{\sigma})=dn$ for all $\sigma \in \mathrm{Gal}(E/F')$, and hence the map \begin{align*} V_{\Pi} &\longrightarrow L^2(A_{\mathrm{GL}_{nE}} M(E) \backslash M(\mathbb{A}_E))\\ \phi &\longmapsto \phi_P \end{align*} is $\mathrm{Gal}(E/F')$-equivariant. Using the theory of Eisenstein series, specifically \cite[Propositions II.1.7 and IV.1.9]{MW}, it follows that $\mathrm{Gal}(E/F')$ preserves the set of representations $\Pi_{1M}$ of $M(E) A_{\mathrm{GL}_{nE}} \backslash M(\mathbb{A}_E)$ such that $\Pi$ is a constituent of $\mathrm{Ind}_{M(\mathbb{A}_E)}^{\mathrm{GL}_{n}(\mathbb{A}_E)}(\Pi_{1M})$.
Here, as before, we are extending $\Pi_{1M}$ to a representation of $P(\mathbb{A}_E)$ by letting $N(\mathbb{A}_E)$ act trivially. To make this statement easier for the reader to check, we note that our assumptions imply that $\Pi$ is not in the discrete spectrum, so no residues of Eisenstein series come into play (see \cite{MW2} for the classification of the discrete non-cuspidal spectrum of $\mathrm{GL}_n$). By the results contained in \cite[(4.3)]{JSII} on isobaric automorphic representations, the lemma follows. \end{proof} We now prove Proposition \ref{prop-solv}: \begin{proof}[Proof of Proposition \ref{prop-solv}] Recall that $H^2(\mathrm{Gal}(E/F'),\mathbb{C}^{\times})=H^2(\widetilde{A}_4,\mathbb{C}^{\times})=0$. Thus if $\pi'_E$ is cuspidal then the proposition is \cite[Theorem 2]{Rajan3}. In the remainder of the proof we will constantly use facts on cyclic prime degree base change established in \cite{Langlands}. A convenient list of the basic properties (in a more general setting) is given in \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}. Assume that $\pi'_E$ is not cuspidal. By the theory of prime degree base change we must then have $\pi'_E \cong \chi_0 \boxplus \chi_0^{\sigma_0}$ for some quasi-character $\chi_0:E^{\times} \backslash \mathbb{A}_E^{\times} \to \mathbb{C}^{\times}$ and some $\sigma_0 \in \mathrm{Gal}(E/F')$. Therefore we can apply Lemma \ref{lem-const-term} to see that $\widetilde{A}_4$ permutes the two-element set $\{\chi_0,\chi_0^{\sigma_0}\}$. Since $\mathrm{Gal}(E/F') \cong \widetilde{A}_4$ has no subgroup of index two, one has $\chi_0^{\sigma}=\chi_0$ for all $\sigma \in \mathrm{Gal}(E/F')$. Since $\chi_0$ is $\mathrm{Gal}(E/F')$-invariant and $H^2(\mathrm{Gal}(E/F'),\mathbb{C}^{\times})=0$, Lemma \ref{lem-bc-param} implies that $\chi_0$ extends to a quasi-character $\chi'$ of $F'^{\times} \backslash \mathbb{A}_{F'}^{\times}$.
Thus, upon replacing $\pi'$ by $\pi' \otimes \chi'^{-1}$ if necessary, we see that to complete the proof of the proposition it suffices to show that there are $|\mathrm{Gal}(E/F')^{\mathrm{ab}}|$ distinct isomorphism classes of cuspidal automorphic representations $\pi'$ of $\mathrm{GL}_{2}(\mathbb{A}_{F'})$ such that $\pi'_E \cong 1 \boxplus 1$. We now look more closely at the structure of $\widetilde{A}_4$. Let $V = \mathbb{Z}/2 \times \mathbb{Z}/2$ denote the Klein four-group and fix an embedding $V \hookrightarrow A_4$. The inverse image $Q$ of $V$ under the covering map $\widetilde{A}_4 \to A_4$ is isomorphic to the quaternion group; it is a nonabelian group of order $8$. The subgroup $Q \leq \widetilde{A}_4$ is normal and the quotient $\widetilde{A}_4 \to \widetilde{A}_4/Q \cong \mathbb{Z}/3$ induces an isomorphism $$ \widetilde{A}_4^{\mathrm{ab}} \tilde{\longrightarrow} \mathbb{Z}/3. $$ Let $\mu$ be a nontrivial character of $F'^{\times} \backslash \mathbb{A}_{F'}^{\times}$ trivial on $\mathrm{N}_{E/F'}\mathbb{A}_E^{\times}$. Then since $\widetilde{A}_4^{\mathrm{ab}} \tilde{\longrightarrow}\mathbb{Z}/3$ we have $\mu^3=1$. The three cuspidal automorphic representations $\pi'$, $\pi' \otimes \mu$ and $\pi' \otimes \mu^2$ are all nonisomorphic (as can be seen by examining central characters) and all have the property that their base changes to $E$ are isomorphic to $1 \boxplus 1$. Therefore our task is to show that there are no other isomorphism classes of cuspidal automorphic representations of $\mathrm{GL}_2(\mathbb{A}_{F'})$ that base change to $1 \boxplus 1$. We note that $(\pi'\otimes \mu^i)_{E^Q}$ is independent of $i$ and is cuspidal by prime degree base change. Therefore it suffices to show that there is at most one cuspidal automorphic representation $\pi_0$ of $\mathrm{GL}_2(\mathbb{A}_{E^{Q}})$ whose base change to $\mathrm{GL}_2(\mathbb{A}_E)$ is $1 \boxplus 1$.
Let $\pi_0$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{E^Q})$ whose base change to $\mathrm{GL}_2(\mathbb{A}_E)$ is $1 \boxplus 1$. Choose a chain of subfields $E >E_1>E_2>E^Q$. We denote by $\chi_1 \in \mathrm{Gal}(E/E_2)^{\wedge}$ a character that restricts nontrivially to $\mathrm{Gal}(E/E_1)$. The theory of prime degree base change implies that $\pi_{0E_1}$ cannot be cuspidal since $1$ is invariant under $\mathrm{Gal}(E/E_1)$. Hence $\pi_{0E_1}$ must be isomorphic to one of \begin{align} 1 \boxplus 1, \quad 1 \boxplus \chi_1|_{\mathbb{A}_{E_1}^{\times}},\quad \textrm{ or } \quad \chi_1|_{\mathbb{A}_{E_1}^{\times}} \boxplus \chi_1|_{\mathbb{A}_{E_1}^{\times}}. \end{align} Thus applying the theory of prime degree base change again we see that $\pi_{0E_2}$ cannot be cuspidal. Now by assumption $\pi_0$ is cuspidal, and since $\pi_{0E_2}$ is not cuspidal $\pi_0$ is $E_2$-induced. In particular, $\pi_0=\pi(\phi)$ for an irreducible $L$-parameter $\phi:W_{E^Q}' \to {}^L\mathrm{GL}_{2E^Q}$ (compare \cite[\S 2 C)]{Langlands}). Note that $\phi$ is necessarily trivial on $W_E'$, and hence can be identified with a two-dimensional irreducible representation of $\mathrm{Gal}(E/E^Q)$. There is just one isomorphism class of such representations by the character table for $Q$ recorded in \S \ref{appendix}. It follows that $\pi_0$ is the unique cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{E^Q})$ whose base change to $\mathrm{GL}_{2}(\mathbb{A}_E)$ is $1 \boxplus 1$. As mentioned above, this implies the proposition. \end{proof} As a corollary of the proof, we have the following: \begin{cor} \label{cor-rho-type} Let $E/F'$ be a Galois extension with $\mathrm{Gal}(E/F') \cong \widetilde{A}_4$, and let $\pi'$ be a cuspidal automorphic representation of $\mathrm{GL}_2(\mathbb{A}_{F'})$.
If $\pi'_E$ is not cuspidal, then $\pi'$ is of $\rho$-type for some $L$-parameter $\rho$ trivial on $W_E'$. \end{cor} \begin{proof} The proof of Proposition \ref{prop-solv} implies that there is a character $\chi'$ of $F'^{\times} \backslash \mathbb{A}_{F'}^{\times}$ such that $(\pi' \otimes \chi'^{-1})_{E} \cong 1 \boxplus 1$, so it suffices to treat the case where $\pi'_E \cong 1 \boxplus 1$. By the argument in the proof of Proposition \ref{prop-solv} and using the notation therein we have that $\pi'_{E^Q}=\pi(\phi)$, where $\phi:W_{E^Q}' \to {}^L\mathrm{GL}_{2E^Q}$ is the unique irreducible $L$-parameter trivial on $W_E'$. Twisting $\pi'$ by an abelian character of $\widetilde{A}_4$ if necessary, we can and do assume that the central character of $\pi'$ is trivial. Thus we can apply \cite[\S 3]{Langlands} to conclude that $\pi'=\pi(\phi')$ for some $L$-parameter $\phi':W_{F'}' \to{}^L\mathrm{GL}_{2F'}$. \end{proof} We now prove Proposition \ref{prop-solv3}: \begin{proof}[Proof of Proposition \ref{prop-solv3}] The quasi-character $\chi_0$ descends to a quasi-character $\chi':F'^{\times} \backslash \mathbb{A}_{F'}^{\times} \to \mathbb{C}^{\times}$ by Lemma \ref{lem-bc-param} and the fact that $H^2(\mathrm{Gal}(E/F'),\mathbb{C}^{\times})=H^2(\widetilde{A}_4,\mathbb{C}^{\times})=0$. Replacing $\pi'$ by $\pi' \otimes \chi'^{-1}$ if necessary, we can and do assume that $\pi'_E\cong 1^{\boxplus 3}$. In the remainder of the proof we will constantly use facts on cyclic prime degree base change established in \cite{AC}. A convenient list of the basic properties is given in \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}. Let $Q \hookrightarrow \widetilde{A}_4$ be as in the proof of Proposition \ref{prop-solv}.
By the theory of cyclic prime-degree base change $$ \pi'_{E^Q} =\chi_1 \boxplus \chi_2 \boxplus \chi_3 $$ for some characters $\chi_i:(E^Q)^{\times} \backslash \mathbb{A}_{E^Q}^{\times} \to \mathbb{C}^{\times}$. Thus, by \cite[Chapter 3, Theorem 6.2]{AC} and its proof, since $\pi'$ is cuspidal we conclude that $$ \pi'\cong\mathrm{Ind}_{E^Q}^{F'}(\chi_1) $$ and hence is of $\rho$-type for some irreducible degree three $L$-parameter $\rho:W_{F'}' \to {}^L\mathrm{GL}_{3F'}$ trivial on $W_E'$. By the character table of $\widetilde{A}_4$ recorded in \S \ref{appendix}, we conclude that $\rho \cong \rho_3$ for $\rho_3$ as in the proposition. \end{proof} We also require the following linear independence statement: \begin{lem} \label{lem-lin-ind} Let $M \leq \mathrm{GL}_n$ be the maximal torus of diagonal matrices. Let $v$ be a place of $F$. Suppose that there is a countable set $\mathcal{X}$ of quasi-characters of $M(F_v)$ and that the set $\mathcal{X}$ is stable under the natural action of $W(M,\mathrm{GL}_n)$. Suppose moreover that $\{a(\chi_v)\}_{\chi_v \in \mathcal{X}}$ is a set of complex numbers such that for all $f_v \in C_c^{\infty}(M(F_v))^{W(M,\mathrm{GL}_n)}$ one has $$ \sum_{\chi_v \in \mathcal{X}} a(\chi_v)\mathrm{tr}(\chi_v)(f_v)=0 $$ where the sum is absolutely convergent. Then $$ \sum_{W \in W(M,\mathrm{GL}_n)}a(\chi_v \circ W)=0 $$ for each $\chi_v \in \mathcal{X}$. \end{lem} \begin{proof} By assumption \begin{align*} 0&=\sum_{\chi_v \in \mathcal{X}} \sum_{W \in W(M,\mathrm{GL}_n)}a(\chi_v)\mathrm{tr}(\chi_v)(f_v \circ W)\\ &=\sum_{\chi_v \in \mathcal{X}}\sum_{W \in W(M,\mathrm{GL}_n)} a(\chi_v)\mathrm{tr}(\chi_v \circ W^{-1})(f_v)\\ &=\sum_{\chi_v \in \mathcal{X}}\mathrm{tr}(\chi_v)(f_v)\sum_{W \in W(M,\mathrm{GL}_n)} a(\chi_v \circ W) \end{align*} for all $f_v \in C_c^{\infty}(M(F_v))$.
The result now follows from generalized linear independence of characters (see \cite[Lemma 6.1]{LabLan} and \cite[Lemma 16.1.1]{JacquetLanglands}). \end{proof} \subsection{Functoriality implies the trace identities} \label{ssec-func-to-trace} In this subsection we prove theorems \ref{main-thm-1}, \ref{main-thm-2}, and \ref{main-thm-3}, namely that the cases of Langlands functoriality explicated in conjectures \ref{conj-1} and \ref{conj-solv} in the first case, Conjecture \ref{conj-2} in the second case, and conjectures \ref{conj-32} and \ref{conj-33} in the third case imply the stated trace identities. By Corollary \ref{cor-aut-trace} the sum \begin{align*} \sum_{\pi'} \mathrm{tr}(\pi')(h^1b_{E/F'}(\Sigma_{\phi}^{S_0}(X))) \end{align*} is equal to $o(X)$ plus \begin{align*} \sum_{\pi'} \mathrm{tr}(\pi')(h^1) \mathrm{Res}_{s=1}\left( \widetilde{\phi}(s)X^sL(s,(\pi'_E \times \pi'^{\tau \vee}_E)^{S_0})\right). \end{align*} Here the sum is over a set of equivalence classes of automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$. Specifically, for \eqref{11} of Theorem \ref{main-thm-1} we take it to be over $E$-primitive representations, for \eqref{A21} of Theorem \ref{main-thm-2} and \eqref{31} of Theorem \ref{main-thm-3} we take it to be over all cuspidal representations, and for \eqref{B21} of Theorem \ref{main-thm-2} we take it to be over cuspidal representations not of $\rho$-type for any two-dimensional representation $\rho:W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ trivial on $W_E'$. The only nonzero contributions to this sum occur when $L(s,(\pi'_E \times \pi'^{\tau\vee}_E)^{S_0})$ has a pole, which implies that $\mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E) \neq 0$ (see \eqref{ord-pole}).
In this case if $\pi'_E$ is cuspidal it is then invariant under $\langle \mathrm{Gal}(E/F'),\tau \rangle=\mathrm{Gal}(E/F)$ and the pole is simple \eqref{ord-pole}. In view of conjectures \ref{conj-1} and \ref{conj-2}, in the setting of theorems \ref{main-thm-1} and \ref{main-thm-2} this implies that if $L(s,(\pi'_E \times \pi'^{\tau\vee}_E)^{S_0})$ has a pole then $\pi'_E$ descends to a cuspidal representation $\pi$ of $F$, whether or not $\pi'_E$ is cuspidal. On the other hand, the only nonzero contributions to the quantity \eqref{31} in Theorem \ref{main-thm-3} come from representations where $\dim \mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E)=n^2$, and this is the case if and only if $\pi'_E \cong \chi_0^{\boxplus n}$ where $\chi_0:E^{\times} \backslash \mathbb{A}_E^{\times} \to \mathbb{C}^{\times}$ is a quasi-character invariant under $\mathrm{Gal}(E/F) =\langle \mathrm{Gal}(E/F'),\tau \rangle$. In these cases $\pi'_E$ descends to a cuspidal representation of $\mathrm{GL}_n(\mathbb{A}_F)$ by conjectures \ref{conj-32} and \ref{conj-33}. Assume for the moment that we are in the setting of Theorem \ref{main-thm-1}. In this case by Conjecture \ref{conj-solv} there are precisely $|\mathrm{Gal}(E/F')^{\mathrm{ab}}|$ inequivalent cuspidal representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ that base change to $\pi'_E$, since $\pi'_E$ is cuspidal by the theory of prime degree base change \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}. With this in mind, the definition of transfer (see \S \ref{ssec-transfers} and Lemma \ref{lem-unr-transf}) completes the proof of the claimed trace identity. We only remark that the absolute convergence of the two sums follows from Corollary \ref{cor-aut-trace} and the fact that $L(s,(\pi'_E \times \pi'^{\tau \vee}_E)^{S_0})$ has a pole of order $\dim \mathrm{Hom}_I(\pi'_E,\pi'^{\tau}_E)$ (see \eqref{ord-pole}).
Now assume that we are in the setting of Theorem \ref{main-thm-2}. In this case $\pi'_E$ may not be cuspidal, but by Proposition \ref{prop-solv} there are still exactly $|\mathrm{Gal}(E/F')^{\mathrm{ab}}|$ non-isomorphic cuspidal automorphic representations of $\mathrm{GL}_2(\mathbb{A}_{F'})$ that base change to $\pi'_E$. With this in mind, the claimed trace identity follows as before. The proof of Theorem \ref{main-thm-3} is essentially the same. We only point out the most significant difference, namely that we are claiming that one can consider arbitrary Hecke functions in $C_c^{\infty}(\mathrm{GL}_n(F'_{S'_1})//\mathrm{GL}_n(\mathcal{O}_{F'S'_1}))$ instead of just those that are base changes of Hecke functions in $C_c^{\infty}(\mathrm{GL}_n(E_{S_{10}})//\mathrm{GL}_n(\mathcal{O}_{ES_{10}}))$. The reason this is possible is that for each $\mathrm{Gal}(E/F')$-invariant quasi-character $\chi_0:E^{\times} \backslash \mathbb{A}_E^{\times} \to \mathbb{C}^{\times}$ there is a unique cuspidal automorphic representation of $\mathrm{GL}_n(\mathbb{A}_{F'})$ such that $\pi'_E \cong \chi_0^{\boxplus n}$ by propositions \ref{prop-solv} and \ref{prop-solv3}. \qed \subsection{The trace identity implies functoriality: first two cases} \label{ssec-trace-to-func} In this subsection we prove theorems \ref{main-thm-1-conv} and \ref{main-thm-2-conv}, namely that the trace identities of theorems \ref{main-thm-1} and \ref{main-thm-2} imply the corresponding cases of functoriality under the assumption of a supply of transfers (more precisely, under Conjecture \ref{conj-transf}). 
By assumption, for all $h$ unramified outside of $S'$ with transfer $\Phi$ unramified outside of $S$ one has an identity \mathbf Egin{align} \label{id1} &\lim_{X \to \infty}|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1}X^{-1} \mathbb{S}um'_{{\sf p}i'} \mathrm{tr}({\sf p}i')(h^1b_{E/F'}(\Sigma^{S_0}_{{\sf p}hi}(X)) \\&= \lim_{X \to \infty} X^{-1}\mathbb{S}um'_{{\sf p}i} \mathrm{tr}({\sf p}i)( \Phi^1b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))). \nonumber \end{align} Here for the proof of Theorem \ref{main-thm-1-conv}, the sums are over a set of representatives for the equivalence classes of $E$-primitive automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ and $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_F)$, respectively. For the proof of Theorem \ref{main-thm-2-conv}, the sums are over a set of representatives for the equivalence classes of cuspidal automorphic representations of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ and $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_F)$, respectively, that are not of $\rho$-type for $\rho$ trivial on $W_E'$. We start by refining \eqref{id1}. Notice that each representation ${\sf p}i'$ appearing in \eqref{id1} above admits a base change ${\sf p}i'_E$ to $\mathrm{GL}_n(\mathbb{A}_E)$ by a series of cyclic base changes. We claim that ${\sf p}i'_E$ is cuspidal. In Theorem \ref{main-thm-1-conv} we have assumed that ${\sf p}i'$ is $E$-primitive. Hence, by the theory of cyclic base change, ${\sf p}i'_E$ must be cuspidal \cite[Chapter 3, Theorem 4.2 and Theorem 5.1]{AC}. In Theorem \ref{main-thm-2-conv} we have assumed that ${\sf p}i'$ is not of $\rho$-type for any $\rho$ trivial on $W_E'$. Thus ${\sf p}i'_E$ is cuspidal by Corollary \ref{cor-rho-type}. 
Now applying Corollary \ref{cor-aut-trace} and \eqref{ord-pole} we see that the top line of \eqref{id1} is equal to \mathbf Egin{align*} |\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um'_{{\sf p}i':{\sf p}i'_E \cong {\sf p}i'^{\tau}_E} \mathrm{tr}({\sf p}i')(h^1)\widetilde{{\sf p}hi}(1)\mathrm{Res}_{s=1}L(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0}). \end{align*} Note that the given residue is nonzero and that this sum is absolutely convergent by Corollary \ref{cor-aut-trace}. At this point we assume that the function $\Phi_S$ is chosen so that at finite places $v \in S$ where $\Phi_v \not \in C_c^{\infty}(\mathrm{GL}_n(F_v)//\mathrm{GL}_n(\mathcal{O}_{F_v}))$ the function $\Phi_v$ is of positive type (this is possible by Conjecture \ref{conj-transf}). Under this assumption we claim that the second line of \eqref{id1} is absolutely convergent. Indeed, the $L$-function $L(s,({\sf p}i_E \times {\sf p}i_E^{\varepsilone})^{S_0})$ of the admissible representation ${\sf p}i_E^{S_0} \times {\sf p}i_E^{\varepsilone S_0}$ is defined and convergent in some half plane \cite[Theorem 13.2]{Borel}, \cite{LanglProb}, and its Dirichlet series coefficients are positive \cite[Lemma a]{HR}. Thus the smoothed partial sums $\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X)))$ have positive coefficients. The fact that the second line of \eqref{id1} converges absolutely follows. Now we have refined \eqref{id1} to an identity of absolutely convergent sums \mathbf Egin{align} \label{id2} &|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um'_{{\sf p}i':{\sf p}i'_E \cong {\sf p}i'^{\tau}_E} \mathrm{tr}({\sf p}i')(h^1)\widetilde{{\sf p}hi}(1)\mathrm{Res}_{s=1}L(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0}) \\&= \mathbb{S}um'_{{\sf p}i} \mathrm{tr}({\sf p}i)(\Phi^1)\lim_{X \to \infty} X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))). \nonumber \end{align} where the residues in the top line are nonzero. 
Before starting the proof in earnest, we wish to refine \eqref{id2} yet again to an identity where only representations of a given infinity type are involved. Let $\Psi=\otimes_{w|\infty}\Psi_w \in \otimes_{w |\infty}C_c^{\infty}(M(E_{w}))^{W(M,\mathrm{GL}_n)}$, where $M \leq \mathrm{GL}_n$ is the standard maximal torus of diagonal matrices. For an irreducible unitary generic admissible representation $\Pi_{\infty}$ of $\mathrm{GL}_n(E_{\infty})$ (resp.~${\sf p}i_{\infty}$ of $\mathrm{GL}_n(F_{\infty})$) write $\chi_{\Pi_{\infty}}: M(E_{\infty}) \to \textf{C}C^{\times}$ (resp.~$\chi_{{\sf p}i_{\infty}}: M(F_{\infty}) \to \textf{C}C^{\times}$) for a choice of quasi-character whose unitary induction to $\mathrm{GL}_n(E_{\infty})$ (resp.~$\mathrm{GL}_n(F_{\infty})$) is $\Pi_{\infty}$ (resp.~${\sf p}i_{\infty}$). Here we are using our assumption that $F$ is totally complex. The quasi-characters $\chi_{\Pi_{\infty}w}$ and $\chi_{{\sf p}i_{\infty}v}$ for infinite places $w$ of $E$ and $v$ of $F$ are determined by $\Pi_{w}$ and ${\sf p}i_{v}$, respectively, up to the action of $W(M,\mathrm{GL}_n)$. Moreover, they determine $\Pi_w$ and ${\sf p}i_v$, respectively. 
We note that by an application of the descent arguments proving Lemma \ref{lem-archi-transf} the identity \eqref{id2} implies \mathbf Egin{align} \label{id3} &|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um'_{{\sf p}i':{\sf p}i'_E \cong {\sf p}i'^{\tau}_E} \mathrm{tr}(\chi_{{\sf p}i'_E})(\Psi)\mathrm{tr}(h^{\infty})\widetilde{{\sf p}hi}(1)\mathrm{Res}_{s=1}L(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0})\\ &= \mathbb{S}um'_{{\sf p}i} \mathrm{tr}(\chi_{{\sf p}i'})(\otimes_{v |\infty} (*_{w|v}\Psi_w))\mathrm{tr}({\sf p}i)(\Phi^{\infty})\lim_{X \to \infty} X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))) \nonumber \end{align} where the $*$ denotes convolution in $M(F_{\infty})$ (note we are implicitly choosing isomorphisms $M(E \otimes_F F_{v}) \cong \times_{w|v} M(F_{v})$ for each $v|\infty$ to make sense of this). Let $\chi_0:M(F_{\infty}) \to \textf{C}C^{\times}$ be a given quasi-character. By Lemma \ref{lem-lin-ind} the identity \eqref{id3} can be refined to \mathbf Egin{align} \label{id3.5} &|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um'_{\mathbb{S}ubstack{{\sf p}i':\chi_{{\sf p}i'_{E\infty}w}=\chi_{0Ew}^{W}\\ \textrm{ for some }W \in W(M,\mathrm{GL}_n)\\ \textrm{for all }w|\infty}} \mathrm{tr}(\chi_{{\sf p}i'_E})(\Psi)\mathrm{tr}(h^{\infty})\widetilde{{\sf p}hi}(1)\mathrm{Res}_{s=1}L(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0})\\ &= \mathbb{S}um'_{\mathbb{S}ubstack{{\sf p}i:\chi_{{\sf p}i_{\infty}v}=\chi_{0v}^W\\ \textrm{ for some }W \in W(M,\mathrm{GL}_n)\\\textrm{for all }v|\infty}} \mathrm{tr}(\chi_{{\sf p}i'})(\otimes_{v |\infty} (*_{w|v}\Psi_w))\mathrm{tr}({\sf p}i)(\Phi^{\infty})\lim_{X \to \infty} X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))).\nonumber \end{align} Now by descent \eqref{id3.5} implies the identity \mathbf Egin{align} \label{id4} &|\mathrm{Gal}(E/F')^{\mathrm{ab}}|^{-1} \mathbb{S}um'_{\mathbb{S}ubstack{{\sf p}i':{\sf p}i'_E \cong {\sf p}i'^{\tau}_E\\{\sf p}i'_E 
\cong {\sf p}i_{0\infty E}}} \mathrm{tr}({\sf p}i')(h^1)\widetilde{{\sf p}hi}(1)\mathrm{Res}_{s=1}L(s,({\sf p}i'_E \times {\sf p}i'^{\tau \varepsilone}_E)^{S_0}) \\&= \mathbb{S}um'_{{\sf p}i:{\sf p}i_{\infty} \cong {\sf p}i_{0\infty}} \mathrm{tr}({\sf p}i)(\Phi_S^1)\lim_{X \to \infty} X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))) \nonumber \end{align} for all irreducible admissible generic unitary representations ${\sf p}i_{0\infty}$ of $\mathrm{GL}_n(F_{\infty})$; here, as before, for finite $v$ the functions $\Phi_v$ are assumed to be of positive type when they are ramified (i.e.~not in $C_c^{\infty}(\mathrm{GL}_n(F_v)//\mathrm{GL}_n(\mathcal{O}_{F_v}))$). Note in particular that for any given $\Phi_S$ and $h_S$ the sums in \eqref{id4} are finite. We now start to work with \eqref{id4}. First consider descent of primitive representations. Suppose that $\Pi$ is a $\mathrm{Gal}(E/F)$-invariant primitive automorphic representation of $A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_{E})$. Then by Conjecture \ref{conj-solv} $\Pi$ descends to a representation ${\sf p}i'$ of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$. Here in the $n=2$ case we are using the fact that $H^2(\mathrm{Gal}(E/F'),\textf{C}C^{\times})=H^2(\widetilde{A}_4,\textf{C}C^{\times})=0$. The existence of a primitive automorphic representation ${\sf p}i$ of $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_F)$ that is a weak descent of $\Pi$ now follows from \eqref{id4} and a standard argument using the transfer of unramified functions (Lemma \ref{lem-unr-transf}). In more detail, assume that $\Pi$ and $E/F$ are unramified outside of $S$. 
Then choosing $h^{S'}=b_{E/F'}(f^{S_0})$ and $\Phi^{S}=b_{E/F}(f^{S_0})$ for $f^{S_0} \in C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_{E}^{S_0})//\mathrm{GL}_n(\widehat{\mathcal{O}}_{E}^{S_0}))$ (which are transfers of each other by Lemma \ref{lem-unr-transf}) the identity \eqref{id4} implies an identity of the form \mathbf Egin{align*} &\mathbb{S}um'_{\mathbb{S}ubstack{{\sf p}i':{\sf p}i'_E \cong {\sf p}i'^{\tau}_E\\{\sf p}i'_E \cong {\sf p}i_{0\infty E}}} a({\sf p}i')\mathrm{tr}({\sf p}i')(b_{E/F'}(f^{S_{0}})) = \mathbb{S}um'_{{\sf p}i:{\sf p}i_{\infty} \cong {\sf p}i_{0\infty}} c({\sf p}i)\mathrm{tr}({\sf p}i)(b_{E/F}(f^{S_{0}}))\nonumber \end{align*} for some $a({\sf p}i') \in \mathrm{R}R_{>0},c({\sf p}i) \in \mathrm{R}R_{ \mathfrak{g}eq 0}$ (here we are using the fact that we assumed the functions $\Phi_v$ to be of positive type if they are ramified). Applying linear independence of characters, this implies a refined identity \mathbf Egin{align*} &\mathbb{S}um a({\sf p}i')\mathrm{tr}({\sf p}i')(b_{E/F'}(f^S))= \mathbb{S}um c({\sf p}i)\mathrm{tr}({\sf p}i)(b_{E/F}(f^S)) \end{align*} where the sum on top (resp.~bottom) is over cuspidal automorphic representations ${\sf p}i'$ (resp.~${\sf p}i$) such that the character $\mathrm{tr}({\sf p}i' \circ b_{E/F'})$ (resp.~$\mathrm{tr}({\sf p}i \circ b_{E/F})$) of $C_c^{\infty}(\mathrm{GL}_n(\mathbb{A}_E^{S})//\mathrm{GL}_n(\mathcal{O}_{E}^{S}))$ is equal to $\mathrm{tr}(\Pi)$. Thus any of the representations ${\sf p}i$ on the right is a weak descent of $\Pi$, and there must be some representation on the right because the sum on the left is not identically zero as a function of $f^{S_0}$. We also note that the base change is compatible at places $v$ where ${\sf p}i$ is an abelian twist of the Steinberg representation by Lemma \ref{lem-EP}. This proves the statements on descent of cuspidal automorphic representations contained in theorems \ref{main-thm-1-conv} and \ref{main-thm-2-conv}. 
Now assume that ${\sf p}i$ is an $E$-primitive automorphic representation of $A_{\mathrm{GL}_{nF}} \backslash \mathrm{GL}_n(\mathbb{A}_F)$, and if $n=2$ assume that ${\sf p}i$ is not of $\rho$-type for any $\rho:W_F' \to {}^L\mathrm{GL}_{2F}$ trivial on $W_E'$. By assumption we have that \mathbf Egin{align} \label{claim-45} \lim_{X \to \infty}X^{-1}\mathrm{tr}({\sf p}i)(b_{E/F}(\Sigma^{S_0}_{{\sf p}hi}(X))) \neq 0. \end{align} Let ${\sf p}i'$ be a cuspidal automorphic representation of $A_{\mathrm{GL}_{nF'}} \backslash \mathrm{GL}_n(\mathbb{A}_{F'})$ that is not of $\rho'$-type for any $\rho':W_{F'}' \to {}^L\mathrm{GL}_{2F'}$ trivial on $W_E'$. By Lemma \ref{lem-unr-transf} one has $$ \mathrm{tr}({\sf p}i'_{v'})(b_{E/F'}(f_w))={\sf p}i'_{Ew}(f_w) $$ whenever $w$ is a finite place of $E$ dividing $v'$ and $f_w \in C_c^{\infty}(\mathrm{GL}_n(E_w)//\mathrm{GL}_n(\mathcal{O}_{E_w}))$. Thus by \eqref{claim-45}, the existence of a weak base change of ${\sf p}i$ to $A_{\mathrm{GL}_{nE}} \backslash \mathrm{GL}_n(\mathbb{A}_E)$ follows as before. This completes the proof of Theorem \ref{main-thm-1-conv} and Theorem \ref{main-thm-2-conv}. {\sf q}ed \mathbb{S}ubsection{Artin representations: Theorem \ref{main-thm-3-conv}} \label{ssec-trace-to-func2} Let $E/F$ be a Galois extension such that $\mathrm{Gal}(E/F)\cong \widetilde{A}_5$. We assume that $F$ is totally complex. As above, we fix embeddings $A_4 \hookrightarrow A_5$ and $\mathbb{Z}/2 \times \mathbb{Z}/2 \hookrightarrow A_4 \hookrightarrow A_5$ and let $\widetilde{A}_4,Q \leq \widetilde{A}_5$ denote the pull-backs of these groups under the quotient map $\widetilde{A}_5 \to A_5$. Throughout this subsection we assume the hypotheses of Theorem \ref{main-thm-3-conv}. We fix throughout this subsection a representation $\rho_2:W_F' \to {}^L\mathrm{GL}_{2F}$ trivial on $W_E'$ that has character $\theta_2$ in the notation of \S \ref{appendix}. 
There is exactly one other nonisomorphic irreducible degree-two character of $\mathrm{Gal}(E/F)$, namely $\xi \circ \theta_2$ where $\xi \in \mathrm{Gal}(\mathbb{Q}(\sqrt{5})/\mathbb{Q})$. In this subsection we prove Theorem \ref{main-thm-3-conv}, which asserts that the trace identities of Theorem \ref{main-thm-3} imply that $\rho_2 \oplus \xi \circ \rho_2$ has an associated isobaric automorphic representation. We note at the outset that the argument is modeled on a well-known argument of Langlands in the tetrahedral case \cite[\S 3]{Langlands}. The trace identities of Theorem \ref{main-thm-3} involve two different fields that were both denoted by $F'$; it is now necessary to distinguish between them. We let $$ F':=E^{\widetilde{A}_4} \leq K :=E^Q. $$ We require the following lemma: \begin{lem} \label{lem-Q-autom} There is a cuspidal automorphic representation $\pi'$ of $\mathrm{GL}_2(\mathbb{A}_{F'})$ and a cuspidal automorphic representation $\sigma$ of $\mathrm{GL}_2(\mathbb{A}_K)$ such that $\pi'=\pi(\rho_2|_{F'})$ and $\sigma=\pi(\rho_2|_K)=\pi'_K$. \end{lem} \begin{proof} One has an automorphic representation $\pi'$ such that $\pi'=\pi(\rho_2|_{F'})$ by Langlands' work \cite[\S 3]{Langlands}; see also \cite[\S 6]{GerLab}. By its construction $\pi'_K$ is isomorphic to $\sigma:=\pi(\rho_2|_K)$. \end{proof} Choose $\sigma$ and $\pi'$ as in the lemma. Assuming the trace identities of Theorem \ref{main-thm-3} in the $n=2$ case there are precisely two distinct isomorphism classes of cuspidal automorphic representations represented by, say, $\pi_1,\pi_2$, such that $\pi_{iK} \cong \sigma$. 
Using our assumption that $F$ is totally complex this can be proven by arguments analogous to those used in \S \ref{ssec-trace-to-func}; we only note that $$ \lim_{X \to \infty} (\frac{d^{3}}{ds^3}(\widetilde{{\sf p}hi}(s)X^s))^{-1}\mathrm{tr}(\mathbb{S}igma)(b_{E/F'}(\Sigma^{S_0}(X)) \neq 0 $$ since $\mathrm{dim}(\mathrm{Hom}_I({\sf p}i'_E,{\sf p}i'^{\tau}_E)) =4$ by construction of ${\sf p}i$ (compare Proposition \ref{Perron-prop}). We emphasize that the trace identity of Theorem \ref{main-thm-3} tells us that $\mathbb{S}igma$ is the unique weak base change of ${\sf p}i_i$, which is stronger than the statement that $\mathbb{S}igma_E$ is the unique weak base change of ${\sf p}i_i$. We note in particular that using the transfers supplied in \S \ref{ssec-transfers} we have that the base changes are compatible at finite places $v$ that are unramified in $E/F$ and at all infinite places (which are complex by assumption). Moreover the ${\sf p}i_i$ are unramified outside of the set of places where $E/F$ is ramified. One expects that upon reindexing if necessary one has \mathbf Egin{align*} {\sf p}i_{1} &\mathbb{S}tackrel{?}{\cong} {\sf p}i(\rho_{2})\\ {\sf p}i_{2} &\mathbb{S}tackrel{?}{\cong} {\sf p}i(\xi \circ \rho_{2}). \end{align*} We do not know how to prove this, but we will prove something close to it, namely Corollary \ref{cor-isob} below. Consider $\mathrm{Sym}^2({\sf p}i')$ and $\mathrm{Sym}^2(\mathbb{S}igma)$; the first is a cuspidal automorphic representation of $\mathrm{GL}_3(\mathbb{A}_{F'})$ by \cite[Theorem 9.3]{GJ} and the second is an isobaric (noncuspidal) automorphic representation of $\mathrm{GL}_3(\mathbb{A}_K)$ \cite[Remark 9.9]{GJ}. \mathbf Egin{lem} \label{lem-sym} For $i \in \{1,2\}$ one has $$ \mathrm{Sym}^2({\sf p}i_i)_K \cong \mathrm{Sym}^2(\mathbb{S}igma) $$ and $$ \mathrm{Sym}^2({\sf p}i_i)_{F'} \cong \mathrm{Sym}^2({\sf p}i'). 
$$ \end{lem} \begin{proof} Since $\pi_{iK} \cong \pi'$, it is easy to see that $\mathrm{Sym}^2(\pi_{i})_{Kv_K} \cong \mathrm{Sym}^2(\sigma)_{v_K}$ for all places $v_K$ of $K$ that are finite and such that $K/F$ and $\sigma_i$ are unramified. The first statement then follows from strong multiplicity one for isobaric automorphic representations \cite[Theorem 4.4]{JSII}. Since the $\pi_i$ were defined to be weak descents of $\sigma$, they are in particular weak descents of the isobaric representation $1 \boxplus 1$ of $\mathrm{GL}_2(\mathbb{A}_E)$. Thus $$ \lim_{X \to \infty} \left(\frac{d^{8}}{ds^8}(\widetilde{\phi}(s)X^s)\big|_{s=1}\right)^{-1}\mathrm{tr}(\mathrm{Sym}^2(\pi_i))(b_{E/F}(\Sigma^{S_0}(X))) \neq 0 $$ since $\mathrm{tr}(\mathrm{Sym}^2(\pi_i))(b_{E/F}(\Sigma^{S_0}(X)))$ is a smoothed partial sum of the Dirichlet series $\zeta_E^{S_0}(s)^9$. Applying the trace identities of Theorem \ref{main-thm-3} we conclude that $\mathrm{Sym}^2(\pi_{i})$ admits a weak base change $\mathrm{Sym}^2(\pi_i)_{F'}$ to $F'$. Now $\mathrm{Sym}^2(\pi_i)_{F'}$ and $\mathrm{Sym}^2(\pi')$ both base change to $\mathrm{Sym}^2(\sigma)$. Since $\mathrm{Sym}^2(\sigma)$ is not cuspidal, this implies that $\mathrm{Sym}^2(\pi') \cong \mathrm{Sym}^2(\pi_i)_{F'}$ \cite[Chapter 3, Theorems 4.2 and 5.1]{AC}. \end{proof} For convenience, let $S$ be the set of finite places where $E/F$ is ramified. Thus the base change from $\pi_i$ to $\sigma$ is compatible outside of $S$ and the base changes from $\mathrm{Sym}^2(\pi_i)$ to $\mathrm{Sym}^2(\pi')$ and $\mathrm{Sym}^2(\sigma)$ are all compatible outside of $S$. \begin{lem} \label{lem-F'} For $i \in \{1,2\}$ the cuspidal automorphic representation $\pi'$ is a weak base change of $\pi_i$: $$ \pi_{iF'} \cong \pi'. 
$$ The base change is compatible for $v \not \in S$. \end{lem} \mathbf Egin{proof} Fix $i \in \{1,2\}$. We will verify that the local base change ${\sf p}i_{iF'v'}$ is isomorphic to ${\sf p}i'_{v'}$ for all places $v'$ of $F'$ not dividing places in $S$; this will complete the proof of the lemma (notice that the local base change is well-defined even though we do not yet know that ${\sf p}i_{iF'}$ exists as an automorphic representation). Notice that ${\sf p}i_{iK} \cong {\sf p}i'_K \cong \mathbb{S}igma$ by construction of ${\sf p}i_i$. Thus if $v'$ is a place of $F'$ split in $K/F'$ and not lying above a place of $S$, then ${\sf p}i_{iF'v'} \cong {\sf p}i'_v$. Suppose that $v'$ is a place of $F'$ that is nonsplit in $K/F'$ and not lying above a place of $S$. Then there is a unique place $v_K|v'$ and $[K_{v_K}:F'_{v'}]=3$. Notice that ${\sf p}i_{i}$ and ${\sf p}i'$ have trivial central character by construction. Thus their Langlands classes are of the form \mathbf Egin{align*} A({\sf p}i_{iF'v'})=\mathbf Egin{pmatrix} a\text{\sffamily{\bf\textsf{z}}}eta & \\ & a^{-1} \text{\sffamily{\bf\textsf{z}}}eta^{-1}\end{pmatrix}{\sf q}uad \textrm{ and }{\sf q}uad A({\sf p}i_{v'}')=\mathbf Egin{pmatrix} a &\\ & a^{-1}\end{pmatrix} \end{align*} for some $a \in \textf{C}C^{\times}$ and some third root of unity $\text{\sffamily{\bf\textsf{z}}}eta$. By Lemma \ref{lem-sym} we have that $$ \mathrm{Sym}^2(A({\sf p}i_{iF'v'}))=\mathbf Egin{pmatrix}a^2\text{\sffamily{\bf\textsf{z}}}eta^2 & & \\ & 1 & \\&&a^{-2} \text{\sffamily{\bf\textsf{z}}}eta^{-2} \end{pmatrix} $$ is conjugate under $\mathrm{GL}_3(\textf{C}C)$ to $$ \mathrm{Sym}^2(A({\sf p}i'_{v'}))=\mathbf Egin{pmatrix} a^2 & & \\ & 1 & \\ & & a^{-2} \end{pmatrix}. $$ Thus $\{a^2,a^{-2}\}=\{a^2\text{\sffamily{\bf\textsf{z}}}eta^2,a^{-2}\text{\sffamily{\bf\textsf{z}}}eta^{-2}\}$. If $a^2=a^2\text{\sffamily{\bf\textsf{z}}}eta^2$ then $\text{\sffamily{\bf\textsf{z}}}eta=1$, proving that ${\sf p}i_{iF'v'} \cong {\sf p}i'_{v'}$. 
If on the other hand $a^2=a^{-2}\text{\sffamily{\bf\textsf{z}}}eta^{-2}$ and $\text{\sffamily{\bf\textsf{z}}}eta \neq 1$, then $$ a^4=\text{\sffamily{\bf\textsf{z}}}eta^{-2} $$ and the matrix $\mathrm{Sym}^2(A({\sf p}i'_{v'}))$ has order $6$. On the other hand, $\mathrm{Sym}^2(A({\sf p}i'_{v'}))$ is the image of a Frobenius element of $\mathrm{Gal}(E/F')$ under the Galois representation corresponding to $\mathrm{Sym}^2({\sf p}i')$. This Galois representation is the symmetric square of a representation of $\widetilde{A}_4$ with trivial determinant, and hence factors through $A_4$. As $A_4$ has no elements of order $6$, we arrive at a contradiction, proving that $\text{\sffamily{\bf\textsf{z}}}eta=1$. Hence ${\sf p}i_{iF'v'} \cong {\sf p}i'_{v'}$. \end{proof} Let $\chi \in \widetilde{A}_4^{\widetilde{\varepsilon}dge}$ be a nontrivial (abelian) character. Then for all places $v$ of $F$ one has an admissible representation $\mathrm{Ind}_{F'}^{F}(\chi)_v$. It is equal to $\otimes_{v'|v} \mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'})$. Note that one does not know a priori whether or not $\mathrm{Ind}_{F'}^F(\chi)$ is automorphic; proving this in a special case is the subject matter of \cite{KimIcos}. By class field theory we can also view $\mathrm{Ind}_{F'}^F(\chi)$ as an $L$-parameter $$ \mathrm{Ind}_{F'}^{F}(\chi):W_{F}' \longrightarrow {}^L\mathrm{GL}_{5F'} $$ and with this viewpoint in mind we prove the following lemma: \mathbf Egin{lem} \label{lem-ind-autom} For each $i \in \{1,2\}$ the $L$-parameter $\mathrm{Ind}_{F'}^F(\chi)$ is associated to $\mathrm{Sym}^4({\sf p}i_i)$. More precisely, $\mathrm{Ind}_{F'}^F(\chi)_v={\sf p}i(\mathrm{Ind}_{F'}^F(\chi)_{v}) \cong \mathrm{Sym}^4({\sf p}i_{iv})$ for all $v \not \in S$. \end{lem} We note that $\mathrm{Sym}^4({\sf p}i_i)$ is an automorphic representation of $\mathrm{GL}_5(\mathbb{A}_F)$ by work of Kim \cite{Kim} and Kim-Shahidi \cite[Theorem 3.3.7]{KScusp}. 
\mathbf Egin{proof} At the level of admissible representations for $v \not \in S$ one has \mathbf Egin{align} \label{first-frob} \mathrm{Sym}^4({\sf p}i_i)^{\varepsilone}_v \otimes \mathrm{Ind}_{F'}^F(\chi)_v \cong \otimes_{v'|v}\mathrm{Ind}_{F'_{v'}}^{F_v}(\mathrm{Sym}^4({\sf p}i'_{v'})^{\varepsilone} \otimes \chi_{v'}) \end{align} by Frobenius reciprocity. On the other hand ${\sf p}i'^{\varepsilone}={\sf p}i(\rho_{2}|_{F'}^{\varepsilone})$ and $$ \mathrm{Sym}^4(\rho_2)^{\varepsilone} \cong \mathrm{Sym}^4(\rho_2^{\varepsilone}) \cong \mathrm{Sym}^4(\rho_2) \cong \mathrm{Ind}_{F'}^F(\chi) \cong \mathrm{Ind}_{F'}^F(\chi)^{\varepsilone} $$ at the level of Galois representations (see Lemma \ref{lem-icosa-reps}). Thus the right hand side of \eqref{first-frob} is isomorphic to $$ \otimes_{v'|v}\mathrm{Ind}_{F'_{v'}}^{F_v}(\mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'})^{\varepsilone}|_{F'_{v'}} \otimes \chi_{v'}) \cong \otimes_{v'|v}\mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'})^{\varepsilone} \otimes \mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'}) $$ and we conclude that \mathbf Egin{align} \label{sym-isom} \mathrm{Sym}^4({\sf p}i_i)^{\varepsilone}_v \otimes \mathrm{Ind}_{F'}^F(\chi)_v \cong \otimes_{v'|v}\left(\mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'})^{\varepsilone} \otimes \mathrm{Ind}_{F'_{v'}}^{F_v}(\chi_{v'})\right). \end{align} Now if $A$ and $B$ are two square invertible diagonal matrices of rank $n$, the eigenvalues of $A$ can be recovered from knowledge of the eigenvalues of $A \otimes B$ and the eigenvalues of $B$. With this remark in hand, we see that \eqref{sym-isom} implies that $$ \mathrm{Sym}^4({\sf p}i_i)_v \cong \mathrm{Ind}_{F'}^F(\chi)_v $$ for all $v \not \in S$. \end{proof} With this preparation in place, we make a step towards proving that $\rho_2$ and ${\sf p}i_1$ are associated: \mathbf Egin{lem} \label{lem-places} Let $v \not \in S$. One has ${\sf p}i_{1v} \cong {\sf p}i(\rho_{2v})$ or ${\sf p}i_{1v} \cong {\sf p}i(\xi \circ \rho_{2v})$. 
\end{lem} \mathbf Egin{proof} For infinite places we use our running assumption that $F$ is totally complex together with Lemma \ref{lem-F'}. This allows one to deduce the lemma in this case. Assume now that $v$ is finite and choose $v'|v$. By Lemma \ref{lem-F'}, up to conjugation, the Langlands class of ${\sf p}i_{1v}$ and the Frobenius eigenvalue of $\rho_2(\mathrm{Frob}_v)$ satisfy \mathbf Egin{align*} A({\sf p}i_{1v})=\mathbf Egin{pmatrix} a\text{\sffamily{\bf\textsf{z}}}eta & \\ & a^{-1} \text{\sffamily{\bf\textsf{z}}}eta^{-1}\end{pmatrix}{\sf q}uad \textrm{ and } {\sf q}uad \rho_2(\mathrm{Frob}_v)=\mathbf Egin{pmatrix} a &\\ & a^{-1}\end{pmatrix} \end{align*} where $\text{\sffamily{\bf\textsf{z}}}eta$ is a $[F'_{v'}:F_v]$-root of unity. Thus if there is a place $v'|v$ such that $[F'_{v'}:F_v]=1$, then we are done. Since $[F':F]=5$, there are two other cases to consider, namely where there is a single $v'|v$ of relative degree $5$ and when there are two places $v'_2,v'_3|v$, one of them of relative degree $2$ and the other of relative degree $3$. By Lemma \ref{lem-ind-autom} the two matrices \mathbf Egin{align} \label{are-conj} \mathbf Egin{pmatrix} a^4\text{\sffamily{\bf\textsf{z}}}eta^4 & & & & \\ & a^2\text{\sffamily{\bf\textsf{z}}}eta^2 & & &\\ & & 1 & & \\ & & & a^{-2} \text{\sffamily{\bf\textsf{z}}}eta^{-2} & \\ & & & & a^{-4}\text{\sffamily{\bf\textsf{z}}}eta^{-4}\end{pmatrix} {\sf q}uad \textrm{ and }{\sf q}uad \mathbf Egin{pmatrix} a^4 & & & & \\ & a^2 & & &\\ & & 1 & & \\ & & & a^{-2} & \\ & & & & a^{-4}\end{pmatrix} \end{align} are conjugate. We will use this fact and a case-by-case argument to prove the lemma. Assume $[F'_{v'}:F_{v}]=5$. In this case $a+a^{-1}$ is ${\sf p}m \frac{\mathbb{S}qrt{5}-1}{2}$ or ${\sf p}m \frac{-\mathbb{S}qrt{5}-1}{2}$ by the character table of $\widetilde{A}_5$ in \S \ref{appendix} above, which implies that $a={\sf p}m\nu$ for a primitive fifth root of unity $\nu$. 
We conclude from the conjugacy of the two matrices \eqref{are-conj} that $\text{\sffamily{\bf\textsf{z}}}eta \neq \nu^{-1}$. On the other hand, if $\text{\sffamily{\bf\textsf{z}}}eta$ is any other fifth root of unity then the matrix $A({\sf p}i_{1v})$ is conjugate to either $\rho_2(\mathrm{Frob}_v)$ or $\xi \circ \rho_2(\mathrm{Frob}_v)$, where as above $\xi$ is the generator of $\mathrm{Gal}(\mathbb{Q}(\mathbb{S}qrt{5})/\mathbb{Q})$. Thus the lemma follows in this case. Assume now that $[F'_{v'}:F_v]=3$; this is the last case we must check. By consulting the character table of $\widetilde{A}_5$ in \S \ref{appendix} we see that $a+a^{-1}={\sf p}m 1$ which implies $a$ is a primitive $6$th root of unity or a primitive $3$rd root of unity. By the conjugacy of the matrices \eqref{are-conj} we conclude that $\text{\sffamily{\bf\textsf{z}}}eta \neq {\sf p}m a^{-1}$. Thus if $a$ is a primitive $3$rd root of unity the matrices $$ \mathbf Egin{pmatrix} a \text{\sffamily{\bf\textsf{z}}}eta & \\ & a^{-1} \text{\sffamily{\bf\textsf{z}}}eta^{-1} \end{pmatrix}, \, \, \, \mathbf Egin{pmatrix} a & \\ & a^{-1} \end{pmatrix} $$ are either equal (if $\text{\sffamily{\bf\textsf{z}}}eta=1$) or conjugate (if $\text{\sffamily{\bf\textsf{z}}}eta \neq a^{-1}$ is a nontrivial $3$rd root of unity). Now suppose that $a$ is a primitive $6$th root of unity; by replacing $a$ by $a^{-1}$ if necessary we may assume that $a=e^{2 {\sf p}i i/6}$. In this case the right matrix in \eqref{are-conj} has eigenvalues $e^{{\sf p}m 2 {\sf p}i i/3},1$ (the first two with multiplicity two and the last with multiplicity one). Since $\text{\sffamily{\bf\textsf{z}}}eta \neq {\sf p}m a^{-1}$, we must have $\text{\sffamily{\bf\textsf{z}}}eta=1$ or $\text{\sffamily{\bf\textsf{z}}}eta=e^{-2{\sf p}i i /3}$. In the former case $A({\sf p}i_{1v})$ and $\rho_2(\mathrm{Frob}_v)$ are equal and in the latter case they are conjugate. 
\end{proof} Another way of stating the lemma that appears more ``global'' is the following corollary: \begin{cor} \label{cor-isob} One has $$ \pi_1 \boxplus \pi_2 \cong \pi(\rho_2 \oplus \xi \circ \rho_2). $$ \end{cor} This is precisely Theorem \ref{main-thm-3-conv}. \begin{proof} This follows from Lemma \ref{lem-places} and \cite[Proposition 4.5]{HennCyc}. To apply \cite[Proposition 4.5]{HennCyc} one uses the fact that the isobaric sum $\pi_1 \boxplus \pi_2$ is necessarily locally generic (see \cite[\S 0.2]{Bernstein} for the nonarchimedean case, which is all we use). \end{proof} Finally, we prove Corollary \ref{cor-artin-cases}: \begin{proof}[Proof of Corollary \ref{cor-artin-cases}] In the notation above, Corollary \ref{cor-isob} implies the following isomorphisms at the level of admissible representations: \begin{align*} \mathrm{Sym}^3(\pi_1) &\cong \pi(\mathrm{Sym}^3(\rho_2))\\ \pi_1 \boxtimes \pi_2 &\cong \pi(\rho_2 \otimes \xi \circ \rho_2)\\ \mathrm{Sym}^4(\pi_1) &\cong \pi(\mathrm{Sym}^4(\rho_2))\\ \mathrm{Sym}^2(\pi_1) \boxtimes \pi_2 &\cong \pi(\mathrm{Sym}^2(\rho_2) \otimes \xi \circ \rho_2). \end{align*} Notice that any irreducible representation of $\mathrm{Gal}(E/F)$ of dimension greater than $3$ is on this list by Lemma \ref{lem-icosa-reps}. Therefore to complete the proof of the corollary it suffices to recall that all of the representations on the left are known to be automorphic. More precisely, the $\mathrm{Sym}^3$ lift was treated by work of Kim and Shahidi \cite[Theorem B]{KiSh}. The Rankin product $\pi_1 \boxtimes \pi_2$ is automorphic by work of Ramakrishnan \cite[Theorem M]{RRS}. The fact that the symmetric fourth is automorphic follows from \cite[Theorem 3.3.7]{KScusp} (see also \cite[Theorem 4.2]{KimIcos}). Finally, for the last case, one can invoke \cite[Theorem A]{KiSh} and \cite[Proposition 4.1]{SW}. 
\end{proof} \section{Some group theory} \label{sec-groups} In this section we explain why two group-theoretic assumptions we have made in theorems \ref{main-thm-1} and \ref{main-thm-1-conv} are essentially no loss of generality. \subsection{Comments on universal perfect central extensions} \label{ssec-upce} The underlying goal of this paper is to study the functorial transfer conjecturally attached to the map of $L$-groups $$ b_{E/F}:{}^L\mathrm{GL}_{nF} \longrightarrow {}^L\mathrm{Res}_{E/F}\mathrm{GL}_{nE} $$ for Galois extensions $E/F$. We explain how ``in principle'' this can be reduced to the study of Galois extensions $E/F$ where $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple group. Given a Galois extension $E/F$, we can find a chain of subextensions $E_0=E \geq E_1 \geq \cdots \geq E_m=F$ such that $E_i/E_{i+1}$ is Galois with simple Galois group. Using this, one can in principle reduce the study of arbitrary Galois extensions to the study of extensions with simple Galois group\footnote{Of course, this reduction will be subtle; see \cite{LR} and \cite{Rajan3} for the solvable case.}. If the extension is cyclic, then we can apply the body of work culminating in the book of Arthur and Clozel \cite{AC}. We therefore consider the case where $\mathrm{Gal}(E/F)$ is a finite simple nonabelian group. Assume for the moment that $\mathrm{Gal}(E/F)$ is a finite simple nonabelian group. 
There exists an extension $L/E$ such that $L/F$ is Galois, \begin{align} \label{star} \begin{CD} 1 @>>> \mathrm{Gal}(L/E) @>>>\mathrm{Gal}(L/F) @>>> \mathrm{Gal}(E/F)@>>>1 \end{CD} \end{align} is a central extension and \begin{align} \label{abundant} \mathrm{Gal}(L/L \cap EF^{\mathrm{ab}}) \cong H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})^{\wedge}, \end{align} where $F^{\mathrm{ab}}$ is the maximal abelian extension of $F$ (in some algebraic closure) \cite[Theorem 5]{Miyake} (in fact Miyake's theorem is valid for an arbitrary Galois extension $E/F$)\footnote{Here the $\wedge$ denotes the dual, so $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})^{\wedge} \cong H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})$ since $H^2(\mathrm{Gal}(E/F),\mathbb{C}^{\times})$ is finite abelian.}. Such an extension $L$ is called an abundant finite central extension in loc.~cit. Choose an abelian extension $F'/F$ such that $$ L \cap EF^{\mathrm{ab}}=EF'. $$ We claim that $\mathrm{Gal}(L/F')$ is the universal perfect central extension of a finite simple group. Indeed, the central extension \eqref{star} restricts to induce a central extension $$ \begin{CD} 1 @>>> \mathrm{Gal}(L/EF') @>>>\mathrm{Gal}(L/F') @>>> \mathrm{Gal}(E/F)@>>>1 \end{CD} $$ Moreover, $L \cap EF^{\mathrm{ab}}=EF'$ implies $L \cap F^{\mathrm{ab}}=F'$ since $\mathrm{Gal}(E/F)$ is a simple nonabelian group and therefore $\mathrm{Gal}(L/F')$ is perfect. By \eqref{abundant}, we conclude that $\mathrm{Gal}(L/F')$ is the universal perfect central extension of the finite simple group $\mathrm{Gal}(E/F)$ \cite[Proposition 4.228]{Gorenstein}. We observe that if we understand the functorial lifting conjecturally defined by $b_{L/F'}$, then we can ``in principle'' use abelian base change to understand the functorial lifting conjecturally defined by $b_{E/F}$. 
Thus assuming that $\mathrm{Gal}(E/F)$ is the universal perfect central extension of a finite simple group from the outset is essentially no loss of generality. \subsection{Generating $\mathrm{Gal}(E/F)$} \label{gen-gal} In the statement of Theorems \ref{main-thm-1} and \ref{main-thm-1-conv}, we required that $\mathrm{Gal}(E/F)=\langle \tau, \mathrm{Gal}(E/F') \rangle$ for some subfield $E \geq F' \geq F$ with $E/F'$ solvable and some element $\tau$. We also placed restrictions on the order of $\mathrm{Gal}(E/F')$. The theorems we recall in this subsection indicate that these restrictions are little or no loss of generality, and also demonstrate that one has a great deal of freedom in choosing generators of universal perfect central extensions of finite simple groups. To state some results, recall that a finite group $G$ is quasi-simple if $G/Z_G$ is a nonabelian simple group and $G$ is perfect. Thus universal perfect central extensions of simple nonabelian groups are quasi-simple. \begin{thm}[Guralnick and Kantor] \label{thm-GK} Let $G$ be a quasi-simple group. Let $x \in G$ be an element that is not in the center $Z_G$ of $G$. Then there is an element $g \in G$ such that $\langle x,g \rangle=G$. \end{thm} \begin{proof} Let $\overline{x}$ be the image of $x$ in $G/Z_G$. Then there exists a $\overline{g} \in G/Z_G$ such that $\langle \overline{x}, \overline{g}\rangle=G/Z_G$ by \cite[Corollary]{GurKant}. We simply let $g \in G$ be any element mapping to $\overline{g}$. \end{proof} For applications to base change and descent of automorphic representations of $\mathrm{GL}_2$, preliminary investigation indicates that the primes $2$ and $3$ are troublesome. With this in mind, the following theorem might be useful (see \cite[Corollary 8.3]{GurM}): \begin{thm}[Guralnick and Malle] \label{thm-good} Let $G$ be a quasi-simple group. Then $G$ can be generated by two elements of order prime to $6$. 
\qed \end{thm} \section*{Acknowledgments} The author would like to thank A.~Adem for information on finite simple groups, R.~Guralnick and G.~Malle for including a proof of Theorem \ref{thm-good} in \cite{GurM} and R.~Guralnick for correcting mistakes in \S \ref{gen-gal}. H.~Hahn, R.~Langlands, S.~Morel, Ng\^o B.~C., P.~Sarnak, and N.~Templier deserve thanks for many useful conversations. The author is also grateful for the encouragement of R.~Langlands, Ng\^o B.~C., D.~Ramakrishnan, and P.~Sarnak. R.~Langlands deserves additional thanks in particular for encouraging the author to record the results in this paper. \quash{ He also thanks J.~C.~for everything. } \begin{thebibliography}{} \bibitem[AC]{AC} J.~Arthur and L.~Clozel, \textbf{Simple Algebras, Base Change, and the Advanced Theory of the Trace Formula}, Princeton University Press, 1989. \bibitem[Be]{Bernstein} J.~Bernstein, \emph{$P$-invariant distributions on $\mathrm{GL}(N)$ and the classification of unitary representations of $\mathrm{GL}(N)$ (non-archimedean case)} in \textbf{Lie Group Representations II}, Springer LNM {\bf 1041} 1984. \bibitem[B]{Booker} A.~Booker, \emph{Numerical tests of modularity}, J.~Ramanujan Math.~Soc. {\bf 20} No.~4 (2005) 283-339. \bibitem[Bo]{Borel} A.~Borel, \emph{Automorphic $L$-functions} in \textbf{Automorphic Forms, Representations, and $L$-functions}, Proceedings of Symposia in Pure Mathematics {\bf 33.2} AMS 1979. \bibitem[Buh]{Buhler} J.~Buhler, \textbf{Icosahedral Galois representations}, LNM {\bf 654} Springer-Verlag, 1978. \bibitem[C1]{Cog1} J.~Cogdell, \emph{$L$-functions and converse theorems for $\mathrm{GL}_n$}, in \textbf{Automorphic Forms and Applications}, IAS/Park City Math.~Ser. {\bf 12} AMS 2007. \bibitem[D]{Donnely} H.~Donnelly, \emph{On the cuspidal spectrum for finite volume symmetric spaces}, J.~Differential Geometry {\bf 17} (1982) 239-253. 
\bibitem[DF]{DF} D.~S.~Dummit and R.~M.~Foote, \textbf{Abstract Algebra}, second edition, Prentice Hall, 1999. \bibitem[FLN]{FLN} E.~Frenkel, R.~Langlands, and Ng\^o B.~C., \emph{Formule des traces et fonctorialit\'e: Le d\'ebut d'un programme}, preprint (2010). \bibitem[GJ]{GJ} S.~Gelbart and H.~Jacquet, \emph{A relation between automorphic representations of $\mathrm{GL}(2)$ and $\mathrm{GL}(3)$}, Ann.~scient.~\'Ec.~Norm.~Sup., $4^e$ s\'erie, t.~11, (1978) 471-542. \bibitem[GL]{GerLab} P.~G\'erardin and J.~P.~Labesse, \emph{The solution to a base change problem for $\mathrm{GL}(2)$ (following Langlands, Saito, Shintani)}, in \textbf{Automorphic Forms, Representations, and $L$-functions}, Proceedings of Symposia in Pure Mathematics {\bf 33.2} AMS 1979. \bibitem[Go]{Gorenstein} D.~Gorenstein, \textbf{Finite Simple Groups: An Introduction to their Classification}, Plenum Press, 1982. \bibitem[GrRe]{GR} D.~Gross and M.~Reeder, \emph{Arithmetic invariants of discrete Langlands parameters}, Duke Math. J. {\bf 154} (2010) 431-508. \bibitem[GuK]{GurKant} R.~M.~Guralnick and W.~M.~Kantor, \emph{Probabilistic generation of finite simple groups}, J.~Algebra {\bf 234} (2000) 743-792. \bibitem[GuM]{GurM} R.~M.~Guralnick and G.~Malle, \emph{Products of conjugacy classes and fixed point spaces}, JAMS {\bf 25} (2012) 77-121. \bibitem[HT]{HT} M.~Harris and R.~Taylor, \textbf{The Geometry and Cohomology of Some Simple Shimura Varieties}, Annals of Math. Studies, Princeton University Press, 2001. \bibitem[H1]{HennCyc} G.~Henniart, \emph{On the local Langlands conjecture for $\mathrm{GL}(n)$: The cyclic case} Annals of Math., {\bf 123} No.~1 1986 (145-203). \bibitem[H2]{PreuveHenn} G.~Henniart, \emph{Une preuve simple des conjectures de Langlands pour $\mathrm{GL}(n)$ sur un corps $p$-adique}, Invent.~Math. {\bf 139} 439-455 (2000). \bibitem[HoR]{HR} J.~Hoffstein and D.~Ramakrishnan, \emph{Siegel zeros and cusp forms}, IMRN, No.~6 (1995) 279-308. 
\bibitem[IS]{IS} H.~Iwaniec and P.~Sarnak, \emph{Perspectives on the analytic theory of $L$-functions}, GAFA Special Volume (2000) 705-741. \bibitem[JL]{JacquetLanglands} H.~Jacquet and R.~Langlands, \textbf{Automorphic Forms on $\mathrm{GL}(2)$}, LNM {\bf 114}, Springer Verlag 1970. \bibitem[JShI]{JS} H.~Jacquet and J.~Shalika, \emph{On Euler products and the classification of automorphic representations I}, AJM {\bf 103} No.3 (1981) 499-558. \bibitem[JShII]{JSII} H.~Jacquet and J.~Shalika, \emph{On Euler products and the classification of automorphic representations II}, AJM {\bf 103} No.4 (1981) 777-815. \bibitem[J2]{JacquetRS} H.~Jacquet, \emph{Archimedean Rankin-Selberg Integrals}, in \textbf{Automorphic Forms and $L$-functions II, Local Aspects}, AMS 2009. \bibitem[K1]{Kim} H.~Kim, \emph{Functoriality for the exterior square of $\mathrm{GL}_4$ and the symmetric fourth of $\mathrm{GL}_2$}, JAMS, {\bf 16} No.~1, (2002) 139-183. \bibitem[K2]{KimIcos} H.~Kim, \emph{An example of non-normal quintic automorphic induction and modularity of symmetric powers of cusp forms of icosahedral type}, Invent.~Math., {\bf 156} 495-502 (2004). \bibitem[KSh1]{KiSh} H.~Kim and F.~Shahidi, \emph{Functorial products for $\mathrm{GL}_2 \times \mathrm{GL}_3$ and the symmetric cube for $\mathrm{GL}_2$} Annals of Math. {\bf 155} (2002) 837-893. \bibitem[KSh2]{KScusp} H.~Kim and F.~Shahidi, \emph{Cuspidality of symmetric powers with applications}, Duke Math.~J. {\bf 122}, No.~1 (2002) 177-197. \bibitem[Kn]{Knapp} A.~Knapp, \textbf{Representation Theory of Semisimple Groups: An Overview Based on Examples}, Princeton University Press 1986. \bibitem[Ko]{KottTama} R.~E.~Kottwitz, \emph{Tamagawa numbers}, Annals of Math., {\bf 127} No.~3 (1988) 629-646. \bibitem[La1]{LanglProb} R.~P.~Langlands, \emph{Problems in the theory of automorphic forms}, in \textbf{Lectures in Modern Analysis and Applications}, LNM {\bf 170} Springer 1970. 
\bibitem[La2]{LanglEinM} R.~P.~Langlands, \emph{Automorphic representations, Shimura varieties and motives. Ein M\"archen}, in \textbf{Automorphic Forms, Representations, and $L$-functions}, Proceedings of Symposia in Pure Mathematics {\bf 33.2} AMS 1979. \bibitem[La3]{LanglNotion} R.~P.~Langlands, \emph{On the notion of an automorphic representation}, in \textbf{Automorphic Forms, Representations, and $L$-functions}, Proceedings of Symposia in Pure Mathematics {\bf 33.1} AMS 1979. \bibitem[La4]{Langlands} R.~P.~Langlands, \textbf{Base Change for $GL(2)$}, Annals of Mathematics Studies {\bf 96}, Princeton University Press 1980. \bibitem[La5]{LanglandsArch} R.~P.~Langlands, \emph{The classification of representations of real reductive groups}, in {\bf Math.~Surveys and Monographs 31}, AMS 1988. \bibitem[La6]{LanglBeyond} R.~P.~Langlands, \emph{Beyond endoscopy}, in \textbf{Contributions to Automorphic Forms, Geometry, and Number Theory: A Volume in Honor of Joseph Shalika}, The Johns Hopkins University Press 2004. \bibitem[La7]{LSing} R.~P.~Langlands, \emph{Singularit\'es et transfert}, to appear in Annales des Sciences Mathematiques du Quebec. \bibitem[LLa]{LabLan} J.-P.~Labesse and R.~P.~Langlands, \emph{$L$-indistinguishability for $\mathrm{SL}(2)$}, Canad. J.~Math., {\bf 31} (1979) 726-785. \bibitem[LapRo]{LR} E.~Lapid and J.~Rogawski, \emph{On twists of cuspidal representations of $\mathrm{GL}(2)$}, Forum Mathematicum {\bf 10} (1998) 175-197. \bibitem[Lau]{Laumon} G.~Laumon, \textbf{Cohomology of Drinfeld Modular Varieties, Part I: Geometry, counting of points and local harmonic analysis}, Cambridge 1996. \bibitem[LiSha]{LS} M.~W.~Liebeck and A.~Shalev, \emph{Fuchsian groups, finite simple groups and representation varieties}, Invent.~Math. {\bf 159} (2005) 317-367. \bibitem[LuRS]{LRS} W.~Luo, Z.~Rudnick, and P.~Sarnak, \emph{On the generalized Ramanujan conjectures for $\mathrm{GL}(n)$} in Proc. Symp.~Pure Math. {\bf 66} Part 2, (1999) 301-311. 
\bibitem[Mi]{Miyake} K.~Miyake, \emph{Central extensions and Schur's multiplicators of Galois groups}, Nagoya Math.~J., {\bf 90} (1983) 137-144. \bibitem[MoeW1]{MW} C.~Moeglin and J-L.~Waldspurger, \textbf{Spectral Decomposition and Eisenstein Series}, Cambridge University Press 1995. \bibitem[MoeW2]{MW2} C.~Moeglin and J-L.~Waldspurger, \emph{Le spectre r\'esiduel de $\mathrm{GL}(n)$}, Annales Scientifiques de l'\'E.~N.~S., {\bf 22} n.~4 (1989) 605-674. \bibitem[Mo]{Moreno} C.~J.~Moreno, \textbf{Advanced Analytic Number Theory: $L$-functions}, AMS 2005. \bibitem[M\"uSp]{MS} W.~M\"uller and B.~Speh, \emph{On the absolute convergence of the spectral side of the Arthur trace formula for $\mathrm{GL}_2$}, GAFA {\bf 14} (2004) 58-93. \bibitem[R1]{Rajan2} C.~S.~Rajan, \emph{On the vanishing of the measurable Schur cohomology groups of Weil groups}, Compos.~Math.~{\bf 140} No.~1 (2004) 84-98. \bibitem[R2]{Rajan3} C.~S.~Rajan, \emph{On the image and fibres of solvable base change}, MRL {\bf 9} 499-508 (2002). \bibitem[Ra1]{RRS} D.~Ramakrishnan, \emph{Modularity of the Rankin-Selberg $L$-series and multiplicity one for $\mathrm{SL}(2)$}, Ann. of Math. {\bf 152} No.~1 (2000) 45-111. \bibitem[S]{Sarnak} P.~Sarnak, \emph{Comments on Robert Langlands' lecture: ``Endoscopy and Beyond''}, available at www.math.princeton.edu/sarnak. \bibitem[Se]{SerreFG} J-P.~Serre, \textbf{Linear Representations of Finite Groups} Springer 1977. \bibitem[T]{Tate} J.~Tate, \textbf{Number theoretic background}, in \textbf{Automorphic Forms, Representations, and $L$-functions}, Proceedings of Symposia in Pure Mathematics {\bf 33.2} AMS (1979) 3-26. \bibitem[V]{Venk} A.~Venkatesh, \emph{Beyond endoscopy and the classification of special forms on $\mathrm{GL}(2)$}, J. f\"ur die reine und angew. Math., {\bf 577} (2004) 23-80. \bibitem[Vo]{Vogan} D.~Vogan, \emph{Gel'fand-Kirillov dimension for Harish-Chandra modules}, Invent.~Math.~{\bf 48} (1978) 449-505. 
\bibitem[W]{SW} S.~Wang, \emph{On the symmetric powers of cusp forms on $\mathrm{GL}(2)$ of icosahedral type}, IMRN {\bf 44} (2003) 2373-2390. \end{thebibliography} \end{document}
\begin{document} \title[Multiple solutions for superlinear fractional problems via theorems of mixed type]{Multiple solutions for superlinear fractional problems via theorems of mixed type} \author[V. Ambrosio]{Vincenzo Ambrosio} \address[V. Ambrosio]{Dipartimento di Scienze Pure e Applicate (DiSPeA), Universit\`a degli Studi di Urbino `Carlo Bo' Piazza della Repubblica, 13 61029 Urbino (Pesaro e Urbino, Italy)} \email{[email protected]} \keywords{Fractional Laplacians; multiple solutions; $\nabla$-theorem; extension method} \subjclass[2010]{35A15, 35J60, 35R11, 45G05} \begin{abstract} In this paper we investigate the existence of multiple solutions for the following two fractional problems \begin{equation*} \left\{ \begin{array}{ll} (-\Delta_{\Omega})^{s} u-\lambda u= f(x, u) &\mbox{ in } \Omega\\ u=0 &\mbox{ on } \partial \Omega \end{array} \right. \end{equation*} and \begin{equation*} \left\{ \begin{array}{ll} (-\Delta_{\mathbb{R}^{N}})^{s} u-\lambda u= f(x, u) &\mbox{ in } \Omega\\ u=0 &\mbox{ in } \mathbb{R}^{N}\setminus \Omega, \end{array} \right. \end{equation*} where $s\in (0,1)$, $N>2s$, $\Omega$ is a smooth bounded domain of $\mathbb{R}^{N}$, and $f:\bar{\Omega}\times \mathbb{R}\rightarrow \mathbb{R}$ is a superlinear continuous function which does not satisfy the well-known Ambrosetti-Rabinowitz condition. Here $(-\Delta_{\Omega})^{s}$ is the spectral Laplacian and $(-\Delta_{\mathbb{R}^{N}})^{s}$ is the fractional Laplacian in $\mathbb{R}^{N}$. By applying variational theorems of mixed type due to Marino and Saccon and the Linking Theorem, we prove the existence of multiple solutions for the above problems. \end{abstract} \maketitle \section{Introduction} \noindent In this paper we focus our attention on the multiplicity of solutions for the following two fractional problems \begin{equation}\label{P1} \left\{ \begin{array}{ll} (-\Delta_{\Omega})^{s} u-\lambda u= f(x, u) &\mbox{ in } \Omega\\ u=0 &\mbox{ on } \partial \Omega \end{array} \right. 
\end{equation} and \begin{equation}\label{P} \left\{ \begin{array}{ll} (-\Delta_{\mathbb{R}^{N}})^{s} u-\lambda u= f(x, u) &\mbox{ in } \Omega\\ u=0 &\mbox{ in } \mathbb{R}^{N}\setminus \Omega, \end{array} \right. \end{equation} where $s\in (0, 1)$, $N>2s$, $\lambda \in \mathbb{R}$ and $\Omega\subset \mathbb{R}^{N}$ is a smooth bounded open set. Here $(-\Delta_{\Omega})^{s}$ is the spectral Laplacian which is given by \begin{align} (-\Delta_{\Omega})^{s}u(x)= \sum_{k=1}^{+\infty} c_{k} \alpha_{k}^{s} \varphi_{k}(x) \mbox{ for any } u=\sum_{k=1}^{+\infty} c_{k}\varphi_{k}\in C^{\infty}_{c}(\Omega), \end{align} where $\{\varphi_{k}\}_{k\in \mathbb{N}}$ denotes the orthonormal basis of $L^{2}(\Omega)$ consisting of eigenfunctions of $-\Delta$ in $\Omega$ with homogeneous Dirichlet boundary conditions associated to the eigenvalues $\{\alpha_{k}\}_{k\in \mathbb{N}}$, that is, \begin{equation*} \left\{ \begin{array}{ll} -\Delta \varphi_{k}=\alpha_{k} \varphi_{k} &\mbox{ in } \Omega\\ \varphi_{k}=0 &\mbox{ on } \partial \Omega. \end{array} \right. \end{equation*} The fractional Laplacian operator $(-\Delta_{\mathbb{R}^{N}})^{s}$ may be defined for any $u:\mathbb{R}^{N}\rightarrow \mathbb{R}$ belonging to the Schwartz space $\mathcal{S}(\mathbb{R}^{N})$ of rapidly decaying $C^{\infty}$ functions in $\mathbb{R}^{N}$ by \begin{align} (-\Delta_{\mathbb{R}^{N}})^{s} u(x)= \frac{C_{N, s}}{2} \int_{\mathbb{R}^{N}} \frac{2u(x)- u(x+y)- u(x-y)}{|y|^{N+2s}} dy, \end{align} where $C_{N, s}$ is a normalizing constant depending only on $N$ and $s$; see \cite{DPV, MBRS} for more details. \\ As observed in \cite{sv3}, these two operators are completely different. Indeed, the spectral operator $(-\Delta_{\Omega})^{s}$ depends on the domain $\Omega$ considered, while the integral one $(-\Delta_{\mathbb{R}^{N}})^{s}$ evaluated at some point is independent of the domain in which the equation is set. 
Moreover, in contrast with the setting for the fractional Laplacian, it is not true that all functions are s-harmonic with respect to the spectral fractional Laplacian, up to a small error; see \cite{AV, DSV} for more details.\\ Recently, many papers have appeared dealing with the existence and the multiplicity of solutions to problems driven by these two operators, by applying several variational and topological techniques. In particular, a great attention has been devoted to the study of fractional problems like \eqref{P1} and \eqref{P} involving superlinear nonlinearities with subcritical or critical growth; see for instance \cite{A1, A2, A3, A4, A5, AFP, BCdPS, BMBS, CT, MBRS, PPS, sv1, sv2, sv4, YYY}. It is worth observing that a typical assumption to study this class of problems is to require that the nonlinearity $f$ verifies the well-known Ambrosetti-Rabinowitz condition \cite{AR}, that is there exist $\mu>2$ and $R>0$ such that \begin{align}\label{AR} 0< \mu F(x, t) \leq t f(x, t) \mbox{ for any } x\in \Omega, |t|>R. \end{align} This condition is quite natural and fundamental not only to guarantee that the Euler-Lagrange functional associated to the problem under consideration has a mountain pass geometry, but also to show that the Palais-Smale sequence of the Euler-Lagrange functional is bounded. We recall that \eqref{AR} is somewhat restrictive and eliminates many nonlinearities. For instance the function \begin{equation}\label{exf} f(x, t)=2t\log(1+t^{4})+\frac{4t^{5}}{t^{4}+1} \mbox{ with } (x, t)\in \Omega\times \mathbb{R} \end{equation} is superlinear at infinity but does not verify the condition \eqref{AR}.\\ The purpose of this paper is to investigate the multiplicity for the above two fractional problems when the parameter $\lambda$ lies in a suitable neighborhood of any eigenvalue of the fractional operator under consideration, and $f$ is superlinear and subcritical, but does not fulfill \eqref{AR}. 
\\ More precisely, along the paper we assume that $f:\bar{\Omega}\times \mathbb{R}\rightarrow \mathbb{R}$ is a continuous function satisfying the following conditions \begin{compactenum}[$(f1)$] \item there exist $c_{1}>0$ and $p\in (1, 2^{*}_{s}-1)$, with $2^{*}_{s}=\frac{2N}{N-2s}$, such that $$ |f(x, t)|\leq c_{1}(1+|t|^{p}) \mbox{ for any } (x, t)\in \Omega\times \mathbb{R}; $$ \item $$ \lim_{|t|\rightarrow 0} \frac{f(x, t)}{|t|}=0 \mbox{ uniformly in } x\in \Omega; $$ \item $$ \lim_{|t|\rightarrow \infty} \frac{F(x, t)}{t^{2}}=+\infty \mbox{ uniformly in } x\in \Omega, $$ where $F(x, t)=\int_{0}^{t} f(x, \tau)\, d\tau$; \item there exist $\beta\in (\frac{2Np}{N+2s}, 2^{*}_{s})$, $c_{2}>0$ and $T>0$ such that \begin{align*} &f(x, t)t-2F(x, t)>0 \mbox{ for any } x\in \Omega, |t|>0,\\ &f(x, t)t-2F(x, t)\geq c_{2}|t|^{\beta} \mbox{ for any } x\in \Omega, |t|\geq T; \end{align*} \item $F(x, t)\geq 0$ for any $(x, t)\in \Omega\times \mathbb{R}$. \end{compactenum} As a model for $f$ we can take the function defined in \eqref{exf}. \noindent Now we state our first main result regarding the multiplicity for the problem \eqref{P1}: \begin{thm}\label{thm2} Assume $(f1)$-$(f5)$. Then for any $i\geq 2$ there exists $\delta_{i}>0$ such that for any $\lambda\in (\alpha^{s}_{i}-\delta_{i}, \alpha^{s}_{i})$, problem \eqref{P1} admits at least three nontrivial solutions. \end{thm} \noindent In order to prove Theorem \ref{thm2}, we apply suitable variational methods after transforming the problem \eqref{P1} into a degenerate elliptic equation with a nonlinear Neumann boundary condition by using the extension technique \cite{BrCDPS, CT, CS, CDDS}. Thanks to this approach we are able to overcome the nonlocality of the operator $(-\Delta_{\Omega})^{s}$ and we can use some critical point results to study the extended problem. 
More precisely, we show that the functional associated to the extended problem respects the geometry required by the $\nabla$-Theorem introduced by Marino and Saccon in \cite{MS}. Roughly speaking, this theorem says that if a $C^{1}$-functional $I$ defined on a Hilbert space has a linking structure and $\nabla I$ verifies an appropriate condition on some suitable sets (see Definition 2.1 below), then $I$ has two nontrivial critical points which may have the same critical level. We will apply this abstract result to the functional associated to the extended problem and we will get the existence of two nontrivial solutions. Finally, exploiting an additional linking structure, we get the existence of a third nontrivial solution. We recall that in the local setting, similar arguments have been developed and applied in many situations to obtain multiplicity results for several and different problems such as, elliptic problems of second and fourth order, noncooperative elliptic systems, nonlinear Schr\"odinger equations with indefinite linear part in $\mathbb{R}^{N}$, variational inequalities; see \cite{MMS, MS2, OL, w, WZZ}. Differently from the classic case, in the nonlocal framework, the only result comparable to Theorem \ref{thm2} is due to Mugnai and Pagliardini \cite{MP} who obtained a multiplicity result to problem \eqref{P1} when $s= \frac{1}{2}$ and $f$ satisfies \eqref{AR}. \noindent Our second main result concerns the multiplicity of solutions to \eqref{P}. \begin{thm}\label{thm1} Assume $(f1)$-$(f5)$. Then for any $i\geq 2$ there exists $\delta_{i}>0$ such that for any $\lambda\in (\lambda_{i}-\delta_{i}, \lambda_{i})$, problem \eqref{P} admits at least three nontrivial solutions. Here $\{\lambda_{k}\}_{k\in \mathbb{N}}$ are the eigenvalues of the fractional Laplacian $(-\Delta_{\mathbb{R}^{N}})^{s}$ with homogeneous condition in $\mathbb{R}^{N}\setminus \Omega$. 
\end{thm} \noindent The proof of the above result is obtained following the approach developed to prove Theorem \ref{thm2}. Anyway, we do not make use of any extension method and our techniques work also when we replace $(-\Delta_{\mathbb{R}^{N}})^{s}$ by the more general integro-differential operator $-\mathcal{L}_{K}$ defined up to a positive constant as $$ \mathcal{L}_{\mathcal{K}} u(x)=\int_{\mathbb{R}^{N}} (u(x+y)+u(x-y)-2u(x)) \mathcal{K}(y) dy \quad (x\in \mathbb{R}^{N}), $$ where $\mathcal{K}:\mathbb{R}^{N}\setminus\{0\}\rightarrow (0, \infty)$ is a measurable function such that $\mathcal{K}(-x)=\mathcal{K}(x)$ for all $x\in \mathbb{R}^{N}\setminus\{0\}$, $m \mathcal{K}\in L^{1}(\mathbb{R}^{N})$ with $m(x)=\min\{|x|^{2}, 1\}$, and there exists $\theta>0$ such that $\mathcal{K}(x)\geq \theta |x|^{-(N+2s)}$ for all $x\in \mathbb{R}^{N}\setminus\{0\}$. In this context, we take care of the well-known results on the spectrum of integro-differential operators obtained by Servadei and Valdinoci in \cite{sv2, sv3}. We point out that in a recent paper Molica Bisci et al. \cite{MBMS} proved a similar result to Theorem \ref{thm1} when $f$ verifies condition \eqref{AR}, obtaining a nonlocal counterpart of the multiplicity result established in \cite{M}. The paper is organized as follows. In Section $2$ we recall some useful results related to the extension method in a bounded domain and then we provide some useful lemmas which will be fundamental to apply a critical point theorem of mixed nature. In Section $3$ we deal with the existence of three nontrivial weak solutions to the problem \eqref{P}. \section{multiplicity for the problem \eqref{P1}} \subsection{Extended problem in the half-cylinder} In order to study problem \eqref{P1}, we use a suitable variant of the extension technique due to Caffarelli and Silvestre \cite{CS}; see \cite{BrCDPS, CT, CDDS} for more details. Firstly, we collect some useful notations and basic results which will be useful along the paper. 
\\ Fix $s\in (0, 1)$. We say that $u\in H^{s}(\Omega)$ if $u\in L^{2}(\Omega)$ and it holds $$ [u]_{H^{s}(\Omega)}^{2}=\iint_{\Omega\times \Omega} \frac{|u(x)-u(y)|^{2}}{|x-y|^{N+2s}} \, dx dy<\infty. $$ We define $H^{s}_{0}(\Omega)$ as the closure of $C^{\infty}_{c}(\Omega)$ with respect to the norm $[u]_{H^{s}(\Omega)}^{2}+\|u\|_{L^{2}(\Omega)}^{2}$. The space $H^{\frac{1}{2}}_{00}(\Omega)$ is the Lions-Magenes space \cite{LM} which consists of the functions $u\in H^{\frac{1}{2}}(\Omega)$ such that $$ \int_{\Omega} \frac{u^{2}(x)}{{\rm dist}(x, \partial \Omega)}\, dx<\infty. $$ Let us introduce the Hilbert space $$ \mathbb{H}^{s}(\Omega)=\Bigl\{u=\sum_{k=1}^{\infty} c_{k}\varphi_{k}\in L^{2}(\Omega): \sum_{k=1}^{\infty} |c_{k}|^{2}\alpha_{k}^{s}<\infty\Bigr\}. $$ It is well known \cite{LM} that interpolation leads to \begin{equation*} \mathbb{H}^{s}(\Omega)= \left\{ \begin{array}{ll} H^{s}(\Omega) &\mbox{ if } s\in (0, \frac{1}{2})\\ H^{\frac{1}{2}}_{00}(\Omega) &\mbox{ if } s=\frac{1}{2} \\ H^{s}_{0}(\Omega) &\mbox{ if } s\in (\frac{1}{2}, 1). \end{array} \right. \end{equation*} Let us define the cylinder $\mathcal{C}=\Omega\times (0, +\infty)$ and its lateral boundary $\partial_{L}\mathcal{C}=\partial \Omega\times [0, +\infty)$. Let us denote by $H^{1}_{0, L}(y^{1-2s})$ the space of measurable functions $v:\mathcal{C}\rightarrow \mathbb{R}$ such that $v\in H^{1}(\Omega\times (\alpha, \beta))$ for all $0<\alpha<\beta<+\infty$, $v=0$ on $\partial_{L}\mathcal{C}$ and for which the following norm is finite $$ \|v\|_{H^{1}_{0, L}(y^{1-2s})}^{2}=\iint_{\mathcal{C}} y^{1-2s} |\nabla v|^{2}\, dx dy. $$ We recall the following trace theorem which relates $H^{1}_{0, L}(y^{1-2s})$ to $\mathbb{H}^{s}(\Omega)$. 
\begin{thm}\cite{BrCDPS, CT, CDDS}\label{tracethm} There exists a surjective continuous linear map $\textup{Tr}: H^{1}_{0, L}(y^{1-2s})\rightarrow \mathbb{H}^{s}(\Omega)$ such that, for any $u\in H^{1}_{0, L}(y^{1-2s})$ $$ \kappa_{s} \|\textup{Tr}(u)\|^{2}_{\mathbb{H}^{s}(\Omega)}\leq \|u\|^{2}_{H^{1}_{0, L}(y^{1-2s})}. $$ \end{thm} \noindent We also have some useful embedding results. \begin{thm}\cite{BrCDPS, CT, CDDS}\label{SSembedding} Let $N> 2s$ and $q\in [1, 2^{*}_{s}]$. Then there exists a constant $C$ depending on $N$, $q$ and the measure of $\Omega$, such that, for all $u\in H^{1}_{0, L}(y^{1-2s})$ $$ \|\textup{Tr}(u)\|_{L^{q}(\Omega)}\leq C \|u\|_{H^{1}_{0, L}(y^{1-2s})}. $$ Moreover, $H^{1}_{0, L}(y^{1-2s})$ is compactly embedded into $L^{q}(\Omega)$ for any $q\in [1, 2^{*}_{s})$. \end{thm} \noindent Thus, we get the following fundamental result which allows us to realize the fractional spectral Laplacian $(-\Delta_{\Omega})^{s}$. \begin{thm}\cite{BrCDPS, CT, CDDS} Let $u\in \mathbb{H}^{s}(\Omega)$. Then there exists a unique $v\in H^{1}_{0, L}(y^{1-2s})$ such that \begin{equation*} \left\{ \begin{array}{ll} -\dive(y^{1-2s} \nabla v)= 0 &\mbox{ in } \mathcal{C} \\ v=0 &\mbox{ on } \partial_{L} \mathcal{C}\\ \textup{Tr}(v)=u &\mbox{ on } \partial^{0} \mathcal{C}. \end{array} \right. \end{equation*} Moreover $$ \frac{\partial v}{\partial \nu^{1-2s}}:=-\lim_{y\rightarrow 0^{+}} y^{1-2s} v_{y}(x, y)=\kappa_{s}(-\Delta_{\Omega})^{s} u(x) \mbox{ in } \mathbb{H}^{s}(\Omega)^{*}, $$ where $\mathbb{H}^{s}(\Omega)^{*}$ is the dual of $\mathbb{H}^{s}(\Omega)$. The function $v$ is called the extension of $u$. 
\end{thm} \noindent We also recall that if $u=\sum_{k=1}^{\infty} c_{k} \varphi_{k}\in \mathbb{H}^{s}(\Omega)$, then the extension of $u$ is given by $$ v(x, y)=\sum_{k=1}^{\infty} c_{k} \varphi_{k}(x) \theta(\sqrt{\alpha_{k}} y), $$ where $\theta\in H^{1}(\mathbb{R}_{+}, y^{1-2s})$ solves the problem \begin{equation*} \left\{ \begin{array}{ll} \theta''+\frac{1-2s}{y}\theta'-\theta= 0 &\mbox{ in } \mathbb{R}_{+}\\ \theta(0)=1, \quad \mbox{ and } -\lim_{y\rightarrow 0^{+}} y^{1-2s}\theta'(y)=\kappa_{s}. \end{array} \right. \end{equation*} In addition, $\|v\|_{H^{1}_{0, L}(y^{1-2s})}^{2}=\kappa_{s}\|u\|^{2}_{\mathbb{H}^{s}(\Omega)}$; see \cite{BrCDPS, CT, CDDS} for more details. \begin{remark} In order to simplify notation, when no confusion arises, we shall denote by $v$ the function defined in the cylinder $\mathcal{C}$ as well as its trace $\textup{Tr}(v)$ on $\Omega\times\{y=0\}$. \end{remark} Taking into account the above results, we can deduce that the study of \eqref{P1} is equivalent to considering the following degenerate elliptic problem with nonlinear Neumann boundary condition \begin{equation}\label{P3} \left\{ \begin{array}{ll} \dive(y^{1-2s}\nabla u)=0 &\mbox{ in } \mathcal{C}\\ u=0 &\mbox{ on } \partial_{L} \mathcal{C} \\ \frac{\partial u}{\partial \nu^{1-2s}}=\kappa_{s}[\lambda u+f(x, u)] &\mbox{ on } \Omega\times \{0\}. \end{array} \right. \end{equation} For simplicity, in what follows, we will assume that $\kappa_{s}=1$. \subsection{Technical lemmas and $\nabla$-condition} For $i\geq 2$, let us introduce the following notations. Let $H_{i}={\rm span} \{\psi_{1}, \dots, \psi_{i}\}$, where $\psi_{k}(x, y)=\varphi_{k}(x)\theta(\sqrt{\alpha_{k}}y)$, $$ H_{i}^{\perp}=\{u\in H^{1}_{0, L}(y^{1-2s}): \langle u, \psi_{j}\rangle_{H^{1}_{0, L}(y^{1-2s})}=0, \mbox{ for all } j=1, \dots, i\} $$ and $H_{i}^{0}=\{\psi_{i}, \dots, \psi_{j} \}$. We set $\mu_{i}=\alpha_{i}^{s}$. 
Since $\{\alpha_{k}\}_{k\in \mathbb{N}}$ is increasing, a direct calculation yields the next result. \begin{lem}\label{Slem} For any $i\geq 1$, the following inequalities hold \begin{equation}\label{7s} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}\leq \mu_{i} \|u\|_{L^{2}(\Omega)}^{2} \mbox{ for any } u\in H_{i} \end{equation} and \begin{equation}\label{8s} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}\geq \mu_{i+1} \|u\|_{L^{2}(\Omega)}^{2} \mbox{ for any } u\in H_{i}^{\perp}. \end{equation} \end{lem} \noindent Now, we prove an auxiliary lemma. \begin{lem}\label{clem} Let $K: L^{2}(\Omega)\rightarrow H^{1}_{0, L}(y^{1-2s})$ be the operator defined by setting $K(u)=v$, where $v$ is the unique solution to \begin{equation}\label{PK} \left\{ \begin{array}{ll} -\dive(y^{1-2s} \nabla v)= 0 &\mbox{ in } \mathcal{C} \\ v=0 &\mbox{ on } \partial_{L} \mathcal{C}\\ \frac{\partial v}{\partial \nu^{1-2s}}=u &\mbox{ on } \partial^{0} \mathcal{C}. \end{array} \right. \end{equation} Then $K$ is compact. \end{lem} \begin{proof} Let $\{u_{n}\}_{n\in \mathbb{N}}$ be a bounded sequence in $L^{2}(\Omega)$. From the weak formulation of \eqref{PK} and Theorem \ref{SSembedding}, we can see that $$ \|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}\leq \|u_{n}\|_{L^{2}(\Omega)}\|v_{n}\|_{L^{2}(\Omega)}\leq C \|u_{n}\|_{L^{2}(\Omega)} \|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})} $$ that is, $\{v_{n}\}_{n\in \mathbb{N}}$ is bounded in $H^{1}_{0, L}(y^{1-2s})$. Then, in view of Theorem \ref{SSembedding}, we may assume that \begin{align}\begin{split}\label{SCS} &v_{n}\rightharpoonup v \mbox{ in } H^{1}_{0, L}(y^{1-2s}) \\ &v_{n}\rightarrow v \mbox{ in } L^{q}(\Omega) \quad \forall q\in [1, 2^{*}_{s}). \end{split}\end{align} Now, by using \eqref{PK}, we can see that for any $n\in \mathbb{N}$ \begin{equation}\label{waffle} \|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}-\iint_{\mathcal{C}} y^{1-2s} \nabla v_{n}\nabla v\, dx dy=\int_{\Omega} u_{n}(v_{n}-v)\, dx. 
\end{equation} Taking into account \eqref{SCS} and the fact that $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded in $L^{2}(\Omega)$, from \eqref{waffle} we can deduce that $\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}\rightarrow \|v\|_{H^{1}_{0, L}(y^{1-2s})}$. Since $H^{1}_{0, L}(y^{1-2s})$ is a Hilbert space, we can conclude the proof. \end{proof} \noindent In order to deduce a multiplicity result for \eqref{P3} we need to recall the $\nabla$-Theorem due to Marino and Saccon \cite{MS}. We begin giving the following definition. \begin{defn} Let $X$ be a Hilbert space, $I\in C^{1}(X, \mathbb{R})$ and $M$ a closed subspace of $X$, $a, b\in \mathbb{R}\cup \{-\infty, +\infty\}$. We say that the condition $(\nabla) (I, M, a, b)$ holds if there is $\gamma>0$ such that $$ \inf \{\|P_{M} \nabla I(u)\|: a\leq I(u)\leq b, dist(u, M)<\gamma\}>0, $$ where $P_{M}: X\rightarrow M$ is the orthogonal projection of $X$ onto $M$. \end{defn} \noindent Therefore, if the above condition holds, then $I_{\mid M}$ has no critical points $u$ such that $a\leq I(u)\leq b$, with some uniformity. \begin{thm}\cite{MS}\label{MS} Let $X$ be a Hilbert space and $X_{i}$, $i=1, 2, 3$ three subspaces of $X$ such that $X=X_{1}\oplus X_{2}\oplus X_{3}$ and $dim(X_{i})<+\infty$ with $i=1, 2$. Let us denote by $P_{i}$ the orthogonal projection of $X$ onto $X_{i}$, $I\in C^{1, 1}(X, \mathbb{R})$. Let $R, R', R'', \varrho>0$ such that $R'<\varrho<R''$. Define \begin{align*} &\Gamma=\{u\in X_{1}\oplus X_{2}: R'\leq \|u\|\leq R'', \|P_{1}u\|\leq R\} \mbox{ and } T=\partial_{X_{1}\oplus X_{2}} \Gamma, \\ &S_{23}(\varrho)=\{u\in X_{2}\oplus X_{3}: \|u\|=\varrho\} \mbox{ and } B_{23}(\varrho)=\{u\in X_{2}\oplus X_{3}: \|u\|\leq \varrho\}. \end{align*} Especially, if $R'=0$, $T$ may be defined as follows: \begin{align*} T=&\{u\in X_{1}: \|u\|\leq R\}\cup \{u\in X_{1}\oplus X_{2}: \|P_{2}u\|=R'', \|P_{1}u\|\leq R\} \\ &\cup \{u\in X_{1}\oplus X_{2}: \|P_{2}u\|\leq R'', \|P_{1}u\|= R\}. 
\end{align*} Assume that $$ a'=\sup I(T)<\inf I(S_{23}(\varrho))=a''. $$ Let $a$ and $b$ such that $a'<a<a''$ and $b>\sup I(\Gamma)$. Assume that $(\nabla) (I, X_{1}\oplus X_{3}, a, b)$ holds and that the $(PS)_{c}$ condition holds at any $c\in [a, b]$. Then $I$ has at least two critical points in $I^{-1}([a, b])$. Moreover, if $$ \inf I(B_{23}(\varrho))>a_{1}>-\infty $$ and the $(PS)_{c}$ condition holds at any $c\in [a_{1}, b]$, then $I$ has another critical level in $[a_{1}, a']$. \end{thm} \noindent Now, we introduce the energy functional $I_{\lambda}: H^{1}_{0, L}(y^{1-2s})\rightarrow \mathbb{R}$ associated to \eqref{P3}, that is, $$ I(u)=\frac{1}{2}\|u\|^{2}_{H^{1}_{0, L}(y^{1-2s})}-\frac{\lambda}{2}\int_{\Omega} u^{2}\, dx-\int_{\Omega} F(x, u)\, dx $$ defined for any $u\in H^{1}_{0, L}(y^{1-2s})$. From the assumptions on $f$, it is clear that the functional $I_{\lambda}$ is well-defined, $I_{\lambda}\in C^{1}(H^{1}_{0, L}(y^{1-2s}), \mathbb{R})$ and its derivative is given by $$ \langle I'_{\lambda}(u), v\rangle=\iint_{\mathcal{C}} y^{1-2s} \nabla u \nabla v \, dx dy-\lambda \int_{\Omega} u v \, dx-\int_{\Omega} f(x, u) v\, dx \mbox{ for any } v\in H^{1}_{0, L}(y^{1-2s}). $$ Since we aim to show that $I_{\lambda}$ verifies the assumptions of Theorem \ref{MS}, we need to prove some useful lemmas which allow us to verify that there exist $0<a<b$ such that the condition $(\nabla)(I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, a, b)$ holds. \begin{lem}\label{lem1s} Assume that $(f1)$ and $(f4)$ hold. Then, for any $\delta\in (0, \min\{\mu_{i+1}-\mu_{i}, \mu_{i}-\mu_{i-1}\})$ there exists $\e_{0}>0$ such that for any $\lambda\in [\mu_{i}-\delta, \mu_{i}+\delta]$ the unique critical point $u$ of $I_{\lambda}$ constrained on $H_{i-1}\oplus H_{i}^{\perp}$ such that $I_{\lambda}(u)\in [-\e_{0}, \e_{0}]$ is the trivial one. 
\end{lem} \begin{proof} Suppose by contradiction that there exist $\delta_{0}$, $\lambda_{n}\in [\mu_{i}- \delta_{0}, \mu_{i}+ \delta_{0}]$ and $\{u_{n}\}_{n\in \mathbb{N}} \subset H_{i-1} \oplus H_{i}^{\perp}\setminus\{0\}$ such that, for any $v\in H_{i-1}\oplus H_{i}^{\perp}$ we get \begin{align} &I_{\lambda_{n}}(u_{n})=\frac{1}{2} \iint_{\mathcal{C}} y^{1-2s} |\nabla u_{n}|^{2} \, dx dy-\frac{\lambda_{n}}{2}\int_{\Omega} |u_{n}|^{2} dx - \int_{\Omega} F(x, u_{n})\, dx \rightarrow 0 \label{9s}\\ &\langle I_{\lambda_{n}}'(u_{n}), v\rangle=\iint_{\mathcal{C}} y^{1-2s} \nabla u_{n} \nabla v \, dx dy- \lambda_{n}\int_{\Omega} u_{n}\, v \, dx - \int_{\Omega} f(x, u_{n}) \,v\,dx =0. \label{10s} \end{align} Up to a subsequence, we may assume that $\lambda_{n}\rightarrow \lambda \in [\mu_{i}- \delta_{0}, \mu_{i}+ \delta_{0}]$ as $n\rightarrow \infty$. Let us define $A_{n}:=\{x \in \Omega : |u_{n}(x)|\geq T\}$. Then, by assumption $(f4)$ we deduce \begin{align}\label{tv1s} 2 I_{\lambda_{n}}(u_{n})- \langle I_{\lambda_{n}}'(u_{n}), u_{n}\rangle = \int_{\Omega}(f(x, u_{n}) \, u_{n} - 2F(x, u_{n}))\, dx \geq c_{2} \int_{A_{n}} |u_{n}|^{\beta} \, dx. \end{align} By using \eqref{9s} and \eqref{10s} with $v=u_{n}$, from inequality \eqref{tv1s} we get \begin{align}\label{11s} \int_{A_{n}} |u_{n}|^{\beta} \, dx \rightarrow 0 \, \mbox{ as } n\rightarrow \infty. \end{align} Now, let us observe that \begin{align}\label{12s} \int_{\Omega} |u_{n}|^{\beta} \, dx = \int_{A_{n}} |u_{n}|^{\beta} \, dx + \int_{\Omega \setminus A_{n}}|u_{n}|^{\beta} \, dx \leq \int_{A_{n}} |u_{n}|^{\beta}\, dx + |\Omega| T^{\beta}. \end{align} Set $u_{n}= v_{n}+ w_{n} \in H_{i-1}\oplus H_{i}^{\perp}$. 
Then, by using \eqref{7s}, \eqref{8s}, the fact that $\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}=\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}+ \|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}$ and \eqref{10s}, we have \begin{align}\label{13s} &\int_{\Omega} f(x, u_{n})(w_{n}- v_{n})\, dx\nonumber \\ &= \|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \lambda_{n}\int_{\Omega} |w_{n}|^{2} dx - \|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} + \lambda_{n} \int_{\Omega} |v_{n}|^{2} \, dx\nonumber \\ &\geq \frac{\mu_{i+1} - \lambda_{n}}{\mu_{i+1}}\|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{\mu_{i-1} - \lambda_{n}}{\mu_{i-1}}\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}\nonumber \\ &\geq c_{3}\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2}, \end{align} where $c_{3}= \min \left\{\frac{\mu_{i+1}-\lambda_{n}}{\mu_{i+1}}, \frac{\lambda_{n}-\mu_{i-1}}{\mu_{i-1}}\right\}$.\\ From Theorem \ref{SSembedding} and by applying H\"older's inequality, we can infer that \begin{align}\label{tv2s} \int_{\Omega} f(x, u_{n})(w_{n}- v_{n})\, dx & \leq \left( \int_{\Omega} |f(x, u_{n})|^{\frac{p+1}{p}}dx \right)^{\frac{p}{p+1}} \left( \int_{\Omega} |w_{n}- v_{n}|^{p+1} dx\right)^{\frac{1}{p+1}} \nonumber \\ & \leq 2C \|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})} \left(\int_{\Omega} |f(x, u_{n})|^{\frac{p+1}{p}}dx \right)^{\frac{p}{p+1}}. \end{align} Taking into account \eqref{13s} and \eqref{tv2s}, and recalling that $u_{n}\not \equiv 0$, we have \begin{align}\label{14s} \|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}\leq c_{4} \left( \int_{\Omega} |f(x, u_{n})|^{\frac{p+1}{p}}dx \right)^{\frac{p}{p+1}}, \end{align} for some positive constant $c_{4}$. 
\\ Now, by using $(f1)$, Theorem \ref{SSembedding}, \eqref{12s} and H\"older's inequality we can deduce that \begin{align}\label{tv3s} \left| \int_{\Omega} f(x, u_{n})(v_{n}- w_{n})\, dx \right| &\leq\int_{\Omega} |f(x, u_{n})| |v_{n}- w_{n}|\, dx \nonumber \\ &\leq c_{1} \int_{\Omega} (|v_{n} - w_{n}|+ |u_{n}|^{p} |v_{n}- w_{n}|) \, dx\nonumber \\ &\leq c_{1} \|v_{n}- w_{n}\|_{L^{1}(\Omega)}+ c_{1} \left( \int_{\Omega} |u_{n}|^{\beta} dx \right)^{\frac{p}{\beta}}\left(\int_{\Omega} |v_{n}- w_{n}|^{\frac{\beta}{\beta-p}}dx \right)^{\frac{\beta-p}{\beta}} \nonumber \\ &\leq c_{1} C \|v_{n}- w_{n}\|_{H^{1}_{0, L}(y^{1-2s})} \left( 1+ \left( \int_{A_{n}} |u_{n}|^{\beta} dx + |\Omega|T^{\beta}\right)^{\frac{p}{\beta}}\right)\nonumber \\ &\leq c_{1} C\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})} \left( 1+ \left( \int_{A_{n}} |u_{n}|^{\beta} + |\Omega| T^{\beta}\right)^{\frac{p}{\beta}}\right). \end{align} Therefore, putting together \eqref{11s}, \eqref{13s} and \eqref{tv3s}, we deduce that $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded in $H^{1}_{0, L}(y^{1-2s})$. Hence, in view of Theorem \ref{SSembedding}, we may assume that, up to a subsequence, there are a sequence $\{u_{n}\}_{n\in \mathbb{N}}$ and a function $u\in H^{1}_{0, L}(y^{1-2s})$ such that \begin{align}\begin{split}\label{limitss} &u_{n}\rightharpoonup u \mbox{ in } H^{1}_{0, L}(y^{1-2s}) \\ &u_{n}\rightarrow u \mbox{ in } L^{r}(\Omega) \mbox{ for all } r\in [1, 2^{*}_{s})\\ &u_{n}(x) \rightarrow u(x) \mbox{ a.e. } x\in \Omega. 
\end{split}\end{align} By applying \eqref{9s}, \eqref{10s} and Fatou's Lemma we get \begin{align*} 0&= \lim_{n\rightarrow \infty} 2I_{\lambda_{n}}(u_{n}) - \langle I_{\lambda_{n}}'(u_{n}), u_{n}\rangle \\ &= \lim_{n\rightarrow \infty} \int_{\Omega} (f(x, u_{n}) u_{n} - 2F(x, u_{n}))\, dx \\ &\geq \int_{\Omega} \liminf_{n\rightarrow \infty} (f(x, u_{n})u_{n} - 2 F(x, u_{n}))\, dx\\ &= \int_{\Omega} (f(x,u)u- 2F(x,u))\, dx \end{align*} which, combined with the assumptions $(f2)$ and $(f4)$, gives $u=0$. \\ Now, we distinguish two cases. Let us assume that $u_{n}\rightarrow 0$ as $n\rightarrow \infty$ in $H^{1}_{0,L}(y^{1-2s})$. From $(f1)$ and $(f2)$ we know that for any $\e>0$ there exists $c_{\e}>0$ such that \begin{equation}\label{mbmss} |f(x, t)|\leq \e |t|+c_{\e}|t|^{p} \mbox{ for any } (x, t)\in \Omega\times \mathbb{R}. \end{equation} By using \eqref{14s}, \eqref{mbmss} and Theorem \ref{SSembedding}, we have \begin{align*} 1\leq \lim_{n\rightarrow \infty} c_{4} \frac{\left(\int_{\Omega} |f(x, u_{n})|^{\frac{p+1}{p}}dx \right)^{\frac{p}{p+1}}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}=0. \end{align*} On the other hand, if there exists $\alpha>0$ such that $\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}\geq \alpha$ for $n$ large enough, then from \eqref{14s}, \eqref{limitss}, $(f2)$, the Dominated Convergence Theorem and $u=0$, we get \begin{align*} 0<\alpha \leq \lim_{n\rightarrow \infty} c_{4} \left( \int_{\Omega} |f(x, u_{n})|^{\frac{p+1}{p}}dx \right)^{\frac{p}{p+1}}=0, \end{align*} which is a contradiction. This ends the proof of the lemma. \end{proof} \begin{lem}\label{lem2s} Assume that $(f1)$ and $(f4)$ hold, $\lambda\in (\mu_{i-1}, \mu_{i+1})$ and $\{u_{n}\}_{n\in \mathbb{N}}\subset H^{1}_{0, L}(y^{1-2s})$ such that $I_{\lambda}(u_{n})$ is bounded, $Pu_{n}\rightarrow 0$ and $Q \nabla I_{\lambda}(u_{n})\rightarrow 0$ as $n\rightarrow +\infty$. Then $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded in $H^{1}_{0, L}(y^{1-2s})$. 
\end{lem} \begin{proof} Assume by contradiction that, up to a subsequence, $\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}\rightarrow \infty$ as $n\rightarrow \infty$. \\ Note that $u_{n}=Pu_{n}+Qu_{n}$, $Pu_{n}\rightarrow 0$ in $H^{1}_{0, L}(y^{1-2s})$ and $Q\nabla I_{\lambda}(u_{n})\rightarrow 0$, where $\nabla I_{\lambda}(u_{n})= v_{n}$ is such that \begin{equation*} \langle I'_{\lambda}(u_{n}), z\rangle= \iint_{\mathcal{C}} y^{1-2s} v_{n} \nabla z \, dxdy \end{equation*} for any $z\in H^{1}_{0, L}(y^{1-2s})$. So we get \begin{equation*} v_{n}= u_{n}- K(\lambda u_{n} + f(x, u_{n})), \end{equation*} where $K$ is defined as in Lemma \ref{clem}. Now, we recall that $u_{n}=Pu_{n}+Qu_{n}$ and $Pu_{n}\rightarrow 0$ in $H^{1}_{0, L}(y^{1-2s})$. Then, by exploiting the assumption $(f1)$, H\"older's inequality and the fact that all norms in $H_{i}^{0}$ are equivalent, we can see that \begin{align}\label{tv4s} \left|\int_{\Omega} f(x, u_{n}) Pu_{n}\, dx \right|&\leq \int_{\Omega} |f(x, u_{n})| |Pu_{n}|\, dx \nonumber \\ &\leq c_{1} \left( \int_{\Omega} |Pu_{n}|\, dx + \int_{\Omega} |Pu_{n}| |u_{n}|^{p}\, dx \right) \nonumber \\ &\leq c_{1} \|Pu_{n}\|_{L^{1}(\Omega)} + c_{1} \left( \int_{\Omega} |u_{n}|^{\beta} dx \right)^{\frac{p}{\beta}}\left(\int_{\Omega} |Pu_{n}|^{\frac{\beta}{\beta-p}}dx \right)^{\frac{\beta-p}{\beta}}\nonumber \\ &\leq c_{5} \|Pu_{n}\|_{L^{\infty}(\Omega)} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}), \end{align} with $c_{5}>0$. 
Now, from the assumption $(f4)$ and \eqref{tv4s}, we can deduce that \begin{align}\label{tv5s} &2I_{\lambda}(u_{n})- \langle Q\nabla I_{\lambda}(u_{n}), u_{n}\rangle \nonumber \\ &= \int_{\Omega} (f(x, u_{n}) u_{n} - 2F(x, u_{n}))\, dx + \|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \lambda \int_{\Omega} |Pu_{n}|^{2} dx - \int_{\Omega} f(x, u_{n}) Pu_{n} \, dx \nonumber \\ &\geq c_{2} \|u_{n}\|_{L^{\beta}(\Omega)}^{\beta} + \|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} -\lambda \|Pu_{n}\|_{L^{2}(\Omega)}^{2} - c_{5} \|Pu_{n}\|_{L^{\infty}(\Omega)} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}). \end{align} Here we used that for every $z\in H^{1}_{0, L}(y^{1-2s})$, $Pz$ is smooth and $\nabla Pu_{n}=P\nabla u_{n}$ due to $u\in {\rm span}\{\psi_{i}, \dots, \psi_{j}\}$ and $Pz\perp Qz$, so we have \begin{align*} \iint_{\mathcal{C}} y^{1-2s} &\nabla (P(u_{n}-K(\lambda u_{n}+g(x, u_{n})))) \nabla u_{n} \, dx dy\\ &= \|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \lambda \int_{\Omega} |Pu_{n}|^{2} dx - \int_{\Omega} f(x, u_{n}) Pu_{n} \, dx. \end{align*} Since $1<p<\beta$, ${\rm dim}H_{i}^{0}<+\infty$ and $\|Pu_{n}\|_{L^{\infty}(\Omega)} \rightarrow 0$ as $n\rightarrow \infty$, from \eqref{tv5s} we can infer that \begin{align}\label{tv60s} \frac{\|u_{n}\|_{L^{\beta}(\Omega)}^{p}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} Set $Qu_{n}= v_{n}+ w_{n} \in H_{i-1}\oplus H_{i}^{\perp}$. 
By using $(f1)$, Theorem \ref{SSembedding}, \eqref{7s} and H\"older's inequality we have \begin{align*} \langle Q\nabla I_{\lambda}(u_{n}), -v_{n} \rangle &= \lambda \|v_{n}\|_{L^{2}(\Omega)}^{2} - \|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} + \int_{\Omega} f(x, u_{n})v_{n}\, dx\\ &\geq \frac{\lambda- \mu_{i-1}}{\mu_{i-1}} \|v_{n}\|^{2} - \int_{\Omega} |f(x, u_{n})||v_{n}|\, dx \\ &\geq \frac{\lambda- \mu_{i-1}}{\mu_{i-1}}\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} -c_{1} \int_{\Omega} (|u_{n}|^{p} |v_{n}| + |v_{n}|)\, dx \\ &\geq \frac{\lambda- \mu_{i-1}}{\mu_{i-1}}\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} -c_{1} \left(\int_{\Omega} |u_{n}|^{\beta}dx\right)^{\frac{p}{\beta}} \left(\int_{\Omega} |v_{n}|^{\frac{\beta}{\beta-p}} dx\right)^{\frac{\beta-p}{\beta}} - c_{1}\|v_{n}\|_{L^{1}(\Omega)} \\ &\geq \frac{\lambda- \mu_{i-1}}{\mu_{i-1}}\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}^{2} -c'_{1} C\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}). \end{align*} Therefore, \eqref{tv60s} and H\"older's inequality imply that \begin{align}\label{16s} \frac{\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} In similar fashion we can infer that \begin{align}\label{17s} \frac{\|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} We can also show that \begin{align}\label{18s} \frac{\|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} Indeed, if \eqref{18s} does not hold, then $\frac{\|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow \ell\in (0, +\infty)$ and we can see that $$ 0\leftarrow \|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}=\frac{\|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}\rightarrow \ell \cdot (+\infty)=+\infty $$ which is impossible. 
Putting together \eqref{16s}, \eqref{17s} and \eqref{18s} we deduce that \begin{align*} 1= \frac{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\leq \frac{\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}+ \|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})} + \|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow \infty, \end{align*} which is a contradiction. Thus $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded in $H^{1}_{0, L}(y^{1-2s})$. \end{proof} \begin{lem}\label{lem3s} Assume $(f1)$ and $(f4)$. Then, for any $\delta\in (0, \min\{\mu_{i+1}-\mu_{i}, \mu_{i}-\mu_{i-1}\})$ there exists $\e_{0}>0$ such that for any $\lambda\in [\mu_{i}-\delta, \mu_{i}+\delta]$ and for any $\e_{1}, \e_{2}\in (0, \e_{0})$ with $\e_{1}<\e_{2}$, the condition $(\nabla) (I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, \e_{1}, \e_{2})$ holds. \end{lem} \begin{proof} Suppose by contradiction that there exists a positive constant $\delta_{0}$ such that for all $\varepsilon_{0}>0$ there are $\lambda\in [\mu_{i}- \delta_{0}, \mu_{i}+ \delta_{0}]$ and $\varepsilon_{1}, \varepsilon_{2} \in (0, \varepsilon_{0})$ with $\varepsilon_{1}< \varepsilon_{2}$ such that the condition $(\nabla)(I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, \varepsilon_{1}, \varepsilon_{2})$ does not hold. \\ Let $\varepsilon_{0}>0$ be as in Lemma \ref{lem1s}. Then, we can find a sequence $\{u_{n}\}_{n\in \mathbb{N}}\subset H^{1}_{0, L}(y^{1-2s})$ such that ${\rm dist}(u_{n}, H_{i-1} \oplus H_{i}^{\perp})\rightarrow 0$, $I_{\lambda}(u_{n})\in (\varepsilon_{1}, \varepsilon_{2})$ and $Q\nabla I_{\lambda}(U_{n})\rightarrow 0$. By Lemma \ref{lem2s} we deduce that $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded. Thus, by applying Theorem \ref{SSembedding}, there are a subsequence (still denoted by $u_{n}$) and $u\in H^{1}_{0, L}(y^{1-2s})$ such that $u_{n}\rightharpoonup u$ in $H^{1}_{0, L}(y^{1-2s})$ and $u_{n}\rightarrow u$ in $L^{q}(\Omega)$ for any $q\in [1, 2^{*}_{s})$. 
Taking into account assumption $(f1)$, $Q\nabla I_{\lambda}(u_{n})\rightarrow 0$, $Pu_{n}\rightarrow 0$ and Lemma \ref{clem}, we can see that $$ Q\nabla I_{\lambda}(u_{n})=u_{n}-Pu_{n}-K(\lambda u_{n}+f(x, u_{n})) $$ yields $u_{n}\rightarrow u$ in $H^{1}_{0, L}(y^{1-2s})$ and $u$ is a critical point of $I_{\lambda}$ constrained on $H_{i-1}\oplus H_{i}^{\perp}$. Hence, in view of Lemma \ref{lem1s}, we can infer that $u=0$. Since $0<\varepsilon_{1}\leq I_{\lambda}(u)$, we obtain a contradiction. \end{proof} \noindent Let us introduce the following notations: for fixed $i, k\in \mathbb{N}$ and $R, \varrho >0$, let \begin{align*} &B_{i}(R)= \{u\in H_{i} : \|u\|_{H^{1}_{0, L}(y^{1-2s})}\leq R\}, \\ &T_{i-1, i}(R) = \{u \in H_{i-1}: \|u\|_{H^{1}_{0, L}(y^{1-2s})}\leq R\} \cup \{u \in H_{i} : \|u\|_{H^{1}_{0, L}(y^{1-2s})}=R\}, \\ &S_{k}^{+}(\varrho)= \{u \in H_{k}^{\perp}: \|u\|_{H^{1}_{0, L}(y^{1-2s})}= \varrho\}, \\ &B_{k}^{+}(\varrho)= \{u\in H_{k}^{\perp}: \|u\|_{H^{1}_{0, L}(y^{1-2s})}\leq \varrho\}. \end{align*} \begin{lem}\label{lem4s} Assume that $(f1)$-$(f3)$ and $(f5)$. Then, for any $\lambda\in (\mu_{i-1}, \mu_{i+1})$, there are $R>\varrho>0$ such that $$ 0=\sup I_{\lambda}(T_{i-1, i}(R))<\inf I_{\lambda}(S_{i-1}^{+}(\varrho)). $$ \end{lem} \begin{proof} By using \eqref{7s} and the assumption $(f5)$, for any $u\in H_{i-1}$ and $\lambda\in (\mu_{i-1}, \mu_{i})$ we have \begin{align}\label{19s} I_{\lambda}(u)&= \frac{1}{2} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\mu_{i-1}- \lambda}{2\mu_{i-1}}\|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} \leq 0. \end{align} Taking into account the assumption $(f3)$ and the continuity of $F$, for any $c_{6}>0$ there is $M_{1}>0$ such that \begin{align}\label{tv6s} F(x, t) \geq \frac{c_{6}}{2}t^{2} - M_{1} \quad \forall (x, t)\in \Omega \times \mathbb{R}. 
\end{align} By using \eqref{7s} and \eqref{tv6s}, for any $u\in H_{i}$ and $\lambda\in (\mu_{i-1}, \mu_{i})$ we have \begin{align}\label{tv7s} I_{\lambda}(u)&= \frac{1}{2} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\mu_{i}- \lambda}{2\mu_{i}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{c_{6}}{2} \|u\|_{L^{2}(\Omega)}^{2} + M_{1}|\Omega| \nonumber \\ & \leq \frac{\mu_{i}- \lambda- c_{6}}{2\mu_{i}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} + M_{1}|\Omega|. \end{align} Taking $c_{6}= 2(\mu_{i}- \lambda)$, from \eqref{tv7s} we deduce that \begin{align}\label{20s} I_{\lambda}(u)\rightarrow - \infty \mbox{ as } \|u\|_{H^{1}_{0, L}(y^{1-2s})}\rightarrow \infty. \end{align} Now, we note that $(f1)$ and $(f2)$ imply that for any $\e>0$ there is $C_{\e}>0$ such that \begin{align}\label{tv8s} F(x, t)\leq \frac{\e}{2} t^{2} + C_{\e} |t|^{p+1} \quad \forall (x, t)\in \Omega \times \mathbb{R}, \end{align} which gives \begin{align}\label{21s} \left| \int_{\Omega} F(x, u)\, dx \right| \leq \frac{\e}{2} \|u\|_{L^{2}(\Omega)}^{2} + C_{\e} \|u\|_{L^{p+1}(\Omega)}^{p+1} \quad \forall u\in H^{1}_{0, L}(y^{1-2s}). \end{align} Thus, from \eqref{21s}, we can see that for any $u\in H_{i-1}^{\perp}$ \begin{align}\label{22s} I_{\lambda}(u)&= \frac{1}{2}\|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\geq \frac{\mu_{i}- \lambda- \e}{2 \mu_{i}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - CC_{\e} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{p+1}. \end{align} Take $\e= \frac{\mu_{i}- \lambda}{2}>0$. Recalling that $\lambda \in (\mu_{i-1}, \mu_{i})$ and $p+1>2$, from \eqref{19s}, \eqref{20s} and \eqref{22s}, we can find $R>\varrho >0$ such that \begin{align*} \sup I_{\lambda} (T_{i-1, i}(R))< \inf I_{\lambda} (S_{i-1}^{+}(\varrho)). \end{align*} \end{proof} \begin{lem}\label{lem5s} Assume that $(f5)$ holds. 
Then, for $R>0$ in Lemma \ref{lem4s} and for any $\e>0$ there exists $\delta'_{i}>0$ such that for any $\lambda\in (\mu_{i}-\delta'_{i}, \mu_{i})$ it holds $$ \sup I_{\lambda}(B_{i}(R))<\e. $$ \end{lem} \begin{proof} By using \eqref{7s}, the assumption $(f5)$ and $\lambda<\mu_{i}$, we deduce that, for any $u\in H_{i}$ we deduce \begin{align*} I_{\lambda}(u) = \frac{1}{2} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \leq \frac{\mu_{i}- \lambda}{2 \mu_{i}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}. \end{align*} Take $\delta_{i}'= \frac{\mu_{i}\e}{R^{2}}$. Then we deduce that \begin{align*} \sup I_{\lambda}(B_{i}(R)) \leq \frac{\mu_{i}- \lambda}{2 \mu_{i}} R^{2} = \frac{(\mu_{i}- \lambda)\e}{2 \delta_{i}'}<\e. \end{align*} \end{proof} \begin{lem}\label{lem6s} Assume that $(f1)$ and $(f4)$ hold. Then $I_{\lambda}$ verifies the Palais-Smale condition. \end{lem} \begin{proof} Let $\{u_{n}\}_{n\in \mathbb{N}}$ be a Palais-Smale sequence of $I_{\lambda}$. Taking into account $(f1)$, we have only to show that $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded. From the arguments in Lemma \ref{lem2s}, it is enough to prove that \begin{equation}\label{tv8s} \frac{\|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}{\|u_{n}\|_{H^{1}_{0, L}(y^{1-2s})}}\rightarrow 0 \mbox{ as } n\rightarrow +\infty. \end{equation} In view of $(f4)$, we know that there exist $c_{7}, c_{8}>0$ such that \begin{align*} f(x, t)t-2F(x, t)\geq c_{7}|t|-c_{8} \mbox{ for any } (x, t)\in \Omega\times \mathbb{R}. 
\end{align*} Then, by using the above inequality, the equivalence of the norms on the finite-dimensional space, and Theorem \ref{SSembedding}, we get \begin{align}\label{tv9s} 2I_{\lambda}(u_{n})-\langle I'_{\lambda}(u_{n}), u_{n}\rangle&=\int_{\Omega} (f(x, u_{n})u_{n}-2F(x, u_{n})) \, dx \nonumber\\ &\geq \int_{\Omega} (c_{7}|u_{n}|-c_{8})\, dx \nonumber\\ &\geq \int_{\Omega} (c_{7}|Pu_{n}|-c_{7}|v_{n}|-c_{7}|w_{n}|-c_{8})\, dx \nonumber\\ &\geq c_{9}\|Pu_{n}\|_{L^{1}(\Omega)}-c_{10}(\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}+\|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}+1) \nonumber \\ &\geq c'_{9}\|Pu_{n}\|_{H^{1}_{0, L}(y^{1-2s})}-c_{10}(\|v_{n}\|_{H^{1}_{0, L}(y^{1-2s})}+\|w_{n}\|_{H^{1}_{0, L}(y^{1-2s})}+1). \end{align} Putting together \eqref{16s}, \eqref{17s} and \eqref{tv9s} we can deduce that \eqref{tv8s} holds. \end{proof} \noindent Now we are in the position to give the proof of the main result of this section. \begin{proof}[Proof of Theorem \ref{thm2}] Firstly, we prove the existence of two critical points. Taking into account Lemma \ref{lem3s}, Lemma \ref{lem4s} and Lemma \ref{lem5s}, we can take $a\in (0, \inf I_{\lambda}(S^{+}_{i-1}(\varrho)))$ and $b>\sup I_{\lambda}(B_{i}(R))$ such that $0<a<b<\e_{0}$. Then the condition $(\nabla)(I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, a, b)$ is satisfied.By applying Lemma \ref{lem6s} and Theorem \ref{MS}, we can deduce that there exist two critical points $u_{1}, u_{2}\in H^{1}_{0,L}(y^{1-2s})$ such that $I_{\lambda}(u_{i})\in [a, b]$ for $i=1, 2$. Now, we prove the existence of a third critical point by invoking the Linking Theorem \cite{Rab}. Taking into account Theorem $5.3$ in \cite{Rab} and Lemma \ref{lem6s}, it is enough to prove that there are $\delta_{i}''>0$ and $R_{1}>\varrho_{1}>0$ such that for any $\lambda\in (\mu_{i}-\delta''_{i}, \mu_{i})$ we get \begin{equation}\label{23s} \sup I_{\lambda}(T_{i, i+1}(R_{1}))<\inf I_{\lambda}(S^{+}(\varrho_{1})). 
\end{equation} Let us note that \eqref{8s}, \eqref{21s} and Theorem \ref{SSembedding} yield \begin{align}\label{tv10s} I_{\lambda}(u)&=\frac{1}{2} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\geq \frac{\mu_{i+1}-\lambda-\e}{2\mu_{i+1}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}-CC_{\e}\|u\|_{H^{1}_{0, L}(y^{1-2s})}^{p+1} \mbox{ for any } u\in H_{i}^{\perp}. \end{align} Take $\e=\frac{\mu_{i+1}-\lambda}{2}$. Then, recalling that $p>1$, in view of \eqref{tv10s}, we can find $\varrho_{1}>0$ and $\alpha>0$ such that \begin{equation}\label{24s} \inf I_{\lambda}(S^{+}_{i}(\varrho_{1}))\geq \alpha>0. \end{equation} Now, by using \eqref{7s} and $(f5)$, we deduce that \begin{align}\label{tv11s} I_{\lambda}(u)&=\frac{1}{2}\|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\mu_{i}-\lambda}{2\mu_{i}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2} \mbox{ for any } u\in H_{i}. \end{align} Hence, by using \eqref{tv11s}, we can see that there exist $\delta''_{i}>0$ and $R_{1}>0$ such that for any $\lambda\in (\mu_{i}-\delta''_{i}, \mu_{i})$ we get \begin{equation}\label{25s} I_{\lambda}(u)< \alpha \mbox{ for any } \|u\|_{H^{1}_{0, L}(y^{1-2s})}\leq R_{1}. \end{equation} On the other hand, by using \eqref{7s} and $(f5)$, we can see that for any $u\in H_{i+1}$ and $\lambda\in (\mu_{i}-\delta''_{i}, \mu_{i})$, we have \begin{align}\label{26s} I_{\lambda}(u)&=\frac{1}{2} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\mu_{i+1}-\lambda}{2\mu_{i+1}} \|u\|_{H^{1}_{0, L}(y^{1-2s})}^{2}. \end{align} Putting together \eqref{24s}, \eqref{25s} and \eqref{26s} we can infer that \eqref{23s} is verified. 
By applying the Linking Theorem, we can deduce that there exists a critical point $u_{3}\in H^{1}_{0, L}(y^{1-2s})$ of $I_{\lambda}$ such that $I_{\lambda}(u)\geq \inf I_{\lambda}(S^{+}_{i}(\varrho_{1}))$. Choosing $\delta_{i}=\min\{ \delta'_{i}, \delta''_{i} \}$, where $\delta'_{i}$ is given in Lemma \ref{lem5s}, we can conclude that Theorem \ref{thm2} holds. \end{proof} \section{multiple solutions for the problem \eqref{P}} \noindent This section is devoted to the proof of Theorem \ref{thm1}. Since many calculations are adaptations to the ones presented in the previous section, we will emphasize only the differences between the ``spectral'' and the ``integral'' case. Firstly, we collect some notations and results which we will use in the sequel. For more details we refer the interested reader to \cite{MBRS, sv1, sv2, sv3}.\\ Let us define $$ \mathbb{X}_{0}=\{u\in H^{s}(\mathbb{R}^{N}): u=0 \mbox{ a.e. in } \mathbb{R}^{N}\setminus \Omega\}. $$ endowed wit the norm $$ \|u\|_{\mathbb{X}_{0}}^{2}=\iint_{\mathbb{R}^{2N}} \frac{|u(x)-u(y)|^{2}}{|x-y|^{N+2s}} \, dx dy. $$ Then, $\mathbb{X}_{0}$ is a Hilbert space, and the following useful embedding result holds. \begin{thm}\cite{sv1}\label{Sembedding} $\mathbb{X}_{0}$ is compactly embedded into $L^{q}(\mathbb{R}^{N})$ for any $q\in [1, 2^{*}_{s})$. \end{thm} \noindent Let us denote by $\{e_{k}, \lambda_{k}\}_{k\in \mathbb{N}}$ the eigenvalues and corresponding eigenfunctions of the fractional Laplacian operator $(-\Delta_{\mathbb{R}^{N}})^{s}$ with homogeneous boundary condition in $\mathbb{R}^{N}\setminus \Omega$, that is, \begin{equation*} \left\{ \begin{array}{ll} (-\Delta_{\mathbb{R}^{N}})^{s} e_{k}=\lambda_{k} e_{k} &\mbox{ in } \Omega\\ e_{k}=0 &\mbox{ in } \mathbb{R}^{N}\setminus \Omega. \end{array} \right. 
\end{equation*} We recall that $\lambda_{1}$ is simple, $0<\lambda_{1}<\lambda_{2}\leq \dots\leq \lambda_{k}\leq \lambda_{k+1}\leq \dots$, $\lambda_{k}\rightarrow +\infty$ and $e_{k}$ are H\"older continuous up to the boundary (differently from the ones of $(-\Delta_{\Omega})^{s}$ that are as smooth up the boundary as the boundary allows). \noindent As in Section $2$, for any $i\geq 2$, we denote by $P: \mathbb{X}_{0}\rightarrow H^{0}_{i}$ and $Q: \mathbb{X}_{0}\rightarrow H_{i-1}\oplus H_{i}^{\perp}$ the orthogonal projections, where $H_{i}^{0}={\rm span}\{e_{i}, \dots, e_{j}\}$. The next lemma is proved in \cite{sv2}. \begin{lem}\cite{sv2}\label{SVlem} The following inequalities holds \begin{align} &\iint_{\mathbb{R}^{2N}} \frac{|u(x)- u(y)|^{2} dx}{|x-y|^{N+2s}} \, dxdy \leq \lambda_{j} \int_{\Omega} |u|^{2} dx \mbox{ for all } u\in H_{j} \label{7}, \\ &\iint_{\mathbb{R}^{2N}} \frac{|u(x)- u(y)|^{2} dx}{|x-y|^{N+2s}} \, dxdy \geq \lambda_{j+1} \int_{\Omega} |u|^{2} dx \mbox{ for all } u\in H_{j}^{\perp} \label{8}. \\ \end{align} \end{lem} \noindent We say that a a function $u\in \mathbb{X}_{0}$ is a weak solution to \eqref{P} if it satisfies the identity $$ \iint_{\mathbb{R}^{2N}} \frac{(u(x)-u(y))}{|x-y|^{N+2s}}(v(x)-v(y))\, dx dy=\lambda \int_{\Omega} u v \, dx+\int_{\Omega} f(x, u) v\, dx $$ for any $v\in \mathbb{X}_{0}$. For this reason, we will look for critical points of the Euler-Lagrange functional $I_{\lambda}: \mathbb{X}_{0}\rightarrow \mathbb{R}$ defined by \begin{equation} I_{\lambda}(u)=\frac{1}{2} \|u\|_{\mathbb{X}_{0}}^{2}-\frac{\lambda}{2}\int_{\Omega} u^{2}\, dx-\int_{\Omega} F(x, u) \,dx. \end{equation} Since we will proceed as in Section $2$, we prove some technical lemmas which will be fundamental to deduce Theorem \ref{thm1}. With suitable modifications, it is easy to see that the next lemma can be proved following the lines of the proof of Lemma \ref{lem1s}. \begin{lem}\label{lem1} Assume that $(f1)$ and $(f4)$ hold. 
Then, for any $\delta\in (0, \min\{\lambda_{i+1}-\lambda_{i}, \lambda_{i}-\lambda_{i-1}\})$ there exists $\e_{0}>0$ such that for any $\lambda\in [\lambda_{i}-\delta, \lambda_{i}+\delta]$ the unique critical point $u$ of $I_{\lambda}$ constrained on $H_{i-1}\oplus H_{i}^{\perp}$ such that $I_{\lambda}(u)\in [-\e_{0}, \e_{0}]$ is the trivial one. \end{lem} \begin{lem}\label{lem2} Assume that $(f1)$ and $(f4)$ hold, $\lambda\in (\lambda_{i-1}, \lambda_{i+1})$ and $\{u_{n}\}_{n\in \mathbb{N}}\subset \mathbb{X}_{0}$ such that $I_{\lambda}(u_{n})$ is bounded, $Pu_{n}\rightarrow 0$ and $Q \nabla I_{\lambda}(u_{n})\rightarrow 0$ as $n\rightarrow +\infty$. Then $\{u_{n}\}_{n\in \mathbb{N}}$ is bounded in $\mathbb{X}_{0}$. \end{lem} \begin{proof} Suppose by contradiction that, up to a subsequence, $\|u_{n}\|_{\mathbb{X}_{0}}\rightarrow \infty$ as $n\rightarrow \infty$. \\ Set $u_{n}=Pu_{n}+Qu_{n}$. By using $(f1)$, H\"older's inequality and the fact that all norms in $H_{i}^{0}$ are equivalent, we get \begin{align}\label{tv4} \left|\int_{\Omega} f(x, u_{n}) Pu_{n}\, dx \right|&\leq \int_{\Omega} |f(x, u_{n})| |Pu_{n}|\, dx \nonumber \\ &\leq c_{1} \left( \int_{\Omega} |Pu_{n}|\, dx + \int_{\Omega} |Pu_{n}| |u_{n}|^{p}\, dx \right) \nonumber \\ &\leq c_{1} \|Pu_{n}\|_{L^{1}(\Omega)} + c_{1} \left( \int_{\Omega} |u_{n}|^{\beta} dx \right)^{\frac{p}{\beta}}\left(\int_{\Omega} |Pu_{n}|^{\frac{\beta}{\beta-p}}dx \right)^{\frac{\beta-p}{\beta}}\nonumber \\ &\leq c_{5} \|Pu_{n}\|_{L^{\infty}(\Omega)} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}), \end{align} with $c_{5}>0$. Now, we observe that \begin{align*} \langle Q\nabla I_{\lambda}(u_{n}), u_{n}\rangle&=\langle \nabla I_{\lambda}(u_{n}), u_{n}\rangle-\langle P\nabla I_{\lambda}(u_{n}), u_{n}\rangle \nonumber\\ &=\|u_{n}\|_{\mathbb{X}_{0}}^{2}-\lambda \|u_{n}\|^{2}_{L^{2}(\Omega)}-\int_{\Omega} f(x, u_{n})u_{n}\, dx \nonumber\\ &-\langle P(u_{n}-(-(-\Delta)^{s})^{-1}(\lambda u_{n}+f(x, u_{n}))), u_{n}\rangle. 
\end{align*} Since $\langle Pu, v\rangle_{\mathbb{X}_{0}}=\langle u, Pv\rangle_{\mathbb{X}_{0}}$ for any $u, v\in \mathbb{X}_{0}$, we can see that \begin{align*} \langle P(u_{n}-(-(-\Delta)^{s})^{-1}(\lambda u_{n}+f(x, u_{n}))), u_{n}\rangle&=\|Pu_{n}\|_{\mathbb{X}_{0}}^{2}-\lambda \langle Pu_{n}, (-(-\Delta)^{s})^{-1} u_{n}\rangle-\langle Pu_{n}, (-(-\Delta)^{s})^{-1} f(x, u_{n})\rangle \nonumber \\ &=\|Pu_{n}\|{\mathbb{X}_{0}}^{2}-\lambda \|Pu_{n}\|_{L^{2}(\Omega)}^{2}-\int_{\Omega} f(x, u_{n})Pu_{n}\, dx. \end{align*} Thus $(f4)$ and \eqref{tv4} give \begin{align}\label{tv5} &2I_{\lambda}(u_{n})- \langle Q\nabla I_{\lambda}(u_{n}), u_{n}\rangle \nonumber \\ &= \int_{\Omega} (f(x, u_{n}) u_{n} - 2F(x, u_{n}))\, dx + \|Pu_{n}\|{\mathbb{X}_{0}}^{2}-\lambda \|Pu_{n}\|_{L^{2}(\Omega)}^{2} - \int_{\Omega} f(x, u_{n}) Pu_{n} \, dx \nonumber \\ &\geq c_{2} \|u_{n}\|_{L^{\beta}(\Omega)}^{\beta} + \|Pu_{n}\|_{\mathbb{X}_{0}}^{2} -\lambda \|Pu_{n}\|_{L^{2}(\Omega)}^{2} - c_{5} \|Pu_{n}\|_{L^{\infty}(\Omega)} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}). \end{align} Since $1<p<\beta$, ${\rm dim}H_{i}^{0}<+\infty$ and $\|Pu_{n}\|_{L^{\infty}(\Omega)} \rightarrow 0$ as $n\rightarrow \infty$, from \eqref{tv5} we can deduce that \begin{align}\label{tv60} \frac{\|u_{n}\|_{L^{\beta}(\Omega)}^{p}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} Set $Qu_{n}= v_{n}+ w_{n} \in H_{i-1}\oplus H_{i}^{\perp}$. 
By using $(f1)$, Theorem \ref{Sembedding}, \eqref{7} and H\"older's inequality we have \begin{align*} \langle Q\nabla I_{\lambda}(u_{n}), -v_{n} \rangle &= \lambda \|v_{n}\|_{L^{2}(\Omega)}^{2} - \|v_{n}\|_{\mathbb{X}_{0}}^{2} + \int_{\Omega} f(x, u_{n})v_{n}\, dx\\ &\geq \frac{\lambda- \lambda_{i-1}}{\lambda_{i-1}} \|v_{n}\|_{\mathbb{X}_{0}}^{2} - \int_{\Omega} |f(x, u_{n})||v_{n}|\, dx \\ &\geq \frac{\lambda- \lambda_{i-1}}{\lambda_{i-1}}\|v_{n}\|_{\mathbb{X}_{0}}^{2} -c_{1} \int_{\Omega} (|u_{n}|^{p} |v_{n}| + |v_{n}|)\, dx \\ &\geq \frac{\lambda- \lambda_{i-1}}{\lambda_{i-1}}\|v_{n}\|_{\mathbb{X}_{0}}^{2} -c_{1} \left(\int_{\Omega} |u_{n}|^{\beta}dx\right)^{\frac{p}{\beta}} \left(\int_{\Omega} |v_{n}|^{\frac{\beta}{\beta-p}} dx\right)^{\frac{\beta-p}{\beta}} - c_{1}\|v_{n}\|_{L^{1}(\Omega)} \\ &\geq \frac{\lambda- \lambda_{i-1}}{\lambda_{i-1}}\|v_{n}\|^{2}_{\mathbb{X}_{0}} -c_{1} C\|v_{n}\|_{\mathbb{X}_{0}} (1+ \|u_{n}\|_{L^{\beta}(\Omega)}^{p}). \end{align*} Arguing as in the proof of Lemma \ref{lem2s}, we can see that \begin{align}\label{16} \frac{\|v_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0, \quad \frac{\|w_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0 \mbox{ as } n\rightarrow \infty, \end{align} and \begin{align}\label{18} \frac{\|Pu_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0 \mbox{ as } n\rightarrow \infty. \end{align} Putting together \eqref{16} and \eqref{18} we can see that \begin{align*} 1= \frac{\|u_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\leq \frac{\|v_{n}\|_{\mathbb{X}_{0}}+ \|Pu_{n}\|_{\mathbb{X}_{0}} + \|w_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0 \mbox{ as } n\rightarrow \infty, \end{align*} which is impossible. \end{proof} \begin{lem}\label{lem3} Assume that $(f1)$ and $(f4)$. 
Then, for any $\delta\in (0, \min\{\lambda_{i+1}-\lambda_{i}, \lambda_{i}-\lambda_{i-1}\})$ there exists $\e_{0}>0$ such that for any $\lambda\in [\lambda_{i}-\delta, \lambda_{i}+\delta]$ and for any $\e_{1}, \e_{2}\in (0, \e_{0})$ with $\e_{1}<\e_{2}$, the condition $(\nabla) (I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, \e_{1}, \e_{2})$ holds. \end{lem} \begin{proof} The proof follows the lines of the proof of Lemma \ref{lem3s} replacing Lemma \ref{lem1s}, Lemma \ref{lem2s} and Theorem \ref{SSembedding} by Lemma \ref{lem1}, Lemma \ref{lem2}, and Theorem \ref{Sembedding} respectively. Moreover, in this case, to prove that $u_{n}$ converges strongly in $\mathbb{X}_{0}$, we use that fact that $K=(-(-\Delta_{\mathbb{R}^{N}})^{s})^{-1}: L^{q'}(\Omega)\rightarrow \mathbb{X}_{0}$ is compact, with $q\in [1, 2^{*}_{s})$; see Section $2.4$ in \cite{MBMS}. \end{proof} \noindent Now, we define the following sets: for fixed $i, k\in \mathbb{N}$ and $R, \varrho >0$, let \begin{align*} &B_{i}(R)= \{u\in H_{i} : \|u\|_{\mathbb{X}_{0}}\leq R\}, \\ &T_{i-1, i}(R) = \{u \in H_{i-1}: \|u\|_{\mathbb{X}_{0}}\leq R\} \cup \{u \in H_{i} : \|u\|_{\mathbb{X}_{0}}=R\}, \\ &S_{k}^{+}(\varrho)= \{u \in H_{k}^{\perp}: \|u\|_{\mathbb{X}_{0}}= \varrho\}, \\ &B_{k}^{+}(\varrho)= \{u\in H_{k}^{\perp}: \|u\|_{\mathbb{X}_{0}}\leq \varrho\}. \end{align*} \begin{lem}\label{lem4} Assume that $(f1)$-$(f3)$ and $(f5)$ hold. Then, for any $\lambda\in (\lambda_{i-1}, \lambda_{i+1})$, there are $R>\varrho>0$ such that $$ 0=\sup I_{\lambda}(T_{i-1, i}(R))<\inf I_{\lambda}(S_{i-1}^{+}(\varrho)). 
$$ \end{lem} \begin{proof} By using \eqref{7} and the assumption $(f5)$, for any $u\in H_{i-1}$ and $\lambda\in (\lambda_{i-1}, \lambda_{i})$ we have \begin{align}\label{19} I_{\lambda}(u)&= \frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)- u(y)|^{2}}{|x-y|^{N+2s}}\, dxdy - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\lambda_{i-1}- \lambda}{2\lambda_{i-1}}\|u\|_{\mathbb{X}_{0}}^{2} \leq 0. \end{align} Recalling $(f3)$ and \eqref{7}, for any $u\in H_{i}$ and $\lambda\in (\lambda_{i-1}, \lambda_{i})$ we get \begin{align}\label{tv7} I_{\lambda}(u)&= \frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)- u(y)|^{2}}{|x-y|^{N+2s}}\, dxdy - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\lambda_{i}- \lambda}{2\lambda_{i}} \|u\|_{\mathbb{X}_{0}}^{2} - \frac{c_{6}}{2} \|u\|_{L^{2}(\Omega)}^{2} + M_{1}|\Omega| \nonumber \\ & \leq \frac{\lambda_{i}- \lambda- c_{6}}{2\lambda_{i}} \|u\|_{\mathbb{X}_{0}}^{2} + M_{1}|\Omega|. \end{align} Taking $c_{6}= 2(\lambda_{i}- \lambda)$, from \eqref{tv7} we deduce that \begin{align}\label{20} I_{\lambda}(u)\rightarrow - \infty \mbox{ as } \|u\|_{\mathbb{X}_{0}}\rightarrow \infty. \end{align} By exploiting $(f1)$ and $(f2)$, we can see that for any $u\in H_{i-1}^{\perp}$ \begin{align}\label{22} I_{\lambda}(u)&= \frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)- u(y)|^{2}}{|x-y|^{N+2s}}\, dxdy - \frac{\lambda}{2} \int_{\Omega} |u|^{2} dx - \int_{\Omega} F(x, u)\, dx \nonumber \\ &\geq \frac{\lambda_{i}- \lambda- \e}{2 \lambda_{i}} \|u\|_{\mathbb{X}_{0}}^{2} - CC_{\e} \|u\|_{\mathbb{X}_{0}}^{p+1}. \end{align} Choosing $\e= \frac{\lambda_{i}- \lambda}{2}>0$, and by using $\lambda \in (\lambda_{i-1}, \lambda_{i})$, $p+1>2$, \eqref{19}, \eqref{20} and \eqref{22}, we can deduce that there exist $R>\varrho >0$ such that \begin{align*} \sup I_{\lambda} (T_{i-1, i}(R))< \inf I_{\lambda} (S_{i-1}^{+}(\varrho)). 
\end{align*} \end{proof} \noindent The next result can be obtained following the proof of Lemma \ref{lem5s}. \begin{lem}\label{lem5} Assume that $(f5)$ holds. Then, for $R>0$ in Lemma \ref{lem4} and for any $\e>0$ there exists $\delta'_{i}>0$ such that for any $\lambda\in (\lambda_{i}-\delta'_{i}, \lambda_{i})$ it holds $$ \sup I_{\lambda}(B_{i}(R))<\e. $$ \end{lem} \begin{lem}\label{lem6} Assume that $(f1)$ and $(f4)$ hold. Then $I_{\lambda}$ verifies the Palais-Smale condition. \end{lem} \begin{proof} Let $\{u_{n}\}_{n\in \mathbb{N}}$ be a Palais-Smale sequence of $I_{\lambda}$. We have only to show that \begin{equation}\label{tv8} \frac{\|Pu_{n}\|_{\mathbb{X}_{0}}}{\|u_{n}\|_{\mathbb{X}_{0}}}\rightarrow 0 \mbox{ as } n\rightarrow +\infty. \end{equation} By using $(f4)$ and the equivalence of the norms on the finite-dimensional space, we get \begin{align}\label{tv9} 2I_{\lambda}(u_{n})-\langle I'_{\lambda}(u_{n}), u_{n}\rangle&=\int_{\Omega} (f(x, u_{n})u_{n}-2F(x, u_{n})) \, dx \nonumber\\ &\geq \int_{\Omega} (c_{7}|u_{n}|-c_{8})\, dx \nonumber\\ &\geq \int_{\Omega} (c_{7}|Pu_{n}|-c_{7}|v_{n}|-c_{7}|w_{n}|-c_{8})\, dx \nonumber\\ &\geq c_{9}\|Pu_{n}\|_{\mathbb{X}_{0}}-c_{10}(\|v_{n}\|_{\mathbb{X}_{0}}+\|w_{n}\|_{\mathbb{X}_{0}}+1). \end{align} Putting together \eqref{16} and \eqref{tv9}, we can deduce that \eqref{tv8} holds. \end{proof} \begin{proof}[Proof of Theorem \ref{thm1}] In view of Lemma \ref{lem3}, Lemma \ref{lem4} and Lemma \ref{lem5}, we can take $$ a\in (0, \inf I_{\lambda}(S^{+}_{i-1}(\varrho))) \mbox{ and } b>\sup I_{\lambda}(B_{i}(R)) $$ such that $0<a<b<\e_{0}$. Thus the condition $(\nabla)(I_{\lambda}, H_{i-1}\oplus H_{i}^{\perp}, a, b)$ holds. By using Lemma \ref{lem6} and Theorem \ref{MS}, we can find two critical points $u_{1}, u_{2}\in \mathbb{X}_{0}$ such that $I_{\lambda}(u_{i})\in [a, b]$ for $i=1, 2$. The existence of a third critical point will be obtained by applying the Linking Theorem. 
We prove that there are $\delta''_{i}>0$ and $R_{1}>\varrho_{1}>0$ such that for any $\lambda\in (\lambda_{i}-\delta''_{i}, \lambda_{i})$ it results \begin{equation}\label{23} \sup I_{\lambda}(T_{i, i+1}(R_{1}))<\inf I_{\lambda}(S^{+}(\varrho_{1})). \end{equation} By using \eqref{8}, $(f1)$, and $(f2)$ we get \begin{align}\label{tv10} I_{\lambda}(u)&=\frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)-u(y)|^{2}}{|x-y|^{N+2s}} \, dx dy-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\geq \frac{\lambda_{i+1}-\lambda-\e}{2\lambda_{i+1}} \|u\|_{\mathbb{X}_{0}}^{2}-CC_{\e}\|u\|_{\mathbb{X}_{0}}^{p+1} \mbox{ for any } u\in H_{i}^{\perp}. \end{align} Then, taking $\e=\frac{\lambda_{i+1}-\lambda}{2}$, and recalling that $p>1$, from \eqref{tv10} it follows that there are $\varrho_{1}>0$ and $\alpha>0$ such that \begin{equation}\label{24} \inf I_{\lambda}(S^{+}_{i}(\varrho_{1}))\geq \alpha>0. \end{equation} On the other hand, by using \eqref{7} and $(f5)$, we deduce that \begin{align}\label{tv11} I_{\lambda}(u)&=\frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)-u(y)|^{2}}{|x-y|^{N+2s}} \, dx dy-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\lambda_{i}-\lambda}{2\lambda_{i}} \|u\|_{\mathbb{X}_{0}}^{2} \mbox{ for any } u\in H_{i}. \end{align} Therefore \eqref{tv11} implies that there exist $\delta''_{i}>0$ and $R_{1}>0$ such that for any $\lambda\in (\lambda_{i}-\delta''_{i}, \lambda_{i})$ we get \begin{equation}\label{25} I_{\lambda}(u)< \alpha \mbox{ for any } \|u\|_{\mathbb{X}_{0}}\leq R_{1}. 
\end{equation} Thus by using \eqref{7} and $(f5)$, we can see that for any $u\in H_{i+1}$ and $\lambda\in (\lambda_{i}-\delta''_{i}, \lambda_{i})$, we have \begin{align}\label{26} I_{\lambda}(u)&=\frac{1}{2} \iint_{\mathbb{R}^{2N}} \frac{|u(x)-u(y)|^{2}}{|x-y|^{N+2s}} \, dx dy-\frac{\lambda}{2} \int_{\Omega} u^{2} \, dx-\int_{\Omega} F(x, u)\, dx \nonumber \\ &\leq \frac{\lambda_{i+1}-\lambda}{2\lambda_{i+1}} \|u\|_{\mathbb{X}_{0}}^{2}. \end{align} Putting together \eqref{24}, \eqref{25} and \eqref{26} we can deduce that \eqref{23} is verified. By applying the Linking Theorem, we can find a third critical point $u_{3}\in \mathbb{X}_{0}$ of $I_{\lambda}$ such that $I_{\lambda}(u)\geq \inf I_{\lambda}(S^{+}_{i}(\varrho_{1}))$. Choosing $\delta_{i}=\min\{ \delta'_{i}, \delta''_{i} \}$, where $\delta'_{i}$ is given in Lemma \ref{lem5}, we can conclude that Theorem \ref{thm1} holds. \end{proof} \noindent {\bf Acknowledgements.} The author warmly thanks the anonymous referee for her/his useful and nice comments on the paper. The manuscript was carried out under the auspices of the INDAM - Gnampa Project 2017 titled:{\it Teoria e modelli per problemi non locali}. \end{document}
\begin{document} \begin{abstract} The main problem considered in the present paper is to single out classes of convex sets, whose convexity property is preserved under nonlinear smooth transformations. Extending an approach due to B.T. Polyak, the present study focusses on the class of uniformly convex subsets of Banach spaces. As a main result, a quantitative condition linking the modulus of convexity of such kind of set, the regularity behaviour around a point of a nonlinear mapping and the Lipschitz continuity of its derivative is established, which ensures the images of uniformly convex sets to remain uniformly convex. Applications of the resulting convexity principle to the existence of solutions, their characterization and to the Lagrangian duality theory in constrained nonconvex optimization are then discussed. \end{abstract} \maketitle \section{Introduction} In many fields of mathematics, persistence phenomena of specific geometrical properties under various kind of transformations have been often a subject of interest and study. Transformations, when possible formalized by mappings acting among spaces, sometimes have been classified on the basis of features in a structure that they can preserve (whence the very term ``morphism''). Convexity is a geometrical property which emerged in ancient times, at the very beginning of geometry, and since then remained essentially unchanged for almost two millennia and half. This happened by virtue of the great variety of successful applications that it found in many different areas. In particular, the relevant role played by convexity in optimization and control theory is widely recognized. This led to develop a branch of mathematics, called convex analysis, that elected convexity as its main topic of study. In spite of such an interest and motivations, not much seems to be known up to now about phenomena of persistence of convexity under nonlinear transformations. 
Yet, advances in this direction would have a certain impact on the analysis of optimization problems. Historically, the first results somehow connected with the issue at the study relate to the numerical range of quadratic mappings (namely, mappings whose components are quadratic forms) and can be found in \cite{Dine41} (see also \cite{Poly98}). A notable step ahead was made when the preservation of convexity of small balls under smooth regular transformations between Hilbert spaces was established by B.T. Polyak (see \cite{Poly01}). After that, some other contributions to understanding the phenomenon in a similar context were given by \cite{BacSam09,BoEmKo04,Dyma16,Reis07}. Various applications of it to topics in linear algebra, optimization and control theory are presented in \cite{Poly98,Poly01,Poly01b,Poly03,Reis07} In the present paper, by following the approach introduced by B.T. Polyak, the study of classes of sets with persistent convexity properties is carried on. More precisely, the analysis here proposed focusses on the class of uniformly convex subsets of certain Banach spaces. An interest in similar classes of sets, in connection with the problem under study, appears already in \cite{Poly01}, where strongly convex sets are actually mentioned. This seems to be rather natural, inasmuch as elements of such classes share the essential geometrical features of balls in a Hilbert space: nonempty interior, boundedness and, what plays a crucial role, a uniform rotundity, which implies a boundary consisting of extreme points only. The feature last mentioned is captured and quantitatively expressed by the notion of modulus of convexity of a set. In developing the Polyak's approach, the main idea behind the investigations exposed in the paper is that, if the modulus of convexity of a given set matches the smoothness and the regularity property of a given nonlinear mapping, then the persistence of convexity under that mapping can be guaranteed. 
The understanding of such a fundamental relation between quantitative aspects of the convexity property for a set and the quantitative regularity behaviour of a mapping acting on it should shed light on the general phenomenon under study. Concretely, this leads to enrich the class of sets interested by the phenomenon. In turn, since the persistence of convexity under nonlinear transformations is at the origin of a certain qualification (in terms of solution existence and characterization) observed in optimization problems with possibly nonconvex data, the result here established allows one to enlarge the class of problems for which the consequent benefits can be expected. The contents of the paper are arranged in the next sections as follows. In Section \ref{Sec:2}, the notion of modulus of convexity of a set and of uniformly convexity are recalled, along with several examples and related facts, useful for the subsequent analysis. Besides, the regularity behaviour of a nonlinear smooth mapping, namely its openness at a linear rate, is entered as a crucial tool, along with the related exact bound. In Section \ref{Sec:3}, the main result of the paper, which is an extension of the aforementioned convexity principle due to B.T. Polyak, is established and some of its features are discussed. In Section \ref{Sec:4}, some applications of the main result to nonconvex constrained optimization problems are provided. \section{Notations and preliminaries} \label{Sec:2} The basic notations in use throughout the paper are as follows. $\mathbb R$ denotes the real number set. Given a metric space $(X,d)$, an element $x_0\in X$ and $r\ge 0$, $\ball{x_0}{r}=\{x\in X:\ d(x,x_0) \le r\}$ denotes the (closed) ball with center $x_0$ and radius $r$. In particular, in a Banach space, the unit ball centered at the null vector will be indicated by $\mathbb B$, whereas the unit sphere by $\mathbb S$. The distance of $x_0\in X$ from a set $S\subseteq X$ is denoted by $\dist{x_0}{S}$. 
If $S\subseteq X$, $\ball{S}{r}= \{x\in X:\ \dist{x}{S}\le r\}$ denotes the (closed) $r$-enlargement of $S$. The diameter of a set $S\subseteq X$ is defined as ${\rm diam}\, S=\sup\{d(x_1,x_2):\ x_1,\, x_2\in S\}$. By ${\rm int}\, S$, ${\rm cl}\, S$ and ${\rm bd}\, S$ the topological interior, the closure and the boundary of a set $S$ are marked, respectively. If $S$ is a subset of a Banach space $(\mathbb X,\|\cdot\|)$, ${\rm ext}\, S$ denotes the set of all extreme points of $S$, in the sense of convex analysis, $\mathbf{0}$ stands for the null element of $\mathbb X$ and $[x_1,x_2]$ denotes the closed line segment with endpoints $x_1,\, x_2\in\mathbb X$. Given a function $h:X\longrightarrow Y$ between metric spaces and a set $U\subseteq X$, $h$ is said to be Lipschitz continuous on $U$ if there exists a constant $\ell>0$ such that \begin{equation} \label{in:defLip} d(h(x_1),h(x_2))\le\ell d(x_1,x_2),\quad \forall x_1,\, x_2\in U. \end{equation} The infimum over all values $\ell$ making the last inequality satisfied on $U$ is called exact bound of Lipschitz continuity of $h$ on $U$ and is denoted by $\lip{h}{U}$, i.e. $$ \lip{h}{U}=\inf\{\ell\ge 0:\ \hbox{ inequality $(\ref{in:defLip})$ holds}\}. $$ The Banach space of all bounded linear operators between the Banach spaces $\mathbb X$ and $\mathbb Y$, equipped with the operator norm, is denoted by $(\mathcal{L}(\mathbb X,\mathbb Y),\|\cdot\|_\mathcal{L})$. If, in particular, it is $\mathbb Y=\mathbb R$, the simpler notation $(\mathbb X^*,\|\cdot\|_*)$ is used. The null vector in a dual space is marked by $\mathbf{0}^*$, whereas the unit sphere by $\mathbb S^*$, with $\langle\cdot,\cdot\rangle$ marking the duality pairing a space and its dual. Given a mapping $f:\Omega\longrightarrow\mathbb Y$, with $\Omega$ open subset of $\mathbb X$, and $x_0\in\Omega$, the Gat\^eaux derivative of $f$ at $x_0$ is denoted by $\der{f}{x_0}$. 
If $f$ is G\^ateaux differentiable at each point of $\Omega$
Whenever the value of ${\rm diam}\, S$ is attained at some pair $x_1,\, x_2\in S$, the function $\delta_S$ will be meant to be naturally extended to $[0,{\rm diam}\, S]$. (ii) After \cite{Poly66}, a nonempty, closed and convex set $S\subseteq\mathbb X$, with $S\ne\mathbb X$, is said to be {\it uniformly convex} provided that $$ \delta_S(\epsilon)>0,\quad\forall \epsilon\in \left\{ \begin{array}{ll} (0,{\rm diam}\, S], &\quad\hbox{if ${\rm diam}\, S$ is attained on $S$}, \\ (0,{\rm diam}\, S), &\quad\hbox{otherwise.} \end{array} \right. $$ \end{definition} Since ${\rm diam}\, S$ vanishes if $S$ is a singleton, Definition \ref{def:uniconv} (ii) does not exclude such kind of convex sets. Nevertheless, as singletons are of minor interest in connection with the problem at the issue, henceforth a uniformly convex set will be always assumed to contain at least two distinct points. \begin{example} \label{ex:ucset} (i) Balls in a uniformly convex Banach space may be viewed as a paradigma for the notion of uniform convexity for sets. Recall that, after \cite{Clar36}, a Banach space $(\mathbb X,\|\cdot\|)$ is said to be {\it uniformly convex} (or to have a uniformly convex norm) if $$ \delta_{\mathbb X}(\epsilon)=\inf\left\{1-\left\|{x_1+x_2\over 2}\right\|:\ x_1,\, x_2\in\mathbb B,\ \|x_1-x_2\|=\epsilon\right\}>0, \forall\epsilon\in (0,2]. $$ The function $\delta_\mathbb X$ is called modulus of convexity of the space $(\mathbb X,\|\cdot\|)$. In fact, it is possible to prove that $$ \delta_\mathbb B(\epsilon)=\delta_\mathbb X(\epsilon),\quad\forall \epsilon\in (0,2]. $$ Such classes of Banach spaces as $l^p$ and $L^p$, with $1<p<\infty$, are known to consist of uniformly convex spaces. In particular, every Hilbert space is uniformly convex. Since every uniformly convex Banach space must be reflexive (according to the Milman-Pettis Theorem), the spaces $l^1,\, L^1,\, L^\infty,\, C([0,1])$ and $c_0$ fail to be. 
For $p\ge 2$, the exact expression of the modulus of convexity of the spaces $l^p$ and $L^p$ is given by $$ \delta_{l^p}(\epsilon)=\delta_{L^p}(\epsilon)= 1-\left[1-\left({\epsilon\over 2}\right)^p\right]^{1/p}, \quad\forall\epsilon\in (0,2]. $$ For more details on uniformly convex Banach spaces and properties of their moduli the reader may refer to \cite{Dies75,FaHaHaMoPeZi01,Milm71}. A useful remark enlightening the connection between the notions of uniform convexity for sets and uniform convexity of Banach spaces can be found in \cite[Theorem 2.3]{BalRep09}: a Banach space can contain a closed uniformly convex set iff it admits an equivalent uniformly convex norm. Such class of Banach spaces have been characterized in terms of superreflexivity in \cite{Enfl72}. Throughout the present paper, the Banach space $(\mathbb X,\|\cdot\|)$ will be supposed to be equipped with a uniformly convex norm. (ii) After \cite{Polo94,Polo96}, given a positive real $r$, a subset $S\subseteq\mathbb X$ of a Banach space is said to be {\it $r$-convex} (or {\it strongly convex} of radius $r$) if there exists $M\subseteq\mathbb X$, with $M\ne\mathbb X$, such that $$ S=\bigcap_{x\in M}\ball{x}{r}\ne\varnothing. $$ It is readily seen that, if a Banach space $(\mathbb X,\|\cdot\|)$ is uniformly convex with modulus $\delta_\mathbb X$, then any strongly convex set $S\subseteq\mathbb X$ with radius $r$ is uniformly convex and its modulus of convexity satisfies the relation \begin{eqnarray} \label{in:ucmscs} \delta_S(\epsilon)\ge r\delta_\mathbb X\left({\epsilon\over r}\right), \quad\forall \epsilon\in (0,{\rm diam}\, S). \end{eqnarray} (iii) Let $\theta:[0,+\infty)\longrightarrow [0,+\infty)$ be an increasing function vanishing only at $0$. 
Recall that, according to \cite{Zali02}, a function $\varphi:\mathbb X\longrightarrow\mathbb R$ is said to be {\it uniformly convex with modulus $\theta$} if it holds \begin{eqnarray*} \varphi(tx_1+(1-t)x_2)\le t\varphi(x_1)+(1-t)\varphi(x_2) -t(1-t)\theta(\|x_1-x_2\|),\\ \forall x_1,\, x_2\in\mathbb X, \,\forall t\in [0,1]. \end{eqnarray*} If, in particular, it is $\theta(s)=\kappa s^2$, a uniformly convex function with such a modulus is called {\it strongly convex}. Sublevel sets of Lipschitz continuous uniformly convex functions are uniformly convex sets. More precisely, given $\alpha>0$, if $\varphi$ is Lipschitz continuous on $\mathbb X$, with exact bound $\lip{\varphi}{\mathbb X}>0$, then the set $[\varphi\le\alpha]=\{x\in\mathbb X: \ \varphi(x)\le\alpha\}$ turns out to be uniformly convex with modulus \begin{eqnarray} \label{in:ucmucfunct} \delta_{[\varphi\le\alpha]}(\epsilon)\ge {\theta(\epsilon)\over 4\lip{\varphi}{\mathbb X}}, \quad\forall\epsilon \in (0,{\rm diam}\, [\varphi\le\alpha]). \end{eqnarray} Indeed, fixed $\epsilon\in (0,{\rm diam}\, [\varphi\le\alpha])$, take $x_1,\, x_2\in [\varphi\le\alpha]$, with $x_1\ne x_2$ and $\|x_1-x_2\|=\epsilon$, and set $\bar x={1\over 2}(x_1+x_2)$. By the uniform convexity of $\varphi$ with modulus $\theta$ one has $$ \varphi(\bar x)\le {\varphi(x_1)+\varphi(x_2)\over 2}- {\theta(\|x_1-x_2\|)\over 4}. $$ Therefore, for an arbitrary $\eta>0$, by the Lipschitz continuity of $\varphi$ on $\mathbb X$, one finds \begin{eqnarray*} \varphi(x)&=& \varphi(x)-\varphi(\bar x)+\varphi(\bar x) \\ &\le& (\lip{\varphi}{\mathbb X}+\eta){\theta(\epsilon)\over 4(\lip{\varphi}{\mathbb X}+\eta)}+\alpha- {\theta(\epsilon)\over 4}\le\alpha, \end{eqnarray*} for every $x\in\ball{\bar x}{{\theta(\epsilon)\over 4 (\lip{\varphi}{\mathbb X}+\eta)}}$. 
Thus, it results in $$ \ball{\bar x}{{\theta(\epsilon)\over 4 (\lip{\varphi}{\mathbb X}+\eta)}}\subseteq [\varphi\le\alpha], $$ so $$ \delta_{[\varphi\le\alpha]}(\epsilon)\ge{\theta(\epsilon) \over 4(\lip{\varphi}{\mathbb X}+\eta)}. $$ The estimate in $(\ref{in:ucmucfunct})$ follows by arbitrariness of $\eta$. \end{example} It is not difficult to see that, given two subsets $S_1$ and $S_2$ of $\mathbb X$, it is $\delta_{S_1\cap S_2}\ge\min\{\delta_{S_1},\, \delta_{S_2}\}$. Therefore, the class of uniformly convex sets is closed under finite intersection. In contrast, unlike the class of convex sets, this class fails to be closed with respect to the Cartesian product. It is worth noting that, as the intersection of balls may yield a boundary with corners or a nonsmooth description, uniformly convex sets may exhibit such kind of pathology. In the next remark, some known facts about uniformly convex sets are collected, which will be relevant to the subsequent analysis. \begin{remark} \label{rem:strconvfacts} (i) Every uniformly convex set, which does not coincide with the entire space, is bounded (see \cite{BalRep09}). (ii) Directly from Definition \ref{def:uniconv}, it follows that every uniformly convex set has nonempty interior. This fact entails that, while uniformly convex subsets are compact if living in finite-dimensional spaces, they can not be so in infinite-dimensional Banach spaces. (iii) As a consequence of Definition \ref{def:uniconv}, if any uniformly convex set $S$ admits a modulus of convexity of power type $2$, i.e. such that \begin{eqnarray} \label{in:qgc} \delta_S(\epsilon)\ge c\epsilon^2, \quad\forall \epsilon\in (0,{\rm diam}\, S), \end{eqnarray} for some $c>0$, then it fulfils the following property: for every $\tilde c\in (0,c)$ it holds $$ \ball{{x_1+x_2\over 2}}{\tilde c\|x_1-x_2\|^2}\subseteq S, \quad\forall x_1,\, x_2\in S. 
$$ It is worth noting that this happens for the balls in any Hilbert space or in the Banach spaces $l^p$ and $L^p$, with $1<p<2$, where the following estimate is known to hold $$ \delta_{l^p}(\epsilon)=\delta_{L^p}(\epsilon)> {p-1\over 8}\epsilon^2,\quad\forall\epsilon\in (0,2] $$ (see, for instance, \cite{Milm71}). Such a subclass of uniformly convex sets will play a prominent role in the main result of the paper. (iv) For every uniformly convex set $S$, a constant $\beta>0$ can be proved to exist such that $$ \delta_S(\epsilon)\le \beta\epsilon^2,\quad\forall \epsilon\in (0,{\rm diam}\, S) $$ (see \cite{BalRep09}). Thus, a modulus of convexity of the power $2$ is a maximal one. \end{remark} The next proposition provides a complete characterization of uniform convexity for subsets of a finite-dimensional Euclidean space in terms of extremality of their boundary points. Below, a variational proof of this fact is provided. \begin{proposition} \label{pro:ucchar} A convex compact subset $S\subseteq\mathbb R^n$, with nonempty interior, is uniformly convex iff ${\rm ext}\, S={\rm bd}\, S$. \end{proposition} \begin{proof} Observe that by compactness of $S$, it is ${\rm bd}\, S\ne\varnothing$. Actually, the Krein-Milman theorem ensures that ${\rm ext}\, S\ne\varnothing$ also. Clearly, it is ${\rm ext}\, S\subseteq{\rm bd}\, S$. To begin with, assume that $S$ is uniformly convex. Take any $\bar x\in{\rm bd}\, S$. If it were $\bar x\not\in{\rm ext}\, S$, then there would exist $x_1,\, x_2\in S\backslash\{\bar x\}$, with $x_1\ne x_2$, such that $\bar x={x_1+x_2\over 2}$. Observe that, as $\bar x \in{\rm bd}\, S$, the inclusion $\ball{\bar x}{\delta}\subseteq S$ can be true only for $\delta=0$. Thus $\delta_S (\|x_1-x_2\|) =0$, contradicting the fact that $S$ is uniformy convex. Conversely, assume that the equality ${\rm ext}\, S={\rm bd}\, S$ holds true. 
Fix an arbitrary $\epsilon\in (0,{\rm diam}\, S]$ (under the current hypotheses the value ${\rm diam}\, S$ is attained on $S$). Notice that, since $S$ is compact, the set $$ S^2_\epsilon=\{(x_1,x_2)\in S\times S:\ \|x_1-x_2\|=\epsilon\} $$ is still compact. Define the function $\vartheta:\mathbb R^n\times\mathbb R^n \longrightarrow [0,+\infty)$ by setting $$ \vartheta(x_1,x_2)=\dist{{x_1+x_2\over 2}}{\mathbb R^n\backslash{\rm int}\, S}. $$ Since such a function is continuous on $\mathbb R^n\times\mathbb R^n$, it attains its global minimum over $S^2_\epsilon$ at some point $(\hat x_1,\hat x_2)\in S^2_\epsilon$, with $\hat x_1\ne\hat x_2$ as $\|\hat x_1-\hat x_2\|=\epsilon$. If it were $\vartheta(\hat x_1, \hat x_2)=0$, then it would happen that $$ {\hat x_1+\hat x_2\over 2}\in{\rm bd}\, S. $$ The last inclusion contradicts the fact that ${\hat x_1+\hat x_2\over 2}$ is an extreme point of $S$. Therefore, one deduces that $\vartheta (\hat x_1,\hat x_2)>0$. As it is true that $$ \delta_S(\epsilon)=\min_{(x_1,x_2)\in S^2_\epsilon} \vartheta(x_1,x_2)>0, $$ the requirement in Definition \ref{def:uniconv} (ii) turns out to be satisfied. The arbitrariness of $\epsilon\in (0,{\rm diam}\, S]$ completes the proof. \end{proof} Proposition \ref{pro:ucchar} can not be extended to infinite-dimensional spaces, where balls with ${\rm ext}\, \mathbb B={\rm bd}\, \mathbb B$ can exist, yet failing to be uniformly convex (see \cite{Dies75}). \subsection{Openness at a linear rate} In the next definition, some notions and related results are recalled, which describe quantitatively a certain surjective behaviour of a mapping. Such a local property, in a synergical interplay with other features (${\rm C}^{1,1}$-smoothness and uniform convexity) of the involved objects, allows one to achieve the main result in the paper. \begin{definition} \label{def:lopmap} Let $f:X\longrightarrow Y$ be a mapping between two metric spaces and $x_0\in X$. 
The mapping $f$ is said to be {\it open at a linear rate around} $x_0$ if there exist positive reals $\delta$, $\zeta$ and $\sigma$ such that \begin{equation} \label{in:pointlop} f(\ball{x}{r})\supseteq\ball{f(x)}{\sigma r}\cap \ball{f(x_0)}{\zeta}, \quad\forall x\in\ball{x_0}{\delta}, \ \forall r\in [0,\delta]. \end{equation} \end{definition} The role of a surjection property in preserving convexity of sets should not come as a surprise: the convexity of the image requires indeed line segments joining points in the image of a set to belong to the image, that is a certain openness/covering behaviour of the reference mapping. It is well known (see, for instance, \cite{DonRoc14,Ioff16,Mord06}) that the property of openness at a linear rate for a mapping $f$ around $x_0$ can be equivalently reformulated as follows: there exist positive reals $\delta$ and $\kappa$ such that \begin{equation} \label{in:pointmr} \dist{x}{f^{-1}(y)}\le\kappa d(y,f(x)),\quad\forall x\in \ball{x_0}{\delta},\ \forall y\in\ball{f(x_0)}{\delta}. \end{equation} Whenever the inequality $(\ref{in:pointmr})$ holds, $f$ is said to be {\it metrically regular} around $x_0$. The infimum over all values $\kappa$ for which there exists $\delta>0$ such that $(\ref{in:pointmr})$ holds true is called {\it exact regularity bound} of $f$ around $x_0$ and it will be denoted by $\reg{f}{x_0}$, with the convention that $\reg{f}{x_0}=+\infty$ means that $f$ fails to be metrically regular around $x_0$. \begin{remark} \label{rem:pointlopsimpl} (i) It is convenient to note that, whenever $f$ is continuous at $x_0$, the inclusion defining the openness of $f$ at a linear rate around $x_0$ takes the simpler form: there exists positive $\delta$ and $\sigma$ such that \begin{equation} \label{in:pointlopsimpl} f(\ball{x}{r})\supseteq\ball{f(x)}{\sigma r}, \quad\forall x\in\ball{x_0}{\delta}, \ \forall r\in [0,\delta]. 
\end{equation} (ii) From the inclusion $(\ref{in:pointlopsimpl})$ it is clear that, whenever a mapping $f$ is open at a linear rate around $x_0$ and continuous at the same point, it holds \begin{equation} \label{in:intelop} f({\rm int}\, S)\subseteq{\rm int}\, f(S), \end{equation} provided that $S\subseteq\ball{x}{\delta}$, where $\delta$ is as above. Indeed, if it is $x\in{\rm int}\, S$, then for some $r\in (0,\delta)$ it must be $\ball{x}{r}\subseteq S$. Therefore, one gets $$ \ball{f(x)}{\sigma r}\subseteq f(\ball{x}{r})\subseteq f(S). $$ In turn, from the inclusion $(\ref{in:intelop})$, one deduces $$ f^{-1}(y)\cap S\subseteq{\rm bd}\, S,\quad\forall y\in{\rm bd}\, f(S). $$ \end{remark} As the behaviour formalized by openness at a linear rate/metric regularity plays a crucial role in a variety of topics in variational analysis, it has been widely investigated in the past decades and several criteria for detecting the occurrence of it are now at disposal. In the case of smooth mappings between Banach spaces, the main criterion for openness at a linear rate/metric regularity, known under the name of Lyusternik-Graves theorem, can be stated as follows (see \cite{DonRoc14,Ioff16,Mord06}). \begin{theorem}[Lyusternik-Graves] Let $f:\mathbb X\longrightarrow\mathbb Y$ be a mapping between Banach spaces. Suppose that $f$ is strictly differentiable at $x_0\in\mathbb X$. Then, $f$ is open at a linear rate around $x_0$ iff $\der{f}{x_0}$ is onto, i.e. $\der{f}{x_0}(\mathbb X)=\mathbb Y$. 
\end{theorem} The above criterion is usually complemented with the following (primal and dual) estimates of the exact regularity bound, which are relevant for the present analysis: $$ \reg{f}{x_0}=\sup_{\|y\|\le 1}\inf\{\|x\|:\ x\in \der{f}{x_0}^{-1}(y)\} $$ and $$ \reg{f}{x_0}=\left(\inf_{\|y^*\|_*=1}\|\der{f}{x_0}^*y^*\|_* \right)^{-1}=\left(\dist{\mathbf{0}^*}{\der{f}{x_0}^*(\mathbb S^*)} \right)^{-1}, $$ where $\Lambda^*\in\mathcal{L}(\mathbb Y^*,\mathbb X^*)$ denotes the adjoint operator to $\Lambda\in\mathcal{L}(\mathbb X,\mathbb Y)$ and the conventions $$ \inf\varnothing=+\infty \qquad\hbox{ and }\qquad 1/0=+\infty $$ are adopted. Remember that $\Lambda\in\mathcal{L}(\mathbb X,\mathbb Y)$ is onto iff $\Lambda^*$ has bounded inverse. It is worth noting that, when both $\mathbb X$ and $\mathbb Y$ are finite-dimensional Euclidean spaces, the condition on $\der{f}{x_0}$ to be onto reduces to the fact that Jacobian matrix of $f$ at $x_0$ is full-rank. Furthermore, whenever $\der{f}{x_0}$ happens to be invertible, one has $\reg{f}{x_0}=\|\der{f}{x_0}^{-1}\|_\mathcal{L}$. \section{An extension of the Polyak convexity principle} \label{Sec:3} Given $c>0$, let us introduce the following subclasses of uniformly convex subsets of $\mathbb X$, with modulus of convexity of power type $2$: $$ {\mathcal UC}^2_c(\mathbb X)=\{S\subseteq\mathbb X:\ \delta_S(\epsilon)\ge c\epsilon^2, \ \forall\epsilon\in (0,{\rm diam}\, S)\} $$ and $$ {\mathcal UC}^2(\mathbb X)=\bigcup_{c>0}{\mathcal UC}^2_c(\mathbb X). $$ \begin{remark} \label{rem:midpointconv} In the proof of the next theorem the following fact, which can be easily proved by an iterative bisection procedure, will be used: any closed subset $V$ of a Banach space is convex iff ${y_1+y_2 \over 2}\in V$, whenever $y_1,\, y_2\in V$. It is easy to see that if $V$ is not closed, this mid-point property does not imply the convexity of $V$. 
Consider, for instance, the set $V$ defined by $$ V=\bigcup_{k=0}^\infty\left\{{i\over 2^k}:\ i\in \{0,1,2,3,\dots, 2^k\}\right\}\subseteq [0,1]. $$ Since $V$ is countable, as a countable union of finite sets, it is strictly included in $[0,1]$. Therefore $V$ can not be convex, because it contains $0$ and $1$, even though it has the mid-point property, as one checks without difficulty. \end{remark} Below, the main result of the paper is established. \begin{theorem} \label{thm:extPCP} Let $f:\Omega\longrightarrow\mathbb Y$ be a mapping between Banach spaces, with $\Omega$ open nonempty subset of $\mathbb X$. Let $x_0\in\Omega$ and $c>0$ such that: \begin{itemize} \item[(i)] $f\in{\rm C}^{1,1}({\rm int}\, \ball{x_0}{r_0})$, for some $r_0>0$; \item[(ii)] $\der{f}{x_0}$ is onto; \item[(iii)] it holds $$ {\reg{f}{x_0}\cdot\lip{\dif{f}}{{\rm int}\, \ball{x_0}{r_0}}\over 8} <c. $$ \end{itemize} Then, there exists $\rho\in (0,r_0)$ such that, for every $S\in {\mathcal UC}^2_c(\mathbb X)$, with $S\subseteq\ball{x_0}{\rho}$ and $f(S)$ closed, it is $f(S)\in{\mathcal UC}^2(\mathbb Y)$. \end{theorem} \begin{proof} The proof is divided into two parts. \noindent {\it First part}: Let us show that $f(S)$ is convex. According to the hypothesis (iii), it is possible to fix positive reals $\kappa$ and $\ell$ in such a way that $\kappa>\reg{f}{x_0}$, $\ell>\lip{\dif{f}}{{\rm int}\, \ball{x_0}{r_0}}$, and the following inequality is fulfilled \begin{equation} \label{in:klc} {\kappa\ell\over 8}<c. \end{equation} By virtue of hypotheses (i) and (ii), as $f$ is in particular strictly differentiable at $x_0$, it is possible to invoke the Lyusternik-Graves theorem, ensuring that $f$ is metrically regular around $x_0$. 
This means that there exist positive reals $\tilde\kappa$ and $\tilde r$ such that $$ \reg{f}{x_0}<\tilde\kappa<\kappa, \qquad\qquad \tilde r\in (0,r_0), $$ and \begin{equation} \label{in:mrfx0} \dist{x}{f^{-1}(y)}\le\tilde\kappa\|y-f(x)\|, \quad\forall x\in\ball{x_0}{\tilde r},\ \forall y\in\ball{f(x_0)}{\tilde r}. \end{equation} Besides, by the continuity of $f$ at $x_0$, corresponding to $\tilde r$ there exists $r_*\in (0,r_0)$ such that $$ f(x)\in\ball{f(x_0)}{\tilde r},\quad\forall x\in \ball{x_0}{r_*}. $$ Then, take $\rho\in (0,\min\{\tilde r,\, r_*\})$. Notice that, in the light of Remark \ref{rem:pointlopsimpl}, up to a further reduction in the value of $\rho$, one can assume that for some $\sigma>0$ it holds \begin{equation} \label{in:lopx0rho} f(\ball{x}{r})\supseteq\ball{f(x)}{\sigma r},\quad\forall x\in\ball{x_0}{\rho},\ \forall r\in [0,\rho]. \end{equation} Now, take an arbitrary element $S\in{\mathcal UC}^2_c(\mathbb X)$, with $S\subseteq\ball{x_0}{\rho}$ and such that $f(S)$ is closed. According to Remark \ref{rem:midpointconv}, the convexity of $f(S)$ can be proved by showing that for every $y_1,\, y_2\in f(S)$, with $y_1\ne y_2$, it holds ${y_1+y_2\over 2}\in f(S)$. To this aim, let $x_1,\, x_2 \in S$ be such that $y_1=f(x_1)$ and $y_2=f(x_2)$. For convenience, set $$ \bar x={x_1+x_2\over 2}\qquad\hbox{ and }\qquad \bar y={y_1+y_2\over 2}. $$ Notice that, as $y_1\ne y_2$, it must be also $x_1\ne x_2$. Moreover, as $S\subseteq\ball{x_0}{\rho}\subseteq \ball{x_0}{r_*}$, one has $y_1,\, y_2\in\ball{f(x_0)}{\tilde r}$ and therefore, by the convexity of a ball, one has also $\bar y\in\ball{f(x_0)}{\tilde r}$. Thus, since $\bar x\in\ball{x_0}{\tilde r}$ and $\bar y\in \ball{f(x_0)}{\tilde r}$, the inequality $(\ref{in:mrfx0})$ implies \begin{equation} \label{in:mrfbarx} \dist{\bar x}{f^{-1}(\bar y)}\le\tilde\kappa \|\bar y-f(\bar x)\|. \end{equation} If $\bar y=f(\bar x)$ the proof of the convexity of $f(S)$ is complete, because $\bar x\in S$. 
Otherwise, it happens that $\|\bar y-f(\bar x)\|>0$, so the inequality $(\ref{in:mrfbarx})$ entails the existence of $\hat x \in f^{-1}(\bar y)$ such that $$ \|\hat x-\bar x\|<\kappa\|\bar y-f(\bar x)\|. $$ By taking account of the estimate $(\ref{in:2ndordest})$ in Remark \ref{rem:smoothfacts} (i), as it is $[x_1,x_2]\subseteq\ball{x_0}{\rho}\subseteq{\rm int}\, \ball{x_0}{r_0}$, one consequently obtains $$ \|\hat x-\bar x\|<\kappa{\ell\over 8}\|x_1-x_2\|^2, $$ that is $\hat x\in\ball{\bar x}{{\kappa\ell\over 8}\|x_1-x_2\|^2}$. Since $S\in{\mathcal UC}^2_c(\mathbb X)$ and the inequality $(\ref{in:klc})$ is in force, in the light of what was observed in Remark \ref{rem:strconvfacts} (iii) it follows $$ \ball{\bar x}{{\kappa\ell\over 8}\|x_1-x_2\|^2}\subseteq S, $$ with the consequence that $\hat x\in S$ and hence $\bar y=f(\hat x)$ turns out to belong to $f(S)$. \noindent {\it Second part}: Let us prove now the assertion in the thesis. According to what was noted in Remark \ref{rem:smoothfacts} (ii), under the above hypotheses $f(S)$ is bounded. Fix $\epsilon\in (0,{\rm diam}\, f(S))$ and take arbitrary $y_1,\, y_2\in f(S)$, with $\|y_1-y_2\|=\epsilon$. Let $\bar y,\, x_1,\, x_2,\, \bar x$ and $\hat x$ be as in the first part of the proof (it may happen that $\hat x=\bar x$). In order to prove that $f(S)\in{\mathcal UC}^2(\mathbb Y)$, it is to be shown that, independently of $y_1,\, y_2\in f(S)$ and $\epsilon$, there exists $\gamma>0$ such that $\ball{\bar y}{\gamma\epsilon^2} \subseteq f(S)$. Again recalling Remark \ref{rem:smoothfacts} (ii), it is possible to define the positive real value $$ \beta=\sup_{x\in S}\|\der{f}{x}\|_\mathcal{L}+1<+\infty. $$ By virtue of inequality $(\ref{in:klc})$, it is possible to pick $\eta\in (0,c-{\kappa\ell\over 8})$ in such a way that $$ \hat x\in\ball{\bar x}{{\kappa\ell\over 8}\|x_1-x_2\|^2} \subseteq \ball{\bar x}{\left({\kappa\ell\over 8}+\eta\right) \|x_1-x_2\|^2}\subseteq S. 
$$ From the last chain of inclusions, it readily follows that $$ \ball{\hat x}{\eta\|x_1-x_2\|^2}\subseteq S. $$ Since, by the mean-value theorem, it is $$ \|y_1-y_2\|\le\beta\|x_1-x_2\|, $$ one obtains $$ \epsilon^2=\|y_1-y_2\|^2\le\beta^2\|x_1-x_2\|^2, $$ and hence $\ball{\hat x}{\eta\epsilon^2/\beta^2} \subseteq S$. Now, recall that $f$ is open at a linear rate around $x_0$. Accordingly, as $S\subseteq\ball{x_0}{\rho}$, up to a further reduction in the value of $\eta>0$ in such a way that $\eta{\rm diam}\, ^2 f(S)/\beta^2<\rho$, one finds $$ \ball{\bar y}{\sigma\eta{\epsilon^2\over\beta^2}}\subseteq f\left(\ball{\hat x}{\eta{\epsilon^2\over\beta^2}}\right) \subseteq f(S) $$ (remember the inclusion $(\ref{in:lopx0rho})$). Thus, since by construction $\sigma$, $\eta$ and $\beta$ are independent of $y_1,\, y_2$ and $\epsilon$, one can conclude that $$ \delta_{f(S)}(\epsilon)\ge{\sigma\eta\over\beta^2}\epsilon^2. $$ By arbitrariness of $\epsilon\in (0,{\rm diam}\, f(S))$, this completes the proof. \end{proof} A first comment to Theorem \ref{thm:extPCP} concerns its hypothesis (iii), which seems to find no counterpart in the convexity principle due to B.T. Polyak (see \cite[Theorem 2.1]{Poly01}). Such hypothesis postulates a uniform convexity property of $S$, which must be quantitatively adequate to the metric regularity of $f$ and to the Lipschitz continuity of $\dif{f}$ around $x_0$. Matching this condition is guaranteed for strongly convex sets (in particular, for balls) with a sufficiently small radius, provided that the underlying Banach space fulfils a certain uniform convexity assumption. This fact is clarified by the following \begin{corollary} \label{cor:strconvpcp} Let $f:\Omega\longrightarrow\mathbb Y$ be a mapping between Banach spaces, with $\Omega$ open nonempty subset of $\mathbb X$. 
Let $x_0\in\Omega$ be such that: \begin{itemize} \item[(i)] $(\mathbb X,\|\cdot\|)$ admits a modulus of convexity of power type 2; \item[(ii)] $f\in{\rm C}^{1,1}({\rm int}\, \ball{x_0}{r_0})$, for some $r_0>0$; \item[(iii)] $\der{f}{x_0}$ is onto. \end{itemize} \noindent Then, there exists $\rho\in (0,r_0)$ such that, for every $r$-convex set $S$, with $r\in [0,\rho)$ and $f(S)$ closed, it holds $f(S)\in{\mathcal UC}^2(\mathbb Y)$. \end{corollary} \begin{proof} By virtue of the hypothesis (i), according to Example \ref{ex:ucset} (ii), any $r$-convex set $S$ belongs to ${\mathcal UC}^2(\mathbb X)$, for every $r>0$. More precisely, on account of the inequality $(\ref{in:ucmscs})$, one has $$ \delta_{S}(\epsilon)\ge r\delta_{\mathbb X} \left({\epsilon\over r}\right)\ge{\gamma\over r}\epsilon^2, \quad\forall\epsilon\in (0,2r], $$ for some $\gamma>0$. Therefore, in order for the hypothesis (iii) of Theorem \ref{thm:extPCP} to be satisfied, it suffices to take $$ r<{8\gamma\over \reg{f}{x_0}\cdot\lip{\dif{f}}{{\rm int}\, \ball{x_0}{r_0}}+1}. $$ Then, the thesis follows from Theorem \ref{thm:extPCP}. \end{proof} On the other hand, notice that Theorem \ref{thm:extPCP} does not make any direct assumption on the Banach space $(\mathbb X,\|\cdot\|)$ (nonetheless, take into account what was remarked at the end of Example \ref{ex:ucset} (i)). Furthermore, since any ball $\ball{x_0}{r}$ is an $r$-convex set, it should be clear that Corollary \ref{cor:strconvpcp} allows one to embed in the current theory the Polyak convexity principle and its refinement \cite[Theorem 3.2]{Uder13}. Another comment to Theorem \ref{thm:extPCP} deals with the topological assumption on the image $f(S)$. Of course, whenever $\mathbb X$ is a finite-dimensional Euclidean space, $f(S)$ is automatically closed, because $S$ is compact and $f$ is continuous on $S$. In an infinite-dimensional setting, the same issue becomes subtler. 
The closedness assumption thus appears also in the formulation of other results for the convexity of images of mappings between infinite-dimensional spaces (see \cite[Theorem 2.2]{BacSam09}). It is clear that, whenever $\der{f}{x_0}$ not only is onto but, in particular, is invertible, $f$ turns out to be a diffeomorphism around $x_0$. As a consequence, for a proper $r_0>0$, any closed set $S\subseteq\ball{x_0}{r_0}$ has a closed image. Nevertheless, in the general setting of Theorem \ref{thm:extPCP}, to the best of the author's knowledge, the question of formulating sufficient conditions on $f$ in order for $f(S)$ to be closed is still open. The next proposition, which is far removed from providing a solution to such a question, translates the topological assumption on the image $f(S)$ into variational terms. \begin{proposition} \label{pro:closim} Let $f:\Omega\longrightarrow\mathbb Y$ be a mapping between Banach spaces, with $\Omega$ open nonempty subset of $\mathbb X$, and let $x_0\in\Omega$. Suppose that: \begin{itemize} \item[(i)] $f$ is continuous in $\ball{x_0}{r_0}$, for some $r_0>0$; \item[(ii)] the function $x\mapsto \dist{x}{f^{-1}(y)}$ is weakly lower semicontinuous, for every $y\in\ball{f(x_0)}{r_0}$; \item[(iii)] $(\mathbb X,\|\cdot\|)$ is reflexive; \item[(iv)] $f$ is metrically regular around $x_0$. \end{itemize} Then, there exists $\rho\in (0,r_0)$ such that, for every closed convex set $S\subseteq\ball{x_0}{\rho}$, $f(S)$ is closed. \end{proposition} \begin{proof} Since by the hypothesis (iv) $f$ is metrically regular around $x_0$, there exist positive real $r\in (0,r_0)$ and $\kappa$ such that \begin{equation} \label{in:mrfr} \dist{x}{f^{-1}(y)}\le\kappa\|f(x)-y\|,\quad\forall x\in\ball{x_0}{r},\ \forall y\in \ball{f(x_0)}{r}. \end{equation} By the continuity of $f$ at $x_0$, there exists $\rho\in (0,r)$ such that $$ f(x)\in\ball{f(x_0)}{r},\quad\forall x\in\ball{x_0}{\rho}. 
$$ Thus, whenever $S\subseteq\ball{x_0}{\rho}$, one has $f(S)\subseteq\ball{f(x_0)}{r}$. Now, suppose that $S\subseteq\ball{x_0}{\rho}$ is a closed convex set and take an arbitrary $y\in{\rm cl}\, f(S)\subseteq \ball{f(x_0)}{r}$. Let $(y_n)_n$ be a sequence in $f(S)$, such that $y_n\longrightarrow y$ as $n\to\infty$. As $y_n\in f(S)$, there exists a sequence $(x_n)_{n}$ in $S$ such that $y_n=f(x_n)$, for each $n\in\mathbb N$. Notice that, since $x_n\in S\subseteq\ball{x_0}{\rho} \subseteq\ball{x_0}{r}$ and $y\in{\rm cl}\, f(S)\subseteq \ball{f(x_0)}{r}$, the inequality $(\ref{in:mrfr})$ applies, namely \begin{equation} \label{in:mrfrxnyn} \dist{x_n}{f^{-1}(y)}\le\kappa\|f(x_n)-y\|= \kappa\|y_n-y\|,\quad\forall n\in\mathbb N. \end{equation} This shows that $\dist{x_n}{f^{-1}(y)}\longrightarrow 0$ as $n\to\infty$ and therefore $$ \inf_{x\in S}\dist{x}{f^{-1}(y)}=0. $$ As a closed convex set, $S$ is also weakly closed. Moreover, as a bounded subset of a reflexive Banach space, $S$ is weakly compact. Thus, since $y\in\ball{f(x_0)}{r_0}$, by virtue of the hypothesis (ii), there must exist $\tilde x\in S$ such that $$ \dist{\tilde x}{f^{-1}(y)}=0. $$ Since $f$ is continuous, the last equality entails that $\tilde x\in f^{-1}(y)$. This leads to conclude that $y\in f(S)$, thereby completing the proof. \end{proof} The hypothesis (ii) in Proposition \ref{pro:closim} happens to be always satisfied if $f$ is a linear mapping. In the nonlinear case, the situation is expected to be much more complicated. Let $C\subseteq\mathbb Y$ be a closed convex cone with apex at $\mathbf{0}$ and let $S\subseteq\mathbb X$ be nonempty and convex. Recall that a mapping $f:S\longrightarrow\mathbb Y$ is said to be {\it convex-like} on $S$ with respect to $C$ if for every $x_1,\, x_2\in S$ and $t\in [0,1]$, there exists $x_t\in S$ such that $$ (1-t)f(x_1)+tf(x_2)\in f(x_t)+C. 
$$ Convex-likeness is a generalization of the notion of $C$-convexity of mappings taking values in partially ordered vector spaces. It should be evident that, when $\mathbb Y=\mathbb R$, $C=[0,+\infty)$ and $x_t=(1-t) x_1+tx_2$, the above inclusion reduces to the well-known inequality defining the convexity of a functional. The class of convex-like mappings has found a large employment in optimization and related topics. For instance, if $\mathbb Y=\mathbb R^m$ and $C=\mathbb R^m_+$, it is readily seen that this class includes all mappings $f=(f_1,\dots,f_m)$, having each component $f_i:S\longrightarrow\mathbb R$, $i=1,\dots,m$, convex on a convex set. For a detailed discussion about the notion of convex-likeness of mappings, its variants and their impact on the study of variational problems, the reader can refer to \cite{MasRap00}. The next corollary, which can be achieved as a direct consequence of Theorem \ref{thm:extPCP}, reveals that any ${\rm C}^{1,1}$ smooth mapping behaves as a convex-like mapping on uniformly convex sets of class ${\mathcal UC}^2_c(\mathbb X)$ near a regular point. \begin{corollary} Let $f:\Omega\longrightarrow\mathbb Y$ be a mapping between Banach spaces, $x_0\in\Omega$ and $c>0$. If $f$, $x_0$ and $c$ satisfy all hypotheses of Theorem \ref{thm:extPCP}, then there exists $\rho>0$ such that, for every $S\in{\mathcal UC}^2_c(\mathbb X)$, with $S\subseteq\ball{x_0}{\rho}$ and $f(S)$ closed, and every cone $C\subseteq\mathbb Y$, the mapping $f:S\longrightarrow\mathbb Y$ is convex-like on $S$ with respect to $C$. \end{corollary} \begin{proof} The thesis follows at once by Theorem \ref{thm:extPCP}, from being $$ (1-t)f(x_1)+tf(x_2)\in f(S)\subseteq f(S)+C, \quad \forall x_1,\, x_2\in S,\ \forall t\in [0,1]. 
$$ \end{proof} \section{Applications to optimization} \label{Sec:4} Throughout this section, applications of Theorem \ref{thm:extPCP} will be considered to the study of constrained optimization problems, having the following format $$ \min_{x\in S}\varphi(x)\quad\hbox{ subject to }\quad g(x)\in C, \leqno ({\mathcal P}) $$ where $\varphi:\mathbb X\longrightarrow\mathbb R$ and $g:\mathbb X\longrightarrow\mathbb Y$ are given functions between Banach spaces, $S\subseteq\mathbb X$ and $C\subseteq\mathbb Y$ are given (nonempty) closed and convex sets. Such a format is frequently employed in the literature for subsuming under a general treatment a broad spectrum of finite and infinite-dimensional extremum problems, with various kinds of constraints. The feasible region of problem $({\mathcal P})$ will be henceforth denoted by $R$, i.e. $R=S\cap g^{-1}(C)$. According to a long-standing approach in optimization, now recognized as ISA (acronym standing for Image Space Analysis), the analysis of several issues related to problem $({\mathcal P})$ can be performed by associating with $({\mathcal P})$ and with an element $x_0\in R$ the mapping $\immap{x_0}:\mathbb X\longrightarrow\mathbb R\times\mathbb Y$, which is defined by $$ \immap{x_0}(x)=(\varphi(x)-\varphi(x_0),g(x)) $$ (see, for instance, \cite{Gian05} and references therein). It is natural to believe that the mapping $\immap{x_0}$ inherits certain structural features of the given problem. Such issues as the solution existence, optimality conditions, duality, and so on, can be investigated by studying relationships between the two subsets of the space $\mathbb R\times\mathbb Y$, namely $\immap{x_0}(S)$ and $Q=(-\infty,0)\times C$, associated with $({\mathcal P})$. 
\begin{remark} \label{rem:ISAlop} Directly from the above constructions, it is possible to prove the following well-known facts: (i) $x_0\in R$ is a global solution to $({\mathcal P})$ iff $\immap{x_0}(S) \cap Q=\varnothing$; (ii) $x_0\in R$ is a local solution to $({\mathcal P})$ iff there exists $r>0$ such that $\immap{x_0}(S\cap\ball{x_0}{r})\cap Q=\varnothing$. \end{remark} The above facts have been largely employed as a starting point for formulating optimality conditions within ISA. Another relevant property connected with optimality is openness at a linear rate. Its presence, indeed, has been observed to be in contrast with optimality (see, for instance, the so-called noncovering principle in \cite{Ioff16}). Below, a lemma related to this phenomenon, which will be exploited in the proof of the next result, is presented in full detail. \begin{lemma} \label{lem:lopnotsol} With reference to a problem $({\mathcal P})$, suppose that the mapping $\immap{x_0}$ is open at a linear rate around $x_0\in R$ and $x_0\in {\rm int}\, S$. Then, $x_0$ is not a local solution to $({\mathcal P})$. \end{lemma} \begin{proof} By the hypothesis, according to Definition \ref{def:lopmap} there exist positive constants $\delta$, $\zeta$, and $\sigma$ such that, if taking in particular $x=x_0$ in inclusion $(\ref{in:pointlop})$, it holds $$ \immap{x_0}(\ball{x_0}{r})\supseteq\ball{\immap{x_0}(x_0)}{\sigma r} \cap\ball{\immap{x_0}(x_0)}{\zeta},\quad\forall r\in [0,\delta]. $$ Notice that, if $r<\zeta/\sigma$, then the above inclusion reduces to \begin{equation} \label{in:prolopx0} \immap{x_0}(\ball{x_0}{r})\supseteq\ball{\immap{x_0}(x_0)}{\sigma r} =\ball{(0,g(x_0))}{\sigma r}. \end{equation} Since $x_0\in{\rm int}\, S$, there exists $r_0>0$ such that $\ball{x_0}{r_0} \subseteq S$. Now, fix an arbitrary $r\in (0,\, \min\{r_0,\, \zeta/\sigma\})$ and pick $t\in (0,\sigma r)$. 
Then, on the account of inclusion $(\ref{in:prolopx0})$, there exists $x_r\in\ball{x_0}{r}$ such that $$ \immap{x_0}(x_r)=(-t,g(x_0))\in\ball{(0,g(x_0))}{\sigma r}, $$ that is $$ \varphi(x_r)-\varphi(x_0)=-t<0 \qquad\hbox{ and }\qquad g(x_r)=g(x_0)\in C. $$ This means that $x_r\in S\cap g^{-1}(C)$ and $\varphi(x_r)<\varphi(x_0)$, what contradicts the local optimality of $x_0$ for $({\mathcal P})$, by arbitrariness of $r$. \end{proof} The next theorem, which extends a similar result established in \cite[Theorem 3.2]{Uder13}, provides an answer to the question of solution existence for problem $({\mathcal P})$ and, at the same time, furnishes an optimality condition for detecting a solution. In order to formulate such a theorem, let us denote by $\ncone{C}{\bar y}= \{y^*\in\mathbb Y^*:\ \langle y^*,y-\bar y\rangle\le 0,\quad\forall y\in C\}$ the normal cone to $C$ at $\bar y$ in the sense of convex analysis. Besides, let us denote by ${\rm L}:\mathbb Y^*\times\mathbb X\longrightarrow\mathbb R$ the Lagrangian function associated with problem $({\mathcal P})$, i.e. $$ {\rm L} (y^*,x)=\varphi(x)+\langle y^*,g(x)\rangle. $$ The proof, whose main part is given for the sake of completeness, adapts an argument already exploited in \cite{Uder13}. It derives solution existence from the weak compactness of the problem image and the optimality condition by a linear separation technique. In both the cases, convexity is the geometrical property that makes this possible. \begin{theorem} \label{thm:constoptp} Given a problem $({\mathcal P})$, let $x_0\in g^{-1}(C)$ and let $c$ be a positive real. Suppose that: \begin{itemize} \item[(i)] $(\mathbb Y,\|\cdot\|)$ is a reflexive Banach space; \item[(ii)] $\varphi,\, g\in{\rm C}^{1,1}({\rm int}\, \ball{x_0}{r_0})$, for some $r_0>0$ and $\der{\immap{x_0}}{x_0}$ is onto; \item[(iii)] it holds \begin{equation} \label{in:relipuconvcond} {\reg{\immap{x_0}}{x_0}\cdot\lip{\dif{\immap{x_0}}}{{\rm int}\, \ball{x_0}{r_0}}\over 8} <c. 
\end{equation} \end{itemize} Then, there exists $\rho\in (0,r_0)$ such that, for every $S\in {\mathcal UC}^2_c(\mathbb X)$, with $x_0\in{\rm int}\, S\subseteq\ball{x_0}{\rho}$ and $\immap{x_0}(S)$ closed, one has \begin{itemize} \item[(t)] there exists a global solution $\bar x_S\in R$ to $({\mathcal P})$; \item[(tt)] $\bar x_S\in{\rm bd}\, S$ and hence $\bar x_S\in{\rm bd}\, R$; \item[(ttt)] there exists $y^*_S\in\ncone{C}{g(\bar x_S)}$ such that $$ {\rm L}(y^*_S,\bar x_S)=\min_{x\in S}{\rm L}(y^*_S,x). $$ \end{itemize} \end{theorem} \begin{proof} (t) Under the hypotheses (ii) and (iii), one can apply Theorem \ref{thm:extPCP}. If $\rho>0$ is as in the thesis of Theorem \ref{thm:extPCP}, fix a set $S\in {\mathcal UC}^2_c(\mathbb X)$ satisfying all requirements in the above statement. Then its image $\immap{x_0}(S)$ turns out to be a convex, closed and bounded subset of $\mathbb R\times\mathbb Y$, with nonempty interior. The existence of a global solution to $({\mathcal P})$ will be achieved by proving that an associated minimization problem in the space $\mathbb R\times\mathbb Y$ does admit a global solution. To do so, define $$ \tau=\inf\{t:\ (t,y)\in \immap{x_0}(S)\cap Q\}. $$ Notice that $x_0\in R$. Since $\der{\immap{x_0}}{x_0}$ is onto, by the Lyusternik-Graves theorem the mapping $\immap{x_0}$ too is open at a linear rate around $x_0$. Thus, since $x_0\in{\rm int}\, S$, in the light of Lemma \ref{lem:lopnotsol} $x_0$ must fail to be a local (and hence, a fortiori, global) solution to $({\mathcal P})$. Consequently, according to what was observed in Remark \ref{rem:ISAlop} (i), it must be $$ \immap{x_0}(S)\cap Q\ne\varnothing. $$ This implies that $\tau<+\infty$. Furthermore, if setting \begin{equation} \label{eq:deftauisa} \bar\tau=\inf\{t:\ (t,y)\in \immap{x_0}(S)\cap {\rm cl}\, Q\}, \end{equation} it is possible to see that actually it is $\bar\tau=\tau$. 
Indeed, since $x_0$ is not a solution to $({\mathcal P})$, there exists $\hat x\in R$ such that $\varphi(\hat x)-\varphi(x_0)<0$, and so $\immap{x_0}(\hat x)=(\varphi(\hat x)-\varphi(x_0),g(\hat x)) \in\immap{x_0}(S)\cap Q$. As $\immap{x_0}(S)\cap Q\subseteq \immap{x_0}(S)\cap {\rm cl}\, Q$, it follows that $\bar\tau\le\tau\le \varphi(\hat x)-\varphi(x_0)<0$. Hence, for any $\epsilon\in (0,-\bar\tau)$ there exists $(t_\epsilon,y_\epsilon)\in\immap{x_0}(S) \cap {\rm cl}\, Q$ such that $t_\epsilon<\bar\tau+\epsilon<0$. Noting that ${\rm cl}\, Q=(-\infty,0]\times C$, this implies that $(t_\epsilon,y_\epsilon)\in\immap{x_0}(S)\cap Q$ and consequently that $\bar\tau\le\tau\le t_\epsilon<\bar\tau+\epsilon<0$. Letting $\epsilon\to 0^+$, one obtains $\bar\tau=\tau$. Now, as the set $\immap{x_0}(S)$ is closed, convex and bounded, so is its subset $\immap{x_0}(S)\cap {\rm cl}\, Q$. The boundedness of the latter implies that $\bar\tau>-\infty$. Moreover, by virtue of the hypothesis (i), $\immap{x_0}(S)\cap {\rm cl}\, Q$ turns out to be weakly compact. Since the projection mapping $\Pi_\mathbb R:\mathbb R\times\mathbb Y\longrightarrow\mathbb R$, given by $\Pi_\mathbb R(t,y)=t$ is continuous and convex, it is also weakly l.s.c., with the consequence that the infimum defined in $(\ref{eq:deftauisa})$ is actually attained at some $(\bar t,\bar y)\in\immap{x_0}(S)\cap {\rm cl}\, Q$. This means that there exists $\bar x_S\in S$ such that $$ \tau=\bar\tau=\bar t=\varphi(\bar x_S)-\varphi(x_0)\quad \hbox{ and }\bar y=g(\bar x_S) \in C. $$ Let us show that $\bar x_S$ is a global solution to $({\mathcal P})$. Assume to the contrary that there is $\hat x\in R$ such that $\varphi (\hat x)<\varphi(\bar x_S)$. Then, one finds \begin{eqnarray*} \hat t &=& \varphi(\hat x)-\varphi(x_0)=\varphi(\hat x)-\varphi(\bar x_S) +\varphi(\bar x_S)-\varphi(x_0) \\ &<& \varphi(\bar x_S)-\varphi(x_0)=\bar t=\bar\tau=\tau. 
\end{eqnarray*} Since it is $\hat x\in R$, then $\hat x\in S$ and $\hat y=g(\hat x)\in C$, wherefrom one has $(\hat t,\hat y)\in\immap{x_0}(S)\cap Q$, which contradicts the definition of $\tau$. (tt) To prove that $\bar x_S$ belongs to ${\rm bd}\, S$, notice that $(\bar t,\bar y)=\immap{x_0}(\bar x_S)\in{\rm bd}\, \immap{x_0}(S)$. Then, by recalling what mentioned in Remark \ref{rem:pointlopsimpl} (ii), this assertion follows from the openness at a linear rate of $\immap{x_0}$ around $x_0$. (ttt) Again remembering Remark \ref{rem:ISAlop} (i), by the global optimality of $\bar x_S$, it results in \begin{equation} \label{eq:barxSopt} \immap{\bar x_S}(S)\cap Q=\varnothing. \end{equation} As one readily checks, it holds $$ \immap{\bar x_S}(S)=\immap{x_0}(S)+(\varphi(x_0)- \varphi(\bar x_S),\mathbf{0}), $$ that is to say $\immap{\bar x_S}(S)$ is a translation of $\immap{x_0}(S)$. Therefore, $\immap{\bar x_S}(S)$ too is a closed, bounded, convex subset of $\mathbb R\times\mathbb Y$, with nonempty interior. Since $(\ref{eq:barxSopt})$ is true, the Eidelheit theorem makes it possible to linearly separate $\immap{\bar x_S}(S)$ and ${\rm cl}\, Q$. In other terms, this means the existence of a pair $(\gamma,y^*)\in(\mathbb R\times\mathbb Y)\backslash \{(0,\mathbf{0}^*)\}$ and $\alpha\in\mathbb R$ such that \begin{equation*} \gamma(\varphi(x)-\varphi(\bar x_S))+\langle y^*,g(x) \rangle\ge\alpha,\quad\forall x\in S, \end{equation*} and \begin{equation*} \gamma t+\langle y^*,y\rangle\le\alpha,\quad\forall (t,y)\in{\rm cl}\, Q=(-\infty,0]\times C. \end{equation*} The rest of the proof relies on a standard usage of the last inequalities and does not need to devise any specific adaptation. 
\end{proof} Theorem \ref{thm:constoptp} describes the local behaviour of a nonlinear optimization problem $({\mathcal P})$ near a point $x_0\in ({\rm int}\, S)\cap g^{-1}(C)$, around which the condition $(\ref{in:relipuconvcond})$ linking the modulus of convexity of $S$, the regularity behaviour of $\immap{x_0}$ and the Lipschitz continuity of its derivative happens to be satisfied: $({\mathcal P})$ admits a global solution, which lies at the boundary of the feasible region and can be detected by minimizing the Lagrangian function. The reader should notice that globality of a solution and its characterization as a minimizer of the Lagrangian function are phenomena typically occurring in convex optimization. Instead, they generally fail to occur in nonlinear optimization, where optimality conditions are usually only necessary or sufficient, and frequently expressed in terms of Lagrangian stationarity by means of first-order derivatives. Another typical phenomenon arising in convex optimization is the vanishing of the duality gap, i.e. the vanishing of the value $$ {\rm gap}\, ({\mathcal P})=\inf_{x\in S}\sup_{y^*\in C^{{}^\ominus}} {\rm L}(y^*,x)-\sup_{y^*\in C^{{}^\ominus}}\inf_{x\in S}{\rm L}(y^*,x), $$ where $C^{{}^\ominus}=\{y^*\in\mathbb Y^*:\ \langle y^*,y\rangle\le 0,\ \forall y\in C\}$ is the dual cone to $C$. Such a circumstance, which can be proved to take place in convex programming under proper qualification conditions, is known as strong (Lagrangian) duality. In the current setting, it can be readily achieved as a consequence of Theorem \ref{thm:constoptp}, without the need of extra assumptions, apart from the cone structure now imposed on the set $C$. \begin{corollary} Given a problem $({\mathcal P})$, suppose that $C$ is a closed convex cone. Under the hypotheses of Theorem \ref{thm:constoptp}, it holds $$ {\rm gap}\, ({\mathcal P})=0 $$ and there exists a pair $(y^*_S,\bar x_S)\in C^{{}^\ominus}\times R$, which is a saddle point of ${\rm L}$, i.e. 
$$ {\rm L}(y^*,\bar x_S)\le{\rm L}(y^*_S,\bar x_S)\le {\rm L}(y^*_S,x),\quad\forall (y^*,x)\in C^{{}^\ominus}\times S. $$ \end{corollary} \begin{proof} Let $\bar x_S$ and $y^*_S$ be as in the thesis of Theorem \ref{thm:constoptp}. Since $C$ is a closed convex cone, $2g(\bar x_S)$ and $\mathbf{0}$ belong to $C$. By recalling that $y^*_S\in\ncone{C}{g(\bar x_S)}$, one has $$ \langle y^*_S,y-g(\bar x_S)\rangle\le 0,\quad\forall y\in C. $$ By replacing $y$ with $2g(\bar x_S)$ and $\mathbf{0}$ in the last inequality, one easily shows that $\langle y^*_S,g(\bar x_S)\rangle=0$ and hence $y^*_S\in C^{{}^\ominus}$. The rest of the thesis then follows at once. \end{proof} The above applications of Theorem \ref{thm:extPCP} demonstrate that, even in the absence of convexity assumptions on the functional data of problem $({\mathcal P})$, some good phenomena connected with convexity may still appear. \begin{example} With reference to the problem format $({\mathcal P})$, let $\mathbb X=\mathbb R^2$, $\mathbb Y=\mathbb R$, $C=\{0\}$, and let $\varphi:\mathbb R^2\longrightarrow\mathbb R$ and $g:\mathbb R^2 \longrightarrow\mathbb R$ be defined respectively by $$ \varphi(x)=x_1^2-x_2^2,\qquad g(x)=x_1^2+x_2^2-1. $$ Take $x_0=(1/\sqrt{2},1/\sqrt{2})\in g^{-1}(0)=\mathbb S$ and $S=\ball{x_0}{r}$. With the above choice of data, the problem falls out of the realm of convex optimization: the objective function $\varphi$ is evidently not convex, nor is the feasible region $R=S\cap\mathbb S$, for every $r>0$. Throughout the present example, $\mathbb R^2$ is supposed to be equipped with its Euclidean space structure, so that $$ \delta_{\mathbb R^2}(\epsilon)\ge {\epsilon^2\over 8},\quad\forall \epsilon\in (0,2]. 
$$ Therefore, $S=\ball{x_0}{r}\in{\mathcal UC}^2(\mathbb R^2)$ and, according to the estimate in $(\ref{in:ucmscs})$, one finds $$ \delta_{\ball{x_0}{r}}(\epsilon)\ge r\delta_{\mathbb R^2} \left({\epsilon\over r}\right)={\epsilon^2\over 8r}, $$ that is $\ball{x_0}{r}\in{\mathcal UC}^2_{1/8r}(\mathbb R^2)$, for every $r>0$. Clearly, the function $\immap{x_0}:\mathbb R^2\longrightarrow\mathbb R^2$, which is given in this case by $$ \immap{x_0}(x)=\left(\begin{array}{c} x_1^2-x_2^2 \\ x_1^2+x_2^2-1 \end{array}\right), $$ satisfies the smoothness hypothesis of Theorem \ref{thm:constoptp}. In particular, since it is $$ \der{\immap{x_0}}{x}=\left(\begin{array}{cc} 2x_1 & -2x_2 \\ 2x_1 & 2x_2 \end{array}\right), $$ it results in $$ \reg{\immap{x_0}}{x_0}=\|\der{\immap{x_0}}{x_0}^{-1}\|_\mathcal{L}= \left\|{1\over 2\sqrt{2}}\left(\begin{array}{rr} 1 & 1 \\ -1 & 1 \end{array}\right)\right\|_\mathcal{L}= {1\over 2}. $$ On the other hand, since the mapping $\dif{\immap{x_0}}:\mathbb R^2\longrightarrow \mathcal{L}(\mathbb R^2,\mathbb R^2)$ is linear in this case, one finds \begin{eqnarray*} \lip{\dif{\immap{x_0}}}{\mathbb R^2} &=& \|\dif{\immap{x_0}}\|_\mathcal{L} =\max_{u\in\mathbb S}\|\der{\immap{x_0}}{u}\|_\mathcal{L} \\ &=& \max_{u\in\mathbb S}\max_{v\in\mathbb S}\|\der{\immap{x_0}}{u}v\| =2\sqrt{2}. \end{eqnarray*} Consequently, the condition $(\ref{in:relipuconvcond})$ becomes $$ {{1\over 2}\cdot 2\sqrt{2}\over 8}<{1\over 8r}. $$ Thus, for every $r<1/\sqrt{2}$, by virtue of Theorem \ref{thm:constoptp} assertions ${\rm (t)-(ttt)}$ hold. In particular, it is not difficult to check (for instance, by means of a level set inspection) that for every $S=\ball{x_0}{r}$, with $r<1/\sqrt{2}$, the unique (global) solution $\bar x_S$ of the related problem lies in ${\rm bd}\, S$. Notice that this fails to be true if $r>\sqrt{2-\sqrt{2}}=\|(0,1)-x_0\|>1/\sqrt{2}$, in which case the solution $\bar x_S=(0,1)$ belongs to ${\rm int}\, \ball{x_0}{r}={\rm int}\, S$. 
\end{example} \vskip1cm \begin{center} {\sc Acknowledgements} \end{center} The author thanks an anonymous referee for valuable remarks, which helped him to considerably improve the quality of the paper; in particular, for pointing out a crucial gap in the original proof of Theorem \ref{thm:constoptp} as well as several inaccuracies. \vskip1cm \end{document}
\begin{document} \title{Binarsity: a penalization for one-hot encoded features in linear supervised learning} \author{ Mokhtar Z. Alaya\footnote{LPSM, CNRS UMR 8001, Sorbonne University, Paris France} \and Simon Bussy\footnotemark[1] \and St\'ephane Ga\"iffas\footnote{LPSM, CNRS UMR 8001, Universit\'e Paris Diderot, Paris, France} \and Agathe Guilloux\footnote{LaMME, UEVE and UMR 8071, Universit\'e Paris Saclay, Evry, France} } \maketitle \begin{abstract} This paper deals with the problem of large-scale linear supervised learning in settings where a large number of continuous features are available. We propose to combine the well-known trick of one-hot encoding of continuous features with a new penalization called \emph{binarsity}. In each group of binary features coming from the one-hot encoding of a single raw continuous feature, this penalization uses total-variation regularization together with an extra linear constraint. This induces two interesting properties on the model weights of the one-hot encoded features: they are piecewise constant, and are eventually block sparse. Non-asymptotic oracle inequalities for generalized linear models are proposed. Moreover, under a sparse additive model assumption, we prove that our procedure matches the state-of-the-art in this setting. Numerical experiments illustrate the good performances of our approach on several datasets. It is also noteworthy that our method has a numerical complexity comparable to standard $\ell_1$ penalization. \noindent \emph{Keywords.} Supervised learning; Features binarization; Sparse additive modeling; Total-variation; Oracle inequalities; Proximal methods \end{abstract} \section{Introduction} In many applications, datasets used for linear supervised learning contain a large number of continuous features, with a large number of samples. 
An example is web-marketing, where features are obtained from bag-of-words scaled using tf-idf~\cite{russell2013mining}, recorded during the visit of users on websites. A well-known trick~\cite{WuCog2012,LiuHussTanDas2002} in this setting is to replace each raw continuous feature by a set of binary features that one-hot encodes the interval containing it, among a list of intervals partitioning the raw feature range. This improves the linear decision function with respect to the raw continuous features space, and can therefore improve prediction. However, this trick is prone to over-fitting, since it increases significantly the number of features. \paragraph{A new penalization.} To overcome this problem, we introduce a new penalization called \emph{binarsity}, that penalizes the model weights learned from such grouped one-hot encodings (one group for each raw continuous feature). Since the binary features within these groups are naturally ordered, the binarsity penalization combines a group total-variation penalization, with an extra linear constraint in each group to avoid collinearity between the one-hot encodings. This penalization forces the weights of the model to be as constant (with respect to the order induced by the original feature) as possible within a group, by selecting a minimal number of relevant cut-points. Moreover, if the model weights are all equal within a group, then the full block of weights is zero, because of the extra linear constraint. This allows to perform raw feature selection. \paragraph{High-dimensional linear supervised learning.} To address the high-dimensionality of features, sparse linear inference is now an ubiquitous technique for dimension reduction and variable selection, see for instance~\cite{BuhVan-11} and \cite{ESL} among many others. The principle is to induce sparsity (large number of zeros) in the model weights, assuming that only a few features are actually helpful for the label prediction. 
The most popular way to induce sparsity in model weights is to add a $\ell_1$-penalization (Lasso) term to the goodness-of-fit~\cite{Tib-96}. This typically leads to sparse parametrization of models, with a level of sparsity that depends on the strength of the penalization. Statistical properties of $\ell_1$-penalization have been extensively investigated, see for instance~\cite{KniFu-00,zhaoconsistency2006, bunea2007oracle,BicRitTsy-09} for linear and generalized linear models and~\cite{donoho12001,Donoho02optimallysparse,candes2008a,candes2008b} for compressed sensing, among others. However, the Lasso ignores ordering of features. In~\cite{TibRosZhuKni-05}, a structured sparse penalization is proposed, known as fused Lasso, which provides superior performance in recovering the true model in such applications where features are ordered in some meaningful way. It introduces a mixed penalization using a linear combination of the $\ell_1$-norm and the total-variation penalization, thus enforcing sparsity in both the weights and their successive differences. Fused Lasso has achieved great success in some applications such as comparative genomic hybridization~\cite{rapaport2008}, image denoising~\cite{FriHasHofTib-07}, and prostate cancer analysis~\cite{TibRosZhuKni-05}. \paragraph{Features discretization and cuts.} For supervised learning, it is often useful to encode the input features in a new space to let the model focus on the relevant areas~\cite{WuCog2012}. One of the basic encoding technique is \emph{feature discretization} or \emph{feature quantization}~\cite{LiuHussTanDas2002} that partitions the range of a continuous feature into intervals and relates these intervals with meaningful labels. Recent overviews of discretization techniques can be found in~\cite{LiuHussTanDas2002} or~\cite{GarLueSaeLopHer2013}. 
Obtaining the optimal discretization is a NP-hard problem~\cite{ChlNgu1998}, and an approximation can be easily obtained using a greedy approach, as proposed in decision trees: CART~\cite{BreFriOlsSto-84} and C4.5~\cite{Qui-93}, among others, that sequentially select pairs of features and cuts that minimize some purity measure (intra-variance, Gini index, information gain are the main examples). These approaches build decision functions that are therefore very simple, by looking only at a single feature at a time, and a single cut at a time. Ensemble methods (boosting~\cite{lugosi2004bayes}, random forests~\cite{breiman2001random}) improve this by combining such decisions trees, at the expense of models that are harder to interpret. \paragraph{Main contribution.} This paper considers the setting of linear supervised learning. The main contribution of this paper is the idea to use a total-variation penalization, with an extra linear constraint, on the weights of a generalized linear model trained on a binarization of the raw continuous features, leading to a procedure that selects multiple cut-points per feature, looking at all features simultaneously. Our approach therefore increases the capacity of the considered generalized linear model: several weights are used for the binarized features instead of a single one for the raw feature. This leads to a more flexible decision function compared to the linear one: when looking at the decision function as a function of a single raw feature, it is now piecewise constant instead of linear, as illustrated in Figure~\ref{figure-DecisionFunc} below. \paragraph{Organization of the paper.} The proposed methodology is described in Section~\ref{section:methodology}. Section~\ref{sec:theoretical_results} establishes an oracle inequality for generalized linear models and provides a convergence rate for our procedure in the particular case of a sparse additive model. 
Section~\ref{section:btv-experiments} highlights the results of the method on various datasets and compares its performances to well known classification algorithms. Finally, we discuss the obtained results in Section~\ref{section:discussion}. \paragraph*{Notations.} Throughout the paper, for every $q > 0,$ we denote by $\norm{v}_q$ the usual $\ell_q$-quasi norm of a vector $v \in {\mathbb{R}}^m,$ namely $\norm{v}_q =(\sum_{k=1}^m|v_k|^q)^{1/q}$, and $\norm{v}_\infty = \max_{k=1, \ldots, m}|v_k|$. We also denote $\norm{v}_0 = |\{k : v_k \neq 0\}|$, where $|A|$ stands for the cardinality of a finite set $A$. For $u, v \in {\mathbb{R}}^m$, we denote by $u \odot v$ the Hadamard product $u\odot v =(u_1v_1, \ldots, u_mv_m)^\top.$ For any $u \in {\mathbb{R}}^m$ and any $L \subset \{1, \ldots, m\},$ we denote $u_L$ as the vector in ${\mathbb{R}}^m$ satisfying $(u_L)_k = u_k$ for $k \in L$ and $(u_L)_k = 0$ for $ k \in L^\complement = \{1, \ldots, m\}\backslash L$. We write, for short, $\mathbf{1}$ (resp. $\mathbf{0}$) for the vector of ${\mathbb{R}}^m$ having all coordinates equal to one (resp. zero). Finally, we denote by $\sgn(x)$ the set of sub-differentials of the function $x \mapsto |x|$, namely $\sgn(x) = \{ 1\}$ if $x > 0$, $\sgn(x) = \{ -1 \}$ if $x < 0$ and $\sgn(0) = [-1, 1]$. \section{The proposed method} \label{section:methodology} Consider a supervised training dataset $(x_i, y_i)_{i=1, \ldots, n}$ containing features $x_i = [x_{i,1} \cdots x_{i,p}]^\top \in {\mathbb{R}}^p$ and labels $y_i \in {\mathcal{Y}} \subset {\mathbb{R}}$, that are independent and identically distributed samples of $(X, Y)$ with unknown distribution $\mathds{P}$. Let us denote ${\boldsymbol X} = [x_{i,j}]_{1 \leq i \leq n; 1 \leq j \leq p}$ the $n \times p$ features matrix vertically stacking the $n$ samples of $p$ raw features. Let ${\boldsymbol X}_{\bulletlet, j}$ be the $j$-th feature column of~${\boldsymbol X}$. 
\paragraph{Binarization.} The binarized matrix $\boldsymbol{X}^{{B}}$ is a matrix with an extended number $d > p$ of columns, where the $j$-th column ${\boldsymbol X}_{\bulletlet, j}$ is replaced by $d_j \geq 2$ columns $\boldsymbol{X}^{{B}}_{\bulletlet, j, 1}, \ldots, \boldsymbol{X}^{{B}}_{\bulletlet, j, d_j}$ containing only zeros and ones. Its $i$-th row is written \begin{equation*} x_i^B = [x^B_{i,1,1} \cdots x^B_{i,1,d_1} x^B_{i,2,1} \cdots x^B_{i,2,d_2} \cdots x^B_{i,p,1} \cdots x^B_{i,p, d_p}]^\top \in {\mathbb{R}}^d, \end{equation*} where $d = \sum_{j=1}^p d_j$. In order to simplify the presentation of our results, we assume in the paper that all raw features ${\boldsymbol X}_{\bulletlet, j}$ are continuous, so that they are transformed using the following one-hot encoding. For each raw feature $j$, we consider a partition of intervals $I_{j,1}, \ldots, I_{j, d_j}$ of $\text{range}({\boldsymbol X}_{\bulletlet, j})$, namely satisfying $\cup_{k=1}^{d_j}I_{j,k} = \text{range}({\boldsymbol X}_{\bulletlet, j})$ and $I_{j,k} \cap I_{j,k'} = \varnothing$ for $k \neq k'$ and define \begin{equation*} x^B_{i, j, k} = \begin{cases} 1 &\text{ if } x_{i,j} \in I_{j, k}, \\ 0 & \text{ otherwise} \end{cases} \end{equation*} for $i=1, \ldots, n$, $j=1, \ldots, p$ and $k=1, \ldots, d_j$. An example is interquantiles intervals, namely $I_{j, 1} = \big[ q_j(0), q_j(\frac {1}{d_j})\big]$ and $I_{j, k} = \big(q_j(\frac {k-1}{d_j}) , q_j(\frac {k}{d_j}) \big]$ for $k=2, \ldots, d_j$, where $q_j(\alpha)$ denotes a quantile of order $\alpha \in [0, 1]$ for ${\boldsymbol X}_{\bulletlet, j}$. In practice, if there are ties in the estimated quantiles for a given feature, we simply choose the set of ordered unique values to construct the intervals. 
This principle of binarization is a well-known trick~\cite{GarLueSaeLopHer2013}, that allows to improve over the linear decision function with respect to the raw feature space: it uses a larger number of model weights, for each interval of values for the feature considered in the binarization. If training data contains also unordered qualitative features, one-hot encoding with $\ell_1$-penalization can be used for instance. \paragraph{Goodness-of-fit.} Given a loss function $\ell : {\mathcal{Y}} \times {\mathbb{R}} \rightarrow {\mathbb{R}}$, we consider the goodness-of-fit term \begin{equation} \label{eq:gof} R_n(\theta) = \frac 1n \sum_{i=1}^n \ell(y_i, m_\theta(x_i)), \end{equation} where $m_\theta(x_i) = \theta^\top x_i^B$ and $\theta \in {\mathbb{R}}^d$ where we recall that $d = \sum_{j=1}^p d_j$. We then have $\theta = [\theta_{1, \bulletlet}^\top \cdots \theta_{p,\bulletlet}^\top]^\top$, with $\theta_{j,\bulletlet}$ corresponding to the group of coefficients weighting the binarized raw $j$-th feature. We focus on generalized linear models~\cite{green1994}, where the conditional distribution $Y | X = x$ is assumed to be from a one-parameter exponential family distribution with a density of the form \begin{equation} \label{distribut-glm} y | x \mapsto f^0(y | x) = \exp\Big(\frac{ym^0(x) - b(m^0(x))}{\phi} + c(y,\phi)\Big), \end{equation} with respect to a reference measure which is either the Lebesgue measure (e.g. in the Gaussian case) or the counting measure (e.g. in the logistic or Poisson cases), leading to a loss function of the form \begin{equation*} \ell\big(y_1, y_2) = - y_1 y_2 + b(y_2). \end{equation*} The density described in~\eqref{distribut-glm} encompasses several distributions, see Table~\ref{table:glm}. The functions $b(\cdot)$ and $c(\cdot)$ are known, while the natural parameter function $m^0(\cdot)$ is unknown. The dispersion parameter $\phi$ is assumed to be known in what follows. 
It is also assumed that $b(\cdot)$ is three times continuously differentiable. It is standard to notice that \begin{equation*} \mathds{E}[Y|X=x] = \int yf^0(y | x) dy = b'(m^0(x)), \end{equation*} where $b'$ stands for the derivative of $b$. This formula explains how $b'$ links the conditional expectation to the unknown $m^0$. The results given in Section~\ref{sec:theoretical_results} rely on the following Assumption. \begin{assumption} \label{ass:glm} Assume that $b$ is three times continuously differentiable, that there is $C_b > 0$ such that $|b'''(z)| \leq C_b |b''(z)|$ for any $z \in {\mathbb{R}}$ and that there exist constants $C_n > 0$ and $0 < L_n \leq U_n$ such that $C_n = \max_{i=1, \ldots, n}|m^0(x_i)| < \infty$ and $L_n \leq \max_{i=1, \ldots, n} b''\big(m^0(x_i)\big) \leq U_n.$ \end{assumption} This assumption is satisfied for most standard generalized linear models. In Table~\ref{table:glm}, we list some standard examples that fit in this framework, see also~\cite{vandegeer2008} and~\cite{rigollet2012}. 
\begin{table}[htb] \centering \begin{tabular}{ccccccccc} \toprule Model & $\phi$ & $b(z)$ & $b'(z)$ & $b''(z)$ & $b'''(z)$ & $C_b$ & $L_n$ & $U_n$\\ \midrule Normal & $\sigma^2$ & $\frac{z^2}{2}$ & $z$ & $1$ & $0$ & $0$ & $1$ & $1$ \\ Logistic & $1$ & $\log(1 + e^z)$&$\frac{e^z}{1+e^z}$ & $\frac{e^z}{(1+e^z)^2}$ &$\frac{1 - e^z}{1+e^z} b''(z)$ & 2 & $\frac{e^{C_n}}{(1 + e^{C_n})^2}$ & $\frac{1}{4}$\\ Poisson & $1$ & $e^z$ & $e^z$ & $e^z$ & $b''(z)$ & 1 & $e^{-C_n}$ & $e^{C_n}$\\ \bottomrule \end{tabular} \caption[table]{\small Examples of standard distributions that fit in the considered setting of generalized linear models, with the corresponding constants in Assumption~\ref{ass:glm}.} \label{table:glm} \end{table} \paragraph{Binarsity.} Several problems occur when using the binarization trick described above: \begin{enumerate} \item[(P1)] The one-hot-encodings satisfy $\sum_{k=1}^{d_j} \boldsymbol{X}^{{B}}_{i, j, k} = 1$ for $j=1, \ldots, p$, meaning that the columns of each block sum to $\mathbf{1}$, making $\boldsymbol{X}^{{B}}$ not of full rank by construction. \item[(P2)] Choosing the number of intervals $d_j$ for binarization of each raw feature $j$ is not an easy task, as too many might lead to overfitting: the number of model-weights increases with each $d_j$, leading to a over-parametrized model. \item[(P3)] Some of the raw features ${\boldsymbol X}_{\bulletlet, j} $ might not be relevant for the prediction task, so we want to select raw features from their one-hot encodings, namely induce block-sparsity in $\theta$. \end{enumerate} A usual way to deal with (P1) is to impose a linear constraint~\cite{agresti2015foundations} in each block. In order to do so, let us introduce first $n_{j, k} = | \{ i : x_{i, j} \in I_{j, k} \} |$ and the vector $n_j = [n_{j, 1} \cdots n_{j, d_j}] \in {\mathbb{N}}^{d_j}$. 
In our penalization term, we impose the linear constraint \begin{equation} \label{eq:linear_constraint} n_j^\top \theta_{j, \bullet} = \sum_{k=1}^{d_j} n_{j, k} \theta_{j,k} = 0 \end{equation} for all $j=1, \ldots, p$. Note that if the $I_{j, k}$ are taken as interquantiles intervals, then for each~$j$, we have that $n_{j, k}$ for $k=1, \ldots, d_j$ are equal and the constraint~\eqref{eq:linear_constraint} becomes the standard constraint $\sum_{k=1}^{d_j} \theta_{j,k} = 0$. The trick to tackle (P2) is to remark that within each block, binary features are ordered. We use a within block total-variation penalization \begin{equation*} \sum_{j=1}^p \norm{\theta_{j, \bulletlet}}_{\TV,\hat w_{j,\bulletlet}} \end{equation*} where \begin{equation} \label{eq:tv_no_linear_constraint} \norm{\theta_{j,\bulletlet}}_{\TV,\hat w_{j,\bulletlet}} = \sum_{k=2}^{d_j} \hat w_{j,k} |\theta_{j, k} - \theta_{j, k-1}|, \end{equation} with weights $\hat w_{j, k} > 0$ to be defined later, to keep the number of different values taken by $\theta_{j, \bulletlet}$ to a minimal level. Finally, dealing with (P3) is actually a by-product of dealing with (P1) and (P2). Indeed, if the raw feature $j$ is not-relevant, then $\theta_{j, \bulletlet}$ should have all entries constant because of the penalization~\eqref{eq:tv_no_linear_constraint}, and in this case all entries are zero, because of~\eqref{eq:linear_constraint}. We therefore introduce the following penalization, called \emph{binarsity} \begin{equation} \label{eq:binarsity} \bina(\theta) = \sum_{j=1}^p \Big( \sum_{k=2}^{d_j} \hat w_{j,k}| \theta_{j, k} -\theta_{j, {k-1}}| + \delta_j(\theta_{j, \bulletlet}) \Big) \end{equation} where the weights $\hat w_{j, k} > 0$ are defined in Section~\ref{sec:theoretical_results} below, and where \begin{equation} \label{eq:def_delta_j} \delta_j(u) = \begin{cases} 0 \quad &\text{ if } \quad n_j^\top u = 0, \\ \infty &\text{ otherwise}. 
\end{cases} \end{equation} We consider the goodness-of-fit~\eqref{eq:gof} penalized by~\eqref{eq:binarsity}, namely \begin{equation} \label{model:general_0} \hat \theta \in \argmin_{\theta \in {\mathbb{R}}^d } \big\{R_n(\theta) + \bina(\theta) \big\}. \end{equation} An important fact is that this optimization problem is numerically cheap, as explained in the next paragraph. Figure~\ref{figure-binarization-features-agg} illustrates the effect of the binarsity penalization with a varying strength on an example. \begin{figure} \caption{\small Illustration of the binarsity penalization on the ``Churn'' dataset (see Section~\ref{section:btv-experiments} \label{figure-binarization-features-agg} \end{figure} In Figure~\ref{figure-DecisionFunc}, we illustrate on a toy example, when $p=2$, the decision boundaries obtained for logistic regression (LR) on raw features, LR on binarized features and LR on binarized features with the binarsity penalization. \begin{figure*} \caption{\small Illustration of binarsity on 3 simulated toy datasets for binary classification with two classes (blue and red points). We set $n=1000$, $p=2$ and $d_1=d_2=100$. In each row, we display the simulated dataset, followed by the decision boundaries for a logistic regression classifier trained on initial raw features, then on binarized features without regularization, and finally on binarized features with binarsity. The corresponding testing AUC score is given on the lower right corner of each figure. 
Our approach allows us to keep an almost linear decision boundary in the first row, while good decision boundaries are learned on the other two examples, which correspond to non-linearly separable datasets, without apparent overfitting.} \label{figure-DecisionFunc} \end{figure*} \paragraph{Proximal operator of binarsity.} The proximal operator and proximal algorithms are important tools for non-smooth convex optimization, with important applications in the field of supervised learning with structured sparsity \cite{bach2012optimization}. The proximal operator of a proper lower semi-continuous~\cite{BauCom-11} convex function $g : {\mathbb{R}}^d \rightarrow {\mathbb{R}}$ is defined by \begin{equation*} \prox_g(v) \in \argmin_{u \in {\mathbb{R}}^d} \Big\{\frac 12 \norm{v- u}_2^2 + g(u) \Big\}. \end{equation*} Proximal operators can be interpreted as generalized projections. Namely, if $g$ is the indicator of a convex set $C \subset {\mathbb{R}}^d$ given by \begin{equation*} g(u) = \delta_C(u) = \begin{cases} 0 &\text{ if } u \in C, \\ \infty &\text{ otherwise, } \end{cases} \end{equation*} then $\prox_g$ is the projection operator onto $C$. It turns out that the proximal operator of binarsity can be computed very efficiently, using an algorithm~\cite{Cond-13} that we modify in order to include weights $\hat w_{j, k}$. Since the binarsity penalization is block separable, it applies in each group the proximal operator of the total-variation, followed by a simple projection onto $\text{span}(n_j)^\perp$, the orthogonal complement of~$\text{span}(n_j)$, see Algorithm~\ref{algorithm-primal-computation} below. We refer to Algorithm~\ref{algorithm-weighted-TV-agg} in Section~\ref{appendix:proximal-operator-wTV} for the weighted total-variation proximal operator. \begin{proposition} \label{proposition:prox-btv-primal} Algorithm~\ref{algorithm-primal-computation} computes the proximal operator of $\bina(\theta)$ given by~\eqref{eq:binarsity}. \LinesNotNumbered \begin{algorithm}[htp!] 
\SetNlSty{textbf}{}{.} \DontPrintSemicolon \caption{Proximal operator of $\bina(\theta)$, see~\eqref{eq:binarsity}} \label{algorithm-primal-computation} \KwIn{vector $\theta \in {\mathbb{R}}^d$ and weights $\hat w_{j, k}$ and $n_{j, k}$ for $j=1, \ldots, p$ and $k=1, \ldots, d_j$} \KwOut{vector $\eta = \prox_{\bina}(\theta)$} \For{$j=1$ {\bfseries to} $p$}{ $\beta_{j,\bulletlet} \gets \prox_{\norm{\theta_{j,\bulletlet}}_{\TV,\hat w_{j,\bulletlet}}}(\theta_{j, \bulletlet})$ (TV-weighted prox in block $j$, see~\eqref{eq:tv_no_linear_constraint}) \\ $\eta_{j,\bulletlet} \gets \beta_{j,\bulletlet} - \frac{n_j^\top \beta_{j,\bulletlet}}{\norm{n_j}_2^2} n_j$ (projection onto $\text{span}(n_j)^\perp$) } \textbf{Return:} {$\eta$} \end{algorithm} \end{proposition} A proof of Proposition~\ref{proposition:prox-btv-primal} is given in Section~\ref{appendix:proof-of-proposition:prox-btv-primal}. Algorithm~\ref{algorithm-primal-computation} leads to a very fast numerical routine, see Section~\ref{section:btv-experiments}. The next section provides a theoretical analysis of our algorithm with an oracle inequality for the prediction error, together with a convergence rate in the particular case of a sparse additive model. \section{Theoretical guarantees} \label{sec:theoretical_results} We now investigate the statistical properties of~\eqref{model:general} where the weights in the binarsity penalization have the form \begin{equation*} \hat w_{j,k} = O \Big(\sqrt{\frac{\log d}{n} \ \hat \pi_{j,k}} \Big), \quad \text{ with } \quad \hat \pi_{j,k} = \frac{\big|\big\{ i=1, \ldots, n: x_{i,j} \in \cup_{k'=k}^{d_j} I_{j, k'} \big\}\big|}{n} \end{equation*} for all $k \in \{2, \ldots, d_j\}$, see Theorem~\ref{thm:oracle} for a precise definition of $\hat w_{j, k}$. 
Note that $\hat \pi_{j,k}$ corresponds to the proportion of ones in the sub-matrix obtained by deleting the first $k$ columns in the $j$-th binarized block matrix $\boldsymbol{X}^{{B}}_{\bulletlet,j}.$ In particular, we have $\hat \pi_{j,k} > 0$ for all $j, k$. We consider the risk measure defined by \begin{equation*}\label{eqn:risk} R(m_{\theta}) =\frac{1}{n} \sum_{i=1}^n \big\{- b'(m^0(x_i)) m_{\theta}(x_i) + b(m_{\theta}(x_i))\big\}, \end{equation*} which is standard with generalized linear models~\cite{vandegeer2008}. \subsection{A general oracle inequality} We aim at evaluating how ``close'' to the minimal possible expected risk our estimated function $m_{\hat{\theta}}$ with $\hat \theta$ given by~\eqref{model:general} is. To measure this closeness, we establish a non-asymptotic oracle inequality with a fast rate of convergence considering the excess risk of $m_{\hat\theta}$, namely $R(m_{\hat\theta}) - R(m^0)$. To derive this inequality, we consider for technical reasons the following problem instead of~\eqref{model:general_0}: \begin{equation} \label{model:general} \hat \theta \in \argmin_{\theta \in B_d(\rho)} \big\{R_n(\theta) + \bina(\theta) \big\}, \end{equation} where \begin{equation*} B_d(\rho) = \Big \{\theta \in {\mathbb{R}}^d: \sum_{j=1}^p\norm{\theta_{j , \bulletlet}}_\infty \leq \rho \Big\}. \end{equation*} This constraint is standard in literature for the proof of oracle inequalities for sparse generalized linear models, see for instance~\cite{vandegeer2008}, and is discussed in details below. We also impose a restricted eigenvalue assumption on $\boldsymbol{X}^{{B}}.$ For all $\theta \in {\mathbb{R}}^d,$ let $J(\theta)= [J_1 (\theta),\ldots, J_p (\theta)]$ be the concatenation of the support sets relative to the total-variation penalization, that is \begin{equation*} J_j(\theta)= \{k = 2, \ldots, d_j \; : \; \theta_{j,k} \neq \theta_{j,k-1} \}. 
\end{equation*} Similarly, we denote $J^\complement(\theta)= \big[J_1^\complement(\theta),\ldots,J_p^\complement(\theta)\big]$ the complementary of $J(\theta).$ The restricted eigenvalue condition is defined as follows. \begin{assumption} \label{assumption:RE-XB} Let $K =[K_1, \ldots, K_p]$ be a concatenation of index sets such that \begin{equation} \label{eq:max_K_Jstar} \sum_{j=1}^p |K_j| \leq J^\star, \end{equation} where $J^\star$ is a positive integer. Define \begin{equation*} \normalfont \kappa (K) = \inf\limits_{\substack{u \in \mathscr{C}_{\TV,\hat w}(K)\backslash\{\mathbf{0}\}}}\Bigg\{\frac{\norm{\boldsymbol{X}^{{B}} u}_2}{\sqrt{n} \norm{u_K}_2} \Bigg\} \end{equation*} with \begin{equation} \normalfont \mathscr{C}_{ \TV, \hat w}(K) \stackrel{}{=} \bigg\{u \in {\mathbb{R}}^d: \sum_{j=1}^p\norm{(u_{j, \bulletlet})_{{K_j}^\complement}}_{ \TV, \hat w_{j,\bulletlet}} \leq 2\sum_{j=1}^p \norm{(u_{j, \bulletlet})_{K_j}}_{ \TV, \hat w_{j,\bulletlet}} \bigg\}. \label{C-AGG} \end{equation} We assume that the following condition holds \begin{equation} \label{eq:kappa-RE-XB} \normalfont \kappa (K) > 0 \end{equation} for any $K$ satisfying~\eqref{eq:max_K_Jstar}. \end{assumption} The set $\mathscr{C}_{\TV, \hat w}(K)$ is a cone composed of all vectors with a support ``close'' to $K$. Theorem~\ref{thm:oracle} gives a risk bound for the estimator $m_{\hat\theta}$. \begin{theorem} \label{thm:oracle} Let Assumptions~\ref{ass:glm} and~\ref{assumption:RE-XB} be satisfied. Fix $A >0$ and choose \begin{equation} \label{choice-of-weights-sq-slow-GLM} \hat w_{j,k} = \sqrt{\frac{2U_n\phi(A +\log d)}{n}\,\hat \pi_{j,k}}. \end{equation} 
\end{equation} Then, with probability at least $1 -2e^{-A}$, any $\hat \theta$ given by~\eqref{model:general} satisfies \begin{align*} \label{fast-oracle-thm} R(m_{\hat \theta}) - R(m^0) \leq \inf_{\theta} \Big\{ & 3 (R(m_{\theta}) - R(m^0)) \\ & \quad + \frac{2560 (C_b(C_n + \rho) + 2)}{L_n \kappa^2(J(\theta))} \; |J(\theta)| \; \max_{j=1, \ldots, p} \norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2 \Big\}, \end{align*} where the infimum is over the set of vectors $\theta \in B_d(\rho)$ such that $n_j^\top \theta_{j, \bulletlet} = 0$ for all $j=1, \ldots, p$ and such that $|J(\theta)| \leq J^*$. \end{theorem} The proof of Theorem~\ref{thm:oracle} is given in Section~\ref{proof-fast-oracle-ineq-bina} below. Note that the ``variance'' term or ``complexity'' term in the oracle inequality satisfies \begin{equation} \label{complex-term-thm1} |J(\theta)| \max_{j=1, \ldots, p}\norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2 \leq 2 U_n\phi \frac{|J(\theta)|(A + \log d)}{n}. \end{equation} The value $|J(\theta)|$ characterizes the sparsity of the vector $\theta$, given by \begin{equation*} |J(\theta)| = \sum_{j=1}^p |J_j(\theta)| = \sum_{j=1}^p | \{k = 1, \ldots, d_j : \theta_{j,k} \neq \theta_{j,k-1} \}|. \end{equation*} It counts the number of non-equal consecutive values of $\theta$. If $\theta$ is block-sparse, namely whenever $|\mathcal{J}(\theta)| \ll p$ where $\mathcal{J}(\theta) = \{ j = 1, \ldots, p : \theta_{j, \bullet} \neq 0_{d_j} \}$ (meaning that few raw features are useful for prediction), then $|J(\theta)| \leq |\mathcal{J}(\theta)| \max_{j \in \mathcal{J}(\theta)} |J_j(\theta)|$, which means that $|J(\theta)|$ is controlled by the block sparsity $|\mathcal{J}(\theta)|$. The oracle inequality from Theorem~\ref{thm:oracle} is stated uniformly for vectors $\theta \in B_d(\rho)$ satisfying $n_j^\top \theta_{j, \bulletlet} = 0$ for all $j=1, \ldots, p$ and $|J(\theta)| \leq J^*$. 
Writing this oracle inequality under the assumption $|J(\theta)| \leq J^*$ meets the standard way of stating sparse oracle inequalities, see e.g.~\cite{BuhVan-11}. Note that $J^*$ is introduced in Assumption~\ref{assumption:RE-XB} and corresponds to a maximal sparsity for which the matrix ${\boldsymbol X}^B$ satisfies the restricted eigenvalue assumption. Also, the oracle inequality stated in Theorem~\ref{thm:oracle} stands for vectors such that $n_j^\top \theta_{j, \bulletlet} = 0$, which is natural since the binarsity penalization imposes these extra linear constraints. The assumption that $\theta \in B_d(\rho)$ is a technical one, that allows to establish a connection, via the notion of self-concordance, see~\cite{bach2010selfconcordance}, between the empirical squared $\ell_2$-norm and the empirical Kullback divergence (see Lemma~\ref{lemma-connection-L2-KL} in Section~\ref{proof-fast-oracle-ineq-bina}). It corresponds to a technical constraint which is commonly used in literature for the proof of oracle inequalities for sparse generalized linear models, see for instance~\cite{vandegeer2008}, a recent contribution for the particular case of Poisson regression being~\cite{ivanoff2016adaptive}. Also, note that \begin{equation} \label{lemma:control_inner_ball} \max_{i=1,\ldots,n} | \inr{x_i^B, \theta} | \leq \sum_{j=1}^p \norm{\theta_{j , \bulletlet}}_\infty \leq |\mathcal{J}(\theta)| \times \norm{\theta}_\infty, \end{equation} where $\norm{\theta}_\infty = \max_{j=1, \ldots, p} \norm{\theta_{j, \bullet}}_\infty$. The first inequality in~\eqref{lemma:control_inner_ball} comes from the fact that the entries of $\boldsymbol{X}^{{B}}$ are in $\{0, 1\}$, and it entails that $ \max_{i=1,\ldots,n} | \inr{x_i^B, \theta} | \leq \rho$ whenever $\theta \in B_d(\rho)$. 
The second inequality in~\eqref{lemma:control_inner_ball} entails that $\rho$ can be upper bounded by $|\mathcal{J}(\theta)| \times \norm{\theta}_\infty$, and therefore the constraint $\theta \in B_d(\rho)$ becomes only a box constraint on $\theta$, which depends on the dimensionality of the features through $|\mathcal{J}(\theta)|$ only. The fact that the procedure depends on $\rho$, and that the oracle inequality stated in Theorem~\ref{thm:oracle} depends linearly on $\rho$ is commonly found in literature about sparse generalized linear models, see~\cite{vandegeer2008,bach2010selfconcordance,ivanoff2016adaptive}. However, the constraint $B_d(\rho)$ is a technicality which is not used in the numerical experiments provided in Section~\ref{section:btv-experiments} below. In the next Section, we exhibit a consequence of Theorem~\ref{thm:oracle}, whenever one considers the Gaussian case (least-squares loss) and where $m^0$ has a sparse additive structure defined below. This structure allows to control the bias term from Theorem~\ref{thm:oracle} and to exhibit a convergence rate. \subsection{Sparse linear additive regression} \label{sub:sparse_linear_additive_regression} Theorem~\ref{thm:oracle} allows to study a particular case, namely an additive model, see e.g.~\cite{hastie1990generalized,horowitz2006optimal} and in particular a sparse additive linear model, which is of particular interest in high-dimensional statistics, see~\cite{meier2009high,ravikumar2009sparse,BuhVan-11}. We prove in Theorem~\ref{thm:additive} below that our procedure matches the convergence rates previously known from literature. In this setting, we work under the following assumptions. \begin{assumption} \label{ass:additive-model} We assume to simplify that $x_i \in [0, 1]^d$ for all $i=1, \ldots, n$. 
We consider the Gaussian setting with the least-squares loss, namely $\ell(y, y') = \frac 12 (y - y')^2$, $b(y) = \frac 12 y^2$ and $\phi = \sigma^2$ (noise variance) in Equation~\eqref{distribut-glm}, with $L_n = U_n = 1$, $C_b = 0$ in Assumption~\ref{ass:glm}. Moreover, we assume that $m^0$ has the following sparse additive structure \begin{equation*} m^0(x) = \sum_{j \in \mathcal{J}_*} m_j^0(x_j) \end{equation*} for $x = [x_1 \cdots x_p] \in {\mathbb{R}}^p$, where $m_j^0 : {\mathbb{R}} \rightarrow {\mathbb{R}}$ are $L$-Lipschitz functions, namely satisfying $|m_j^0(z) - m_j^0(z')| \leq L |z - z'|$ for any $z, z' \in {\mathbb{R}}$, and where $\mathcal{J}_* \subset \{ 1, \ldots, p \}$ is a set of active features (sparsity means that $|\mathcal{J}_*| \ll p$). Also, we assume the following identifiability condition \begin{equation*} \sum_{i=1}^n m_j^0(x_{i, j}) = 0 \end{equation*} for all $j=1, \ldots, p$. \end{assumption} Assumption~\ref{ass:additive-model} contains identifiability and smoothness requirements that are standard when studying additive models, see e.g.~\cite{meier2009high}. We restrict the functions $m_j^0$ to be Lipschitz and not smoother, since our procedure produces a piecewise constant decision function with respect to each $j$, that can approximate optimally only Lipschitz functions. For more regular functions, our procedure would lead to suboptimal rates, see also the discussion below the statement of Theorem~\ref{thm:additive}. \begin{theorem} \label{thm:additive} Consider procedure~\eqref{model:general_0} with $d_j = D$, where $D$ is the integer part of $n^{1/3}$, and $I_{j, 1} = [0, \frac{1}{D}]$, $I_{j, k} = (\frac{k-1}{D}, \frac{k}{D}]$ for all $k=2, \ldots, D$ and $j=1, \ldots, p$, and keep the weights $\hat w_{j, k}$ the same as in Theorem~\ref{thm:oracle}. 
Introduce also $\theta_{j, k}^* = \sum_{i=1}^n m_j^0(x_{i, j}) \ind{I_{j,k}}(x_{i, j}) / \sum_{i=1}^n \ind{I_{j,k}}(x_{i, j})$ for $j \in \mathcal{J}_*$ and $\theta_{j, \bullet}^* = \boldsymbol 0_D$ for $j \notin \mathcal{J}_*$. Then, under Assumption~\ref{assumption:RE-XB} with $J^* = J(\theta^*)$ and Assumption~\ref{ass:additive-model}, we have \begin{equation*} \norm{m_{\hat \theta} - m^0}_n^2 \leq \Big(3 L^2 |\mathcal{J}_*| + \frac{5120 M_n \sigma^2 (A + \log(p n^{1/3} M_n))}{\kappa^2(J(\theta^*))} \Big) \frac{|\mathcal{J}_*|}{n^{2/3}}, \end{equation*} where $M_n = \max_{j=1, \ldots, p} \max_{i=1, \ldots, n} |m_j^0(x_{i, j})|.$ \end{theorem} The proof of Theorem~\ref{thm:additive} is given in Section~\ref{sub:proof_of_theorem_thm:additive} below. It is an easy consequence of Theorem~\ref{thm:oracle} under the sparse additive model assumption. It uses Assumption~\ref{assumption:RE-XB} with $J^* = J(\theta^*)$, since $\theta_{j, \bullet}^*$ is the minimizer of the bias for each $j \in \mathcal{J}_*$, see the proof of Theorem~\ref{thm:additive} for details. The rate of convergence is, up to constants and logarithmic terms, of order $|\mathcal{J}_*|^2 n^{-2/3}.$ Recalling that we work under a Lipschitz assumption, namely H\"older smoothness of order $1$, the scaling of this rate w.r.t.\ $n$ is $n^{-2 r / (2 r + 1)}$ with $r=1$, which matches the one-dimensional minimax rate. This rate matches the one obtained in~\cite{BuhVan-11}, see Chapter~8 p.~272, where the rate $|\mathcal{J}_*|^2 n^{-2r / (2r + 1)} = |\mathcal{J}_*|^2 n^{-4 / 5}$ is derived under a $C^2$ smoothness assumption, namely $r = 2$. Hence, Theorem~\ref{thm:additive} shows that, in the particular case of a sparse additive model, our procedure matches in terms of convergence rate the state of the art. Further improvements could consider more general smoothness (beyond Lipschitz) and adaptation with respect to the regularity, at the cost of a more complicated procedure which is beyond the scope of this paper. 
\section{Numerical experiments} \label{section:btv-experiments} In this section, we first illustrate the fact that the binarsity penalization is roughly only two times slower than basic $\ell_1$-penalization, see the timings in Figure~\ref{fig:computing-times-simu}. We then compare binarsity to a large number of baselines, see Table~\ref{table:baselines}, using 9 classical binary classification datasets obtained from the UCI Machine Learning Repository~\cite{Lichman:2013}, see Table~\ref{table:datasets}. \begin{figure} \caption{\small Average computing time in seconds (with the black lines representing $\pm$ the standard deviation) obtained on 100 simulated datasets for training a logistic model with binarsity versus Lasso penalization, both trained on $\boldsymbol{X}^B$.} \label{fig:computing-times-simu} \end{figure} \begin{table}[htp!] \centering \small \begin{tabular}{ccc} \toprule Name & Description & Reference \\ \midrule Lasso & Logistic regression (LR) with $\ell_1$ penalization & \cite{tibshirani1996regression}\\ Group L1 & LR with group $\ell_1$ penalization & \cite{meier2008group} \\ Group TV & LR with group total-variation penalization & \\ SVM & Support vector machine with radial basis kernel & \cite{scholkopf2002learning} \\ GAM & Generalized additive model & \cite{hastie1990generalized} \\ RF & Random forest classifier & \cite{breiman2001random} \\ GB & Gradient boosting & \cite{friedman2002stochastic} \\ \bottomrule \end{tabular} \caption{\small Baselines considered in our experiments. Note that Group L1 and Group TV are considered on binarized features.} \label{table:baselines} \end{table} \begin{table}[htb!] 
\centering \small \begin{tabular}{cccc} \toprule Dataset & \#Samples & \#Features & Reference \\ \midrule Ionosphere & 351 & 34 & \cite{sigillito1989classification}\\ Churn & 3333 & 21 & \cite{Lichman:2013} \\ Default of credit card & 30000 & 24 & \cite{yeh2009comparisons} \\ Adult & 32561 & 14 & \cite{kohavi1996scaling} \\ Bank marketing & 45211 & 17 & \cite{moro2014data} \\ Covertype & 550088 & 10 & \cite{blackard1999comparative} \\ SUSY & 5000000 & 18 & \cite{baldi2014searching} \\ HEPMASS & 10500000 & 28 & \cite{baldi2016parameterized} \\ HIGGS & 11000000 & 24 & \cite{baldi2014searching} \\ \bottomrule \end{tabular} \caption{\small Basic information about the 9 considered datasets.} \label{table:datasets} \end{table} For each method, we randomly split all datasets into a training and a test set (30\% for testing), and all hyper-parameters are tuned on the training set using $V$-fold cross-validation with $V = 10$. For support vector machine with radial basis kernel (SVM), random forests (RF) and gradient boosting (GB), we use the reference implementations from the \texttt{scikit-learn} library~\cite{scikit-learn}, and we use the \texttt{LogisticGAM} procedure from the~\texttt{pygam} library\footnote{\url{https://github.com/dswah/pyGAM}} for the GAM baseline. The binarsity penalization is proposed in the~\texttt{tick} library~\cite{2017arXiv170703003B}; we provide sample code for its use in Figure~\ref{fig:code}. Logistic regression with no penalization or ridge penalization gave similar or lower scores for all considered datasets, and is therefore not reported in our experiments. \begin{figure} \caption{\small Sample Python code for the use of binarsity with logistic regression in the \texttt{tick} library.} \label{fig:code} \end{figure} The binarsity penalization does not require a careful tuning of $d_j$ (number of bins for the one-hot encoding of raw feature $j$). 
Indeed, past a large enough value, increasing $d_j$ even further barely changes the results since the cut-points selected by the penalization do not change anymore. This is illustrated in Figure~\ref{fig:discretization-impact}, where we observe that past $50$ bins, increasing $d_j$ even further does not affect the performance, and only leads to an increase of the training time. In all our experiments, we therefore fix $d_j=50$ for $j = 1, \ldots, p$. \begin{figure*} \caption{\small Impact of the number of bins used in each block ($d_j$) on the classification performance (measured by AUC) and on the training time using the ``Adult'' and ``Default of credit card'' datasets. All $d_j$ are equal for $j = 1, \ldots, p$, and we consider in all cases the best hyper-parameters selected after cross validation. We observe that past $d_j=50$ bins, performance is roughly constant, while training time strongly increases.} \label{fig:discretization-impact} \end{figure*} The results of all our experiments are reported in Figures~\ref{fig:roc-curves} and~\ref{fig:computing-time-comparison}. In Figure~\ref{fig:roc-curves} we compare the performance of binarsity with the baselines on all 9 datasets, using ROC curves and the Area Under the Curve (AUC), while we report computing (training) timings in Figure~\ref{fig:computing-time-comparison}. We observe that binarsity consistently outperforms Lasso, as well as Group L1: this highlights the importance of the TV norm within each group. The AUC of Group TV is always slightly below that of binarsity, and more importantly it involves a much larger training time: convergence is slower for Group TV, since it does not use the linear constraint of binarsity, leading to an ill-conditioned problem (sum of binary features equals 1 in each block). 
Finally, binarsity also outperforms GAM and its performance is comparable in all considered examples to RF and GB, with computational timings that are orders of magnitude faster, see Figure~\ref{fig:computing-time-comparison}. All these experiments illustrate that binarsity achieves an extremely competitive compromise between computational time and performance, compared to all considered baselines. \begin{figure*} \caption{\small Performance comparison using ROC curves and AUC scores (given in parentheses) computed on test sets. The last 4 datasets contain too many examples for SVM (RBF kernel). Binarsity consistently does a better job than Lasso, Group L1, Group TV and GAM. Its performance is comparable to SVM, RF and GB but with computational timings that are orders of magnitude faster, see Figure~\ref{fig:computing-time-comparison}.} \label{fig:roc-curves} \end{figure*} \section{Conclusion} \label{section:discussion} In this paper, we introduced the binarsity penalization for one-hot encodings of continuous features. We illustrated the good statistical properties of binarsity for generalized linear models by proving non-asymptotic oracle inequalities. We conducted extensive comparisons of binarsity with state-of-the-art algorithms for binary classification on several standard datasets. Experimental results illustrate that binarsity significantly outperforms Lasso, Group L1 and Group TV penalizations and also generalized additive models, while being competitive with random forests and boosting. Moreover, it can be trained orders of magnitude faster than boosting and other ensemble methods. Even more importantly, it provides interpretability. Indeed, in addition to the raw feature selection ability of binarsity, the method pinpoints significant cut-points for all continuous features. This leads to a much more precise and deeper understanding of the model than the one provided by Lasso on raw features. 
These results illustrate the fact that binarsity achieves an extremely competitive compromise between computational time and performance, compared to all considered baselines. \begin{figure*} \caption{\small Computing time comparisons (in seconds) between the methods on the considered datasets. Note that the time values are $\log$-scaled. These timings concern the learning task for each model with the best hyper-parameters selected, after the cross-validation procedure. The last 4 datasets contain too many examples for the SVM with RBF kernel to be trained in a reasonable time. Roughly, binarsity is between 2 and 5 times slower than $\ell_1$ penalization on the considered datasets, but is more than 100 times faster than random forests or gradient boosting algorithms on large datasets, such as HIGGS.} \label{fig:computing-time-comparison} \end{figure*} \section{Proofs} In this section we gather the proofs of all the theoretical results proposed in the paper. Throughout this section, we denote by $\partial(\phi)$ the subdifferential mapping of a convex function $\phi.$ \subsection{Proof of Proposition~\ref{proposition:prox-btv-primal}} \label{appendix:proof-of-proposition:prox-btv-primal} Recall that the indicator function $\delta_j$ is given by~\eqref{eq:def_delta_j}. For any fixed $j=1, \ldots, p,$ we prove that $\prox_{\norm{\cdot}_{\TV,\hat w_{j,\bulletlet}} + \delta_j}$ is the composition of $\prox_{\norm{\cdot}_{\TV,\hat w_{j,\bulletlet}}}$ and $\prox_{\delta_j},$ namely \begin{equation*} \prox_{\norm{\cdot}_{\TV,\hat w_{j,\bulletlet}} + \delta_j}(\theta_{j,\bulletlet}) = \prox_{\delta_j}\big(\prox_{\norm{\cdot}_{\TV,\hat w_{j,\bulletlet}}}(\theta_{j,\bulletlet})\big) \end{equation*} for all $\theta_{j,\bulletlet} \in {\mathbb{R}}^{d_j}$. 
Using Theorem~1 in~\cite{Yu-13}, it is sufficient to show that for all $\theta_{j,\bulletlet} \in {\mathbb{R}}^{d_j},$ we have \begin{equation} \label{sub-diff-inclusion} \partial \big(\norm{\theta_{j,\bulletlet}}_{\TV,\hat w_{j,\bulletlet}}\big) \subseteq \partial \big(\norm{\prox_{\delta_j}(\theta_{j,\bulletlet})}_{\TV, \hat w_{j,\bulletlet}}\big). \end{equation} We have $\prox_{\delta_j} (\theta_{j,\bulletlet}) = \Pi_{\text{span}\{n_j\}^\perp}(\theta_{j,\bulletlet}),$ where $\Pi_{\text{span}\{n_j\}^\perp}(\cdot)$ stands for the projection onto the orthogonal of $\text{span}\{n_j\}$. This projection is given by \begin{equation*} \Pi_{\text{span}\{n_j\}^\perp}(\theta_{j,\bulletlet}) = \theta_{j, \bulletlet} - \frac{n_j^\top \theta_{j, \bulletlet}}{\norm{n_j}_2^2} n_j. \end{equation*} Now, let us define the $d_j \times d_j$ matrix $D_j$ by \begin{equation} \label{eq:D_j-matrix-definition} {{D}_{j}}= \begin{bmatrix} 1 & 0 & & 0 \\ -1 & 1 & \\ & \ddots& \ddots \\ 0 & & -1& 1 \end{bmatrix} \in {\mathbb{R}}^{d_j \times d_j}. \end{equation} We then remark that for all $\theta_{j,\bulletlet} \in {\mathbb{R}}^{d_j}$, \begin{equation} \label{eq:weighted-TV-hadamard-expr} \norm{\theta_{j,\bulletlet}}_{\TV, \hat w_{j,\bulletlet}} = \sum_{k=2}^{d_j} \hat w_{j,k} |\theta_{j,k} - \theta_{j, k-1}| = \norm{\hat w_{j,\bulletlet} \odot D_j \theta_{j,\bulletlet}}_{1}. \end{equation} Using subdifferential calculus (see details in the proof of Proposition~\ref{proposition: KKT-conditions} below), one has \begin{equation*} \partial \big(\norm{\theta_{j,\bulletlet}}_{\TV, \hat w_{j,\bulletlet}}\big) = \partial \big(\norm{\hat w_{j,\bulletlet}\odot{{D_{j}}}\theta_{j,\bulletlet}}_{1}\big) = {{D_{j}}}^\top \hat w_{j,\bulletlet}\odot \sgn({{D_j}}\theta_{j,\bulletlet}). 
\end{equation*} Then, the linear constraint $n_j^\top \theta_{j, \bullet} = 0$ entails \begin{equation*} {{D_j}}^\top \hat w_{j,\bulletlet}\odot \sgn({{D_j}} \theta_{j,\bulletlet}) = {{D_j}}^\top \hat w_{j,\bulletlet}\odot \sgn \Big( {{D_j}} \big( \theta_{j,\bulletlet} - \frac{n_j^\top \theta_{j, \bulletlet}}{\norm{n_j}_2^2} n_j \big) \Big), \end{equation*} which leads to~\eqref{sub-diff-inclusion} and concludes the proof of the Proposition. $ \square$ \subsection{Proximal operator of the weighted TV penalization} \label{appendix:proximal-operator-wTV} We recall in Algorithm~\ref{algorithm-weighted-TV-agg} an algorithm provided in~\cite{alaya2014} for the computation of the proximal operator of the weighted total-variation penalization \begin{equation} \label{primal-prox-wTV} \beta =\prox_{\norm{\cdot}_{\TV,\hat w}}(\theta) \in \argmin_{\theta\in {\mathbb{R}}^m} \Big\{ \frac{1}{2} \norm{\beta - \theta}_2^2 + \norm{\theta}_{\TV, \hat w} \Big\}. \end{equation} A quick explanation of this algorithm is as follows. The algorithm runs forwardly through the input vector $(\theta_1, \ldots, \theta_m).$ Using Karush-Kuhn-Tucker (KKT) optimality conditions~\cite{BoyVan-04}, we have that at a location $k,$ the weight $\beta_k$ stays constant whenever $|u_k| < \hat w_{k+1}$, where $u_k$ is a solution to a dual problem associated to the primal problem~\eqref{primal-prox-wTV}. If not possible, it goes back to the last location where a jump can be introduced in $\beta$, validates the current segment until this location, starts a new segment, and continues. 
\LinesNotNumbered \begin{algorithm}[htbp] \SetNlSty{textbf}{}{.} \DontPrintSemicolon \caption{\small Proximal operator of weighted TV penalization} \label{algorithm-weighted-TV-agg} \KwIn{vector $\theta=\big(\theta_1, \ldots, \theta_m\big) ^\top\in {\mathbb{R}}^m$ and weights $\hat w = (\hat w_1,\ldots, \hat w_m) \in {\mathbb{R}}^{m}_{+}.$} \KwOut{vector $\beta = \prox_{\norm{\cdot}_{\TV,\hat w}}(\theta)$} \nl\textbf{Set} {$k=k_0=k_-=k_+ \gets 1$\\$\qquad \beta_{\min} \gets \theta_1- \hat w_2\ ;\ \beta_{\max} \gets \theta_1+ \hat w_2$\\$\qquad u_{\min}\gets \hat w_2\ ;\ u_{\max} \gets -\hat w_2$}\\ \nl \label{step2}{\If{$k=m$} { $\beta_m \gets \beta_{\min}+ u_{\min}$}} \nl \label{step3} \If (\tcc*[f]{negative jump}){$\theta_{k+1} + u_{\min} < \beta_{\min} - \hat w_{k+2}$} {$\beta_{k_0}= \cdots =\beta_{k_-} \gets \beta_{\min}$\\ $k=k_0=k_-=k_+\gets k_- + 1$\\ $ \beta_{\min} \gets \theta_{k} - \hat w_{k +1}+ \hat w_{k }\ ;\ \beta_{\max} \gets \theta_{k} + \hat w_{k +1}+ \hat w_{k}$\\ $u_{\min} \gets \hat w_{k +1}\ ;\ u_{\max} \gets -\hat w_{k+1}$ } \nl \ElseIf (\tcc*[f]{positive jump}){$\theta_{k+1} + u_{\max} > \beta_{\max}+ \hat w_{k+2}$} {$\beta_{k_0}= \ldots =\beta_{k_+} \gets \beta_{\max}$\\ $k=k_0=k_-=k_+\gets k_+ + 1$\\ $\beta_{\min} \gets \theta_{k} - \hat w_{k+1} - \hat w_{k }\ ;\ \beta_{\max}\gets \theta_{k} +\hat w_{k+1}- \hat w_{k}$\\ $ u_{\min} \gets \hat w_{k+1}\ ;\ u_{\max} \gets -\hat w_{k+1}$ } \nl \Else (\tcc*[f]{no jump}){ \textbf{set }$k \gets k+1$\\ $\qquad u_{\min} \gets \theta_{k} + \hat w_{k+1} - \beta_{\min}$\\ $\qquad u_{\max} \gets \theta_{k} - \hat w_{k+1} - \beta_{\max}$ \If{$u_{\min} \geq \hat w_{k+1}$}{ $\beta_{\min}\gets \beta_{\min}+ \frac{u_{\min} - \hat w_{k+1}}{k-k_0+1}$\\ $u_{\min} \gets \hat w_{k+1}$\\ $k_- \gets k$} \If{$u_{\max} \leq -\hat w_{k+1}$}{$\beta_{\max}\gets \beta_{\max}+ \frac{u_{\max} + \hat w_{k+1}}{k-k_0+1}$\\ $u_{\max} \gets -\hat w_{k+1}$\\ $k_+ \gets k$} } \nl \If{$ k< m$ } { go to 
\textbf{\ref{step3}.}} \nl \If{$u_{\min} < 0$ } {$\beta_{k_0}= \cdots =\beta_{k_-} \gets \beta_{\min}$\\ $k=k_0=k_- \gets k_-+1$\\ $\beta_{\min}\gets \theta_k - \hat w_{k+1}+ \hat w_{k}$\\ $u_{\min}\gets \hat w_{k+1}\ ;\ u_{\max} \gets \theta_k+ \hat w_{k} - u_{\max}$ \\go to \textbf{\ref{step2}.}} \nl \ElseIf{$u_{\max} > 0$ } { $ \beta_{k_0}= \cdots =\beta_{k_+}\gets \beta_{\max}$\\ $k=k_0=k_+ \gets k_++1$\\ $\beta_{\max} \gets \theta_k +\hat w_{k+1} - \hat w_{k}$\\ $u_{\max}\gets - \hat w_{k+1}\ ;\ u_{\min}\gets \theta_k- \hat w_{k} - u_{\min}$ \\ go to \textbf{\ref{step2}.}} \nl \Else{$\beta_{k_0}= \cdots =\beta_m\gets \beta_{\min} + \frac{u_{\min}}{k-k_0+1}$} \end{algorithm} \subsection{Proof of Theorem~\ref{thm:oracle}} \label{proof-fast-oracle-ineq-bina} The proof relies on several technical properties that are described below. From now on, we consider ${\boldsymbol{y}} = [y_1 \cdots y_n]^\top$, ${\boldsymbol X} = [x_1 \cdots x_n]^\top$, $m^0({\boldsymbol X}) = [m^0(x_1) \cdots m^0(x_n)]^\top,$ and recalling that $m_\theta(x_i) = \theta^\top x^B_i$ we introduce $m_\theta({\boldsymbol X}) = [m_\theta(x_1) \cdots m_\theta(x_n) ]^\top$ and $b'(m_\theta({\boldsymbol X})) = [b'(m_\theta(x_1)) \cdots b'(m_\theta(x_n)) ]^\top.$ Let us now define the Kullback-Leibler divergence between the true probability density function $f^0$ defined in~\eqref{distribut-glm} and a candidate $f_\theta$ within the generalized linear model $f_\theta(y|x) = \exp\big(ym_\theta(x) - b(m_\theta(x))\big)$ as follows \begin{align*} \mathrm{KL}_n(f^0, f_\theta) &= \frac{1}{n} \sum_{i=1}^n \mathds{E}_{\mathds{P}_{\mathbf y|X}}\Big[\log\frac{f^0(y_i|x_i)}{f_{\theta}(y_i | x_i)}\Big]\\ &:= \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta ({\boldsymbol X})), \end{align*} where $\mathds{P}_{\mathbf y|X}$ is the joint distribution of ${\boldsymbol{y}}$ given ${\boldsymbol X}$. We then have the following Lemma. 
\begin{lemma} \label{lemma-excess-risk-KL} The excess risk satisfies \begin{equation*} R(m_{\theta}) - R(m^0) = \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta ({\boldsymbol X})), \end{equation*} where we recall that $\phi$ is the dispersion parameter of the generalized linear model, see~\eqref{distribut-glm}. \end{lemma} \noindent \textbf{Proof.} It follows from the following simple computation \begin{align*} &\mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta ({\boldsymbol X}))\\ &\qquad= \phi^{-1} \frac 1n \sum_{i=1}^n \mathds{E}_{\mathds{P}_{\mathbf y|X}} \Big[\big( -y_i m_{\theta}(x_i) +b(m_{\theta}(x_i))\big) - \big(-y_i m^0(x_i) +b(m^0(x_i))\big) \Big]\\ &\qquad = \phi^{-1} \big(R(m_{\theta}) - R(m^0)\big) \end{align*} which proves the Lemma. $ \square$ \subsection{Optimality conditions} As explained in the following Proposition, a solution to problem~\eqref{model:general} can be characterized using the Karush-Kuhn-Tucker (KKT) optimality conditions~\cite{BoyVan-04}. \begin{proposition} \label{proposition: KKT-conditions} A vector $\hat \theta = [{\hat\theta}_{1,\bulletlet}^\top \cdots {\hat\theta}_{p,\bulletlet}^\top]^\top \in {\mathbb{R}}^d$ is an optimum of the objective function~\eqref{model:general} if and only if there are subgradients $\hat h = [\hat h _{j, \bulletlet}]_{j=1, \ldots, p} \in \partial \norm{\hat \theta}_{ \TV, \hat w}$ and $\hat g = [\hat g _{j, \bulletlet}]_{j=1, \ldots, p} \in \partial [\delta_j(\hat \theta_{j,\bulletlet})]_{j=1, \ldots, p}$ such that \begin{equation*} \nabla R_n(\hat \theta_{j,\bulletlet})+{\hat h}_{j,\bulletlet} + {\hat g}_{j,\bulletlet} = \mathbf{0}, \end{equation*} where \begin{equation} \label{subdifferential-of-TVw} \left\{ \begin{array}{ll} {\hat h}_{j,\bulletlet} = D_{{j}}^\top \big(\hat w_{j,\bulletlet}\odot\sgn(D_{{j}}{\hat\theta}_{j,\bulletlet})\big) & \mbox{if } j \in J(\hat \theta),\\ {\hat h}_{j,\bulletlet} \in D_{{j}}^\top \big( \hat w_{j,\bulletlet}\odot {[-1,+1]}^{d_j}\big) & \mbox{if } j \in 
J^\complement(\hat \theta), \end{array} \right. \end{equation} and where we recall that $J(\hat \theta)$ is the support set of $\hat \theta$. The subgradient $\hat g_{j, \bulletlet}$ belongs to \begin{equation*} \partial\big(\delta_j(\hat \theta_{j,\bulletlet})\big) = \big\{\mu_{j, \bulletlet}\in {\mathbb{R}}^{d_j}: \mu_{j, \bulletlet}^\top \theta_{j, \bulletlet} \leq \mu_{j, \bulletlet}^\top \hat \theta_{j,\bulletlet} \; \text{ for all } \; \theta_{j, \bulletlet} \; \text{ such that } \; n_j^\top \theta_{j,\bulletlet} = 0 \big\}. \end{equation*} For the generalized linear model, we have \begin{equation} \label{KKT-logistic} \normalfont \frac 1n \big( \boldsymbol{X}^{{B}}_{\bulletlet,j}\big)^\top\big(b'(m_{\hat\theta}({\boldsymbol X}))- {\boldsymbol{y}}\big) + {\hat h}_{j,\bulletlet} + \hat g_{j,\bulletlet} + \hat f_{j,\bulletlet}= \mathbf{0}, \end{equation} where $\hat f = [{\hat f }_{j,\bulletlet}]_{j=1, \ldots, p}$ belongs to the normal cone of the ball $B_d(\rho).$ \end{proposition} \noindent \textbf{Proof.} The function $\theta \mapsto R_n(\theta)$ is differentiable, so the subdifferential of $R_n(\cdot) + \bina(\cdot)$ at a point $\theta = (\theta_{j,\bulletlet})_{j=1, \ldots, p} \in {\mathbb{R}}^d$ is given by \begin{equation*} \partial (R_n(\theta) + \bina(\theta)) = \nabla R_n(\theta) + \partial(\bina(\theta)), \end{equation*} where $\nabla R_n(\theta) = \Big[\frac{\partial R_n(\theta)}{\partial \theta_{1,\bulletlet}} \cdots \frac{\partial R_n(\theta)}{\partial \theta_{p,\bulletlet}}\Big]^\top$ and \begin{equation*} \partial \bina(\theta) = \Big[ \partial \norm{\theta_{1,\bulletlet}}_{ \TV, \hat w_{1,\bulletlet}} + \partial \delta_j(\theta_{1,\bulletlet}) \; \cdots \; \partial \norm{\theta_{p,\bulletlet}}_{ \TV, \hat w_{p,\bulletlet}} + \partial \delta_j(\theta_{p,\bulletlet}) \Big]^\top. 
\end{equation*} We have $\norm{\theta_{j,\bulletlet}}_{ \TV, \hat w_{j,\bulletlet}} = \norm{ \hat w_{j,\bulletlet} \odot D_j\theta_{j,\bulletlet}}_1$ for all $j =1, \ldots,p$. Then, by applying some properties of the subdifferential calculus, we get \begin{equation} \label{subdifferential-btv} \partial \norm{\theta_{j,\bulletlet}}_{ \TV, \hat w_{j,\bulletlet}} = \begin{cases} D_j^\top \sgn(\hat w_{j,\bulletlet} \odot D_j\theta_{j,\bulletlet}) & \text{ if } D_j\theta \neq \mathbf{0}, \\ D_j^\top\big(\hat w_{j,\bulletlet}\odot v_j) & \text{ otherwise}, \end{cases} \end{equation} where $v_j \in [-1,+1]^{d_j}$ for all $j=1, \ldots, p$. For generalized linear models, we rewrite \begin{equation} \label{glm-model-in-proof} \hat \theta \in \argmin_{\theta \in {\mathbb{R}}^d } \big\{R_n(\theta) + \bina(\theta) + \delta_{B_d(\rho)}(\theta)\big\}, \end{equation} where $\delta_{B_d(\rho)}$ is the indicator function of $B_d(\rho)$. Now, $\hat \theta = [{\hat\theta}_{1,\bulletlet}^\top \cdots {\hat\theta}_{p,\bulletlet}^\top]^\top$ is an optimum of~\eqref{glm-model-in-proof} if and only if $\mathbf{0} \in \nabla R_n(m_{\hat \theta}) + \partial \norm{\hat \theta}_{\TV, \hat w} + \partial \delta_{B_d(\rho)}(\hat \theta_{})$. Recall that the subdifferential of $\delta_{B_d(\rho)}(\cdot)$ is the normal cone of $B_d(\rho)$, namely \begin{equation} \label{normal-cone} \partial \delta_{B_d(\rho)}(\hat \theta) = \big\{\eta \in {\mathbb{R}}^d : \eta^\top \theta \leq \eta^\top \hat \theta \text{ for all } \theta \in B_d(\rho) \}. \end{equation} One has \begin{equation} \label{nabla-logistic} \frac{\partial R_n(\theta)}{\partial \theta_{j,\bulletlet}} = \frac{1}{n} (\boldsymbol{X}^{{B}}_{\bulletlet,j})^\top(b'(m_{\hat\theta}({\boldsymbol X})) - {\boldsymbol{y}}), \end{equation} so that together with~\eqref{nabla-logistic} and~\eqref{normal-cone} we obtain~\eqref{KKT-logistic}, which concludes the proof of Proposition~\ref{proposition: KKT-conditions}. 
$ \square$ \subsection{Compatibility conditions} \label{sub:compatibility_conditions} Let us define the block diagonal matrix ${{\bf{D}}} = \diag({D}_{{1}}, \ldots, {D}_{{p}})$ with $D_j$ defined in~\eqref{eq:D_j-matrix-definition}. We denote by ${{T}_{j}}$ the inverse of ${D}_{{j}}$, which is the $d_j \times d_j$ lower triangular matrix with entries $({{T}_{j}})_{r,s} = 0$ if $r < s$ and $({{T}_{j}})_{r,s} = 1$ otherwise. We set $ {{\bf{T}}} =\diag({T}_{{1}}, \ldots, {T}_{{p}})$, so that one has ${\bf{D}} ^{-1} = {\bf{T}}$. In order to prove Theorem~\ref{thm:oracle}, we need the following results which give a compatibility property~\cite{vandegeer2008,vandegeer2013,DalHeiLeb14} for the matrix ${\bf{T}}$, see Lemma~\ref{lemma-compatibility-Tw} below, and for the matrix $\boldsymbol{X}^{{B}}{\bf{T}}$, see Lemma~\ref{lemma:CC-XBTw} below. For any concatenation of subsets $ K=[K_1, \ldots, K_p],$ we set \begin{equation} \label{defn:concatenation} K_j = \{\tau_j^1, \ldots, \tau_j^{b_j}\} \subset \{1, \ldots, d_j\} \end{equation} for all $j=1, \ldots, p$ with the convention that $\tau_j^0 = 0$ and $\tau_j^{{b_j} +1} = d_j +1$. \begin{lemma} \label{lemma-compatibility-Tw} Let $ \gamma\in {\mathbb{R}}^d_+$ be given and $K = [K_1, \ldots, K_p]$ with $K_j$ given by~\eqref{defn:concatenation} for all $j=1, \ldots, p$. 
Then, for every $u \in {\mathbb{R}}^d \backslash \{\mathbf{0}\}$, we have \begin{equation*} \frac{\norm{{\bf{T}} u}_2}{|\norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1|} \geq \kappa_{{\bf{T}},\gamma}(K), \end{equation*} where \begin{equation*} \kappa_{{\bf{T}},\gamma}(K) = \bigg\{ 32 \sum_{j=1}^p\sum_{k=1}^{d_j} | \gamma_{j,k+1} -\gamma_{j,k}|^2+ 2|K_j|\norm{\gamma_{j,\bulletlet}}_\infty^2\Delta_{\min, K_j}^{-1}\bigg\}^{-1/2}, \end{equation*} and $\Delta_{\min, K_j} = \min_{r=1, \ldots, b_j}| \tau_j^{r} - \tau_j^{r -1}|.$ \end{lemma} \noindent \textbf{Proof.} Using Proposition~3 in~\cite{DalHeiLeb14}, we have \begin{equation*} \begin{split} &\norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1\\ &\qquad \qquad = \sum_{j=1}^p\big(\norm{u_{K_j}\odot \gamma_{K_j}}_1 - \norm{u_{{K_j}^\complement} \odot \gamma_{{K_j}^\complement}}_1\big) \\ &\qquad \qquad \leq \sum_{j=1}^p 4\norm{T_{{j}} u_{j,\bulletlet}}_2 \bigg\{2\sum_{k=1}^{d_j} | \gamma_{j,k+1} -\gamma_{j,k}|^2 + 2(b_j +1) \norm{\gamma_{j,\bulletlet}}_\infty^2\Delta_{\min, K_j}^{-1}\bigg\}^{1/2}. \end{split} \end{equation*} Using H\"older's inequality for the right hand side of the last inequality gives \begin{equation*} \begin{split} &\norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1\\ &\qquad \qquad \leq \norm{{\bf{T}} u}_2 \bigg\{ 32\sum_{j=1}^p \sum_{k=1}^{d_j} | \gamma_{j,k+1} -\gamma_{j,k} |^2 + 2|K_j| \norm{\gamma_{j,\bulletlet}}_\infty^2\Delta_{\min, K_j}^{-1}\bigg\}^{1/2}, \end{split} \end{equation*} which completes the proof of the Lemma. $ \square$ Combining Assumption~\ref{assumption:RE-XB} and Lemma~\ref{lemma-compatibility-Tw} allows us to establish a compatibility condition satisfied by $\boldsymbol{X}^{{B}} {\bf{T}}$. \begin{lemma} \label{lemma:CC-XBTw} Let $\gamma \in {\mathbb{R}}^d_+$ be given and $K = [K_1, \ldots, K_p]$ with $K_j$ given by~\eqref{defn:concatenation} for $j=1, \ldots, p$. 
Then, if Assumption~\ref{assumption:RE-XB} holds, one has \begin{equation} \label{ineq:compatibility-XT} \inf\limits_{\substack{u \in \mathscr{C}_{1,\hat w}(K)\backslash\{\mathbf{0}\}}}\Big\{\frac{\norm{\boldsymbol{X}^{{B}} {\bf{T}} u}_2}{\sqrt{n} \;|\; \norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1 |} \Big\}\geq \kappa_{{\bf{T}},\gamma}(K)\kappa(K), \end{equation} where \begin{equation} \label{C-1} \mathscr{C}_{1, \hat w}(K) \stackrel{}{=} \Big\{u \in {\mathbb{R}}^d: \sum_{j=1}^p \norm{(u_{j, \bulletlet})_{{K_j}^\complement}}_{1,\hat w_{j,\bulletlet}} \leq 2\sum_{j=1}^p \norm{(u_{j, \bulletlet})_{K_j}}_{1,\hat w_{j,\bulletlet}}\Big\}. \end{equation} \end{lemma} \noindent \textbf{Proof.} Lemma~\ref{lemma-compatibility-Tw} gives \begin{equation*} \frac{\norm{\boldsymbol{X}^{{B}} {\bf{T}}u}_2}{\sqrt{n} |\norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1|} \geq \kappa_{{\bf{T}},\gamma}(K)\frac{\norm{\boldsymbol{X}^{{B}} {\bf{T}}u}_2}{\sqrt{n} \norm{{\bf{T}}u}_2}. \end{equation*} Now, we note that if $u \in \mathscr{C}_{1, \hat w}(K)$, then ${\bf{T}} u \in \mathscr{C}_{ \TV, \hat w}(K).$ Hence, Assumption~\ref{assumption:RE-XB} entails \begin{equation*} \frac{\norm{\boldsymbol{X}^{{B}} {\bf{T}} u}_2}{\sqrt{n} |\norm{u_K\odot \gamma_K}_1 - \norm{u_{K^\complement} \odot \gamma_{K^\complement}}_1|} \geq \kappa_{{\bf{T}},\gamma}(K) \kappa(K), \end{equation*} which concludes the proof of the Lemma. $ \square$ \subsection{Connection between the empirical Kullback-Leibler divergence and the empirical squared norm} \label{subsection-connection-betwen-KL-sqnorm} The next Lemma is from~\cite{bach2010selfconcordance} (see Lemma~1 herein). 
\begin{lemma} \label{lemma:self-concordance} Let $\varphi:{\mathbb{R}} \rightarrow {\mathbb{R}}$ be a three times differentiable convex function such that for all $t\in{\mathbb{R}},$ $|\varphi'''(t)| \leq M |\varphi''(t)|$ for some $M \geq 0.$ Then, for all $t \geq 0$, one has \begin{equation*} \frac{\varphi''(0)}{M^2}\psi(-Mt) \leq \varphi(t) - \varphi(0) - \varphi'(0)t \leq \frac{\varphi''(0)}{M^2}\psi(Mt), \end{equation*} with $\psi(u) = e^u - u - 1$. \end{lemma} This Lemma entails the following in our setting. \begin{lemma} \label{lemma-connection-L2-KL} Under Assumption~\ref{ass:glm}, one has \begin{align*} \frac{L_n \psi(-2(C_n + \rho))}{4 \phi (C_n + \rho)^2} \frac{1}{n}\norm{m^0({\boldsymbol X}) - m_\theta({\boldsymbol X})}_2^2 &\leq \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})), \\ \frac{U_n \psi(2(C_n + \rho))}{4 \phi (C_n + \rho)^2} \frac{1}{n}\norm{m^0({\boldsymbol X}) - m_\theta({\boldsymbol X})}_2^2 &\geq \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})), \end{align*} for all $\theta \in B_d(\rho)$. \end{lemma} \noindent \textbf{Proof.} Let us consider the function $G_n:{\mathbb{R}} \rightarrow {\mathbb{R}}$ defined by $G_n(t)= R_n(m^0 + t m_\eta)$, with $m_\eta$ to be defined later, which writes \begin{equation*} G_n(t) = \frac 1n \sum_{i=1}^n b(m^0(x_i) + tm_\eta(x_i)) - \frac 1n\sum_{i=1}^n y_i (m^0(x_i) + tm_\eta(x_i)). \end{equation*} We have \begin{equation*} \begin{split} &G'_n(t)= \frac{1}{n} \sum_{i=1}^n m_\eta(x_i) b'(m^0(x_i) + tm_\eta(x_i)) - \frac{1}{n} \sum_{i=1}^n y_i m_\eta(x_i),\\ &G''_n(t) = \frac{1}{n} \sum_{i=1}^n m^2_\eta(x_i) b''(m^0(x_i) + tm_\eta(x_i)),\\ \text{and} \quad &G'''_n(t) = \frac{1}{n} \sum_{i=1}^n m^3_\eta(x_i) b'''(m^0(x_i) + tm_\eta(x_i)). \end{split} \end{equation*} Using Assumption~\ref{ass:glm}, we have $|G'''_n(t)| \leq C_b \norm{m_\eta}_{\infty}|G''_n(t)|$ where $\norm{m_\eta}_{\infty} := \max\limits_{i=1, \ldots, n}|m_\eta(x_i)|$. 
Lemma~\ref{lemma:self-concordance} with $M = C_b \norm{m_\eta}_{\infty}$ gives \begin{equation*} G''_n(0)\frac{ \psi(- C_b \norm{m_\eta}_{\infty}t)}{C_b^2\norm{m_\eta}_{\infty}^2}\leq G_n(t) - G_n(0) - tG'_n(0) \leq G''_n(0) \frac{ \psi(C_b\norm{m_\eta}_{\infty}t)}{C_b^2 \norm{m_\eta}_{\infty}^2} \end{equation*} for all $t\geq 0$, and taking $t=1$ leads to \begin{equation*} G''_n(0)\frac{ \psi(-C_b \norm{m_\eta}_{\infty})}{C_b^2 \norm{m_\eta}_{\infty}^2} \leq R_n(m^0 + m_\eta) - R_n(m^0) - G'_n(0) \leq G''_n(0) \frac{ \psi(C_b \norm{m_\eta}_{\infty})}{C_b^2 \norm{m_\eta}_{\infty}^2}. \end{equation*} An easy computation gives \begin{align*} - G'_n(0) = \frac{1}{n} \sum_{i=1}^n m_\eta(x_i) \big(y_i - b'(m^0(x_i)) \big) \; \text{ and } \; G''_n(0) = \frac{1}{n} \sum_{i=1}^n m^2_\eta(x_i)b''(m^0(x_i)), \end{align*} and since obviously $\mathds{E}_{\mathds{P}_{\mathbf y|X}}[G_n'(0)] = 0$, we obtain \begin{align*} &G''_n(0)\frac{ \psi(-C_b \norm{m_\eta}_{\infty})}{C_b^2 \norm{m_\eta}_{\infty}^2} \leq R(m^0 + m_\eta) - R(m^0) \leq G''_n(0)\frac{ \psi(C_b \norm{m_\eta}_{\infty})}{C_b^2 \norm{m_\eta}_{\infty}^2}. \end{align*} Now, choosing $m_{\eta} = m_\theta - m^0$ and combining Assumption~\ref{ass:glm} with Equation~\eqref{lemma:control_inner_ball} gives \begin{equation*} C_b \norm{m_\eta}_{\infty} \leq C_b \max_{i=1, \ldots, n} (|\inr{x_i^B, \theta}| + |m^0(x_i)| ) \leq C_b (\rho + C_n). 
\end{equation*} Hence, since $x \mapsto \psi(x) / x^2$ is an increasing function and $x \mapsto \psi(-x) / x^2$ is a decreasing function on ${\mathbb{R}}^+$, we end up with \begin{align*} G''_n(0)\frac{ \psi(-C_b (C_n + \rho))}{C_b^2 (C_n + \rho)^2} &\leq R(m_{\theta}) - R(m^0) = \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})),\\ G''_n(0)\frac{ \psi(C_b (C_n + \rho))}{C_b^2 (C_n + \rho)^2} &\geq R(m_{\theta}) - R(m^0)= \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})), \end{align*} and since $G''_n(0) = n^{-1} \sum_{i=1}^n (m_\theta(x_i) - m^0(x_i) )^2 b''(m^0(x_i))$, we obtain \begin{align*} \frac{L_n \psi(-C_b (C_n + \rho))}{C_b^2 \phi (C_n + \rho)^2} \frac{1}{n} \norm{m^0({\boldsymbol X}) - m_\theta({\boldsymbol X})}_2^2 &\leq \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})), \\ \frac{U_n\psi(C_b (C_n + \rho))}{C_b^2 \phi (C_n + \rho)^2} \frac{1}{n} \norm{m^0({\boldsymbol X}) - m_\theta({\boldsymbol X})}_2^2 &\geq \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})), \end{align*} which concludes the proof of the Lemma. $ \square$ \subsection{Proof of Theorem~\ref{thm:oracle}} \label{appendix:proof-theorem-oracle-logistic-V2} Let us recall that \begin{equation*} R_n(m_\theta) = \frac{1}{n} \sum_{i=1}^n b(m_\theta(x_i))- \frac{1}{n} \sum_{i=1}^n y_i m_{\theta}(x_i) \end{equation*} for all $\theta \in {\mathbb{R}}^d$ and that \begin{equation} \label{proof-def-estimator-logistic} \hat \theta \in \argmin_{\theta \in B_d(\rho)} \big\{R_n(m_\theta) + \bina(\theta)\big\}. 
\end{equation} Proposition~\ref{proposition: KKT-conditions} above entails that there is $\hat h = [\hat h _{j, \bulletlet}]_{j=1, \ldots, p} \in \partial \norm{\hat \theta}_{\TV, \hat w}$, $\hat g = [\hat g _{j, \bulletlet}]_{j=1, \cdots, p} \in [\partial \delta_j(\hat \theta_{j,\bulletlet})]_{j=1, \ldots, p}$ and $\hat f = [\hat f_{j,\bulletlet}]_{j=1, \ldots, p} \in \partial \delta_{B_d(\rho)}(\hat \theta)$ such that \begin{equation*} \Big\langle \frac{1}{n} (\boldsymbol{X}^{{B}})^\top (b'(m_{\hat\theta}({\boldsymbol X})) - {\boldsymbol{y}}) + \hat h + \hat g + \hat f, \hat \theta - \theta \Big\rangle = 0 \end{equation*} for all $\theta \in {\mathbb{R}}^d$. This can be rewritten as \begin{align*} &\frac 1n \inr{b'(m_{\hat\theta}({\boldsymbol X}))- b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})}\\ &\qquad \qquad - \frac 1n \inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})} + \inr{\hat h + \hat g + \hat f, \hat \theta - \theta} = 0. \end{align*} For any $\theta \in B_d(\rho)$ such that $n_j^\top \theta_{j, \bullet} = 0$ for all $j$ and $h \in \partial \norm{\theta}_{ \TV, \hat w}$, the monotony of the subdifferential mapping implies $\inr{\hat h, \theta - \hat \theta} \leq \inr{h, \theta - \hat \theta},$ $\inr{\hat g, \theta - \hat \theta} \leq 0,$ and $\inr{\hat f, \theta - \hat \theta} \leq 0$, so that \begin{equation} \label{ineq:subdiff-monocity-logistic} \begin{split} \frac 1n \inr{b'(m_{\hat\theta}({\boldsymbol X})) &- b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})} \\ &\leq \frac 1n \inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})} - \inr{h,\hat \theta - \theta}. 
\end{split} \end{equation} Now, consider the function $H_n:{\mathbb{R}} \rightarrow {\mathbb{R}}$ defined by \begin{equation*} H_n(t) = \frac 1n \sum_{i=1}^n b(m_{\hat \theta + t\eta}(x_i)) - \frac{1}{n} \sum_{i=1}^nb'(m^0(x_i))m_{\hat \theta + t\eta}(x_i), \end{equation*} where $\eta$ will be defined later. We use again the same arguments as in the proof of Lemma~\ref{lemma-connection-L2-KL}. We differentiate $H_n$ three times with respect to $t$, so that \begin{equation*} \begin{split} &H'_n(t)= \frac{1}{n} \sum_{i=1}^n m_{\eta}(x_i) b'(m_{\hat \theta + t\eta}(x_i))-\frac{1}{n} \sum_{i=1}^n b'(m^0(x_i))m_{\eta}(x_i),\\ &H''_n(t) = \frac{1}{n} \sum_{i=1}^n m^2_{\eta}(x_i) b''(m_{\hat \theta + t\eta}(x_i)),\\ \text{and} \quad &H'''_n(t) = \frac{1}{n} \sum_{i=1}^n m^3_{\eta}(x_i) b'''(m_{\hat \theta + t\eta}(x_i)), \end{split} \end{equation*} and in the same way as in the proof of Lemma~\ref{lemma-connection-L2-KL}, we have $|H'''_n(t)| \leq C_b (C_n + \rho) |H''_n(t)|$, and Lemma~\ref{lemma:self-concordance} entails \begin{equation*} H''_n(0)\frac{ \psi(-C_b t (C_n + \rho))}{C_b^2 (C_n + \rho)^2}\leq H_n(t) - H_n(0) - tH'_n(0) \leq H''_n(0) \frac{ \psi(C_b t (C_n + \rho))}{C_b^2 (C_n + \rho)^2}, \end{equation*} for all $t \geq 0$. Taking $t=1$ and $\eta = \theta - \hat \theta$ implies \begin{align*} &H_n(1)= \frac 1n \sum_{i=1}^n b(m_{\theta}(x_i)) - \frac{1}{n} \sum_{i=1}^nb'(m^0(x_i))m_{\theta}(x_i) = R(m_{\theta}),\\ \text{and} \quad & H_n(0) = \frac 1n \sum_{i=1}^n b(m_{\hat \theta}(x_i)) - \frac{1}{n} \sum_{i=1}^nb'(m^0(x_i))m_{\hat\theta}(x_i) = R(m_{\hat \theta}). 
\end{align*} Moreover, we have \begin{align*} H'_n(0) &= \frac{1}{n} \sum_{i=1}^n \inr{x_i^B, \theta - \hat \theta} b'(m_{\hat \theta}(x_i))-\frac{1}{n} \sum_{i=1}^n b'(m^0(x_i))\inr{x_i^B, \hat \theta - \theta}\\ &= \frac 1n\inr{b'(m_{\hat \theta}({\boldsymbol X})) - b'(m^0({\boldsymbol X})),\boldsymbol{X}^{{B}}(\theta - \hat \theta)},\\ \text{and} \quad H''_n(0) &= \frac{1}{n} \sum_{i=1}^n \inr{x_i^B, \hat \theta - \theta}^2 b''(m_{\hat \theta }(x_i)). \end{align*} Then, we deduce that \begin{equation*} \begin{split} H''_n(0)\frac{ \psi(-C_b (C_n + \rho))}{C_b^2 (C_n + \rho)^2} & \leq R(m_\theta) - R(m_{\hat \theta}) - \frac 1n\inr{b'(m_{\hat \theta}({\boldsymbol X})) - b'(m^0({\boldsymbol X})),\boldsymbol{X}^{{B}}(\theta - \hat \theta)} \\ &=\phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})) - \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))\\ &\qquad + \frac 1n\inr{b'(m_{\hat \theta}({\boldsymbol X})) - b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})}. \end{split} \end{equation*} Then, with Equation~\eqref{ineq:subdiff-monocity-logistic}, one has \begin{equation} \label{least-squares-in-proof} \begin{split} & \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))+ H''_n(0)\frac{ \psi(-C_b (C_n + \rho))}{C_b^2 (C_n + \rho)^2}\\ &\qquad \leq \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X}))+ \frac 1n \inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})),m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})} - \inr{h, \hat \theta - \theta}. 
\end{split} \end{equation} As $H''_n(0) \geq 0$, it implies that \begin{align} \label{ineq:begining-oracle} \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X})) &\leq \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X}))\nonumber\\ & \qquad+ \frac{1}{n}\inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})), m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X}) } - \inr{h,\hat \theta - \theta}. \end{align} If $\frac{1}{n}\inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})), \boldsymbol{X}^{{B}}(\hat\theta - \theta)} - \inr{h, \hat \theta - \theta} < 0,$ it follows that \begin{equation*} \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))\leq \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X})), \end{equation*} then Theorem~\ref{thm:oracle} holds. From now on, let us assume that \begin{equation} \label{inequality-majoration-of-noise-2} \frac{1}{n}\inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})), m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})} - \inr{h, \hat \theta - \theta} \geq 0. \end{equation} We first derive a bound on $\frac {1}{n} \inr{{\boldsymbol{y}} - b'(m^0({\boldsymbol X})), m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X})}.$ Recall that ${\bf{D}} ^{-1} = {\bf{T}}$ (see beginning of Section~\ref{sub:compatibility_conditions}). 
We focus on finding a bound for $\frac {1}{n} \inr{ (\boldsymbol{X}^{{B}}{\bf{T}})^\top({\boldsymbol{y}} - b'(m^0({\boldsymbol X}))), {\bf{D}}(\hat \theta - \theta)}.$ On the one hand, one has \begin{align*} \frac{1}{n} \inr{(\boldsymbol{X}^{{B}})^\top ({\boldsymbol{y}} &- b'(m^0({\boldsymbol X}))), \hat \theta - \theta} \\ &= \frac{1}{n} \inr{(\boldsymbol{X}^{{B}} {\bf{T}})^\top ({\boldsymbol{y}} - b'(m^0({\boldsymbol X}))), {\bf{D}}(\hat \theta - \theta)} \\ &\leq \small{\frac{1}{n} \sum_{j=1}^p \sum_{k=1}^{d_j} |((\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j})_{\bulletlet, k})^\top ({\boldsymbol{y}} - b'(m^0({\boldsymbol X}))) | \; |(D_{j}({\hat \theta}_{j,\bulletlet} - \theta_{j,\bulletlet}))_k |} \end{align*} where $(\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j})_{\bulletlet,k} = [ (\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j})_{1,k} \cdots (\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j})_{n,k}]^\top \in {\mathbb{R}}^n$ is the $k$-th column of the matrix $\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j}.$ Let us consider the event \begin{equation*} \mathcal{E}_n = \bigcap_{j=1}^p \bigcap_{k =2}^{d_j}\mathcal{E}_{n,j,k}, \textrm{ where } \mathcal{E}_{n,j,k} = \Big\{ \frac {1}{n} | (\boldsymbol{X}^{{B}}_{\bulletlet,j}T_j)_{\bulletlet,k}^\top ({\boldsymbol{y}} - b'(m^0({\boldsymbol X}))) | \leq \hat w_{j,k} \Big\}, \end{equation*} so that, on $\mathcal{E}_n$, we have \begin{align} \label{ineq1-proof-thm2} \frac{1}{n} \inr{(\boldsymbol{X}^{{B}})^\top ({\boldsymbol{y}} - b'(m^0({\boldsymbol X}))), \hat \theta - \theta} &\leq \sum_{j=1}^p \sum_{k=1}^{d_j} \hat w_{j,k} |(D_{j}({\hat \theta}_{j,\bulletlet} - \theta_{j,\bulletlet}))_k| \nonumber \\ & \leq \sum_{j=1}^p \norm{ \hat w_{j,\bulletlet} \odot D_{j}({\hat \theta}_{j,\bulletlet} - \theta_{j,\bulletlet})}_1. 
\end{align} On the other hand, from the definition of the subgradient $[h_{j, \bulletlet}]_{j=1, \ldots, p} \in \partial \norm{\theta}_{\TV, \hat w}$ (see Equation~\eqref{subdifferential-of-TVw}), one can choose $h$ such that \begin{equation*} h_{j, k} = (D_{j}^\top (\hat w_{j,\bulletlet}\odot\sgn(D_{{j}}{\theta}_{j,\bulletlet})))_k \end{equation*} for all $k \in J_j(\theta)$ and \begin{equation*} h_{j, k} = (D_{j}^\top (\hat w_{j,\bulletlet}\odot\sgn ( D_{j}\hat \theta_{j, \bulletlet} ) ))_k = (D_{j}^\top (\hat w_{j,\bulletlet}\odot\sgn ( D_{j}(\hat \theta_{j, \bulletlet} - \theta_{j,\bulletlet}) ) ))_k \end{equation*} for all $k \in J_j^\complement(\theta)$. Using the triangle inequality and the fact that $\sgn(x)^\top x= \norm{x}_1$, we obtain \begin{align} \label{ineq2-proof-thm1} -\inr{h, \hat \theta - \theta} &\leq \sum_{j=1}^p \norm{ (\hat w_{j,\bulletlet})_{J_j(\theta)} \odot D_{{j}}(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J_j(\theta)}}_1 \nonumber\\ & \quad - \sum_{j=1}^p \norm{ (\hat w_{j,\bulletlet})_{J^\complement_j(\theta)} \odot D_{{j}}(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J^\complement_j(\theta)}}_1 \nonumber\\ &\leq \sum_{j=1}^p \norm{(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J_j(\theta)}}_{\TV, \hat w_{j,\bulletlet}} - \sum_{j=1}^p \norm{ (\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J^\complement_j(\theta)}}_{\TV, \hat w_{j,\bulletlet}}. \end{align} Combining inequalities~\eqref{ineq1-proof-thm2} and~\eqref{ineq2-proof-thm1} with~\eqref{inequality-majoration-of-noise-2}, we get \begin{equation*} \sum_{j=1}^p \norm{ (\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J^\complement_j(\theta)}}_{ \TV, \hat w_{j,\bulletlet}} \leq 2\sum_{j=1}^p \norm{(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{ J_j(\theta)}}_{ \TV, \hat w_{j,\bulletlet}} \end{equation*} on $\mathcal{E}_n$. 
Hence \begin{equation*} \sum_{j=1}^p \norm{(\hat w_{j,\bulletlet})_{J^\complement_j(\theta)} \odot D_{{j}}(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J^\complement_j(\theta)}}_1 \leq 2\sum_{j=1}^p \norm{(\hat w_{j,\bulletlet})_{ J_j(\theta)} \odot D_{{j}}(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J_j(\theta)}}_1. \end{equation*} This means that \begin{equation} \label{Delta-hat-theta-in-Cone-square} \hat \theta - \theta \in \mathscr{C}_{\TV, \hat w}(J(\theta)) \textrm{ and } {\bf{D}}(\hat \theta - \theta) \in \mathscr{C}_{1,\hat w}(J(\theta)), \end{equation} see~\eqref{C-AGG} and~\eqref{C-1}. Now, going back to~\eqref{ineq:begining-oracle} and taking into account~\eqref{Delta-hat-theta-in-Cone-square}, the compatibility of $\boldsymbol{X}^{{B}} {\bf{T}}$ given in Equation~\eqref{ineq:compatibility-XT} provides the following on the event $\mathcal{E}_n$: \begin{equation*} \begin{split} \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X})) &\leq \phi \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})) \\ &\quad + 2\sum_{j=1}^p \norm{ (\hat w_{j,\bulletlet})_{J_j(\theta)} \odot D_{j}(\hat \theta_{j, \bulletlet} -\theta_{j, \bulletlet})_{J_j(\theta)}}_1. \end{split} \end{equation*} Then \begin{equation} \label{for-case-least-squares} \begin{split} \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))&\leq \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})) + \frac{\norm{m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X}) }_2}{ \sqrt{n}\, \phi\, \kappa_{\bf{T},\hat \gamma}(J(\theta)) \kappa(J(\theta))}, \end{split} \end{equation} where $\hat \gamma = (\hat \gamma_{1,\bulletlet}^\top, \ldots, \hat \gamma_{p,\bulletlet}^\top)^\top$ is such that \begin{equation*} \hat \gamma_{j,k} = \left\{ \begin{array}{ll} 2\hat {w}_{j,k} & \mbox{if } k \in J_j(\theta),\\ 0 & \mbox{if } k \in J_j^\complement(\theta), \end{array} \right. 
\end{equation*} for all $j=1, \ldots, p$ and \begin{equation*} \kappa_{{\bf{T}}, \hat \gamma}( J(\theta)) =\bigg\{ 32 \sum_{j=1}^p\sum_{k=1}^{d_j} |\hat \gamma_{j,k+1} - \hat \gamma_{j,k}|^2 + 2|J_j(\theta)| \norm{\hat \gamma_{j,\bulletlet}}_\infty^2\Delta_{\min, J_j(\theta)}^{-1}\bigg\}^{-1/2}. \end{equation*} Now, we find an upper bound for \begin{equation*} \frac{1}{ \kappa^2_{{\bf{T}},\hat\gamma}(J(\theta))} = 32\sum_{j=1}^p\sum_{k=1}^{d_j} |\hat \gamma_{j,k+1} - \hat \gamma_{j,k}|^2 + 2|J_j(\theta)| \norm{\hat \gamma_{j,\bulletlet}}_\infty^2\Delta_{\min, J_j(\theta)}^{-1}. \end{equation*} Note that $\norm{\hat \gamma_{j,\bulletlet}}_\infty \leq 2\norm{\hat w_{j,\bulletlet}}_\infty$. Let us write $J_j(\theta) =\big\{k_j^1, \ldots, k_j^{|J_j(\theta)|}\big\}$ and set $B_r =[\![k_j^{r-1}, k_j^{r}[\![ \; = \{k_j^{r-1}, k_j^{r-1} + 1, \ldots, k_j^{r} -1\}$ for $r = 1, \ldots, |J_j(\theta)|+1$ with the convention that $k_j^0=0$ and $k_j^{|J_j(\theta)|+1} = d_j+1$. Then \begin{equation*} \begin{split} \sum_{k=1}^{d_j} |\hat \gamma_{j,k+1} -\hat \gamma_{j,k}|^2 &= \sum_{r=1}^{|J_j(\theta)|+1} \sum_{k \in B_r} |\hat\gamma_{j,k+1} -\hat\gamma_{j,k}|^2\\ & =\sum_{r=1}^{|J_j(\theta)|+1} |\hat \gamma_{j,k_j^{r -1}+1} - \hat\gamma_{j,k_j^{r -1}}|^2 + |\hat \gamma_{j,k_j^{r}} - \hat \gamma_{j,k_j^{r }-1}|^2 \\ & =\sum_{r=1}^{|J_j(\theta)|+1} \hat \gamma_{j,k_j^{r -1}}^2 + \hat \gamma_{j,k_j^{r}}^2 \\ & = \sum_{r=1}^{|J_j(\theta)|} 2\ \hat \gamma_{j,k_j^{r}}^2\\ & \leq 8\ |J_j(\theta)|\ \norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2. 
\end{split} \end{equation*} Therefore \begin{equation} \label{eq:upper_bound_kappa_2_T} \begin{split} \frac{1}{\kappa^2_{{\bf{T}},\hat \gamma}(J(\theta))} &\leq 512 \sum_{j=1}^p \Big( |J_j(\theta)|\ \norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2 + |J_j(\theta)|\ \norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2\Delta_{\min, J_j(\theta)}^{-1} \Big) \\ & \leq 512 \sum_{j=1}^p \Big( 1 + \frac{1}{\Delta_{\min, J_j(\theta)}} \Big) |J_j(\theta)| \norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2\\ & \leq 512 |J(\theta)| \max_{j=1, \ldots, p}\norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2. \end{split} \end{equation} Now, we use the connection between the empirical norm and Kullback-Leibler divergence. Indeed, using Lemma~\ref{lemma-connection-L2-KL}, we get \begin{align*} &\frac{\norm{ m_{\hat \theta}({\boldsymbol X}) - m_\theta({\boldsymbol X}) }_2}{ \sqrt{n}\phi \kappa_{{\bf{T}},\hat\gamma}( J(\theta)) \kappa(J(\theta))} \\ &\quad \quad \leq \frac{1}{\sqrt{\phi} \kappa_{{\bf{T}},\hat\gamma}(J(\theta)) \kappa(J(\theta))}\Big(\frac{1}{\sqrt{n}} \norm{ m_{\hat \theta}({\boldsymbol X})- m^0({\boldsymbol X})}_2 + \frac{1}{\sqrt{n}}\norm{ m^0({\boldsymbol X}) - m_{\theta}({\boldsymbol X})}_2\Big) \\ &\quad \quad \leq \frac{2}{\sqrt{\phi}\kappa_{{\bf{T}},\hat\gamma}(J(\theta)) \kappa(J(\theta)) \sqrt{C_n(\rho, L_n)} } \Big(\mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))^{1/2} \\ &\qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad + \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X}))^{1/2} \Big), \end{align*} where we defined $C_n(\rho, L_n) = \frac{L_n \psi(-C_b (C_n + \rho))}{C_b^2 \phi (C_n + \rho)^2}$, so that combined with Equation~\eqref{for-case-least-squares}, we obtain \begin{align*} \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X})) &\leq \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})) \\ & \quad + \frac{2}{\sqrt{\phi}\kappa_{{\bf{T}},\hat\gamma}(J(\theta)) 
\kappa(J(\theta)) \sqrt{C_n(\rho, L_n)}} \big(\mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X}))^{1/2} \\ & \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \quad + \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\theta}({\boldsymbol X}))^{1/2} \big). \end{align*} This inequality entails the following upper bound \begin{equation*} \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X})) \leq 3 \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})) + \frac{5}{\phi \kappa^2_{{\bf{T}},\hat\gamma}(J(\theta)) \kappa^2(J(\theta)) C_n(\rho, L_n)}, \end{equation*} since whenever we have $x \leq c + b \sqrt x$ for some $x, b, c > 0$, then $x \leq 2c + b^2$. Introducing $g(x) = x^2 / \psi(-x) = x^2 / (e^{-x} + x - 1)$, we note that \begin{equation*} \frac{1}{C_n(\rho, L_n)} = \frac{\phi}{L_n} g(C_b (C_n + \rho)) \leq \frac{\phi}{L_n} (C_b(C_n + \rho) + 2), \end{equation*} since $g(x) \leq x + 2$ for any $x > 0$. Finally, by using also~\eqref{eq:upper_bound_kappa_2_T}, we end up with \begin{equation*} \mathrm{KL}_n(m^0({\boldsymbol X}), m_{\hat \theta}({\boldsymbol X})) \leq 3 \mathrm{KL}_n(m^0({\boldsymbol X}), m_\theta({\boldsymbol X})) + \frac{2560 (C_b(C_n + \rho) + 2)}{L_n \kappa^2(J(\theta))} \; |J(\theta)| \; \max_{j=1, \ldots, p}\norm{(\hat w_{j,\bulletlet})_{J_j(\theta)}}_\infty^2, \end{equation*} which is the statement provided in Theorem~\ref{thm:oracle}. The only thing remaining is to control the probability of the event $\mathcal{E}_n^\complement$. This is given by the following: \begin{equation*} \begin{split} \mathds{P}[\mathcal{E}_n^\complement] &\leq \sum_{j=1}^p \sum_{k=2}^{d_j} \mathds{P} \Big[\frac{1}{n} | (\boldsymbol{X}^{{B}}_{\bulletlet,j} T_{j})_{\bulletlet,k}^\top ({\boldsymbol{y}} - b'(m^0({\boldsymbol X})))| \geq \hat w_{j,k} \Big] \\ &\leq \sum_{j=1}^p \sum_{k=2}^{d_j} \mathds{P} \Big[\sum_{i=1}^n |(\boldsymbol{X}^{{B}}_{\bulletlet,j} T_{j})_{i,k}(y_i - b'(m^0(x_i))) | \geq { n\hat w_{j,k}} \Big]. 
\end{split} \end{equation*} Let $\xi_{i,j,k} = (\boldsymbol{X}^{{B}}_{\bulletlet,j} {T}_{j})_{i,k}$ and $Z_{i}= y_i - b'(m^0(x_i)).$ Note that conditionally on $x_i$, the random variables $(Z_{i})$ are independent. It can be easily shown (see Theorem 5.10 in~\cite{lehmann1998}) that the moment generating function of $Z$ (copy of $Z_i$) is given by \begin{equation} \label{moment-gener-func} \mathds{E}[\exp(tZ)] = \exp(\phi^{-1}\{b(m^0(x) + t) - tb'(m^0(x)) - b(m^0(x))\}). \end{equation} Applying Lemma 6.1 in~\cite{rigollet2012}, using~\eqref{moment-gener-func} and Assumption~\ref{ass:glm}, we can derive the following Chernoff-type bounds \begin{equation} \label{cal-proba-Ecomp} \mathds{P}\Big[ \sum_{i=1}^n |\xi_{i,j,k}Z_{i}| \geq n\hat w_{j,k}\Big] \leq 2\exp\Big(- \frac{n^2\hat w^2_{j,k} }{2U_n\phi\norm{\xi_{\bulletlet,j,k}}_2^2}\Big), \end{equation} where $\xi_{\bulletlet,j,k} = [\xi_{1,j,k} \cdots \xi_{n,j,k} ]^\top \in {\mathbb{R}}^n.$ We have \begin{equation*} {\boldsymbol X}^B_{\bulletlet,j} {T}_{j} = \begin{bmatrix} 1 & \sum_{k=2}^{d_j} x_{1,j,k}^B & \sum_{k=3}^{d_j} x_{1,j,k}^B & \cdots& \sum_{k=d_j -1}^{d_j} x_{1,j,k}^B & x_{1,j,d_j}^B \\ \vdots & \vdots & \vdots& & \vdots &\vdots \\ 1 &\sum_{k=2}^{d_j} x_{n,j,k}^B & \sum_{k=3}^{d_j} x_{n,j,k}^B & \cdots& \sum_{k=d_j -1}^{d_j} x_{n,j,k}^B & x_{n,j,d_j}^B \end{bmatrix}, \end{equation*} therefore \begin{equation} \label{eq:empirical-norm-XT} \norm{\xi_{\bulletlet,j,k}}_2^2 = \sum_{i=1}^n({\boldsymbol X}^B_{\bulletlet,j} {T}_{j})^2_{i, k} = \# \Big(\Big\{ i : x_{i,j} \in \bigcup_{r=k}^{d_j} I_{j,r} \Big\} \Big) = n\hat \pi_{j,k}. \end{equation} So, using the weights $\hat w_{j,k}$ given by~\eqref{choice-of-weights-sq-slow-GLM} together with~\eqref{cal-proba-Ecomp} and~\eqref{eq:empirical-norm-XT}, we obtain that the probability of $\mathcal{E}_{n}^\complement$ is smaller than $2e^{-A}.$ This concludes the proof of the first part of Theorem~\ref{thm:oracle}. 
$ \square$ \subsection{Proof of Theorem~\ref{thm:additive}} \label{sub:proof_of_theorem_thm:additive} First, let us note that in the least squares setting, we have $R(m_{\theta}) - R(m^0) = \norm{m_{\theta} - m^0}_n^2$ for any $\theta \in {\mathbb{R}}^d$ where $\norm{g}_n^2 = \frac 1n \sum_{i=1}^n g(x_i)^2$, and that $b(y) = \frac 12 y^2$, $\phi = \sigma^2$ (noise variance) in Equation~\eqref{distribut-glm}, and $L_n = U_n = 1$, $C_b = 0$. Theorem~\ref{thm:oracle} provides \begin{equation*} \norm{m_{\hat \theta} - m^0}_n^2 \leq 3 \norm{m_{\theta} - m_{\theta^0}}_n^2 + \frac{5120 \sigma^2}{\kappa^2(J(\theta))} \frac{|J(\theta)|(A + \log d)}{n} \end{equation*} for any $\theta \in {\mathbb{R}}^d$ such that $n_j^\top \theta_{j \bullet} = 0$ and $J(\theta) \leq J_*$. Since $d_j = D$ for all $j=1, \ldots, p$, we have $d = D p$ and \begin{equation} \label{eq:sparsity-comparison} | J(\theta) | = \sum_{j=1}^p | \{ k = 2, \ldots, D : \theta_{j, k} \neq \theta_{j, k-1 } \} | \leq (D - 1) |\mathcal{J}(\theta)| \norm{\theta}_\infty \leq D p \norm{\theta}_\infty \end{equation} for any $\theta \in {\mathbb{R}}^d$, where we recall that $\mathcal{J}(\theta) = \{ j=1, \ldots, p : \theta_{j, \bullet} \neq \boldsymbol 0_{D} \}$. Also, recall that $I_{j, 1} = I_1 = [0, \frac{1}{D}]$ and $I_{j, k} = I_k = (\frac{k-1}{D}, \frac{k}{D}]$ for $k=2, \ldots, D$ and $j = 1, \ldots, p$. Also, we consider $\theta = \theta^*$, where $\theta_{j, \bullet}^*$ is defined, for any $j \in \mathcal{J}_*$, as the minimizer of \begin{equation*} \sum_{i=1}^n \Big( \sum_{k=1}^{D} (\theta_{j, k} - m_j^0(x_{i, j}) ) \ind{I_{k}}(x_{i, j}) \Big)^2 \end{equation*} over the set of vectors $\theta_{j, \bullet} \in {\mathbb{R}}^{D}$ satisfying $n_j^\top \theta_{j, \bullet} = 0$, and we put $\theta_{j, \bullet}^* = \boldsymbol 0_D$ for $j \notin \mathcal{J}_*$. 
It is easy to see that the solution is given by \begin{equation*} \theta_{j, k}^* = \frac{\sum_{i=1}^n m_j^0(x_{i, j}) \ind{I_{k}}(x_{i, j})}{n_{j, k}}, \end{equation*} where we recall that $n_{j, k} = \sum_{i=1}^n \ind{I_{k}}(x_{i, j})$. Note in particular that the identifiability assumption $\sum_{i=1}^n m_j^0(x_{i, j}) = 0$ entails that $n_j^\top \theta_{j, \bullet}^* = 0$. In order to control the bias term, an easy computation gives that, whenever $x_{i, j} \in I_{k}$ \begin{equation*} |\theta_{j, k}^* - m_j^0(x_{i, j})| \leq \frac{\sum_{i'=1}^n | m_j^0(x_{i', j}) - m_j^0(x_{i, j}) | \ind{I_{k}}(x_{i', j})}{n_{j, k}} \leq L |I_{k}| = \frac{L}{D}, \end{equation*} where we used the fact that $m_j^0$ is $L$-Lipschitz, so that \begin{align*} \norm{m_{\theta^*} - m^0}_n^2 &= \frac 1n \sum_{i=1}^n (m_{\theta^*}(x_{i, j}) - m^0(x_{i, j}))^2 \\ &= \frac 1n \sum_{i=1}^n \Big( \sum_{j \in \mathcal{J}_*} \sum_{k=1}^{D} (\theta_{j, k}^* - m^0(x_{i, j})) \ind{I_{k}} \Big)^2 \\ &\leq \frac{|\mathcal{J}_*|}{n} \sum_{i=1}^n \Big( \sum_{j \in \mathcal{J}_*} \sum_{k=1}^{D} (\theta_{j, k}^* - m^0(x_{i, j})) \ind{I_{k}} \Big)^2 \\ &\leq \frac{|\mathcal{J}_*|}{n} \sum_{i=1}^n \sum_{j \in \mathcal{J}_*} \sum_{k=1}^{D} (\theta_{j, k}^* - m^0(x_{i, j}))^2 \ind{I_{k}}(x_{i, j}) \\ &\leq |\mathcal{J}_*| \sum_{j \in \mathcal{J}_*} \sum_{k=1}^{D} L^2 |I_{k}|^2 \ind{I_{k}}(x_{i, j}) \leq \frac{L^2 |\mathcal{J}_*|^2}{D^2}. \end{align*} Note that $|\theta_{j, k}^*| \leq \norm{m_j^0}_{n, \infty}$ where $\norm{m_j^0}_{n, \infty} = \max_{i=1, \ldots, n} |m_j^0(x_{i, j})|$. This entails that $\norm{\theta^*}_\infty \leq \max_{j=1, \ldots, p} \norm{m_j^0}_{n, \infty} = M_n$. 
So, using also~\eqref{eq:sparsity-comparison}, we end up with \begin{align*} \norm{m_{\hat \theta} - m^0}_n^2 &\leq \frac{3 L^2 |\mathcal{J}_*|^2}{D^2} + \frac{5120 \sigma^2}{\kappa^2(J(\theta^*))} \frac{D |\mathcal{J}_*| M_n (A + \log (D p M_n))}{n}, \end{align*} which concludes the proof of Theorem~\ref{thm:additive} using $D = n^{1/3}$. $ \square$ \end{document}
\begin{document} \parskip = 0mm \title[A Question of Bj\"orner from 1976]{A ``Challenging Question'' of Bj\"orner from 1976: Every Infinite Geometric Lattice of Finite Rank Has a Matching} \author{Jonathan David Farley} \thanks{The author would like to thank Professor Anders Bj\"orner for sending him his two preprints. Not only would the author {\sl like} to thank him, he {\sl does} thank him.} \address{Department of Mathematics, Morgan State University, 1700 E. Cold Spring Lane, Baltimore, Maryland 21251, United States of America, {\tt [email protected]}} \keywords{Geometric lattice, matching, semimodular, rank, height, atom, Hall's Marriage Theorem, shadow-matching, Menger's Theorem.} \subjclass[2010]{05B35, 06C10, 05D15, 03E05.} \begin{abstract} It is proven that every geometric lattice of finite rank greater than 1 has a matching between the points and hyperplanes. This answers a question of P\'olya Prize-winner Anders Bj\"orner from the 1981 Banff Conference on Ordered Sets, which he raised as a ``challenging question'' in 1976. \end{abstract} \maketitle \def\mathbb{Q}_0{\mathbb{Q}_0} \def\mathbb{Q}_1{\mathbb{Q}_1} \def\mathbb{Q}{\mathbb{Q}} \def{\rm card}{{\rm card}} \parskip = 2mm \parindent = 10mm \def{\rm Part}{{\rm Part}} \def{\mathcal P}{{\mathcal P}} \def{\rm Eq}{{\rm Eq}} \defCl_\tau(\Delta){Cl_\tau(\Delta)} \def{\mathcal C}_{\{*\}}{{\mathcal C}_{\{*\}}} \def{\mathcal C}_{{\rm fin}>1}{{\mathcal C}_{{\rm fin}>1}} \def{\mathcal C}_{\infty}{{\mathcal C}_{\infty}} \def{\mathcal P}cf{{\mathcal P}_{\rm cf}} \def{\mathcal F}_n{{\mathcal F}_n} \def{\it Proof. }{{\it Proof. }} \vspace*{-4mm} At the famous 1981 Banff Conference on Ordered Sets---such luminaries as Erd\H{o}s, Professor Garrett Birkhoff, Dilworth, Turing Award-winner D. S. Scott, Daykin, A. Garsia, R. L. Graham, C. Greene, B. J\'onsson, E. C. Milner, and Oxford's H. A. 
Priestley attended---Bj\"orner asked (with MIT's Richard Stanley asking a question immediately afterwards, judging from the proceedings) if every geometric lattice $L$ of finite rank [$\geq2$] had a matching \cite[pp. xi, xii, and 799]{RivHB}. Greene had proven this for finite lattices \cite[Corollary 3]{GreGJ}. Bj\"orner had proven this in special cases \cite[Theorems 3 and 4]{BjoGF}---for modular lattices and for ``equicardinal lattices,'' i.e., lattices whose hyperplanes contained the same number of atoms. In 1976, Bj\"orner wrote, ``It would be interesting to know if the result of our theorems 3 and 4 can be extended to all infinite geometric lattices, or at least to some classes of such lattices other than the modular and the equicardinal.'' In 1977, he proved it for lattices of rank 3 and for lattices of cardinality less than $\aleph_\omega$. The P\'olya Prize-winner went on to ask at the Banff Conference if there exists a family $M$ of pairwise disjoint maximal chains in $L\setminus\{0,1\}$ whose union contains the set of atoms, saying, ``I showed this is true for modular $L$, and J. Mason showed it to be true for finite $L$.'' He conjectured this in 1977 (\cite[p. 18]{BjoGG}, \cite[p. 10]{BjoGF}), writing in 1976 that ``[a]nother challenging question, related to the existence of matchings, is whether maximal families of pairwise disjoint maximal proper chains do exist in infinite geometric lattices (cf. \cite{MasGC}).'' We answer Bj\"orner's 1976 question about matchings. We selectively use some of the notation and terminology from \cite{DavPriJB} and \cite[Chapter II, \S8 and Chapter IV]{BirFG}. Let $P$ be a poset. Let $x,y\in P$ be such that $x\leq y$. The {\sl closed interval} $[x,y]$ is $\{z\in P: x\leq z\leq y\}$. If $|[x,y]|=2$, we say $x$ is a {\sl lower cover} of $y$ and $y$ is an {\sl upper cover} of $x$ and denote it $x\lessdot y$. Let $P$ be a poset with least element $0$. An {\sl atom} or {\sl point} is a cover of $0$. 
The set of atoms is $\mathcal A(P)$. If $P$ is a poset with greatest element $1$, a {\sl co-atom}, {\sl co-point}, or {\sl hyperplane} is a lower cover of $1$. The set of hyperplanes is $\mathcal H(P)$. A poset is {\sl semimodular} if, for all $a,b,c\in P$, $a\lessdot b,c$ and $b\ne c$ imply there exists $d\in P$ such that $b,c\lessdot d$. A {\sl geometric lattice of finite height} is a semimodular lattice $L$ with no infinite {\sl chains} (totally ordered subsets)---implying $L$ has a $0$ and a $1$---such that every element is a join of a subset of atoms. It is known \cite[Theorem 9.4]{Nat} that such an $L$ is a complete lattice with a finite maximal chain and all maximal chains have the same size $r+1$, where $r$ is the {\sl height} or {\sl rank} of $L$. Moreover, every element is a join of a finite set of atoms and a meet of a subset of $\mathcal H(L)$ (see \cite[Lemma 1]{BjoGF}). Every interval is a geometric lattice \cite[\S3.3, Lemma]{WelGF}. The rank of $\downarrow x:=[0,x]$ is the {\sl rank} $r(x)$ of $x\in L$. For $x,y\in L$, $r(x\vee y)+r(x\wedge y)\leq r(x)+r(y)$ \cite[Theorem 9.5]{Nat}. For $x\in L$, let $\underline x:=\mathcal A(L)\cap\downarrow x$ and let $\overline x:=\mathcal H(L)\cap[x,1]$. The following is a basic fact (see \cite[p. 3]{BjoGF}). \begin{lemma} Let $L$ be a geometric lattice of finite height. Let $a,b\in L$ be such that $a\leq b$. Then any $x\in[a,b]$ has a {\rm modular complement} in $[a,b]$, i.e., there exists $y\in[a,b]$ such that $x\wedge y=a$, $x\vee y=b$, and $r(x)+r(y)=r(a)+r(b)$. \end{lemma} {\it Proof. } If $x=c_0\lessdot c_1\lessdot\dots\lessdot c_k=b$, find $a_i\in\mathcal A(L)\cap\downarrow c_i\setminus \downarrow c_{i-1}$ for $i=1,\dots,k$. Let $y=a\vee a_1\vee\dots\vee a_k$. Clearly $r(y)-r(a)=k=r(b)-r(x)$, $x\vee y=b$, and $x\wedge y\geq a$. 
As $r(a)\leq r(x\wedge y)\leq r(x)+r(y)-r(x\vee y)=r(a)+r(b)-r(b)=r(a)$, we have $x\wedge y=a$.\qed See \cite[Chapters 2, 3, 5 and 8]{JecJF} and \cite[Appendix 2, \S3]{LanJB} for basic facts about ordinals and cardinals. If $\kappa$ is a regular cardinal, a subset $\Omega\subseteq\kappa$ is {\sl closed in} $\kappa$ if for every non-empty subset $A\subseteq\Omega$, the supremum of $A$ is $\kappa$ or in $\Omega$; it is {\sl unbounded in} $\kappa$ if the supremum of $\Omega$ is $\kappa$; it is a {\sl club in} $\kappa$ if it is both. A subset $\Omega\subseteq\kappa$ is {\sl stationary in} $\kappa$ if it intersects every club in $\kappa$; note that $|\Omega|=\kappa$. We take our notation from \cite[\S\S2, 4, and 6]{AhaNasSheHC}. A {\sl society} is a triple $\Lambda=(M_\Lambda,W_\Lambda,K_\Lambda)$ where $M_\Lambda\cap W_\Lambda=\emptyset$ and $K_\Lambda\subseteq M_\Lambda\times W_\Lambda$. If $A\subseteq M_\Lambda$ and $X\subseteq W_\Lambda$, then $K_\Lambda[A]:=\{w\in W_\Lambda: (a,w)\in K_\Lambda\text{ for some }a\in A\}$, and $\Lambda[A,X]:=\bigl(A,X,K_\Lambda\cap(A\times X)\bigr)$ is a {\sl subsociety} of $\Lambda$. If $B\subseteq M_\Lambda$, then $\Lambda-B:=\Lambda[M_\Lambda\setminus B,W_\Lambda]$. If $\Pi$ is a subsociety, then $\Lambda/\Pi:=\Lambda[M_\Lambda\setminus M_\Pi,W_\Lambda\setminus W_\Pi]$. We call a subsociety $\Pi$ of $\Lambda$ {\sl saturated} if $K_\Lambda[M_\Pi]\subseteq W_\Pi$ and we denote this situation by $\Pi\lhd\Lambda$. An {\sl espousal} for $\Lambda$ is an injective function $E:M_\Lambda\to W_\Lambda$ such that $E\subseteq K_\Lambda$. A society is {\sl critical} if it has an espousal and every espousal is surjective.
If $I$ is a set and $\bar\Pi=(\Pi_i: i\in I)$ is a family of subsocieties of $\Lambda$, then $\bigcup\bar\Pi:=(\bigcup_{i\in I}M_{\Pi_i},\bigcup_{i\in I}W_{\Pi_i},\bigcup_{i\in I}K_{\Pi_i})$. Assume $I$ is an ordinal. If $\theta\leq I$, then $\bar\Pi_\theta$ denotes $(\Pi_i:i<\theta)$. The sequence $\bar\Pi$ is {\sl non-descending} if $\Pi_i$ is a subsociety of $\Pi_j$ whenever $i<j<I$; it is {\sl continuous} if, in addition, $\bigcup\bar\Pi_\theta=\Pi_\theta$ for every limit ordinal $\theta<I$. If $I=J+1$, $\bar\Pi$ is a $J$-{\sl tower in} $\Lambda$ if $\bar\Pi$ is a continuous family of saturated subsocieties of $\Lambda$ such that $\Pi_0=(\emptyset,\emptyset,\emptyset)$. Let $\Pi$ be a subsociety of $\Lambda$. Assume $1\leq\kappa\leq\aleph_0$. Then $\Pi$ is a $\kappa$-{\sl obstruction in} $\Lambda$ if $\Pi\lhd\Lambda$ and $\Pi-A$ is critical for some $A\subseteq M_\Pi$ such that $|A|=\kappa$. Now assume $\kappa$ is a regular, uncountable cardinal. A $\kappa$-tower $\bar\Sigma$ in $\Lambda$ is {\sl obstructive} if, for each $\alpha<\kappa$, $\Sigma_{\alpha+1}/\Sigma_\alpha$ is either (a) a $\mu$-obstruction in $\Lambda/\Sigma_\alpha$ for some $\mu<\kappa$ or (b) $(\emptyset,\{w\},\emptyset)$ for some $w\in W_\Lambda$, and $\{\alpha<\kappa: \text{(a) holds at }\alpha\}$ is stationary in $\kappa.$ We say $\Pi$ is a $\kappa$-{\sl obstruction in} $\Lambda$ if $\Pi=\bigcup\bar\Sigma$ for an obstructive $\kappa$-tower $\bar\Sigma$ in $\Lambda$; by \cite[Lemmas 4.2 and 4.3]{AhaNasSheHC}, $\Pi\lhd\Lambda$. For a society $\Lambda$, $\delta(\Lambda)$ is the minimum of $\{|B|: B\subseteq M_\Lambda\text{ such that }\Lambda-B\text{ has an espousal}\}$.
We will use the following theorems of Aharoni, Nash-Williams, and Shelah: \begin{theorem}{\rm (from \cite[Lemma 4.2 and Corollary 4.9a]{AhaNasSheHC})} If $\Pi$ is a $\kappa$-obstruction, then $\delta(\Pi)=\kappa$.\qed \end{theorem} \begin{theorem}\cite[Theorem 5.1]{AhaNasSheHC} A society $\Lambda$ has an espousal if and only if it has no obstruction.\qed \end{theorem} We will say that a geometric lattice of finite rank $r\geq 3$ has a {\sl matching} if the society $\biggl(\mathcal A(L),\mathcal H(L),\le\cap\bigl(\mathcal A(L)\times\mathcal H(L)\bigr)\biggr)$ has an espousal. (Since $\mathcal A(L)=\mathcal H(L)$ in geometric lattices of rank 2, we could say they also have a {\sl matching}.) Greene proved: \begin{theorem}\cite[Corollary 3]{GreGJ} Every finite geometric lattice of rank at least 2 has a matching.\qed \end{theorem} Bj\"orner proved: \begin{theorem}\cite[Theorems 3 and 6]{BjoGG} Every geometric lattice of rank 3, or of finite height and cardinality less than $\aleph_\omega$, has a matching.\qed \end{theorem} We use the following results of Bj\"orner: \begin{lemma} {\rm (\cite[Lemma 1]{BjoGG} and \cite[Theorem 1]{BjoGF})} Let $L$ be a geometric lattice of finite height. (a) Let $p\in\mathcal A(L)$, $h\in\mathcal H(L)$ and assume $p\nleq h$. Then $|\underline h|\le|\overline p|$. (b) If $L$ is infinite, then $|\mathcal A(L)|=|\mathcal H(L)|=|L|$.\qed \end{lemma} \begin{theorem} \cite[Theorem 4]{BjoGG} Let $L$ be an infinite geometric lattice of finite height such that $|\downarrow x|<|L|$ for every $x\in L$ of rank 2. If $|L|$ is a regular cardinal, then $L$ has a matching.\qed \end{theorem} Bj\"orner also uses this theorem of Milner and Shelah: \begin{theorem} \cite[Theorem]{TveGF} Let $\Gamma=(M,W,K)$ be a society such that $K[m]\ne\emptyset$ for all $m\in M$ and such that $(m,w)\in K$ implies $|K^{-1}[w]|\le |K[m]|$. Then $\Gamma$ has an espousal.\qed \end{theorem} We are ready to begin answering Bj\"orner's question.
\begin{lemma} Let $L$ be a geometric lattice of finite height. Let $B\subseteq\mathcal A(L)$. Let $\mathcal L(B)$ be the subposet $\big\{\bigvee_L\{b_1,\dots,b_n\}:n\in\mathbb N_0,b_1,\dots,b_n\in B\big\}$. Then $\mathcal L(B)$ is a geometric lattice of finite height with rank $r_L(\bigvee_L B)$, and $\mathcal A\big(\mathcal L(B)\big)=B$. The inclusion map is order- and cover-preserving. Also, $0_{\mathcal L(B)}=0_L$ and $|\mathcal L(B)|$ is either finite or $|B|$. If $1_{\mathcal L(B)}=1_L$, then $\mathcal H\big(\mathcal L(B)\big)\subseteq\mathcal H(L)$. \end{lemma} {\it Proof. } Since $\mathcal L(B)$ is closed under arbitrary joins, it is a complete lattice (e.g., \cite[Theorems 2.31 and 2.41]{DavPriJB}). Letting $n$ equal $0$ or $1$, we get $\{0_L\}\cup B\subseteq\mathcal L(B)$ and so $B\subseteq\mathcal A\big(\mathcal L(B)\big)$. But for $n\ge2$, $b_1\vee b_2\vee\dots\vee b_n\ge b_1$, so $\mathcal A\big(\mathcal L(B)\big)\subseteq B$. Clearly every element of $\mathcal L(B)$ is a join of atoms. Let $m,n\in\mathbb N_0$ and let $b_1,\dots,b_n,c_1,\dots,c_m\in B$. Assume $b_1\vee\dots\vee b_n\lessdot_{\mathcal L(B)} c_1\vee\dots\vee c_m$. Then $m\ge1$. Pick $r\in\{1,\dots,m\}$ such that $b_1\vee\dots\vee b_n< b_1\vee\dots\vee b_n\vee c_r\in\mathcal L(B)$. Then $ b_1\vee\dots\vee b_n< b_1\vee\dots\vee b_n\vee c_r\le b_1\vee\dots\vee b_n\vee c_1\vee\dots\vee c_r\vee\dots\vee c_m=c_1\vee\dots\vee c_m$. As $ c_1\vee\dots\vee c_m$ covers $ b_1\vee\dots\vee b_n$ in $\mathcal L(B)$, we conclude $b_1\vee\dots\vee b_n\vee c_r=c_1\vee\dots\vee c_m$. By semimodularity in $L$, $b_1\vee\dots\vee b_n\lessdot_L b_1\vee\dots\vee b_n\vee c_r=c_1\vee\dots\vee c_m$. Now let $k\in\mathbb N_0$ and let $d_1,\dots,d_k\in B$. Assume that $b_1\vee\dots\vee b_n\lessdot_{\mathcal L(B)} d_1\vee\dots\vee d_k$ and $c_1\vee\dots\vee c_m\ne d_1\vee\dots\vee d_k$. As before, for some $s\in\{1,\dots,k\}$, $ b_1\vee\dots\vee b_n \lessdot_L b_1\vee\dots\vee b_n\vee d_s=d_1\vee\dots\vee d_k$. 
Thus $c_r\nleq d_1\vee\dots\vee d_k$ and $d_s\nleq c_1\vee\dots\vee c_m$. By semimodularity, $c_1\vee\dots\vee c_m\lessdot_L c_1\vee\dots\vee c_m\vee d_s=b_1\vee\dots\vee b_n\vee c_r\vee d_s=d_1\vee\dots\vee d_k\vee c_r$ and $ d_1\vee\dots\vee d_k\lessdot_L d_1\vee\dots\vee d_k\vee c_r$; hence $c_1\vee\dots\vee c_m$, $d_1\vee\dots\vee d_k\lessdot_{\mathcal L(B)} b_1\vee\dots\vee b_n\vee c_r\vee d_s$. This shows that $\mathcal L(B)$ is a geometric lattice, of finite height since $L$ has no infinite chains, with $1_{\mathcal L(B)}=\bigvee_L B$. As $\bigvee_L B=\bigvee_L\{b_1,\dots,b_n\}$ for some $n\in\mathbb N_0$ and some $b_1,\dots,b_n\in B$, picking the smallest such $n$ and using semimodularity in $L$ and $\mathcal L(B)$, we see that $r_L(\bigvee_L B)=r_{\mathcal L(B)}(\bigvee_L B)$, namely $n$. If $1_{\mathcal L(B)}=1_L$, then the hyperplanes of $L$ and $\mathcal L(B)$ have the same rank; thus $\mathcal H\big(\mathcal L(B)\big)\subseteq\mathcal H(L)$. The cardinality of $\mathcal L(B)$ follows from standard arguments (or see \cite[Theorem 1]{BjoGF}).\qed \begin{proposition} Let $\lambda$ be a singular cardinal. Assume that every geometric lattice of finite rank at least $2$ and of cardinality less than $\lambda$ has a matching. Then every geometric lattice of finite rank at least $2$ and of cardinality $\lambda$ has a matching. \end{proposition} {\it Proof. } (Compare this with the proof of \cite[Theorem 6.4]{AhaNasSheHC}.) Assume not, for a contradiction. Then by Theorem 3, the society $\Gamma=\bigg(\mathcal A(L),\mathcal H(L),\le\cap\big(\mathcal A(L)\times\mathcal H(L)\big)\bigg)$ has a $\kappa$-obstruction $\Pi=(M,W,K)$, where $L$ is the lattice (and $L$ has rank at least $3$). Since $|M|\le\lambda$, by Theorem 2 we have $\kappa\le\lambda$---indeed $\kappa<\lambda$, since $\kappa$ is finite or a regular cardinal. By Theorem 2, there exists $A\subseteq M$ such that $|A|=\kappa$ and $\Pi-A$ has an espousal, $H$.
Let $R\subseteq\mathcal A(L)$ be a finite subset such that $1_L=\bigvee R$. Let $B_0=A\cup R$, and, for $n<\omega$, if $B_n$ is defined, let $B_{n+1}=B_n\cup H^{-1}\bigg(\mathcal H\big(\mathcal L(B_n)\big)\bigg)$. Note that $R\subseteq B_n$ for all $n<\omega$, so the rank of $\mathcal L(B_n)$ is the rank of $L$ and $\mathcal H\big(\mathcal L(B_n)\big)\subseteq\mathcal H(L)$ by Lemma 9. Let $B=\bigcup_{n<\omega} B_n\subseteq M\cup R$. Now $|B_0|\le\max\{\kappa,\aleph_0\}<\lambda$. If $n<\omega$ and $|B_n|\le\max\{\kappa,\aleph_0\}$, then $|\mathcal H\big(\mathcal L(B_n)\big)|\le\max\{\kappa,\aleph_0\}$, so $|B_{n+1}|\le\max\{\kappa,\aleph_0\}+\max\{\kappa,\aleph_0\}=\max\{\kappa,\aleph_0\}$. Hence $|B|\le\aleph_0\max\{\kappa,\aleph_0\}=\max\{\kappa,\aleph_0\}<\lambda$. As $R\subseteq B$, Lemma 9 shows that $|\mathcal L(B)|<\lambda$ and $\mathcal H\big(\mathcal L(B)\big)\subseteq\mathcal H(L)$, so $\mathcal L(B)$ has a matching. Let $G$ be the espousal. Since $$ H[(M\setminus A)\setminus(M\setminus A)\cap B]\cap\mathcal H\big(\mathcal L(B)\big)=\emptyset $$ and $A\subseteq B\cap M$---so that $M=[(M\setminus A)\setminus(M\setminus A)\cap B]\cup(B\cap M)$---we know $H|_{(M\setminus A)\setminus(M\setminus A)\cap B}\cup G|_{B\cap M}$ is an espousal of $\Pi$, as $\Pi\lhd\Gamma$, contradicting Theorem 2.\qed With Theorem 5, Proposition 10 extends Bj\"orner's work to $\aleph_\omega$. But using the argument of \cite[Theorem 6]{BjoGG} almost verbatim, we can settle Bj\"orner's first question from the 1981 Banff Conference on Ordered Sets. Bj\"orner already did the heavy lifting in proving Theorem 5, but to make it clear that his proof is what we need, we include it. \begin{theorem} Every geometric lattice of finite rank greater than $1$ has a matching. \end{theorem} {\it Proof. } The proof is drawn from \cite[pp. 10--13]{BjoGG}. Assume we have a counterexample $L$ of smallest cardinality, and, among those counterexamples, one of smallest rank.
By Theorems 4 and 5 and Proposition 10, we can assume $|L|$ is a regular cardinal and that $L$ has rank at least $4$. By Theorem 7, there is $\ell_0\in L$ of rank $2$ such that $|\downarrow\ell_0|=|L|$. Assume that $|\overline p|=|L|$ for all $p\in\underline{\ell_0}$. Consider any $q\in\mathcal A(L)\setminus\underline{\ell_0}$ and consider the rank $3$ geometric lattice $\downarrow(q\vee\ell_0)$. By Lemma 6(b), $$ |L|=|\downarrow{\ell_0}|=|\underline{\ell_0}|\le|\{c\in\downarrow(q\vee\ell_0): q\lessdot c\}| $$ (by Lemma 6(a))$=|\overline q|$ (by Lemma 6(b)), so $|L|=|\overline q|$. Hence $|\overline p|=|L|$ for all $p\in\mathcal A(L)$. By Theorem 8, $L$ has a matching. So now assume $|\overline q|<|L|$ for some $q\in\underline{\ell_0}$. {\it {\bf Case 1.} Every cover of $q$ except $\ell_0$ covers only one other atom.} Then define $s:\mathcal A(L)\setminus{\underline{\ell_0}}\to\{x\in L:q\lessdot x\}$ by $s(p)=p\vee q$ for all $p\in\mathcal A(L)\setminus{\underline{\ell_0}}$. In this case, $s$ is one-to-one. By the minimality of $L$, the geometric lattice $\uparrow q$ has a matching $t: \{x\in L:q\lessdot x\}\to\overline q$. We will define a matching $f$ for $L$. Let $f(p):=t\big(s(p)\big)$ for all $p\in\mathcal A(L)\setminus\underline{\ell_0}$ and let $f(q):=t(\ell_0)$; we just need to define $f$ on $\underline{\ell_0}\setminus\{q\}$. Pick $h_0\in\overline{\ell_0}$ and let $z$ be a modular complement of $\ell_0$ in $\downarrow h_0$. Define $R:\underline{\ell_0}\to\{x\in L:z\lessdot x\lessdot h_0\}$ by $R(p)=p\vee z$ for all $p\in\underline{\ell_0}$. This function is one-to-one: If $p,p'\in\underline{\ell_0}$ but $p\ne p'$ and $p\vee z=p'\vee z$, then $p\vee z=p\vee p'\vee z=\ell_0\vee z=h_0$, a contradiction. If $p\in\underline{\ell_0}\setminus\{q\}$, then $q\nleq R(p)$ (or else $R(p)=p\vee q\vee z=\ell_0\vee z=h_0$, a contradiction), so $R(p)$ is covered by exactly one hyperplane in $\overline q$, namely $q\vee R(p)$, and this is $h_0$. 
Since $f[\big(\mathcal A(L)\setminus\underline{\ell_0}\big)\cup\{q\}]\subseteq\overline q$, if $p\in\underline{\ell_0}\setminus\{q\}$, we can let $f(p)$ be any hyperplane covering $R(p)$ except $h_0$. If $p_1,p_2\in\underline{\ell_0}\setminus\{q\}$ but $p_1\ne p_2$ and $f(p_1)=f(p_2)$, then $f(p_1)$ covers $R(p_1)=p_1\vee z$ and covers $R(p_2)=p_2\vee z$, so $f(p_1)=p_1\vee p_2\vee z=\ell_0\vee z=h_0$, a contradiction. Thus $f$ is a matching. {\it {\bf Case 2.} There exists $\ell_1\in L\setminus\{\ell_0\}$ such that $q\lessdot\ell_1$ and $|\underline{\ell_1}|\ge3$.} Let $p_1,p_2\in\underline{\ell_1}$ be such that $|\{p_1,p_2,q\}|=3$. Since $q=\ell_0\wedge\ell_1$, we have $p_1,p_2\nleq\ell_0$. Let $h_0\in\overline{\ell_0}$ be such that $p_1\nleq h_0$. (Pick a modular complement of $\ell_0\vee p_1$ in $\uparrow\ell_0$.) If $p_2\le h_0$, then $q\le\ell_0\le h_0$ implies $\ell_1=p_2\vee q\le h_0$, and so $p_1\le h_0$, a contradiction. Hence $p_2\nleq h_0$. By the minimality of $L$, $\downarrow h_0$ has a matching $g:\underline{h_0}\to C:=\{x\in L: x\lessdot h_0\}$. Let $C_2:=\{c\in C: |\overline c|=2\}$ and let $C_3:=C\setminus C_2$. We will show that $|C_3|=|L|$. Because $\{p_1,p_2\}\subseteq\mathcal A(L)\setminus\underline{h_0}$, we have that $q\le\ell_1=p_1\vee p_2\le\bigvee\big(\mathcal A(L)\setminus\underline{h_0}\big)=:y$. {\it Claim. For $x\in C$, $x\in C_2$ if and only if $x=h_0\wedge h$ for some $h\in\overline y$.} {\it Proof of claim.} We have a partition of $\mathcal A(L)\setminus\underline x$: $\{\underline k\setminus\underline x: x\lessdot k\in L\}$. If $x\in C_2$, then $x\lessdot h$ for some $h\in\mathcal H(L)\setminus\{h_0\}$ and so $x=h_0\wedge h$. If $w\in\mathcal A(L)\setminus\underline{h_0}$, then $w\notin\underline x$, so $x\lessdot w\vee x\in\mathcal H(L)$ but $w\vee x\ne h_0$, so $w\vee x=h$. Hence $w\le h$. Therefore $y\le h$. Conversely, if $x=h_0\wedge h$ for some $h\in\overline y$, then $h_0\ne h$.
If there exists $h'\in\overline x\setminus\{h_0,h\}$, then, for some $a\in\underline{h'}\setminus\underline x$, $h'=a\vee x$. Hence $a\notin \underline{h_0}\setminus\underline x$, and thus $a\le y$, so $a\le h$ and thus $a\le h\wedge h'=x$, a contradiction. Hence $x\in C_2$.\qed By the claim, $|C_2|\le|\overline y|$. But $q\le y$ implies that $\overline y\subseteq\overline q$ and, since $|\overline q|<|L|$, we conclude $|C_2|<|L|$. By Lemma 6(b), $|C|=|L|$, so $|C_3|=|L|$. We now define our matching as follows: Since $|\mathcal A(L)\setminus\underline{h_0}|\le|C_3|$, take any injection $b:\mathcal A(L)\setminus\underline{h_0}\to C_3$ and let $f(p)=p\vee b(p)$ for $p\in\mathcal A(L)\setminus\underline{h_0}$. For $p\in\underline{h_0}$, let $f(p)$ be any cover of $g(p)$ except $h_0$ or, in case $g(p)=b(p')$ for some $p'\in\mathcal A(L)\setminus\underline{h_0}$, except $f(p')$. (We can do this since $b(p')\in C_3$.) If $x',x''\in C$ and $x'\ne x''$, then $\uparrow x'\cap\uparrow x''=\uparrow h_0$; hence if $p,p'\in\mathcal A(L)\setminus\underline{h_0}$ and $p\ne p'$ but $f(p)=f(p')$ (so $p\vee b(p)=p'\vee b(p')$), then $f(p)\ge h_0$; but $r\big(f(p)\big)=r(h_0)$, so $f(p)=h_0$ and $p\le h_0$, a contradiction. If for some $p\in\underline{h_0}$ and $p'\in\mathcal A(L)\setminus\underline{h_0}$ we have $f(p)=f(p')$, then $g(p)\lessdot p'\vee b(p')$. Since $g(p),b(p')\lessdot h_0$, then $\{g(p),b(p'),h_0,p'\vee b(p')\}$ would be a 4-element crown (also called a ``cycle'') of elements in consecutive ranks---impossible in a lattice---unless $g(p)=b(p')$, which we have ruled out. If $p,p'\in\underline{h_0}$ and $f(p)=f(p')$ but $p\ne p'$, then $g(p)\ne g(p')$ and $g(p),g(p')\lessdot h_0$ and $f(p)$ is a cover of $g(p),g(p')$ distinct from $h_0$, so we get another impossible 4-crown. Hence $f$ is one-to-one.\qed This answers the question of Bj\"orner from 1976 that was the first question he stated at the 1981 Banff Conference on Ordered Sets. 
A good approach to the second would be to use \cite{AhaBerJI} and \cite{LogShaJD}; the latter contains a theorem that, when he first read it, made this writer feel that it could hold its own alongside many classic results in combinatorics. \end{document}
\begin{document} \keywords{bi-continuous semigroups, extrapolation spaces, implemented semigroups, Desch--Schappacher type perturbation} \subjclass[msc2010]{47D03, 47A55, 34G10, 46A70} \title{A Desch--Schappacher Perturbation Theorem for Bi-Continuous Semigroups} \author{Christian Budde} \email{[email protected]} \author{B\'{a}lint Farkas} \email{[email protected]} \address{University of Wuppertal, School of Mathematics and Natural Science, Gaussstrasse 20, 42119 Wuppertal, Germany} \begin{abstract} We prove a Desch--Schappacher type perturbation theorem for one-parameter semigroups on Banach spaces which are not strongly continuous for the norm, but possess a weaker continuity property. In this paper we choose to work in the framework of bi-continuous semigroups. This choice has the advantage that we can treat in a unified manner two important classes of semigroups: implemented semigroups on the Banach algebra $\mathscr{L}(E)$ of bounded, linear operators on a Banach space $E$, and semigroups on the space of bounded and continuous functions over a Polish space induced by jointly continuous semiflows. For both of these classes we present an application of our abstract perturbation theorem. \end{abstract} \maketitle \section*{Introduction} \noindent As suggested by Greiner in \cite{Greiner1987} abstract perturbation theory of one-parameter semigroups provides good means to change the domain of a semigroup generator. For this an enlargement of the underlying Banach space may be necessary and extrapolation spaces become important. One of the well-known results in this direction goes back to the papers of Desch and Schappacher, see \cite{DeschSchappacher} and \cite{Desch1988}. Another prominent example of such general perturbation techniques is due to Staffans and Weiss, \cite{SW2004,Staffans2005}, and an elegant abstract operator theoretic/algebraic approach has been developed by Adler, Bombieri and Engel in \cite{WS}.
A general theory of unbounded domain perturbations is given by Hadd, Manzo and Rhandi \cite{Rhandi2014}. A more recent paper by B\'{a}tkai, Jacob, Voigt and Wintermayr \cite{BJVW2018} extends the notion of positivity to extrapolation spaces, and studies positive perturbations for positive semigroups on $\mathrm{AM}$-spaces. Hence, the study of abstract Desch--Schappacher type perturbations is a lively research field, to which we contribute with the present article. The reason for such an active interest in this area is that the range of application is vast. We mention here only a selection from the most recent ones: boundary perturbations by Nickel \cite{Nickel2004}, boundary feedback by Casarino, Engel, Nagel and Nickel \cite{CENN2003}, boundary control by Engel, Kramar Fijav\v{z}, Kl\"{o}ss, Nagel and Sikolya \cite{EKKNS2010} and Engel and Kramar Fijav\v{z} \cite{EK2017}, port-Hamiltonian systems by Baroun and Jacob \cite{BJ2009}, control theory by Jacob, Nabiullin, Partington and Schwenninger \cite{JNPS2018,JNPS2016} and Jacob, Schwenninger and Zwart \cite{JSZ2018} and vertex control in networks by Engel and Kramar Fijav\v{z} \cite{EK2008, EK2018}. \noindent All the previously mentioned abstract perturbation results were developed for strongly continuous semigroups of linear operators on Banach spaces, $C_0$-semigroups for short. This is, for certain applications, e.g., for the theory of Markov transition semigroups, far too restrictive. For this situation the Banach space of bounded and continuous functions over a Polish space is the most adequate, but on this space the strong continuity with respect to the norm is, in general, too stringent a requirement. \noindent K\"uhnemund in \cite{KuPhD} has developed the abstract theory of bi-continuous semigroups, which has the advantage that not only Markov transition semigroups, but also semigroups induced by jointly continuous flows or implemented semigroups, just to mention a few, can be handled in a unified manner.
Some perturbation results for bi-continuous semigroups are known, see \cite{FaStud,FaSF,FaPHD}; however, none of them is suitable for domain perturbations. \noindent As a first step in a longer program, this paper treats a Desch--Schappacher type perturbation theorem for this class of semigroups. Since the theory of bi-continuous semigroups uses a Banach space norm and an additional locally convex topology, it is fundamental to relate our results to the existing, analogous ones on locally convex spaces. We recall the next result, due to Jacob, Wegner and Wintermayr, from \cite{JWW}. \begin{theorem*} Let $X$ be a sequentially complete, locally convex space with fundamental system $\Gamma$ of continuous seminorms, and let $(A,D(A))$ be the generator of a locally equicontinuous $C_0$-semigroup $(T(t))_{t\geq0}$ on $X$. Moreover, let $\overline{X}$ be a sequentially complete locally convex space such that \begin{abc} \item $X\subseteq\overline{X}$ is dense and the inclusion map is continuous, \item $\overline{A}$ with domain $D(\overline{A})=X$ generates a locally equicontinuous $C_0$-semigroup $(\overline{T}(t))_{t\geq0}$ on $\overline{X}$ such that $\overline{T}(t)_{|X}=T(t)$ holds for all $t\geq0$. \end{abc} Let $B:X\rightarrow\overline{X}$ be a linear and continuous operator and $t_0>0$ be a number such that \begin{abc}\setcounter{enumi}{2} \item ${\displaystyle{\forall f\in\mathrm{C}\left(\left[0,t_0\right],X\right): \int_0^{t_0}{\overline{T}(t_0-t)Bf(t)\ \mathrm{d}{t}}\in X}}$, \item ${\displaystyle{\forall p\in\Gamma\ \exists K\in\left(0,1\right)\ \forall f\in\mathrm{C}\left(\left[0,t_0\right],X\right): p\left(\int_0^{t_0}{\overline{T}(t_0-t)Bf(t)\ \mathrm{d}{t}}\right)\leq K\cdot\sup_{t\in\left[0,t_0\right]}{p(f(t))}}}$. \end{abc} Then the operator $(C,D(C))$ defined by \[ Cx=(\overline{A}+B)x\ \text{for}\ x\in D(C)=\left\{x\in X: (\overline{A}+B)x\in X\right\} \] generates a locally equicontinuous $C_0$-semigroup on $X$ if and only if $D(C)\subseteq X$ is dense.
\end{theorem*} \noindent We will prove a similar result for bi-continuous semigroups with the advantage that we can relax condition (d) of the previous theorem in the sense that we allow different seminorms on the left- and the right-hand side of the inequality. Moreover, one has to change and expand the conditions for the bi-continuous case carefully to obtain a good interplay between the Banach space norm and the locally convex topology. A space $\overline{X}$ with the properties used in the theorem above is called an extrapolation space. For $C_0$-semigroups on Banach spaces the classical construction is presented in \cite[Chapter II, Sect. 5a]{EN} in a self-contained manner. Extrapolation spaces for $C_0$-semigroups on locally convex spaces are constructed by Wegner in \cite{W}. Extrapolated bi-continuous semigroups and extrapolation spaces are recently treated by Budde and Farkas in \cite{BF}. \noindent This paper is organized as follows. In the first section we recall some definitions and results for bi-continuous semigroups and give some preliminary constructions needed for the Desch--Schappacher perturbation result, which is stated and proved as Theorem \ref{thm:DS} in Section \ref{sec:DS}. Section \ref{sec:adm} contains a sufficient condition for operators to satisfy the hypothesis of the abstract perturbation theorem, see Theorem \ref{thm:admDS}. In Section \ref{sec:trans} we prove that for a large class of bounded functions $g:\mathbb{R}\to\mathbb{C}$ which are continuous up to a discrete set of jump discontinuities and for each bounded (complex) Borel measure $\mu$ on $\mathbb{R}$ the operator \[ Cf:=f'+\int_\mathbb{R} f\,\mathrm{d}\mu\cdot g \] with appropriate domain generates a bi-continuous semigroup on the Banach space $\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ of bounded, continuous functions on $\mathbb{R}$.
Section \ref{sec:impl} is devoted to Desch--Schappacher perturbations of left implemented semigroups on the Banach algebra $\mathscr{L}(E)$ of bounded, linear operators on a Banach space $E$. \section{Preliminaries}\label{sec:Pre} \subsection{Bi-continuous semigroups}\label{subsec:bicontsemi} The class of bi-continuous semigroups was introduced by K\"uhnemund in \cite{Ku} and \cite{KuPhD} to treat one-parameter semigroups on Banach spaces that are not strongly continuous for the norm (i.e., not $C_0$-semigroups), but enjoy continuity properties with respect to a coarser, locally convex topology. Here we briefly describe the basic ingredients needed for this theory. Throughout this paper we need the following main assumptions. \begin{assumption}\label{ass:bicontassump} Consider a triple $(X,\left\|\cdot\right\|,\tau)$ where $X$ is a Banach space, and \begin{abc} \item $\tau$ is a Hausdorff topology, coarser than the norm-topology on $X$. \item $\tau$ is sequentially complete on norm-bounded sets, i.e., every $\left\|\cdot\right\|$-bounded $\tau$-Cauchy sequence is $\tau$-convergent. \item The dual space of $(X,\tau)$ is norming for $X$, i.e., \[ \left\|x\right\|=\sup_{\substack{\varphi\in(X,\tau)'\\\left\|\varphi\right\|\leq1}}{\left|\varphi(x)\right|}\ \text{for all}\ x\in X. \] \end{abc} \end{assumption} \begin{remark} One can reformulate the third assumption equivalently (see \cite[Rem. 4.2]{BF} and \cite[Lemma 4.4]{Kraaij2016}): There is a set $\mathscr{P}$ of $\tau$-continuous seminorms defining the topology $\tau$, such that \begin{equation}\label{eq:semisnorm} \|x\|=\sup_{p\in\mathscr{P}}p(x). \end{equation} \end{remark} \begin{definition}\label{def:biequicont} A family of operators $\mathcal{B}\subseteq\mathscr{L}(X)$ is called bi-equicontinuous if for each norm-bounded $\tau$-null sequence $(x_n)_{n\in\mathbb{N}}$ in $X$ one has \[ \mathop{\tau\mathrm{lim}}_{n\rightarrow\infty}{Bx_n}=0, \] uniformly for $B\in\mathcal{B}$.
\end{definition} \begin{definition}\label{def:bicontdef} Let $(X,\left\|\cdot\right\|,\tau)$ be a triple satisfying Assumption \ref{ass:bicontassump}. We call a family of bounded linear operators $(T(t))_{t\geq0}$ a $\tau$-\emph{bi-continuous semigroup} if \begin{num} \item $T(t+s)=T(t)T(s)$ and $T(0)=\mathrm{I}$ for all $s,t\geq 0$ (semigroup law). \item $(T(t))_{t\geq0}$ is strongly $\tau$-continuous, i.e., the map $\xi_x:[0,\infty)\to(X,\tau)$ defined by $\xi_x(t)=T(t)x$ is continuous for every $x\in X$. \item $(T(t))_{t\geq0}$ is exponentially bounded, i.e., there exist $M\geq1$ and $\omega\in\mathbb{R}$ such that $\left\|T(t)\right\|\leq M\mathrm{e}^{\omega t}$ for each $t\geq0$. \item $(T(t))_{t\geq0}$ is locally-bi-equicontinuous, i.e., the family $\left\{T(t):\ t\in\left[0,t_0\right]\right\}$ is bi-equicontinuous for each $t_0>0$. \end{num} \end{definition} By saying that $(T(t))_{t\geq0}$ is a bi-continuous semigroup on a Banach space $X$ we implicitly assume that in the background the triple $(X,\left\|\cdot\right\|,\tau)$ satisfying Assumption \ref{ass:bicontassump} is fixed. One of the prominent examples of bi-continuous semigroups is the translation (semi)group $(T(t))_{t\geq0}$ on the space $\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ of bounded continuous functions on $\mathbb{R}$, \[ T(t)f(x)=f(x+t),\quad t\geq0,\:f\in \mathrm{C}_{\mathrm{b}}(\mathbb{R}),\:x\in \mathbb{R}. \] This semigroup is not strongly continuous with respect to the supremum-norm $\left\|\cdot\right\|_{\infty}$, but it becomes strongly continuous with respect to the compact-open topology $\tau_{\mathrm{co}}$, the locally convex topology on $\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ induced by the family of seminorms $\mathscr{P}=\{p_K:\ K\subseteq\mathbb{R}\ \text{compact}\}$ where \[ p_K(f)=\sup_{x\in K}{\left|f(x)\right|},\quad f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}). \] Similarly to the case of $C_0$-semigroups we define the generator for a bi-continuous semigroup as follows.
\begin{definition}\label{def:BiGen} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X$. The (infinitesimal) generator of $(T(t))_{t\geq0}$ is the linear operator $(A,D(A))$ defined by \[Ax:=\mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t}}\] with domain \[D(A):=\Bigl\{x\in X:\ \mathop{\tau\mathrm{lim}}_{t\to0}{\frac{T(t)x-x}{t}}\ \text{exists and} \ \sup_{t\in(0,1]}{\frac{\|T(t)x-x\|}{t}}<\infty\Bigr\}.\] \end{definition} The following theorem summarizes the most essential properties of bi-continuous semigroups and their generators (see \cite{Ku}, \cite{FaStud}). \begin{theorem}\label{thm:bicontprop} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup with generator $(A,D(A))$. Then the following hold: \begin{abc} \item $A$ is bi-closed, i.e., whenever $x_n\stackrel{\tau}{\to}x$ and $Ax_n\stackrel{\tau}{\to}y$ and both sequences are norm-bounded, then $x\in D(A)$ and $Ax=y$. \item $D(A)$ is bi-dense in $X$, i.e., for each $x\in X$ there exists a norm-bounded sequence $(x_n)_{n\in\mathbb{N}}$ in $D(A)$ such that $x_n\stackrel{\tau}{\to}x$. \item For $x\in D(A)$ we have $T(t)x\in D(A)$ and $T(t)Ax=AT(t)x$ for all $t\geq0$. \item For $t>0$ and $x\in X$ one has \begin{align}\int_0^t{T(s)x\ \mathrm{d} s}\in D(A)\ \ \text{and}\ \ A\int_0^t{T(s)x\ \mathrm{d} s}=T(t)x-x,\end{align} where the integral has to be understood as a $\tau$-Riemann integral. \item For $\lambda>\omega_0(T)$ one has $\lambda\in\rho(A)$ (thus $A$ is closed) and for $x\in X$ one has: \begin{align}\label{eq:bicontlaplace} R(\lambda,A)x=\int_0^{\infty}{\mathrm{e}^{-\lambda s}T(s)x\ \mathrm{d} s}\end{align} where the integral is a $\tau$-improper integral. \end{abc} \end{theorem} \subsection{Extrapolation spaces}\label{subsec:Extrap} In this section we recall some results concerning extrapolation spaces for bi-continuous semigroups from \cite{BF}. Throughout this section we assume without loss of generality that $0\in\rho(A)$.
One of the most important ingredients for extrapolation spaces is the following proposition. \begin{proposition}\label{prop:StrCont} Let $(T(t))_{t\geq0}$ be a bi-continuous semigroup on $X$ with generator $(A,D(A))$. The subspace $\underline{X}_0:=\overline{D(A)}^{\left\|\cdot\right\|}\subseteq X$ is $(T(t))_{t\geq0}$-invariant and $(\underline{T}(t))_{t\geq0}:=(T(t)_{|\underline{X}_0})_{t\geq0}$ is the $C_0$-semigroup on $\underline{X}_0$ generated by the part of $A$ in $\underline{X}_0$ (this generator is denoted by $\underline{A}_0$). \end{proposition} The classical construction of the extrapolation spaces $\underline{X}_{-n}$ corresponding to the $C_0$-semigroup $(\underline{T}(t))_{t\geq0}$ is summarized in \cite[Chapter II, Section 5a]{EN}. Recall from there that one obtains $\underline{X}_{-1}$ as a completion of $\underline{X}_0$ with respect to the $\left\|\cdot\right\|_{-1}$-norm defined by \[ \left\|x\right\|_{-1}:=\left\|\underline{A}^{-1}x\right\|,\quad x\in\underline{X}_0. \] Notice that $\underline{X}_0$ is dense in $\underline{X}_{-1}$ and that $(\underline{T}(t))_{t\geq0}$ extends by continuity to a $C_0$-semigroup $(\underline{T}_{-1}(t))_{t\geq0}$ on $\underline{X}_{-1}$ with generator $(\underline{A}_{-1},D(\underline{A}_{-1}))$, where $D(\underline{A}_{-1})=\underline{X}_0$. By repeating this construction one obtains the following chain of spaces \[ \underline{X}_0\stackrel{\underline{A}_{-1}}{\hookrightarrow}\underline{X}_{-1}\stackrel{\underline{A}_{-2}}{\hookrightarrow}\underline{X}_{-2}\rightarrow\cdots \] where all maps are continuous and dense inclusions. Notice that also \[ \underline{X}_0\hookrightarrow X\hookrightarrow\underline{X}_{-1} \] holds so that we can identify $X$, by the continuity of the inclusions, as a subspace of $\underline{X}_{-1}$. We define the extrapolation space $X_{-1}$ for the bi-continuous semigroup $(T(t))_{t\geq0}$ by \begin{align*} X_{-1}:=\underline{A}_{-2}(X). 
\end{align*} The norm on $X_{-1}$ is defined by $\|x\|_{-1}:=\|\underline{A}^{-1}_{-2}x\|$, the locally convex topology $\tau_{-1}$ on $X_{-1}$ comes from the family of seminorms $\mathscr{P}_{-1}:=\left\{p_{-1}:\ p\in\mathscr{P}\right\}$ where \[ p_{-1}(x):=p(\underline{A}_{-2}^{-1}x),\quad p\in\mathscr{P}, x\in X. \] It was shown in \cite{BF} that $(T(t))_{t\geq0}$ extends to a $\tau_{-1}$-bi-continuous semigroup $(T_{-1}(t))_{t\geq0}$ on $X_{-1}$ and has a generator $A_{-1}$ with domain $D(A_{-1})=X$. The operator $A_{-1}:X\to X_{-1}$ is an isomorphism intertwining the semigroups $(T(t))_{t\geq0}$ and $(T_{-1}(t))_{t\geq0}$. If we want to stress the dependence of the extrapolation space $X_{-1}$ corresponding to the operator $(A,D(A))$ we write $X_{-1}(A)$. This will be used for the discussion of Desch--Schappacher perturbations. \begin{remark}\label{rem:SemiEstim} By construction $A_{-1}:(X,\tau)\to (X_{-1},\tau_{-1})$ is continuous, and actually an isomorphism. In particular, we have \[ \forall p\in\mathscr{P}\ \exists L>0\ \exists\gamma\in\mathscr{P}_{-1} \forall x\in X:\ p(x)\leq L(\gamma(x)+\gamma(A_{-1}x)). \] \end{remark} \subsection{Admissibility space} In this subsection we fix some notation. Let $(T(t))_{t\geq0}$ be a $\tau$-bi-continuous semigroup on a Banach space $X$ with generator $(A,D(A))$, where $\tau$ is generated by the family of seminorms $\mathscr{P}$. Furthermore, let $B\in\mathrm{L}LL(X,X_{-1})$ such that $B:(X,\tau)\rightarrow({X}_{-1},\tau_{-1})$ is a continuous linear operator and define for $t_0>0$ the following space: \begin{align}\label{eqn:AdmSp} \mathfrak{X}_{t_0}:=\begin{Bmatrix} F:\left[0,t_0\right]\rightarrow\mathscr{L}(X): \tau\text{-}\text{strongly continuous, norm bounded}\\ \text{and} \left\{F(t):t\in\left[0,t_0\right]\right\}\ \text{is bi-equicontinuous} \end{Bmatrix}. 
\end{align} \begin{remark} In \cite[Lemma 3.2]{FaStud} it was shown that for $t_0>0$ the space $\mathfrak{X}_{t_0}$ is indeed a Banach space (and in particular a Banach algebra) with respect to the norm \[ \left\|F\right\|:=\sup\limits_{t\in\left[0,t_0\right]}{\left\|F(t)\right\|}. \] \end{remark} For $F\in\mathfrak{X}_{t_0}$ and $t\in\left[0,t_0\right]$ we define the so-called \emph{(abstract) Volterra operator} $V_B$ on $\mathfrak{X}_{t_0}$ by \begin{align}\label{eqn:Voltera} (V_BF)(t)x:=\int_0^t{T_{-1}(t-r)BF(r)x\ \mathrm{d} r}. \end{align} The integral has to be understood in the sense of a $\tau_{-1}$-Riemann integral. Notice that in general for $x\in X$ we have $(V_BF)(t)x\in X_{-1}$. For the formulation of our main result we need the following definition. \begin{definition}\label{def:adm} {Let $B\in \mathrm{L}LL(X,X_{-1})$ such that also $B:(X,\tau)\rightarrow(X_{-1},\tau_{-1})$ is continuous. The operator $B$ is said to be \emph{admissible}, if there is $t_0>0$ such that the following conditions are satisfied: \begin{iiv} \item $V_BF(t)x\in X$ for all $t\in\left[0,t_0\right]$ and $x\in X$. \item $\mathrm{Ran}(V_B)\subseteq\mathfrak{X}_{t_0}$. \item $\left\|V_B\right\|<1$. \end{iiv}} The set of all admissible operators $B:(X,\tau)\rightarrow(X_{-1},\tau_{-1})$ will be denoted by $\mathcal{S}_{t_0}^{DS,\tau}$. We write $B\in\mathcal{S}_{t_0}^{DS,\tau}(T)$ whenever it is important to emphasize for which semigroup $(T(t))_{t\geq0}$ the operator $B$ is admissible. \end{definition} \section{An abstract Desch--Schappacher perturbation result} \label{sec:DS} This section contains the formulation of the Desch--Schappacher type perturbation result and its proof. \begin{theorem}\label{thm:DS} Let $(A,D(A))$ be {the} generator of a $\tau$-bi-continuous semigroup $(T(t))_{t\geq0}$ on a Banach space $X$. Let $B:X\to X_{-1}$ such that $B\in\mathcal{S}_{t_0}^{DS,\tau}$ for some $t_0>0$. 
Then the operator $(A_{-1}+B)_{|{X}}$ with domain \begin{align*} D((A_{-1}+B)_{|X}):=\left\{x\in X:\ A_{-1}x+Bx\in X\right\} \end{align*} generates a $\tau$-bi-continuous semigroup $(S(t))_{t\geq0}$ on $X$. Moreover, the semigroup $(S(t))_{t\geq0}$ satisfies the variation of parameters formula \begin{align}\label{eqn:VariPara} S(t)x=T(t)x+\int_0^t{T_{-1}(t-r)BS(r)x\ \mathrm{d}{r}}, \end{align} for every $t\geq0$ and $x\in X$. \end{theorem} \begin{proof}Since $\left\|V_B\right\|<1$ by hypothesis, we conclude that $1\in\rho(V_B)$. Now let $t>0$ be arbitrary and write $t=nt_0+t_1$ for $n\in\mathbb{N}$ and $t_1\in\left[0,t_0\right)$. Define \[ S(t):=(R(1,V_B)T_{|[0,t_0]})^n(t_0)\cdot(R(1,V_B)T_{|[0,t_0]})(t_1). \] We first show that $(S(t))_{t\geq0}$ is a semigroup. For $0\leq s,t\leq s+t\leq t_0$ and $n\in\mathbb{N}$ we prove the following identity (cf. \cite[Chapter III, Sect. 3]{EN}) \begin{align}\label{eqn:IdentVB} (V_B^nT)(t+s)=\sum_{k=0}^{n}{(V_B^{n-k}T_{|[0,t_0]})(s)\cdot(V_B^kT)(t)},\ \ \forall n\in\mathbb{N} \end{align} by induction. We abbreviate $V:=V_B$. Since $V^0=\mathrm{I}$, equation \eqref{eqn:IdentVB} is trivially satisfied for $n=0$. Now assume that \eqref{eqn:IdentVB} is true for some $n\in\mathbb{N}$. Then we obtain by this hypothesis that \begin{align*} \sum_{k=0}^{n+1}&{(V^{n+1-k}T)(s)\cdot(V^kT)(t)}\\ &=\sum_{k=0}^n{\left(\int_0^s{T_{-1}(s-r)BV^{n-k}T(r)\ \mathrm{d} r}\right)\cdot V^kT(t)}+T(s)\int_0^t{T_{-1}(t-r)BV^nT(r)\ \mathrm{d} r}\\ &=\int_0^s{T_{-1}(s-r)B\sum_{k=0}^n{V^{n-k}T(r)\cdot V^kT(t)}\ \mathrm{d} r}+\int_0^t{T_{-1}(s+t-r)BV^nT(r)\ \mathrm{d} r}\\ &=\int_0^s{T_{-1}(s-r)B{V^{n}T(r+t)\ \mathrm{d} r}}+\int_0^t{T_{-1}(s+t-r)BV^nT(r)\ \mathrm{d} r}\\ &=\int_t^{s+t}{T_{-1}(s+t-r)B{V^{n}T(r)\ \mathrm{d} r}}+\int_0^t{T_{-1}(s+t-r)BV^nT(r)\ \mathrm{d} r}\\ &=V^{n+1}T(s+t). \end{align*} By this we can conclude that $(S(t))_{t\geq0}$ satisfies the semigroup law for $0\leq s,t\leq s+t\leq t_0$. 
Indeed, for each $t\in\left[0,t_0\right]$ the point evaluation $\delta_t:\mathfrak{X}_{t_0}\rightarrow\mathrm{L}LL(X)$ is a contraction and since $\left\|V\right\|<1$ by hypothesis the inverse of $\mathrm{I}-V$ is given by the Neumann series. Therefore, \[ S(t)=\delta_t\left(\sum_{n=0}^{\infty}{V^nT}\right)=\sum_{n=0}^{\infty}{(V^nT)(t)},\quad t\in\left[0,t_0\right]. \] Moreover, we have \[ \left\|(V^nT)(t)\right\|\leq\left\|V^n\right\|\cdot\left\|T_{|[0,t_0]}\right\|, \] and we conclude that the series above converges absolutely. Hence \begin{align*} S(s)S(t)&=\sum_{n=0}^{\infty}{(V^nT)(s)}\cdot\sum_{n=0}^{\infty}{(V^nT)(t)}\\ &=\sum_{n=0}^{\infty}{\sum_{k=0}^{n}{(V^{n-k}T)(s)(V^kT)(t)}}\\ &=\sum_{n=0}^{\infty}{(V^nT)(s+t)}=S(s+t). \end{align*} Now we show that $S(t)S(s)=S(t+s)$ for all $t,s>0$. For that let $t,s>0$ be arbitrary and $n,m\in\mathbb{N}$ and $t_1,t_2\in\left[0,t_0\right)$ such that $t=nt_0+t_1$ and $s=mt_0+t_2$. Then we obtain the following \begin{align*} S(t)S(s)&=S(t_0)^nS(t_1)S(t_0)^mS(t_2)\\ &=S(t_0)^nS(t_0)^mS(t_1)S(t_2)\\ &=\begin{cases}S(t_0)^{n+m}S(t_1+t_2),&\text{if}\ t_1+t_2<t_0,\\S(t_0)^{n+m+1}S(t_2-(t_0-t_1)),&\text{if}\ t_1+t_2\geq t_0.\end{cases} \end{align*} But in both cases the right-hand side equals $S(t+s)$ by definition. Hence $(S(t))_{t\geq0}$ satisfies the semigroup law. The next step is to show that it is a $\tau$-bi-continuous semigroup. Notice that \[ S_{|\left[0,t_0\right]}(t)=R(1,V_B)T_{|\left[0,t_0\right]}(t) \] and hence $(S(t))_{t\geq0}$ is locally bounded and the set $\left\{S(t):t\in\left[0,t_0\right]\right\}$ is bi-equicontinuous. For $t>0$ let $m:=\big\lfloor \frac{t}{t_0}\big\rfloor$ and notice that $\left\{S(t_0)^k:\ 1\leq k\leq m\right\}$ is bi-equicontinuous, hence we conclude that the set \[ \left\{S(t_0)^k:\ 1\leq k\leq m\right\}\cdot\left\{S(s):\ s\in\left[0,t_0\right]\right\} \] is also bi-equicontinuous. 
By definition of $(S(t))_{t\geq0}$ we obtain $\tau$-strong continuity, and hence $(S(t))_{t\geq0}$ is a $\tau$-bi-continuous semigroup. We now prove \begin{align*} S(t)x=T(t)x+\int_0^t{T_{-1}(t-r)BS(r)x\ \mathrm{d} r} \end{align*} for each $t>0$ and $x\in X$ by proceeding similarly to \cite[Chapter III, Sect. 3]{EN}. For $t=nt_0+t_1$, $n\in\mathbb{N}$ and $t_1\in\left[0,t_0\right)$, we obtain: \small \begin{align*} &\int_0^{t}{T_{-1}(t-r)BS(r)\ \mathrm{d} r}\\ =&\sum_{k=0}^{n-1}{\int_{kt_0}^{(k+1)t_0}{T_{-1}(t-r)BS(r)\ \mathrm{d} r}}+\int_{nt_0}^t{T_{-1}(t-r)BS(r)\ \mathrm{d} r}\\ =&\sum_{k=0}^{n-1}{T_{-1}(t-(k+1)t_0)\int_{0}^{t_0}{T_{-1}(t_0-r)BS(r)\ \mathrm{d} r}\cdot S(kt_0)}+\int_0^{t_1}{T_{-1}(t_1-r)BS(r)\ \mathrm{d} r}\cdot S(nt_0)\\ =&\sum_{k=0}^{n-1}{T(t-(k+1)t_0)(S(t_0)-T(t_0))S(kt_0)}+(S(t_1)-T(t_1))S(nt_0)=S(t)-T(t). \end{align*} \normalsize The next step is to show that the resolvent set of $(A_{-1}+B)_{|X}$ is non-empty. For this we claim that $R(\lambda, A_{-1})B$ is bounded with $\left\|R(\lambda,A_{-1})B\right\|<1$ for $\lambda$ large enough. Choose $M\geq0$ and $\omega\in\mathbb{R}$ such that $\left\|T(t)\right\|\leq M\mathrm{e}^{\omega t}$ for all $t>0$. Then for $\lambda>\omega$ we obtain: \[ R(\lambda,A_{-1})B=\int_0^{\infty}{\mathrm{e}^{-\lambda r}T_{-1}(r)B\ \mathrm{d} r}=\sum_{n=0}^{\infty}{\mathrm{e}^{-\lambda nt_0}T(nt_0)(V_BF_{\lambda})(t_0)} \] where $F_{\lambda}(r):=\mathrm{e}^{-\lambda(t_0-r)}\mathrm{I}\in\mathfrak{X}_{t_0}$. From this we obtain the following estimate: \[ \left\|R(\lambda,A_{-1})B\right\|\leq\left\|V_B\right\|+\frac{M\mathrm{e}^{(\omega-\lambda)t_0}}{1-\mathrm{e}^{(\omega-\lambda)t_0}}\left\|V_B\right\|. \] Since $\left\|V_B\right\|<1$ we conclude for sufficiently large $\lambda$: \[ \left\|R(\lambda,A_{-1})B\right\|<1. \] This yields $1\in\rho(R(\lambda,A_{-1})B)$ for large $\lambda$ and then invertibility of $\lambda-(A_{-1}+B)_{|X}$, since \[ \lambda-(A_{-1}+B)_{|X}=(\lambda-A)(\mathrm{I}-R(\lambda,A_{-1})B). 
\] Hence the resolvent set of $(A_{-1}+B)_{|X}$ contains each sufficiently large $\lambda$. In the last step we will show that $(A_{-1}+B)_{|X}$ is actually the generator of the $\tau$-bi-continuous semigroup $(S(t))_{t\geq0}$. Denote by $(C,D(C))$ the generator of $(S(t))_{t\geq0}$. Let $\lambda>\max{\left(\omega_0(T),\omega_0(S)\right)}$, then by the variation of constant formula \eqref{eqn:VariPara}, the resolvent representation as Laplace transform \cite[Lemma 7]{Ku} and the fact that we may interchange the improper $\tau$-Riemann integral and the {$\tau_{-1}$-Riemann} integral by an application of \cite[Lemma 1.7]{KuPhD} we obtain \[ R(\lambda,C)=R(\lambda,A)+R(\lambda,A_{-1})BR(\lambda,C). \] Whence we conclude \[ (\mathrm{I}-R(\lambda,A_{-1})B)R(\lambda,C)=R(\lambda,A), \] and therefore \[ \mathrm{I}=(\lambda-A)(\mathrm{I}-R(\lambda,A_{-1})B)R(\lambda,C)=(\lambda-(A_{-1}+B)_{|X})R(\lambda,C). \] It follows that $C\subseteq (A_{-1}+B)_{|X}$ and by the previous observations $C=(A_{-1}+B)_{|X}$. \end{proof} \subsection{Abstract Favard Spaces and Comparison} We recall the definition of (abstract) Favard spaces from \cite[Chapter III, Sect. 5b]{EN} and \cite{BF}. Let $(A,D(A))$ be an operator with \emph{ray of minimal growth}, i.e., $(0,\infty)\subseteq\rho(A)$ and for some $M\geq 0$ \begin{equation}\label{eq:weakHY} \|\lambda R(\lambda,A)\|\leq M\quad\text{for all $\lambda>0$}. \end{equation} For $\alpha\in\left(0,1\right]$ the (abstract) Favard space $F_{\alpha}(A)$ is defined by \[ F_{\alpha}(A):=\left\{x\in X:\ \sup_{\lambda>0}\|\lambda^\alpha AR(\lambda,A)x\|<\infty\right\}. \] If in addition $(A,D(A))$ is the generator of a {($\tau$-bi-continuous)} semigroup $(T(t))_{t\geq0}$, then \[ F_{\alpha}(A)=\left\{x\in X_0:\ \sup_{s\in\left(0,1\right)}\frac{\|T(s)x-x\|}{s^{\alpha}}<\infty\right\}=:F_{\alpha}(T). \] The space $F_0(A)$ defined by \[ F_0(A):=F_1(A_{-1}), \] is called the extrapolated Favard class. 
The {restricted operator} $A_{-1}|_{F_0(A)}:F_0(A)\rightarrow F_1(A)$ is an isometric isomorphism. Moreover, if $(A,D(A))$ generates a {$C_0$-semigroup} $(T(t))_{t\geq0}$ it is shown in \cite{nagel1993inhomogeneous} that also \[ F_0(T)=F_1(T_{-1}) \] holds. {In the next proposition we show that Desch--Schappacher perturbations of bi-continuous semigroups, which satisfy a special range condition concerning the extrapolated Favard class, give us semigroups which are close to each other in some sense.} \begin{proposition}\label{prop:ImplDS2} Let $(T(t))_{t\geq0}$ be a $\tau$-bi-continuous semigroup on $X$ generated by $(A,D(A))$. Suppose that $B\in\mathcal{S}^{DS,\tau}_{t_0}$ with $\mathrm{Ran}(B)\subseteq F_0(A)$ and let $(S(t))_{t\geq0}$ be the perturbed semigroup. Then there exists $C\geq0$ such that for each $t\in\left[0,1\right]$ one has \[ \left\|T(t)-S(t)\right\|\leq Ct. \] \end{proposition} \begin{proof} {We may assume $\omega_0(T)<0$.} We find $M\geq0$ such that $\left\|T(t)\right\|\leq M$ and $\left\|S(t)\right\|\leq M$ for every $t\in\left[0,1\right]$. Since $\mathrm{Ran}(B)\subseteq F_0(A)$ we conclude that $A_{-1}^{-1}B:X\rightarrow F_1(A)$. Hence $A_{-1}^{-1}B$ is bounded by the closed graph theorem and we find $K\geq0$ such that $\left\|A_{-1}^{-1}Bx\right\|_{F_1(A)}\leq K\left\|x\right\|$ for each $x\in X$. Let $\mathscr{P}_{-1}$ be the family of seminorms corresponding to the first extrapolation space (see Section \ref{subsec:Extrap}). 
By using \eqref{eqn:VariPara} we obtain \begin{align*} \left\|S(t)x-T(t)x\right\|&=\left\|A_{-1}\int_0^t{T(t-r)A_{-1}^{-1}BS(r)x\ \mathrm{d}{r}}\right\|\\ &=\left\|\mathop{\tau_{-1}\mathrm{lim}}_{h\rightarrow0}{\frac{T_{-1}(h)-\mathrm{I}}{h}}\int_0^t{T(t-r)A_{-1}^{-1}BS(r)x\ \mathrm{d}{r}}\right\|\\ &=\left\|\mathop{\tau_{-1}\mathrm{lim}}_{h\rightarrow0}\int_0^t{\frac{T(h)-\mathrm{I}}{h}T(t-r)A_{-1}^{-1}BS(r)x\ \mathrm{d}{r}}\right\|\\ &=\sup_{p\in\mathscr{P}_{-1}}{\lim_{h\rightarrow0}p\left(\int_0^t{\frac{T(h)-\mathrm{I}}{h}T(t-r)A_{-1}^{-1}BS(r)x\ \mathrm{d}{r}}\right)}\\ &\leq\sup_{p\in\mathscr{P}_{-1}}{\lim_{h\rightarrow0}\int_0^t{p\left(\frac{T(h)-\mathrm{I}}{h}T(t-r)A_{-1}^{-1}BS(r)x\right)\ \mathrm{d}{r}}}\\ &\leq\limsup_{h\rightarrow0}{\int_0^t{\left\|\frac{T(h)-\mathrm{I}}{h}T(t-r)A_{-1}^{-1}BS(r)x\right\|\ \mathrm{d}{r}}}\\ &\leq M\int_0^t{\left\|A_{-1}^{-1}BS(r)x\right\|_{F_1(A)}\ \mathrm{d}{r}}\\ &\leq tKM^2\cdot\left\|x\right\| \end{align*} for each $x\in X$ and $t\in\left[0,1\right]$. \end{proof} \begin{corollary} Let $(T(t))_{t\geq0}$ be a $\tau$-bi-continuous semigroup on $X$ generated by $(A,D(A))$. If $B\in\mathcal{S}_{t_0}^{DS,\tau}$ and $\mathrm{Ran}(B)\subseteq F_0(A)$, then the perturbed semigroup $(S(t))_{t\geq0}$ leaves the space of strong continuity $\underline{X}_0:=\overline{D(A)}^{\left\|\cdot\right\|}$ invariant. \end{corollary} \section{Admissible operators} \label{sec:adm} Next we consider a sufficient condition for $B:(X,\tau)\rightarrow(X_{-1},\tau_{-1})$ to be admissible. Throughout this section we denote the space of continuous functions $f:\left[0,t_0\right]\rightarrow(X,\tau)$ which are $\left\|\cdot\right\|$-bounded by $\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$. If equipped with the sup-norm, $\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ becomes a Banach space. 
\begin{theorem}\label{thm:admDS} {Let $(T(t))_{t\geq0}$ be a $\tau$-bi-continuous semigroup with generator $(A,D(A))$ on a Banach space $X$.} Let $\mathscr{P}$ be the set of generating continuous seminorms corresponding to $\tau$. Let {$B\in \mathrm{L}LL(X,X_{-1})$} such that $B:(X,\tau)\rightarrow(X_{-1},\tau_{-1})$ is a linear and continuous operator, and let $t_0>0$ be such that \begin{abc} \item $\displaystyle{\int\limits_0^{t_0}{T_{-1}(t_0-r)Bf(r)\ \mathrm{d} r}\in X}$ for each $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$. \item For every $\varepsilon>0$ and every $p\in\mathscr{P}$ there exists $q\in\mathscr{P}$ and $K>0$ such that for all $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ \begin{align} p\left(\int_0^{t_0}{T_{-1}(t_0-r)Bf(r)\ \mathrm{d} r}\right)\leq K\cdot\sup_{r\in\left[0,t_0\right]}{\left|q(f(r))\right|}+\varepsilon\left\|f\right\|_{\infty}. \end{align} \item There exists $M\in\left(0,\frac{1}{2}\right)$ such that for all $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ \begin{align} \left\|\int\limits_0^{t_0}{T_{-1}(t_0-r)Bf(r)\ \mathrm{d} r}\right\|\leq M\left\|f\right\|_{\infty}. \end{align} \end{abc} \noindent Then $B\in\mathcal{S}^{DS,\tau}_{t_0}$, and as a consequence the operator $(A_{-1}+B)_{|X}$ defined on the domain \[ D((A_{-1}+B)_{|X}):=\left\{x\in X:\ A_{-1}x+Bx\in X\right\} \] generates a $\tau$-bi-continuous semigroup. \end{theorem} \begin{proof}We first show $\mathrm{Ran}(V_B)\subseteq\mathfrak{X}_{t_0}$. Let $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ and define for $t\in\left[0,t_0\right]$ the auxiliary function $f_t:\left[0,t_0\right]\rightarrow X$ by \[ f_t(r):= \begin{cases} f(0),&\ r\in\left[0,t_0-t\right],\\ f(r+t-t_0),&\ r\in\left[t_0-t,t_0\right]. 
\end{cases} \] Then $f_t\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ and \begin{align} \int_0^t{T_{-1}(t-r)Bf(r)\ \mathrm{d} r}=\int_0^{t_0}{T_{-1}(t_0-r)Bf_t(r)\ \mathrm{d}{r}}-\int_t^{t_0}{T_{-1}(r)Bf(0)\ \mathrm{d}{r}}. \end{align} By Theorem \ref{thm:bicontprop} \[ \int_t^{t_0}{T_{-1}(r)Bf(0)\ \mathrm{d}{r}}=T(t)\int_0^{t_0-t}{T_{-1}(r)Bf(0)\ \mathrm{d}{r}}\inD(A_{-1})=X. \] We conclude that the map {$\psi:\left[0,t_0\right]\rightarrow X_{-1}$} defined by \begin{align} \psi(t):=\int_0^t{T_{-1}(t-r)Bf(r)\ \mathrm{d} r} \end{align} has values in $X$. Moreover, for $\varepsilon>0$ and $p\in\mathscr{P}$ we have the following estimate: \begin{align*} &p(\psi(t)-\psi(s))\\ =&p\left(\int_0^t{T_{-1}(t-r)Bf(r)\ \mathrm{d} r}-\int_0^s{T_{-1}(s-r)Bf(r)\ \mathrm{d} r}\right)\\ \leq& p\left(\int_0^{t_0}{T_{-1}(t_0-r)B(f_t(r)-f_s(r))\ \mathrm{d} r}\right)+p\left(\int_s^t{T_{-1}(r)Bf(0)\ \mathrm{d} r}\right)\\ \leq& K\cdot\sup_{r\in\left[0,t_0\right]}{q(f_t(r)-f_s(r))}+p\left(\int_s^t{T_{-1}(r)Bf(0)\ \mathrm{d} r}\right)+\varepsilon\left\|f_t-f_s\right\|_{\infty}\\ \leq& K\cdot\sup_{r\in\left[0,t_0\right]}{q(f_t(r)-f_s(r))}\\ &+L\cdot\left(\gamma\left(\int_s^t{T_{-1}(r)Bf(0)\ \mathrm{d} r}\right)+\gamma\left((T_{-1}(t)-T_{-1}(s))Bf(0)\right)\right)+\varepsilon\left\|f_t-f_s\right\|\\ \leq& K\cdot\sup_{r\in\left[0,t_0\right]}{\left|q(f_t(r)-f_s(r))\right|}\\ &+L\cdot\left(\int_s^t{\gamma(T_{-1}(r)Bf(0))\ \mathrm{d} r}+\gamma\left((T_{-1}(t)-T_{-1}(s))Bf(0)\right)\right)+2\varepsilon\left\|f\right\|_{\infty} \end{align*} where the $\gamma\in\mathscr{P}_{-1}$ of the second to last inequality comes from Remark \ref{rem:SemiEstim}. The extrapolated semigroup $(T_{-1}(t))_{t\geq0}$ is strongly $\tau_{-1}$-continuous and $\gamma\in\mathscr{P}_{-1}$, so that we can find $\delta_1>0$ such that \[ \gamma((T_{-1}(t)-T_{-1}(s))Bf(0))<\varepsilon\ \text{whenever}\ \left|t-s\right|<\delta_1. 
\] Moreover, $f$ is $\tau$-continuous and therefore uniformly $\tau$-continuous on compact sets, which gives us $\delta_2>0$ such that \[ \sup_{r\in\left[0,t_0\right]}{\left|q(f_t(r)-f_s(r))\right|}<\varepsilon\ \text{if}\ \left|t-s\right|<\delta_2. \] Last but not least, $\gamma(T_{-1}(r)Bf(0))$ is bounded by some constant $M>0$, so for $\delta_3=\frac{\varepsilon}{M}$ we have \[ \left|s-t\right|<\delta_3\ \Longrightarrow\ \int_s^t{\gamma(T_{-1}(r)Bf(0))\ \mathrm{d} r}<\varepsilon. \] Now, we take $\delta:=\min\left\{\delta_1,\delta_2,\delta_3\right\}$ and obtain \[ p(\psi(t)-\psi(s))<(K+2\left\|f\right\|_{\infty}+2L)\varepsilon, \] showing that $\psi:\left[0,t_0\right]\rightarrow X$ is $\tau$-continuous. Next, we prove the norm-boundedness using the same techniques and arguments as in \cite[Chapter III, Sect. 3]{EN}. Let $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ and write \[f=\widetilde{f}_{\delta}+h_{\delta},\] where \begin{align*} h_{\delta}(r):=\begin{cases} \left(1-\frac{r}{\delta}\right)f(0),&\quad 0\leq r<\delta,\\ 0,&\quad \delta\leq r\leq t_0 \end{cases} \end{align*} for some $\delta>0$. Then $\widetilde{f}_{\delta}$ and $h_{\delta}$ are norm-bounded and continuous with respect to $\tau$, $\widetilde{f}_{\delta}(0)=0$ and $\|\widetilde{f}_{\delta}\|_{\infty}\leq2\|f\|_{\infty}$. 
Now we obtain \begin{align*} &\left\|\int_0^t{T_{-1}(t-r)Bf(r)\ \mathrm{d}{r}}\right\|\leq\left\|\int_0^t{T_{-1}(t-r)B\widetilde{f}_{\delta}(r)\ \mathrm{d}{r}}\right\|+\left\|\int_0^t{T_{-1}(t-r)Bh_{\delta}(r)\ \mathrm{d}{r}}\right\|\\ \leq& M\left\|\widetilde{f}_{\delta}\right\|_{\infty}+K\left(\left\|\int_0^t{T_{-1}(t-r)Bh_{\delta}(r)\ \mathrm{d}{r}}\right\|_{-1}+\left\|A_{-1}\int_0^t{T_{-1}(t-r)Bh_{\delta}(r)\ \mathrm{d}{r}}\right\|_{-1}\right)\\ \leq& M\left\|\widetilde{f}_{\delta}\right\|_{\infty}+K\left\|\int_0^{\delta}{T_{-1}(t-r)\left(1-\frac{r}{\delta}\right)Bf(0)\ \mathrm{d}{r}}\right\|_{-1}\\ &+K\left\|T_{-1}(t)Bf(0)-\frac{1}{\delta}\int_0^{\delta}{T_{-1}(t-r)Bf(0)\ \mathrm{d}{r}}\right\|_{-1}. \end{align*} By taking $\delta\searrow0$ we obtain \begin{align}\label{eqn:VBnorm} \left\|\int_0^t{T_{-1}(t-r)Bf(r)\ \mathrm{d}{r}}\right\|\leq 2M\left\|f\right\|_{\infty}. \end{align} We proceed with showing local bi-equicontinuity. For that let $(x_n)_{n\in\mathbb{N}}$ be a norm-bounded $\tau$-null-sequence. Let $\varepsilon>0$ and $p\in\mathscr{P}$, then by taking $f^n(r)=F(r)x_n$, we can find $q\in\mathscr{P}$ such that \begin{align*} p\left(V_BF(t)x_n\right)=&p\left(\int_0^{t}{T_{-1}(t-r)BF(r)x_n\ \mathrm{d} r}\right)\\ \leq&p\left(\int_0^{t_0}{T_{-1}(t_0-r)Bf^n(r)\ \mathrm{d} r}-\int_t^{t_0}{T_{-1}(r)Bf^n(0)\ \mathrm{d} r}\right)\\ \leq& K\cdot\sup_{r\in\left[0,t_0\right]}{\left|q(f^n(r))\right|}+p\left(\int_t^{t_0}{T_{-1}(r)Bf^n(0)\ \mathrm{d} r}\right)+\varepsilon\left\|f^n_t\right\|\\ \leq& K\cdot\sup_{r\in\left[0,t_0\right]}{\left|q(f^n(r))\right|}+\varepsilon\left\|f^n_t\right\|\\ &+L\cdot\left(\gamma\left(\int_t^{t_0}{T_{-1}(r)Bf^n(0)\ \mathrm{d} r}\right)+\gamma\left(\left(T_{-1}(t_0)-T_{-1}(t)\right)Bf^n(0)\right)\right). \end{align*} Now we can argue by the local bi-equicontinuity of $(T(t))_{t\geq0}$ and $(T_{-1}(t))_{t\geq0}$ and with the arbitrarily small $\varepsilon>0$ to conclude the {local} bi-equicontinuity of $V_BF$. 
Hence we see that $V_B$ maps $\mathfrak{X}_{t_0}$ to $\mathfrak{X}_{t_0}$ and by \eqref{eqn:VBnorm} that $\left\|V_B\right\|<1$ since by assumption $M\in\left(0,\frac{1}{2}\right)$. \end{proof} \section{{Perturbations of the translation semigroup}}\label{sec:trans} Take $X=\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ and let $(T(t))_{t\geq0}$ be the translation semigroup defined by $T(t)f(x)=f(x+t)$ (see also Section \ref{sec:Pre}). As already mentioned in Section \ref{subsec:bicontsemi}, this semigroup is $\tau_{\mathrm{co}}$-bi-continuous. Moreover, the resulting extrapolation spaces, in the notation we used in Section \ref{subsec:Extrap}, are given by (see \cite{BF}): \begin{align*} \underline{X}_{-1}&=\left\{F=f-Df: f\in\mathrm{UC}_{\mathrm{b}}(\mathbb{R})\right\}\\ X_{-1}&=\left\{F=f-Df: f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})\right\}, \end{align*} where $\mathrm{UC}_{\mathrm{b}}(\mathbb{R})$ denotes the space of bounded uniformly continuous functions and $Df$ the distributional derivative of $f$. {The generator of $(T(t))_{t\geq0}$ is $A=D$ with domain $D(A):=\mathrm{C}_{\mathrm{b}}^1(\mathbb{R})$, and also $A_{-1}=D$ with domain $D(A_{-1})=\mathrm{C}_{\mathrm{b}}(\mathbb{R})$. The extrapolated semigroup $(T_{-1}(t))_{t\geq 0}$ is the restriction to $X_{-1}$ of the left translation semigroup on the space $\mathscr{D}'(\mathbb{R})$ of distributions.} Consider the function $g:\mathbb{R}\rightarrow\mathbb{R}$ defined by \begin{align} g(x)= \begin{cases}\label{eqn:gExtr} 0,& \ x\leq-1,\ x>1,\\ x,& -1<x\leq0,\\ 2-x,& 0<x\leq1.\\ \end{cases} \end{align} Notice that $g\in X_{-1}$, since $g=h-Dh$ where $h$ is the tent function on the real line defined by \[ h(x)= \begin{cases} 0,& x\leq-1,\ x>1,\\ x+1,& -1<x\leq0,\\ -x+1,& 0<x\leq1. 
\end{cases} \] Let $\mu$ be a bounded regular Borel measure on $\mathbb{R}$ and define the continuous functional $\Phi:\mathrm{C}_{\mathrm{b}}(\mathbb{R})\rightarrow\mathbb{R}$ by $\Phi(f)=\int_{\mathbb{R}}{f\ \mathrm{d}\mu}$ and the operator $B:X\rightarrow X_{-1}$ by \[ Bf:=\Phi(f)g. \] {This operator $B$ is by construction continuous with respect to the locally convex topologies on the spaces $X$ and $X_{-1}$, {and also for the norms}. Moreover, $B$ has all properties required in Theorem \ref{thm:admDS}. To see this let $f\in\mathrm{C}_{\mathrm{b}}\left(\left[0,t_0\right],(X,\tau)\right)$ be arbitrary. Define a map $\psi:\mathbb{R}\rightarrow\mathbb{R}$ by \[ \psi(\cdot)=\int_{0}^{t_0}{T_{-1}(t_0-r)Bf(r)(\cdot)\ \mathrm{d}{r}}. \]} Observe that \[ T_{-1}(t_0-r)Bf(r)(x)=T_{-1}(t_0-r)\Phi(f(r))g(x)=\Phi(f(r))g(x+t_0-r). \] {We claim that $\psi$ is continuous. Indeed, let $\varepsilon>0$ be arbitrary, and notice that by substitution for each $x\in \mathbb{R}$ \[ \int_0^{t_0}{\Phi(f(r))g(x+t_0-r)\ \mathrm{d}{r}}=\int_{x}^{x+t_0}{\Phi(f(x+t_0-s))g(s)\ \mathrm{d}{s}}. \] After this substitution we can make the following calculation for each $x,y\in \mathbb{R}$ \begin{align*} \psi(x)-\psi(y)&=\int_{x}^{x+t_0}{\Phi(f(x+t_0-s))g(s)\ \mathrm{d}{s}}-\int_{y}^{y+t_0}{\Phi(f(y+t_0-s))g(s)\ \mathrm{d}{s}}\\ &=\int_{0}^{x+t_0}{\Phi(f(x+t_0-s))g(s)\ \mathrm{d}{s}}-\int_{0}^{x}{\Phi(f(x+t_0-s))g(s)\ \mathrm{d}{s}}\\ &\quad-\int_{0}^{y+t_0}{\Phi(f(y+t_0-s))g(s)\ \mathrm{d}{s}}+\int_{0}^{y}{\Phi(f(y+t_0-s))g(s)\ \mathrm{d}{s}}\\ &=\int_{y+t_0}^{x+t_0}{\left(\Phi(f(x+t_0-s)-f(y+t_0-s))\right)g(s)\ \mathrm{d}{s}}\\ &\quad +\int_{x}^{y}{\left(\Phi(f(x+t_0-s)-f(y+t_0-s))\right)g(s)\ \mathrm{d}{s}}. \end{align*}} By the assumptions there exists $M>0$ such that \[ \left\|\left(\Phi(f(x+t_0-\cdot)-f(y+t_0-\cdot))\right)g(\cdot)\right\|_{\infty}\leq M. 
\] For $\delta:=\frac{\varepsilon}{2M}>0$ and for $x,y\in \mathbb{R}$ with $\left|x-y\right|<\delta$ we have \begin{align*} &\left|\psi(x)-\psi(y)\right|\\ \leq&\int_{y+t_0}^{x+t_0}{\left|\left(\Phi(f(x+t_0-s)-f(y+t_0-s))\right)g(s)\right|\ \mathrm{d}{s}}\\ &+\int_{x}^{y}{\left|\left(\Phi(f(x+t_0-s)-f(y+t_0-s))\right)g(s)\right|\ \mathrm{d}{s}}\\ \leq&2\left|x-y\right|\cdot\left\|\left(\Phi(f(x+t_0-\cdot)-f(y+t_0-\cdot))\right)g(\cdot)\right\|_{\infty}\\ \leq&2M\cdot\left|x-y\right|<\varepsilon. \end{align*} This proves that $\psi\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})$. Observe that in general we only have \[ Q:=\int_{0}^{t_0}{T_{-1}(t_0-r)Bf(r)\ \mathrm{d}{r}}\in X_{-1}, \] so that point evaluation of this expression at $x\in \mathbb{R}$ does not make sense. {We know, however, that $\psi\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})$, and that the pointwise Riemann-sums $R_n(x)$ for the integral \[ \int_{0}^{t_0}\Phi(f(r))g(x+t_0-r)\ \mathrm{d}{r} \] converge for all $x\in \mathbb{R}$ to $\psi(x)$.} If we can show that the sequence $(R_n)_{n\in\mathbb{N}}$ converges in the sense of distributions we can conclude that $Q:=\int_{0}^{t_0}{T_{-1}(t_0-r)Bf(r)\ \mathrm{d}{r}}=\psi\in X$. Let $\widetilde{\psi}\in\mathscr{D}(\mathbb{R})$ be a test function and define $\varphi:=\widetilde{\psi}-D\widetilde{\psi}$. Then \[ \left\langle (1-A_{-1})^{-1}R_n,\varphi\right\rangle\rightarrow\left\langle(1-A_{-1})^{-1}Q,\varphi\right\rangle. \] By the meaning of this pairing we conclude that $\langle R_n,\widetilde{\psi}\rangle\rightarrow\langle Q,\widetilde{\psi}\rangle$. By the above we conclude that $Q\in X$. The next step is to estimate the norm. 
Notice that \begin{align*} \Bigl\|\int_{0}^{t_0}&{T_{-1}(t_0-r)Bf(r)\ \mathrm{d}{r}}\Bigr\|_{\infty} =\sup_{x\in\mathbb{R}}{\Bigl|\int_{0}^{t_0}{\Phi(f(r))g(x+t_0-r)\ \mathrm{d}{r}}\Bigr|}\\ &\leq\sup_{x\in\mathbb{R}}{\int_0^{t_0}{\left|\Phi(f(r))\right|\cdot\left|g(x+t_0-r)\right| \mathrm{d}{r}}} \leq2{\int_0^{t_0}{\left|\Phi(f(r))\right|\ \mathrm{d}{r}}}\\ &\leq2{\int_0^{t_0}{\int_{\mathbb{R}}{\left|f(r)(x)\right|\ \mathrm{d}\left|\mu\right|(x)}\ \mathrm{d}{r}}} \leq2\left|\mu\right|(\mathbb{R}){\int_0^{t_0}{\left\|f(r)\right\|_{\infty}}\ \mathrm{d}{r}}\\ &=2\left|\mu\right|(\mathbb{R}){\int_0^{t_0}{\left\|f(r)\right\|_{\infty}}\ \mathrm{d}{r}} \leq2\left|\mu\right|(\mathbb{R})t_0\left\|f\right\|_{\infty}. \end{align*} In particular we can choose $t_0$ so small that $M:=2\left|\mu\right|(\mathbb{R})t_0<\frac{1}{2}$. Hence condition (c) of Theorem \ref{thm:admDS} is fulfilled. Condition (b) from Theorem \ref{thm:admDS} can be proven similarly. Let $K\subseteq\mathbb{R}$ be an arbitrary compact set and $\varepsilon>0$. Then \begin{align*} p_K\left(\int_0^{t_0}{T_{-1}(t_0-r)Bf(r)(x)\ \mathrm{d}{r}}\right)&\leq\sup_{x\in K}{\int_0^{t_0}{\left|\Phi(f(r))\right|\cdot\left|g(x+t_0-r)\right| \mathrm{d}{r}}}\\ &\leq2\sup_{x\in K}{\int_0^{t_0}{\left|\Phi(f(r))\right|\ \mathrm{d}{r}}}\\ &\leq2t_0|\mu|(\mathbb{R})\sup_{r\in\left[0,t_0\right]}{\sup_{y\in K'}}{\left|f(r)(y)\right|}+\varepsilon\left\|f\right\|_{\infty}, \end{align*} {since by the regularity of the measure $\mu$ we choose $K'\subseteq\mathbb{R}$ such that $\left|\mu\right|(\mathbb{R}\setminus K')<\varepsilon$.} By Theorem \ref{thm:admDS} we conclude that $(A_{-1}+B)_{|X}$ generates again a $\tau_{\mathrm{co}}$-bi-continuous semigroup on $\mathrm{C}_{\mathrm{b}}(\mathbb{R})$. We now give an expression for the generator. 
Observe that $f\inD((A_{-1}+B)_{|X})$ if and only if $f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ and $f'+\Phi(f)g\in\mathrm{C}_{\mathrm{b}}(\mathbb{R})$ and this is precisely the case when the following conditions are satisfied. \begin{align}\label{eqn:cont1} \begin{cases} \displaystyle{\lim_{t\nearrow-1}{\left(f'(t)+\Phi(f)g(t)\right)}=\lim_{t\searrow-1}{\left(f'(t)+\Phi(f)g(t)\right)}},\\ \displaystyle{\lim_{t\nearrow0}{\left(f'(t)+\Phi(f)g(t)\right)}=\lim_{t\searrow0}{\left(f'(t)+\Phi(f)g(t)\right)}},\\ \displaystyle{\lim_{t\nearrow1}{\left(f'(t)+\Phi(f)g(t)\right)}=\lim_{t\searrow1}{\left(f'(t)+\Phi(f)g(t)\right)}}. \end{cases} \end{align} By the explicit expression for $g:\mathbb{R}\rightarrow\mathbb{R}$ we can rewrite Equation \eqref{eqn:cont1} as follows: \begin{align} \begin{cases} \displaystyle{\lim_{t\nearrow-1}{f'(t)}=\lim_{t\searrow-1}{f'(t)-\Phi(f)}},\\ \displaystyle{\lim_{t\nearrow0}{f'(t)}=\lim_{t\searrow0}{f'(t)+2\Phi(f)}},\\ \displaystyle{\lim_{t\nearrow1}{f'(t)+\Phi(f)}=\lim_{t\searrow1}{f'(t)}}. \end{cases} \end{align} Or equivalently \begin{align}\label{eqn:condDom} \displaystyle{\lim_{t\nearrow-1}{f'(t)}-\lim_{t\searrow-1}{f'(t)=-\frac{1}{2}\left(\lim_{t\nearrow0}{f'(t)}-\lim_{t\searrow0}{f'(t)}\right)=\lim_{t\nearrow1}{f'(t)-\lim_{t\searrow1}{f'(t)}}}=-\Phi(f)}. \end{align} We see that the generator $(C,D(C))$ of the perturbed semigroup is given by \begin{align*} Cf&=f'+\int_{\mathbb{R}}{f\ \mathrm{d}{\mu}}\cdot g,\quad f\inD(C),\\ D(C)&=\left\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ f\in\mathrm{C}_{\mathrm{b}}^1(\mathbb{R}\setminus\left\{-1,0,1\right\})\ \text{and}\ \eqref{eqn:condDom}\ \text{holds} \right\}. \end{align*} The previous example uses a function $g\in X_{-1}$ which has three points of discontinuity, {with one sided limits at each of these points}. We generalize this to a countable (discrete) set of {jump} discontinuities. 
For that assume that $g\in X_{-1}$ is a function such that $\left\|g\right\|_{\infty}<\infty$ and that the set of discontinuities of $g$ is discrete. One defines again an operator $B:X\rightarrow X_{-1}$ by \[ Bf:=\Phi(f)g:=\int_{\mathbb{R}}{f\ \mathrm{d}\mu}\cdot g,\quad f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}). \] {Notice that none of previous calculations and arguments depend on the number of discontinuities (in fact, we only used that $g$ is bounded).} So we can conclude that $(A_{-1}+B)_{|X}$ generates a $\tau_{\mathrm{co}}$-bi-continuous semigroup on $X$. The only issue we have to care about are the conditions mentioned in \eqref{eqn:condDom}, that is an ``explicit'' description of the domain. { Let $Z:=\left\{x_1,x_2,x_3,\ldots\right\}$ be the set of discontinuities of $g$ that is assumed to be discrete, and we suppose that all of these points are jump discontinuities. Let us define $a_n:=\lim_{t\nearrow x_n}{g(t)}$ and $b_n:=\lim_{t\searrow x_n}{g(t)}$. We observe that $f\inD((A_{-1}+B)_{|X})$ if and only if } \[ \displaystyle{\lim_{t\nearrow x_n}{f'(t)+\Phi(f)a_n}=\lim_{t\searrow x_n}{f'(t)+\Phi(f)b_n}},\quad \text{for each }n\in\mathbb{N}, \] or equivalently \[ \displaystyle{\lim_{t\nearrow x_n}{f'(t)}-\lim_{t\searrow x_n}{f'(t)}}=\Phi(f)(b_n-a_n),\quad\text{for each } n\in\mathbb{N}. \] We conclude that the operator $(C,D(C))$ given by \begin{align*} Cf&=f'+\int_{\mathbb{R}}{f\ \mathrm{d}\mu}\cdot g,\\ D(C)&=\left\{f\in\mathrm{C}_{\mathrm{b}}(\mathbb{R}):\ f\in\mathrm{C}_{\mathrm{b}}^1(\mathbb{R}\setminus Z), \displaystyle{\lim_{t\nearrow x_n}{f'(t)}-\lim_{t\searrow x_n}{f'(t)}}=\Phi(f)(b_n-a_n),\quad n\in\mathbb{N} \right\} \end{align*} generates a $\tau_{\mathrm{co}}$-bi-continuous semigroup on $\mathrm{C}_{\mathrm{b}}(\mathbb{R})$. \section{The left implemented semigroup}\label{sec:impl} Let $(T(t))_{t\geq0}$ be a $C_0$-semigroup on a Banach space $E$ with generator $(A,D(A))$. 
For simplicity we assume that the growth bound of $(T(t))_{t\geq0}$ satisfies $\omega_0(T)<0$. The left implemented semigroup $(\mathcal{U}(t))_{t\geq0}$ on $\mathrm{L}LL(E)$ is defined by \begin{align} \mathcal{U}(t)S:=T(t)S,\quad t\geq0,\ S\in\mathrm{L}LL(E). \end{align} Our purpose is to relate Desch--Schappacher perturbations of the $C_0$-semigroup $(T(t))_{t\geq0}$ and Desch--Schappacher perturbations of the left implemented semigroup $(\mathcal{U}(t))_{t\geq0}$. \begin{remark} \begin{iiv} \item We observe that the left implemented semigroup on $\mathrm{L}LL(E)$ is $\tau_{\mathrm{sot}}$-bi-continuous, where the strong operator topology $\tau_{\mathrm{sot}}$ is induced by the family of seminorms defined by $\mathscr{P}=\left\{\left\|\cdot~x\right\|_E:\ x\in E\right\}$. In fact $(\mathcal{U}(t))_{t\geq0}$ is a $C_0$-semigroup if and only if the semigroup $(T(t))_{t\geq0}$ is continuous with respect to the operator norm (see \cite{Alber2001} and \cite{KuPhD}). \item The extrapolation spaces of the underlying $C_0$-semigroup (in the notation of this paper $\underline{X}_{-n}$, $n\in\mathbb{N}$) are studied in detail in \cite{Alber2001}. The extrapolation spaces in the bi-continuous setting are determined in \cite{BF}. In particular, the first extrapolation spaces are given by \begin{align*} \underline{X}_{-1}&=\overline{\mathrm{L}LL(E)}^{\mathrm{L}LL(E,E_{-1})},\\ X_{-1}&=\overline{\mathrm{L}LL(E)}^{\mathrm{L}LL_{\mathrm{sot}}(E,E_{-1})}=\mathrm{L}LL(E,E_{-1}),\ \end{align*} where $E_{-1}$ is the extrapolation space of the $C_0$-semigroup $(T(t))_{t\geq0}$. \end{iiv} \end{remark} \subsection{Ideals in $\mathrm{L}LL(E)$ and module homomorphisms} Before we relate Desch--Schappacher perturbations of $C_0$-semigroups and of the corresponding implemented semigroups we need some auxiliary results. 
\begin{lemma}\label{lem:Ideal} Let $E$ be a Banach space and $(\mathcal{A},D(\mathcal{A}))$ a Hille--Yosida operator on $\mathrm{L}LL(E)$, i.e., suppose there exists $\omega\in\mathbb{R}$ and $M\geq1$ such that $(\omega,\infty)\subseteq\rho(\mathcal{A})$ and \[ \left\|R(\lambda,\mathcal{A})^n\right\|\leq\frac{M}{(\lambda-\omega)^n}, \] for each $\lambda>\omega$ and $n\in\mathbb{N}$. The following are equivalent: \begin{iiv} \item $D(\mathcal{A})$ is a right ideal of the Banach algebra $\mathrm{L}LL(E)$, i.e., $CB\inD(\mathcal{A})$ whenever $C\inD(\mathcal{A})$, $B\in\mathrm{L}LL(E)$, and $\mathcal{A}$ is a right $\mathrm{L}LL(E)$-module homomorphism, i.e., $\mathcal{A}(CB)=\mathcal{A}(C)B$ for $C\inD(\mathcal{A})$ and $B\in\mathrm{L}LL(E)$. \item There exists a Hille--Yosida operator $(A,D(A))$ such that $\mathcal{A}(C)=AC$, where $D(\mathcal{A})=\mathrm{L}LL(E,D(A))$. \item There exists a Hille--Yosida operator $(A,D(A))$ such that $\mathcal{A}(C)=A_{-1}C$, where $D(\mathcal{A})=\left\{C\in\mathrm{L}LL(E):\ A_{-1}C\in\mathrm{L}LL(E)\right\}$. \end{iiv} \end{lemma} \begin{proof} The implication (iii)$\Rightarrow$(i) is just a matter of checking the properties of an explicitly given operator. The implication (ii)$\Leftrightarrow$(iii) follows from the fact that the operators $A$ and $A_{-1}$ coincide on the domain $D(A)$ of $A$. \noindent (i)$\Rightarrow$(ii) By definition one has $R(\lambda,\mathcal{A})\in\mathrm{L}LL(\mathrm{L}LL(E))$ whenever $\lambda\in\rho(\mathcal{A})$. Define for $\lambda\in\rho(\mathcal{A})$ \[ R(\lambda):=R(\lambda,\mathcal{A})(\mathrm{I}).
\] Since the operators $R(\lambda,\mathcal{A})$, $\lambda\in\rho(\mathcal{A})$, satisfy the resolvent identity, so do the operators $R(\lambda)$, $\lambda\in\rho(\mathcal{A})$: \begin{align*} R(\lambda)-R(\mu)&=R(\lambda,\mathcal{A})(\mathrm{I})-R(\mu,\mathcal{A})(\mathrm{I})=\left(R(\lambda,\mathcal{A})-R(\mu,\mathcal{A})\right)(\mathrm{I})\\ &=\left((\lambda-\mu)R(\lambda,\mathcal{A})R(\mu,\mathcal{A})\right)(\mathrm{I})=(\lambda-\mu)R(\lambda)R(\mu) \end{align*} for each $\lambda,\mu\in\rho(\mathcal{A})$. Hence the family $(R(\lambda))_{\lambda\in\rho(\mathcal{A})}$ is a pseudoresolvent. If $R(\lambda)x=0$ for some $x\in E$, then \[ 0=\lambda R(\lambda)x=\lambda R(\lambda,\mathcal{A})(\mathrm{I})x. \] But since $\lambda R(\lambda,\mathcal{A})(\mathrm{I})\rightarrow\mathrm{I}$ as $\lambda\to\infty$, it follows that $x=0$. Therefore $R(\lambda)$ is injective, and hence there exists a closed operator $(A,D(A))$ such that $R(\lambda)=R(\lambda,A)$, i.e., \[ R(\lambda,A)=R(\lambda,\mathcal{A})(\mathrm{I}). \] Let $C\inD(\mathcal{A})$, i.e., $C=R(\lambda,\mathcal{A})D$ for some $D\in\mathrm{L}LL(E)$. Then \begin{align*} \mathcal{A}(C)&=\mathcal{A}(R(\lambda,\mathcal{A})D)=\lambda R(\lambda,\mathcal{A})D-D=(\lambda R(\lambda,A)-\mathrm{I})D\\ &=(\lambda R(\lambda,A)-(\lambda-A)R(\lambda,A))D=AR(\lambda,A)D=AC. \end{align*} \end{proof} \begin{lemma}\label{lem:aux} Let $E$ be a Banach space and $(\mathcal{A},D(\mathcal{A}))$ a generator of a $\tau_{\mathrm{sot}}$-bi-continuous semigroup $(\mathcal{T}(t))_{t\geq0}$ on $\mathrm{L}LL(E)$. The following are equivalent: \begin{iiv} \item $D(\mathcal{A})$ is a right-ideal of $\mathrm{L}LL(E)$ and $\mathcal{A}$ is a right $\mathrm{L}LL(E)$-module homomorphism. \item The semigroup $(\mathcal{T}(t))_{t\geq0}$ is left implemented, i.e., there exists a $C_0$-semigroup $(S(t))_{t\geq0}$ such that $\mathcal{T}(t)C=S(t)C$ for each $t\geq0$.
\end{iiv} Under these equivalent conditions, if $(B,D(B))$ is the generator of the $C_0$-semigroup $(S(t))_{t\geq0}$, then $\mathcal{A}(C)=B_{-1}C$ for each $C\in\mathrm{L}LL(E,E_{-1})=X_{-1}(\mathcal{A})$. \end{lemma} \begin{proof} (ii)$\Rightarrow\mathrm{(i)}:$ If $C\inD(\mathcal{A})$, then the limit \[ (\mathcal{A} C)(x):=\lim_{t\searrow0}{\frac{\mathcal{T}(t)Cx-Cx}{t}}, \] exists for each $x\in E$. Since $(\mathcal{T}(t))_{t\geq0}$ is left implemented we obtain for $B\in\mathrm{L}LL(E)$ \[ (\mathcal{A}(CB))(x)=\lim_{t\rightarrow0}{\frac{\mathcal{T}(t)(CB)x-(CB)x}{t}}=\lim_{t\rightarrow0}{\frac{(\mathcal{T}(t)C)(Bx)-C(Bx)}{t}}, \] and we conclude that $CB\inD(\mathcal{A})$ and $\mathcal{A}(CB)=\mathcal{A}(C)B$. \noindent(i)$\Rightarrow{\mathrm{(ii)}}:$ For $\lambda\in\rho(\mathcal{A})$, $C\inD(\mathcal{A})$, $B\in\mathrm{L}LL(E)$ one has \[ (\lambda-\mathcal{A})(CB)=\lambda CB-\mathcal{A}(C)B=(\lambda C-\mathcal{A}(C))B. \] Since $\lambda-\mathcal{A}$ is a bijective map we conclude that \[ R(\lambda,\mathcal{A})(DB)=(R(\lambda,\mathcal{A})D)B \] for each $D\in\mathrm{L}LL(E)$. By the Euler formula (see \cite[Thm. 4.6]{BF} and \cite[Chapter II, Sect. 3]{EN}) we obtain \[ \mathcal{T}(t)C=\mathop{\tau_{\mathrm{sot}}\mathrm{lim}}_{n\rightarrow\infty}\left(\frac{n}{t}R\left(\frac{n}{t},\mathcal{A}\right)\right)^nC. \] From this we deduce the equality \[ \mathcal{T}(t)(CB)=\left(\mathop{\tau_{\mathrm{sot}}\mathrm{lim}}_{n\rightarrow\infty}\left(\frac{n}{t}R\left(\frac{n}{t},\mathcal{A}\right)\right)^nC\right)B=(\mathcal{T}(t)C)B. \] Set $S(t):=\mathcal{T}(t)\mathrm{I}$, and we are done: \[ \mathcal{T}(t)C=\mathcal{T}(t)(\mathrm{I}\cdot C)=(\mathcal{T}(t)\mathrm{I})C=S(t)C. \] Finally, $\mathcal{A}$ is the operator of multiplication by the generator $(B,D(B))$ of the semigroup $(S(t))_{t\geq0}$ by Lemma \ref{lem:Ideal}.
\end{proof} \begin{proposition}\label{prop:DSMult} Let $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$ be $C_0$-semigroups on the Banach space $E$, and let $(A,D(A))$ denote the generator of $(T(t))_{t\geq0}$. Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be the semigroups left implemented by $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$, respectively. Let $(\mathcal{G},D(\mathcal{G}))$ be the generator of $(\mathcal{U}(t))_{t\geq0}$ and let $\mathcal{K}:\mathrm{L}LL(E)\rightarrow\mathrm{L}LL(E,E_{-1}(A))$ be such that $\mathcal{K}\in\mathcal{S}^{DS,\tau_{\mathrm{sot}}}_{t_0}(\mathcal{U})$ and such that $\mathcal{C}:=(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$ (with maximal domain) is the generator of $(\mathcal{V}(t))_{t\geq0}$. Then $\mathcal{K}$ has the property that \[ \mathcal{K}(CD)=\mathcal{K}(C)D, \] for each $C,D\in\mathrm{L}LL(E)$. \end{proposition} \begin{proof} Since by assumption $\mathcal{G}$ and $\mathcal{C}=(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$ both generate implemented semigroups we conclude by Lemma \ref{lem:aux} that $\mathcal{G}$, and hence $\mathcal{G}_{-1}$, and $\mathcal{C}$ are all multiplication operators. One has $\mathcal{G}_{-1}(C)=A_{-1}C$ for each $C\in\mathrm{L}LL(E)$ and there exists an operator $M:E\rightarrow E_{-1}(A)$ such that $\mathcal{C}(C)=MC$ for each $C\inD(\mathcal{C})$. We conclude that \[ \mathcal{K}(C)=MC-A_{-1}C \] for each $C\inD(\mathcal{C})$. Since $(\mathcal{C},D(\mathcal{C}))$ is bi-densely defined in $\mathrm{L}LL(E)$, for each $C\in\mathrm{L}LL(E)$, there exists a sequence of operators $(C_n)_{n\in\mathbb{N}}$ in $D(\mathcal{C})$ such that $\sup_{n\in\mathbb{N}}{\left\|C_n\right\|}<\infty$ and \[ C_nx\rightarrow Cx, \] for each $x\in E$.
The continuity of $\mathcal{K}$ and $\mathcal{G}_{-1}$ yields \begin{align*} &\mathcal{K}(C_n)\stackrel{\tau_{\mathrm{sot}}}{\rightarrow}\mathcal{K}(C),\\ &\mathcal{G}_{-1}(C_n)\stackrel{\tau_{\mathrm{sot}}}{\rightarrow}\mathcal{G}_{-1}(C) \end{align*} with convergence in $\mathrm{L}LL_{\mathrm{sot}}(E,E_{-1}(A))$. Therefore, for each $x\in E$ the sequence $(MC_nx)_{n\in\mathbb{N}}$ is Cauchy in $E_{-1}(A)$ and we can define \[ Lx:=\lim_{n\rightarrow\infty}{MC_nx},\quad x\in E. \] By construction we obtain $L\in\mathrm{L}LL(E,E_{-1}(A))$ and $\mathcal{C}(C)=LC$ for each $C\in\mathrm{L}LL(E)$ and therefore \[ \mathcal{K}(C)=LC-A_{-1}C, \] for $C\inD(\mathcal{C})$. Now we define $B:=L-A_{-1}$ as an operator in $\mathrm{L}LL(E,E_{-1}(A))$ and conclude that \[ \mathcal{K}(C)=BC \] for each $C\in\mathrm{L}LL(E)$, which is what was to be proven. \end{proof} \subsection{A one-to-one correspondence} We relate Desch--Schappacher perturbations of the implemented semigroup with the perturbations of the underlying $C_0$-semigroup. To do so we have to use the class of Desch--Schappacher admissible operators $\mathcal{S}_{t_0}^{DS}$ for $C_0$-semigroups. Recall from \cite[Chapter III, Section 3a]{EN} the following definitions for a strongly continuous semigroup $(T(t))_{t\geq0}$ on a Banach space $E$. We define \[ \mathcal{S}_{t_0}^{DS}(T):=\left\{B\in\mathrm{L}LL(E,E_{-1}):\ V_B\in\mathrm{L}LL\left(\mathrm{C}\left(\left[0,t_0\right],\mathrm{L}LL_{\mathrm{sot}}(E)\right)\right),\ \left\|V_B\right\|<1\right\}, \] where $V_B$ denotes the corresponding Volterra operator defined by \[ (V_BF)(t):=\int_0^{t}{T_{-1}(t-r)BF(r)\ \mathrm{d}{r}},\quad F\in\mathrm{C}\left(\left[0,t_0\right],\mathrm{L}LL_{\mathrm{sot}}(E)\right),\ t\in\left[0,t_0\right]. \] The following result shows that Desch--Schappacher perturbations of a $C_0$-semigroup always give us Desch--Schappacher perturbations of the corresponding implemented semigroup.
\begin{theorem}\label{thm:Impl1} Let $(\mathcal{U}(t))_{t\geq0}$ be the semigroup on $\mathrm{L}LL(E)$ left implemented by the $C_0$-semigroup $(T(t))_{t\geq0}$. Suppose that $B\in\mathcal{S}_{t_0}^{DS}$ and let $(S(t))_{t\geq0}$ be the perturbed $C_0$-semigroup. Define the operator $\mathcal{K}:\mathrm{L}LL(E)\rightarrow\mathrm{L}LL(E,E_{-1})$ by \[ \mathcal{K}S:=BS,\quad S\in\mathrm{L}LL(E). \] Then $\mathcal{K}\in\mathcal{S}^{DS,\tau_{\mathrm{sot}}}_{t_0}$ and the perturbed semigroup $(\mathcal{V}(t))_{t\geq0}$ is left implemented by $(S(t))_{t\geq0}$. \end{theorem} \begin{proof} First of all we show that $V_{\mathcal{K}}F(t)C\in\mathrm{L}LL(E)$ for $F\in\mathfrak{X}_{t_0}$, $t\in\left[0,t_0\right]$ and $C\in\mathrm{L}LL(E)$. Define $f\in\mathrm{C}\left(\left[0,t_0\right],\mathrm{L}LL_{\mathrm{sot}}(E)\right)$ by $f(r):=F(r)C$ and observe \begin{align*} (V_{\mathcal{K}}F)(t)Cx=\int_0^{t}{\mathcal{U}_{-1}(t-r)\mathcal{K}F(r)Cx\ \mathrm{d}{r}}=\int_0^t{T_{-1}(t-r)Bf(r)x\ \mathrm{d}{r}}. \end{align*} Since by assumption $B\in\mathcal{S}^{DS}_{t_0}$, we obtain $(V_{\mathcal{K}}F)(t)Cx\in E$. The following estimate will be crucial for what follows \begin{align*} \left\|(V_{\mathcal{K}}F)(t)Cx\right\|&=\left\|\int_0^t{\mathcal{U}_{-1}(t-r)\mathcal{K}F(r)Cx\ \mathrm{d}{r}}\right\|=\left\|\int_0^t{T_{-1}(t-r)BF(r)Cx}\right\|\\ &=\left\|(V_Bf)(t)x\right\|\leq \left\|V_B\right\|\cdot\left\|f\right\|\cdot\left\|Cx\right\|\leq \left\|V_B\right\|\cdot\left\|f\right\|\cdot\left\|C\right\|\cdot\left\|x\right\|. \end{align*} This estimate shows that $(V_{\mathcal{K}}F)(t)C\in\mathrm{L}LL(E)$. Moreover, we directly see that $\mathrm{Ran}(V_{\mathcal{K}})\subseteq\mathfrak{X}_{t_0}$, since $\tau_{\mathrm{sot}}$-strong continuity, norm boundedness and bi-equicontinuity of $V_{\mathcal{K}}F$ follow also from the previous estimate. Also the fact that $\left\|V_{\mathcal{K}}\right\|<1$ is immediate, due to the assumption that $B\in\mathcal{S}^{DS}_{t_0}$. 
Finally, we show that $(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$ generates the semigroup left implemented by $(S(t))_{t\geq0}$. For this notice that for sufficiently large $\lambda>0$ we have \begin{align*} R(\lambda,(A_{-1}+B)_{|E})Cx&=\int_0^{\infty}{\mathrm{e}^{-\lambda t}S(t)Cx\ \mathrm{d} t}=\int_0^{\infty}{\mathrm{e}^{-\lambda t}\mathcal{V}(t)Cx\ \mathrm{d} t}\\ &=R(\lambda,(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)})Cx, \end{align*} for all $x\in E$ and $C\in \mathrm{L}LL(E)$. Hence we conclude that $(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$ generates the semigroup left implemented by $(S(t))_{t\geq0}$. \end{proof} Here is the converse of this theorem. \begin{theorem}\label{thm:ImplC0} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$, left implemented by the $C_0$-semigroups $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$, respectively. Let $(A,D(A))$ be the generator of $(T(t))_{t\geq0}$ and let $\mathcal{K}\in\mathcal{S}^{DS,\tau_{\mathrm{sot}}}_{t_0}(\mathcal{U})$ be such that $(\mathcal{V}(t))_{t\geq0}$ is the corresponding perturbed semigroup. Define $B\in\mathrm{L}LL(E,E_{-1})$ by \[ Bx:=(\mathcal{K}\mathrm{I})x,\quad x\in E. \] Then $B\in\mathcal{S}^{DS}_{t_0}(T)$ and $(A_{-1}+B)_{|E}$ generates $(S(t))_{t\geq0}$. \end{theorem} \begin{proof} Let $f\in\mathrm{C}\left(\left[0,t_0\right],\mathrm{L}LL_{\mathrm{sot}}(E)\right)$ and $x\in E$. We observe that by Lemma \ref{lem:Ideal} one has that $(\mathcal{K}\mathrm{I})f(r)=\mathcal{K}(\mathrm{I} f(r))=\mathcal{K}f(r)$ for each $r\in\left[0,t_0\right]$. For $f\in\mathrm{C}\left(\left[0,t_0\right],\mathrm{L}LL_{\mathrm{sot}}(E)\right)$ we define $F\in\mathfrak{X}_{t_0}$ by $F(r):=M_{f(r)}$, the multiplication with $f(r)$, i.e., $F(r)C=f(r)C$ for each $C\in\mathrm{L}LL(E)$.
The following computation is crucial for the proof: \begin{align*} V_Bf(t)x&=\int_0^t{T_{-1}(t-r)Bf(r)x\ \mathrm{d}{r}}=\int_0^{t}{T_{-1}(t-r)(\mathcal{K}\mathrm{I})f(r)x\ \mathrm{d}{r}}\\ &=\int_0^t{\mathcal{U}_{-1}(t-r)\mathcal{K}f(r)x\ \mathrm{d}{r}}=\int_0^t{\mathcal{U}_{-1}(t-r)\mathcal{K}F(r)\mathrm{I} x\ \mathrm{d}{r}}\\ &=(V_{\mathcal{K}}F)(t)\mathrm{I} x. \end{align*} From this and from the assumption that $\mathcal{K}\in \mathcal{S}^{DS,\tau_{\mathrm{sot}}}_{t_0}$, we conclude that $B\in\mathcal{S}^{DS}_{t_0}$. Moreover we have \[ S(t)x=\mathcal{V}(t)\mathrm{I} x=\mathcal{U}(t)\mathrm{I} x+\int_0^t{\mathcal{U}_{-1}(t-r)\mathcal{K}\mathcal{V}(r)\mathrm{I} x\ \mathrm{d}{r}}=T(t)x+\int_0^t{T_{-1}(t-r)BS(r)x\ \mathrm{d}{r}}, \] for each $x\in E$. This yields that $(A_{-1}+B)_{|E}$ generates $(S(t))_{t\geq0}$. \end{proof} Summarizing Theorems \ref{thm:ImplC0} and \ref{thm:Impl1} we can state the following. \begin{corollary} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$ left implemented by the $C_0$-semigroups $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$ on $E$, respectively. Let us denote the generators of $(\mathcal{U}(t))_{t\geq0}$ and $(T(t))_{t\geq0}$ by $(\mathcal{G},D(\mathcal{G}))$ and $(A,D(A))$, respectively. The following are equivalent: \begin{iiv} \item There exists $\mathcal{K}\in\mathcal{S}^{DS,\tau}_{t_0}(\mathcal{U})$ such that $(\mathcal{V}(t))_{t\geq0}$ is generated by $(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$. \item There exists $B\in\mathcal{S}^{DS}_{t_0}(T)$ such that $(S(t))_{t\geq0}$ is generated by $(A_{-1}+B)_{|E}$. \end{iiv} \end{corollary} \begin{remark} Notice that not every Desch--Schappacher perturbation of an implemented semigroup gives again an implemented semigroup. To see this let $(\mathcal{G},D(\mathcal{G}))$ be the generator of the left implemented semigroup $(\mathcal{U}(t))_{t\geq0}$ and $\Phi\in(\mathrm{L}LL(E),\tau_{\mathrm{sot}})'$.
Define, as above, an operator $\mathcal{K}:\mathrm{L}LL(E)\rightarrow\mathrm{L}LL(E,E_{-1})$ by \[ \mathcal{K}(C):=\Phi(C)\mathcal{G}_{-1}(\mathrm{I}),\quad C\in\mathrm{L}LL(E). \] Such an operator $\mathcal{K}$ is not multiplicative if $\Phi\neq0$. \end{remark} \subsection{Comparisons} Now we relate comparison properties of the implemented semigroup and properties of the underlying $C_0$-semigroup. First of all, for $B\in\mathrm{L}LL(E)$ we define the multiplication operator $M_B\in\mathrm{L}LL(\mathrm{L}LL(E),\mathrm{L}LL(E))$ by $M_BS:=BS$. Then one has $\left\|M_B\right\|=\left\|B\right\|$. By taking $B:=T(t)-S(t)$ for $t>0$ we directly obtain the following result. \begin{lemma}\label{lem:CompImpl} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$ left implemented by the $C_0$-semigroups $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$, respectively. Then the following are equivalent: \begin{iiv} \item There exists $M\geq0$ such that $\left\|\mathcal{U}(t)-\mathcal{V}(t)\right\|\leq Mt$ for each $t\in\left[0,1\right]$. \item There exists $M\geq0$ such that $\left\|T(t)-S(t)\right\|\leq Mt$ for each $t\in\left[0,1\right]$. \end{iiv} \end{lemma} Recall from \cite[Prop. 6.1]{BF} that the Favard spaces of the implemented semigroup and of the underlying semigroup for $\alpha\in\left[0,1\right]$ are connected by \begin{align}\label{eqn:FavImpl} F_{\alpha}(\mathcal{U})=\mathrm{L}LL(E,F_{\alpha}(T)). \end{align} This yields the following result. \begin{lemma}\label{lem:FavImpl} For $B\in\mathrm{L}LL(E,E_{-1})$ we define $\mathcal{K}:\mathrm{L}LL(E)\rightarrow\mathrm{L}LL(E,E_{-1})$ by $\mathcal{K}S:=BS$. Then $\mathrm{Ran}(\mathcal{K})\subseteq F_{\alpha}(\mathcal{U})$ if and only if $\mathrm{Ran}(B)\subseteq F_{\alpha}(T)$.
\end{lemma} By \cite{Alber2001} and \cite{BF} the extrapolated implemented semigroup is defined by \[ \mathcal{U}_{-1}(t)S=T_{-1}(t)S,\quad S\in\overline{\mathrm{L}LL(E)}^{\mathrm{L}LL_{\mathrm{sot}}(E,E_{-1})}=\mathrm{L}LL(E,E_{-1}). \] This gives \[ F_0(\mathcal{U})=F_1(\mathcal{U}_{-1})=\mathrm{L}LL(E,F_1(T_{-1}))=\mathrm{L}LL(E,F_0(T)). \] \begin{proposition}\label{prop:ImplDS1} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$ left implemented by $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$, respectively. Furthermore let $(\mathcal{G},D(\mathcal{G}))$ denote the generator of $(\mathcal{U}(t))_{t\geq0}$. Suppose that there exists $M\geq0$ such that \[ \left\|\mathcal{U}(t)-\mathcal{V}(t)\right\|\leq Mt \] for each $t\in\left[0,1\right]$. Then there exists $\mathcal{K}\in\mathcal{S}_{t_0}^{DS,\tau}$ with $\mathrm{Ran}(\mathcal{K})\subseteq F_0(\mathcal{G})$. \end{proposition} \begin{proof} Since $\left\|\mathcal{U}(t)-\mathcal{V}(t)\right\|\leq Mt$ for each $t\in\left[0,1\right]$ we can use Lemma \ref{lem:CompImpl} to conclude that $\left\|T(t)-S(t)\right\|\leq Mt$ for each $t\in\left[0,1\right]$. If $(A,D(A))$ denotes the generator of $(T(t))_{t\geq0}$, then by \cite[Chapter III, Thm. 3.9]{EN} we find $B\in\mathrm{L}LL(E,E_{-1})$ such that $B\in\mathcal{S}^{DS}_{t_0}$ and $\mathrm{Ran}(B)\subseteq F_0(A)$. As in Theorem \ref{thm:Impl1} this gives rise to a multiplication operator $\mathcal{K}:\mathrm{L}LL(E)\rightarrow\mathrm{L}LL(E,E_{-1})$ defined by \[ \mathcal{K}S:=BS,\quad S\in\mathrm{L}LL(E). \] By Lemma \ref{lem:FavImpl} we conclude that $\mathrm{Ran}(\mathcal{K})\subseteq F_0(\mathcal{G})$. It remains to show that $(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$ generates $(\mathcal{V}(t))_{t\geq0}$. But, by \cite[Chapter III, Thm. 3.9]{EN}, $(A_{-1}+B)_{|E}$ generates $(S(t))_{t\geq0}$. \end{proof} Combining Propositions \ref{prop:ImplDS1}, \ref{prop:ImplDS2} and \cite[Chapter III, Thm.
3.9]{EN} we obtain the following theorem. \begin{theorem}\label{thm:Impl2} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$ left implemented by $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$, respectively. Denote by $(\mathcal{G},D(\mathcal{G}))$ the generator of $(\mathcal{U}(t))_{t\geq0}$ and by $(A,D(A))$ the generator of $(T(t))_{t\geq0}$. If $\mathcal{K}\in\mathcal{S}^{DS,\tau}_{t_0}(\mathcal{U})$ such that $\mathrm{Ran}(\mathcal{K})\subseteq F_0(\mathcal{G})$, then there exists $B\in\mathcal{S}_{t_0}^{DS}(T)$ with $\mathrm{Ran}(B)\subseteq F_0(A)$ such that $\mathcal{K}S=BS$ for each $S\in\mathrm{L}LL(E)$. \end{theorem} \begin{proof} By Proposition \ref{prop:ImplDS2} we find $M\geq0$ such that $\left\|\mathcal{U}(t)-\mathcal{V}(t)\right\|\leq Mt$ for each $t\in\left[0,1\right]$. Following the proof of Proposition \ref{prop:ImplDS1} there exists $B\in\mathcal{S}^{DS}_{t_0}$ such that $\mathrm{Ran}(B)\subseteq F_0(A)$. \end{proof} From this we can deduce the following equivalence. \begin{theorem}\label{thm:CompEquiv} Let $(\mathcal{U}(t))_{t\geq0}$ and $(\mathcal{V}(t))_{t\geq0}$ be two semigroups on $\mathrm{L}LL(E)$ left implemented by the $C_0$-semigroups $(T(t))_{t\geq0}$ and $(S(t))_{t\geq0}$ on $E$, respectively. Let us denote the generators of $(\mathcal{U}(t))_{t\geq0}$ and $(T(t))_{t\geq0}$ by $(\mathcal{G},D(\mathcal{G}))$ and $(A,D(A))$, respectively. The following are equivalent: \begin{iiv} \item There exists $\mathcal{K}\in\mathcal{S}^{DS,\tau}_{t_0}(\mathcal{U})$ such that $\mathrm{Ran}(\mathcal{K})\subseteq F_0(\mathcal{G})$ and such that $(\mathcal{V}(t))_{t\geq0}$ is generated by $(\mathcal{G}_{-1}+\mathcal{K})_{|\mathrm{L}LL(E)}$. \item There exists $B\in\mathcal{S}^{DS}_{t_0}(T)$ such that $\mathrm{Ran}(B)\subseteq F_0(A)$ and such that $(S(t))_{t\geq0}$ is generated by $(A_{-1}+B)_{|E}$. 
\end{iiv} \end{theorem} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \end{document}
\begin{document} \draft \title{Quantum Trajectories for Realistic Detection} \author{P. Warszawski${}^{1}$, H.M. Wiseman${}^{1,2}$, and H. Mabuchi${}^{2}$} \date{\today} \address{${}^{1}$ School of Science, Griffith University, Nathan, Brisbane, Queensland 4111 Australia\\ ${}^{2}$ Norman Bridge Laboratory of Physics 12-33, California Institute of Technology, Pasadena, CA 91125, USA} \maketitle \begin{abstract} Quantum trajectories describe the stochastic evolution of an open quantum system conditioned on continuous monitoring of its output, such as by an ideal photodetector. Here we derive (non-Markovian) quantum trajectories for {\em realistic} photodetection, including the effects of efficiency, dead time, bandwidth, electronic noise, and dark counts. We apply our theory to a realistic cavity QED scenario and investigate the impact of such detector imperfections on the conditional evolution of the system state. A practical theory of quantum trajectories with realistic detection will be essential for experimental and technological applications of quantum feedback in many areas. 
\end{abstract} \pacs{03.65.Yz, 03.65.Ta, 42.50.Lc, 42.50.Ar}
% NOTE(review): the macro NAMES in the block below were corrupted during source
% extraction (each name had been replaced by its own expansion, e.g.
% \newcommand{\begin{equation}}{\begin{equation}} and \newcommand{{\bigr)}a}[1]{...}
% for \bra via \br). Plausible names have been restored; confirm against the
% original source. The document body uses the expanded forms, so these
% definitions are vestigial but must at least compile.
\newcommand{\beq}{\begin{equation}}
\newcommand{\eeq}{\end{equation}}
\newcommand{\beqa}{\begin{eqnarray}}
\newcommand{\eeqa}{\end{eqnarray}}
\newcommand{\nn}{\nonumber}
\newcommand{\nl}[1]{\nonumber \\ && {#1}\,}
\newcommand{\erf}[1]{Eq.~(\ref{#1})}
\newcommand{\erfs}[2]{Eqs.~(\ref{#1})--(\ref{#2})}
\newcommand{\dg}{^\dagger}
\newcommand{\rt}[1]{\sqrt{#1}\,}
\newcommand{\smallfrac}[2]{\mbox{$\frac{#1}{#2}$}}
\newcommand{\half}{\smallfrac{1}{2}}
\newcommand{\bra}[1]{\langle{#1}|}
\newcommand{\ket}[1]{|{#1}\rangle}
\newcommand{\ip}[2]{\langle{#1}|{#2}\rangle}
\newcommand{\sch}{Schr\"odinger }
\newcommand{\schs}{Schr\"odinger's }
\newcommand{\hei}{Heisenberg }
\newcommand{\heis}{Heisenberg's }
\newcommand{\bl}{{\bigl(}}
\newcommand{\br}{{\bigr)}}
\newcommand{\ito}{It\^o }
\newcommand{\strat}{Stratonovich }
\newcommand{\dbd}[1]{\frac{\partial}{\partial {#1}}}
\newcommand{\sq}[1]{\left[ {#1} \right]}
\newcommand{\cu}[1]{\left\{ {#1} \right\}}
\newcommand{\ro}[1]{\left( {#1} \right)}
\newcommand{\an}[1]{\left\langle{#1}\right\rangle}
\newcommand{\Lra}{\Longrightarrow}
\newcommand{\eps}{\varepsilon}
\begin{multicols}{2} \section{Introduction} The limited utility of quantum measurement theory as axiomatized by von Neumann \cite{Von32} for describing practical laboratory measurements has necessitated the development of more general measurement theories \cite{Dav76,Kra83}. In the past decade the application of such theories has become widespread in quantum optics, in particular for describing continuous monitoring of the photoemission from radiatively damped open systems. They describe the evolution of the conditioned system state in terms of quantum jumps \cite{DalCasMol92,GarParZol92,Car93b} for direct detection and quantum diffusion \cite{Car93b,WisMil93c} for dyne detection.
The stochastic evolution equation, termed a quantum trajectory, has also been applied in mesoscopic electronics \cite{WisWah01}. Thus far, the main practical utility of quantum trajectory theory has been in improving the computational efficiency of simulations used to compare models with experimental data. But it is now gaining increasing importance as the quantum generalization of Kalman filtering, which provides essential signal-processing methods in classical estimation, communication, and control engineering. Quantum trajectory theory should in principle play the same pivotal role for emerging quantum analogs of these technologies \cite{Gamb01,Cira97,Dohe99b}. Before this can happen it is essential that the theory be extended to account for the imperfections of {\em realistic measurement devices}, as non-ideal detector dynamics can dramatically affect the proper inference from measured signals to the conditional quantum state of an observed system. In this paper we present the theory of quantum trajectories for realistic photodetection. We model both photon counters and photoreceivers (for homodyne detection) and include the effects of efficiency, dead time, bandwidth, electronic noise, and dark counts. The proper treatment of bandwidth limitations and electronic noise are of particular significance as these imperfections are inevitable and predominant concerns in any practical context. They are of central importance in the current generation of experiments on quantum-limited measurement in atomic \cite{MabYeKim99} and condensed matter \cite{Dev00} systems. Our theory works by embedding the system within a supersystem that obeys a Markovian equation. If the set of (classical) detector states is ${\bf S}$, then the supersystem is described by the set $\{\rho_{s}:s \in {\bf S}\}$. Here ${\rm Tr}[\rho_{s}]$ is the probability that the apparatus is in state $s$, and $\rho_{s}/{\rm Tr}[\rho_{s}]$ is the system state given this event. 
\section{The System} In this paper we take the monitored system to be a two-level atom (TLA), classically driven at Rabi frequency $\Omega$ and radiatively damped at rate $\Gamma$. The TLA obeys the unconditional master equation (ME) \begin{equation} \dot{\rho}={\cal L}\rho=-i(\Omega/2)\left[\sigma_{x}, \rho\right]+\Gamma \left(\sigma\rho\sigma^{\dag}-\half\{\sigma^{\dag}\sigma,\rho\}\right), \label{TLAME} \end{equation} where $\sigma$ is the atomic lowering operator, and $\sigma_{x}=\sigma+\sigma^\dagger$. Time arguments are not included unless they are necessary for the reader's understanding. In reality, it is difficult to detect a significant fraction of an atom's fluorescence. However, the ME (\ref{TLAME}) also describes, in a suitable regime \cite{Rice88}, the damping of an atom through a cavity mode. This produces an easily detectable output beam. In this scenario, the effective decay rate $\Gamma$ may be much larger than that of a bare atom, and we have this in mind when choosing $\Gamma=300$M$s^{-1}$ for our simulations. \section{Photon Counter} An avalanche photodiode (APD) operating in Geiger mode produces a macroscopic current pulse in response to an incident photon. It consists of a p-n junction operated under a reverse bias greater than the breakdown voltage \cite{OpFibV1}. Under these conditions we can describe the diode by just three classical states (see Fig.~\ref{PDDiag}). The first ($0$) is a stable low-current state in which there are no charge carriers in the depletion region of the junction. The transition from $0$ to the second state ($1$) takes place when an electron--hole pair is created in the depletion region by an incident photon (with quantum efficiency $\eta$) or by thermally initiated `dark counts' occurring at a rate $\gamma_{{\rm dk}}$. Further impact ionization leads to an avalanche, until the current reaches some threshold value and a detection is registered, thus changing the state of the APD to $2$. 
The transition from $1$ to $2$ has a random duration (we assume Poissonian) with mean $\gamma_{\rm r}^{-1}$ (the `response time'). The avalanche is then arrested by the application of a negative-going voltage pulse that temporarily brings the bias voltage below the breakdown value \cite{OpFibTech}. This results in a fixed `dead time', $\tau_{{\rm dd}}$, during which the APD cannot detect photons, after which it is restored to state 0. \begin{figure} \caption{\narrowtext Realistic photon counting by an avalanche photodiode. The quantum efficiency $\eta$ is represented by the beam splitter (BS). Single arrowheads within the realistic photodetector indicate Poisson processes. For details, see text.} \label{PDDiag} \end{figure} Our aim is to derive the quantum trajectories for the quantum system (the source of the light entering the APD) {\em conditioned on the observation of an avalanche}. For the TLA, we consider direct detection of fluorescence. In this case the supersystem is described by the set ($\tilde{\rho}_{0},\tilde{\rho}_{1},\tilde{\rho}_{2})$, where the tilde indicates that (for simplicity) we are using unnormalized system states, and the subscript indicates the associated detector states. The normalized conditioned TLA state is \begin{equation} \rho_{\rm c} = \tilde{\rho}_{\rm c}/{\rm Tr}[\tilde{\rho}_{\rm c}] \;;\;\; \tilde{\rho}_{{\rm c}}= \tilde{\rho}_{0}+\tilde{\rho}_{1}+\tilde{\rho}_{2}. 
\end{equation} Our description of internal dynamics of an APD can be simply translated into rate equations for the discrete detector state $s\in\{0,1,2\}$, which in turn imply the following stochastic generalization of the ME~(\ref{TLAME}): \begin{eqnarray} d\tilde{\rho}_{0}&=&dt\left\{\left[{\cal L}-\gamma_{{\rm dk}}- \eta\Gamma{\cal J}-\dot{{\cal N}}\right]\tilde{\rho}_{0}+{\dot {\cal N}}(t^{*}) \tilde{\rho}_{2}\right\}, \label{dp0}\\ d\tilde{\rho}_{1}&=&dt\left\{\left[{\cal L}-\gamma_{{\rm r}}-\dot{{\cal N}}\right]\tilde{\rho}_{1}+\eta\Gamma{\cal J}\tilde{\rho}_{0} +\gamma_{{\rm dk}}\tilde{\rho}_{0}\right\}\label{dp1},\\ d\tilde{\rho}_{2}&=&dt\left\{\left[{\cal L}-\dot{{\cal N}}(t^{*})\right]\tilde{\rho}_{2}+\dot{{\cal N}}\tilde{\rho}_{1}\right\}. \label{dp2} \end{eqnarray} Here ${\cal J}\tilde\rho_{0}$ denotes $\sigma\tilde\rho_{0}\sigma^\dagger$. We use ${\cal N}$ for the number of detections counted, so that $d{\cal N}(t)=\dot{{\cal N}}dt$ is a point process equal to $1$ in the infinitesimal interval when an avalanche is first observed and $0$ otherwise. The delayed process, $\dot{{\cal N}}(t^{*})\equiv \dot{{\cal N}}(t-\tau_{{\rm dd}})$, is used to return the detector to state $0$. The statistics of $d{\cal N}$ are defined by its expectation value $ {\rm E}[d{\cal N}]=\gamma_{{\rm r}}dt{\rm Tr}[\tilde{\rho}_{1}]/{\rm Tr}[\tilde{\rho}_{\rm c}]$. The detector imperfections lead to substantial changes in the conditional dynamics of the TLA, as compared to ideal quantum trajectories. Representative features can be seen in Fig.~\ref{TrajsPRL} (A) and (B). Plot (A) shows a typical portion of a trajectory for $z_{{\rm c}}$, while plot (B) shows the same, and $y_{{\rm c}}$, over a shorter time around $t\approx 4.9$, when an avalanche is registered. 
Unlike the case of ideal detection, the corresponding ``quantum jump'' does not take $z_{{\rm c}}\rightarrow -1,y_{{\rm c}}\rightarrow 0$, and the amplitude of subsequent oscillations in $z_{{\rm c}},y_{{\rm c}}$ is less than $1$. The jumps in the conditioned quantum state caused by the detection of avalanches are attenuated because of the finite detector response time in combination with the continuous Rabi oscillation, which evolves the TLA away from the ground state for a random and unknown time (with mean $\gamma_{{\rm r}}^{-1}$) between the ``actual'' spontaneous emission event and the registration of the photocurrent avalanche. During the APD dead time the effective efficiency is zero, and as a result the TLA's conditional state regresses towards the steady state of the unconditional ME~(\ref{TLAME}). Even after the detector becomes ready again (by resetting to state 0) the Rabi oscillations in $z_{\rm c},y_{\rm c}$ decay because of the APD's non-unit efficiency and finite bandwidth. These imperfections cause the stationary ensemble-averaged conditional purity $p = \lim_{t\to\infty}{\rm E}\cu{{\rm Tr}[\rho_{\rm c}^{2}(t)]}$ to be substantially less than one for large $\Omega$. For small $\Omega$ however, even the unconditional (without measurement) stationary purity $p_{\rm u}$ of the TLA approaches unity. It is thus useful to define a scaled purity $\in [0,1]$ that measures how much improvement measurement gives: ${\rm Scaled\;}p=(p-p_{{\rm u}})/(1-p_{{\rm u}}).$ For the typical parameter values used in Fig.~\ref{TrajsPRL}, the Scaled $p \approx 0.052$. \begin{figure} \caption{\narrowtext In plot (A), $z_{{\rm c} \label{TrajsPRL} \end{figure} \section{Photoreceiver} When the incident photon flux is high, as in homodyne detection, a p-i-n photodiode connected to a transimpedance amplifier (see Fig.~\ref{PRDiag}) is an appropriate photoreceiver \cite{OpFibTech}. 
When a photon strikes the depletion region of the p-i-n junction, an electron--hole pair is produced, with probability equal to the quantum efficiency $\eta$. The charge carriers drift under the influence of the below-breakdown reverse bias, and the resultant current $I$ is fed into an operational amplifier (op-amp) set up as a transimpedance amplifier. This has a low effective input impedance, so that the diode acts as a current source, and $I$ is converted into a voltage drop $V$ across the feedback resistor, $R$. The capacitor $C$, in parallel with $R$, represents the total capacitance from the output of the op-amp back to its input, including capacitance added deliberately for the smoothing of noise and oscillations. If no electronic noise were present, the output voltage of the photoreceiver would be a filtered version of the input signal given, in the frequency domain, by \begin{equation} V(\omega )={-IR}/({1+i\omega RC}). \label{Vfreq1} \end{equation} \begin{figure} \caption{\narrowtext Homodyne detection by a photoreceiver. The output field of the TLA is combined with a LO before being detected by a realistic photoreceiver consisting of a p-i-n photodiode (of quantum efficiency $\eta$) that produces the photocurrent $I$, and an ideal op-amp with feedback resistor $R$, and capacitance, $C$ (see text). The output voltage ${\cal V} \label{PRDiag} \end{figure} It should be noted that if this were the case (that is, if there were no noise) then the input $I$ could be perfectly reconstructed from the filtered signal $V$. Thus the resultant quantum trajectories would be no different from those of a photoreceiver with infinite bandwidth. Everything of interest results therefore from the presence of excess noise. We include only the Johnson noise $V_{{\rm J}}$ from the feedback resistor, which has a flat spectrum $S_{{\rm J}}=4k_{{\rm B}}TR$. 
This simplification (neglecting contributions from voltage noise of the operational amplifier) can be justified for practical receivers with $R\sim 10$k$\Omega$. The output voltage ${\cal V}$ from the photoreceiver is given by the sum of the filtered signal and the Johnson noise \begin{equation} {\cal V}=V+V_{{\rm J}}. \label{Vo} \end{equation} Our aim is to find the quantum trajectory for the system, conditioned on continuously monitoring ${\cal V}$. Since the voltage $V$, which describes the detector state, is a continuous variable, in this case ${\bf S}= {\rm I\!R}$, the real line, and the supersystem can be described by an operator function $\rho(V)$. Finding the stochastic equation of motion for $\rho(V)$ is quite involved. We begin by taking the output current $I$ of the photodiode to be that from a perfect (apart from its efficiency $\eta$) unbalanced homodyne detection of the fluorescence of the TLA. For a LO tuned to the atomic transition frequency $\omega_{0}$, of power $P$, and phase $\phi$, the current is \cite{Car93b,WisMil93a} \begin{equation} I=e\sqrt{P/\hbar\omega_{0}}\left[\eta\sqrt{\Gamma}\langle e^{-i\phi}\sigma +e^{i\phi}\sigma^{\dag}\rangle +\rt{\eta}\xi(t)\right], \end{equation} where we have ignored the D.C. component due to the LO power. Here $\xi(t)$ is the Gaussian white noise \cite{Gar85} arising from the Poissonian statistics of the LO and $e$ is the electron charge. The evolution of the TLA conditioned on $I$ is given, in terms of the noise $\xi(t)$, by the following stochastic master equation \cite{WisMil93a} \begin{equation} d\rho_{I}=dt\left\{{\cal L}+\rt{\eta\Gamma}\xi(t){\cal H}[e^{-i\phi}\sigma]\right\}\rho , \label{rhoI} \end{equation} where ${\cal H}[A]\rho\equiv A\rho+\rho A^{\dag}-{\rm Tr}[A\rho+\rho A^{\dag}]\rho$. Now \erf{Vfreq1} is equivalent to the stochastic equation \begin{equation} I+V/R+C(dV/dt)=0. \label{LE} \end{equation} Since the voltage $V$ is not directly measured, we must consider a distribution $P(V)$ for it. 
Assuming that $C>0$, and, for the moment, that $I$ is known, \erf{LE} can be converted to an It\^o \cite{Gar85} stochastic Fokker-Planck equation for $P(V)$ conditioned on the photocurrent: \begin{equation} dP_{I}(V)=\left(\frac{\partial}{\partial V}\frac{V+IR} {RC}+\frac{P\eta e^{2}}{2\hbar\omega_{0}C^{2}}\frac{\partial^{2}} {\partial V^{2}}\right)P(V)dt. \label{PI} \end{equation} Here we are using the convention that subscripts indicate that the increment is conditioned on that result. That is, for example, $P_{I}(V)\equiv P(V|I)$. Next we need to determine the effect of the measurement of ${\cal V}$ on $P(V)$. This can be calculated by using Bayes' conditional probability theorem \begin{equation} P_{{\cal V}}(V)=P_{V}({\cal V})P(V)/P({\cal V}). \label{bayes} \end{equation} Remembering that the Johnson noise is white, it follows from \erf{Vo} that $P_{V}({\cal V})$ is a Gaussian with mean $V$ and variance $4k_{{\rm B}}TR/dt$. From this we find that \begin{equation} P({\cal V})=\int dVP_{V}({\cal V})P(V). \end{equation} is a Gaussian of mean $\langle V\rangle$ and variance $4k_{{\rm B}}TR/dt$. It follows that we can write \begin{equation} \label{defdWJ} {\cal V}=\langle V\rangle+\rt{4k_{{\rm B}}TR}{dW_{{\rm J}}(t)}/{dt}, \end{equation} where $dW_{{\rm J}}(t)/dt$ is another Gaussian white noise source, independent of $\xi(t)$. Substitution of $P_{V}({\cal V})$ and $P({\cal V})$ into \erf{bayes} yields the effect of the ${\cal V}$-measurement: \begin{equation} dP_{{\cal V}}(V)= \left(V-\langle V\rangle\right)P(V)dW_{{\rm J}}(t)/\sqrt{4k_{{\rm B}}TR}. \label{PV} \end{equation} Now, to see how ${\cal V}$ conditions the TLA, we form the quantity $\rho(V)=\rho P(V)$, where $\rho$ is here independent of $P(V)$ because we are imagining $I$ to be known at all times. 
The time evolution of $\rho(V)$, given that ${\cal V}$ and $I$ are known, is found from \begin{eqnarray} \rho(V)+d\rho_{I,{\cal V}}(V)&=&\left(\rho+d\rho_{I}\right)\nl{\times} \left[P(V)+dP_{I}(V)+dP_{{\cal V}}(V)\right], \end{eqnarray} with the use of \erf{PI}, \erf{PV} and \erf{rhoI}. Finally, in reality, ${\cal V}$ is known but $I$ is not. Therefore we should average over the vacuum noise $\xi(t)$, but keep the Johnson noise $dW_{\rm J}/dt$. We define a dimensionless voltage $v=V\sqrt{C/4k_{{\rm B}}T}$, a rate $\gamma=1/RC$ and a dimensionless noise power $N = 4k_{{\rm B}}T\hbar\omega_{0}/\eta RPe^{2}$. This last expression is the ratio of the low-frequency power in ${\cal V}$ from the Johnson noise to that from the vacuum noise. We then obtain the following stochastic nonlinear superoperator Fokker-Planck equation for $\rho(v)$: \begin{eqnarray} d\rho_{{\cal V}}(v)&=&dt\left({\cal L}+\frac{\gamma}{2N}\frac{\partial^{2}}{\partial v^{2}}+\gamma\frac{\partial}{\partial v}v \right)\rho(v) \nl{+}dt\frac{\partial}{\partial v}\sqrt{\frac{\gamma\Gamma\eta}{N}} \left[e^{-i\phi}\sigma\rho(v)+ e^{i\phi}\rho(v)\sigma^{\dag}\right] \nl{+} \rt{\gamma}dW_{{\rm J}}(t) \left(v-\langle v\rangle\right)\rho(v). \label{dpHom} \end{eqnarray} The dependence on ${\cal V}$ may be explicated by substituting $dt\gamma\ro{\rt{{C}/{ 4k_{\rm B}T}} {\cal V}-\an{v}}$ for $\rt{\gamma}dW_{{\rm J}}(t)$ [see \erf{defdWJ}]. In the above, $\langle v\rangle=\int dv{\rm Tr}[\rho(v)]v$. The normalized conditioned TLA state is $\rho_{\rm c}=\int\rho(v)dv$. A typical trajectory for realistic homodyne $x$ ($\phi=0$) detection is shown in Fig.~\ref{TrajsPRLHom} (A). The main difference from the case of perfect detection is the reduced amplitude of variation in $x_{{\rm c}}$ (dotted) and $z_{{\rm c}}$ (solid). This is due to the effective bandwidth of the photoreceiver, which affects $z_{\rm c}$ more because of its faster dynamics. 
Plot (B) shows the photoreceiver output voltage ${\cal V}$ that is used in \erf{dpHom} to condition the TLA state. It is seen that ${\cal V}$ is correlated with $x_{\rm c}$ as expected. \begin{figure} \caption{\narrowtext Plot (A) shows $x_{{\rm c} \label{TrajsPRLHom} \end{figure} Plotted in (C) is the scaled purity as a function of the driving strength for both homodyne $x$ and $y$ ($\phi=\pi/2$) detection. As $\Omega$ increases, homodyne $y$ detection becomes increasingly worse than $x$ detection at following the evolution of the TLA. This is due to the finite bandwidth of the photoreceiver in combination with the conditional homodyne dynamics in the $\Omega \gg \Gamma$ limit \cite{WisMil93c}. For homodyne $x$ measurement the $x$ quadrature, which changes sign fairly infrequently, dominates the TLA state. The slow ($\Gamma$) dynamics allow the detector to track the state reasonably well. In contrast, homodyne $y$ detection produces a conditional state dominated by fast ($\Omega$) Rabi cycling, which is poorly followed. \section{Conclusions} In conclusion, we have presented a theory of quantum trajectories for systems conditioned on realistic photodetection. The equations are tractable, as we have demonstrated by numerical simulations, and allow us to quantify the degree and manner by which imperfections such as a finite bandwidth modify the conditioning of quantum states by measurement in concrete experimental scenarios. Realistic quantum trajectory models will be of paramount importance in the field of real-time quantum feedback control \cite{Dohe2000}. The techniques we introduce here may also prove essential in describing other realistic measurements, such as in condensed matter systems \cite{WisWah01,Dev00}. 
Davies, {\em Quantum Theory of Open Systems} (Academic Press, London, 1976). \bibitem{Kra83} K. Kraus, {\em States, Effects, and Operations: Fundamental Notions of Quantum Theory} (Springer, Berlin, 1983). \bibitem{DalCasMol92} J. Dalibard, Y. Castin and K. M\o lmer, Phys. Rev. Lett. {\bf 68}, 580 (1992). \bibitem{GarParZol92} C. W. Gardiner, A. S. Parkins, and P. Zoller, Phys. Rev. A {\bf 46}, 4363 (1992). \bibitem{Car93b} H. J. Carmichael, {\em An Open Systems Approach to Quantum Optics} (Springer, Berlin, 1993). \bibitem{WisMil93c} H. M. Wiseman and G. J. Milburn, Phys. Rev. A {\bf 47}, 1652 (1993). \bibitem{WisWah01} H. M. Wiseman {\em et al.}, Phys. Rev. B {\bf 63}, 235308 (2001) \bibitem{Gamb01} J. Gambetta and H. M. Wiseman, to appear in Phys. Rev. A (2001). \bibitem{Cira97} J.-I. Cirac, P. Zoller, H. J. Kimble, and H. Mabuchi, Phys. Rev. Lett. {\bf 78}, 3221 (1997). \bibitem{Dohe99b} A. C. Doherty and K. Jacobs, Phys. Rev. A {\bf 60}, 2700 (1999). \bibitem{MabYeKim99} H. Mabuchi, J. Ye, and H. J. Kimble, Appl. Phys. B {\bf 68}, 1095 (1999). \bibitem{Dev00} M. H. Devoret and R. J. Schoelkopf, Nature {\bf 406}, 1039 (2000). \bibitem{Rice88} P. R. Rice and H. J. Carmichael, IEEE J. Quantum Elect. {\bf 24}, 1351 (1988); Q. A. Turchette, R. J. Thompson, and H. J. Kimble, Appl. Phys. B {\bf 60}, S1 (1995). \bibitem{OpFibV1} B. T. Debney and A. C. Carter, Optical Fiber Sensors Components and Subsystems, Ch. 4, Vol. 1 (Artech House, Boston 1996). \bibitem{OpFibV3} B. Garside, Optical Fiber Sensors Components and Subsystems, Ch. 5, Vol. 3 (Artech House, Boston 1996). \bibitem{OpFibTech} J. D. C. Jones, Optical Fiber Sensor Technology, Ch. 4 (Chapman and Hill, London 1995). \bibitem{Gar85} C. W. Gardiner, {\em Handbook of Stochastic Methods} (Spring\-er, Berlin, 1985). \bibitem{WisMil93a} H. M. Wiseman and G. J. Milburn, Phys. Rev. A {\bf 47}, 642 (1993). \bibitem{Dohe2000} A. C. Doherty {\it et al.}, Phys. Rev. A {\bf 62}, 012105 (2000). 
\end{references} \end{multicols} \end{document}
\begin{document} \bibliographystyle{plain} \begin{abstract} In this paper, we explore the limit structure of a sequence of Riemannian manifolds with Bakry-\'{E}mery Ricci curvature bounded below in the Gromov-Hausdorff topology. By extending the techniques established by Cheeger-Colding for Riemannian manifolds with Ricci curvature bounded below, we prove that each tangent space at a point of the limit space is a metric cone. We also analyze the singular structure of the limit space, analogous to a work of Cheeger-Colding-Tian. Our results will be applied to study the limit space of a sequence of K\"ahler metrics arising from solutions of certain complex Monge-Amp\`ere equations for the existence of K\"ahler-Ricci solitons on a Fano manifold via the continuity method. \end{abstract} \maketitle \tableofcontents \setcounter{section}{-1} \section{Introduction} In a series of papers [CC1], [CC2], [CC3], Cheeger-Colding study the limit space of a sequence of Riemannian manifolds with Ricci curvature bounded below in the Gromov-Hausdorff topology. As one of the fundamental results, they prove the existence of a metric cone structure for each tangent cone on the limit space [CC2]. Namely, \begin{theo}([CC2])\label{thm-cc1} Let $(M_i,g_i;p_i)$ be a sequence of $n$-dimensional Riemannian manifolds which satisfy \[{\rm Ric}_{M_i}(g_i)\geq - (n-1)\Lambda^2 g_i~{\rm and}~{\rm vol}_{g_i}( B_{p_i}(1))\geq v>0.\] Then $(M_i,g_i;p_i)$ converge to a metric space $(Y;p_\infty)$ in the pointed Gromov-Hausdorff topology. Moreover, for any $y\in Y$, each tangent cone $T_yY$ is a metric cone over another metric space whose diameter is less than $\pi$. \end{theo} Based on the above theorem, Cheeger-Colding introduce a notion of $\mathcal S_k$-typed ($k\le n-1$) singularities of the limit space $Y$ as follows. 
\begin{defi}\label{singular-type} Let $(Y;p_{\infty})$ be the limit of $(M_i,g_i;p_i)$ as in Theorem \ref{thm-cc1}. We call $y\in (Y;p_\infty)$ an $\mathcal S_k$-typed singular point if there exists a tangent cone at $y$ which splits off a Euclidean space $\mathbb R^k$ isometrically with dimension at most $k$. \end{defi} Applying Theorem \ref{thm-cc1} to appropriate tangent cone spaces, Cheeger-Colding show that the dimension of the set $\mathcal S_k$ is less than $k$ [CC2]. In [CCT], Cheeger-Colding-Tian do significant work to determine which kind of singularities can be excluded in the limit space $Y$ under certain curvature conditions for the sequence of $(M_i,g_i)$. They prove \begin{theo}([CCT])\label{thm-cct} Let $(M_i, g_i; p_i)$ be a sequence of $n$-dimensional manifolds and $(Y,p_\infty)$ its limit as in Theorem \ref{thm-cc1}. Suppose that the integrals of sectional curvature \[\frac{1}{{\rm vol}_{g_i}(B_{p_i}(1))}\int_{B_{p_i}(1)}|{\rm Rm}|^p d{\rm v}\] are uniformly bounded. Then for any $\epsilon>0$, the following is true: \[{\rm dim} (B_{p_\infty}(1)\setminus R_\epsilon)\leq n-4,~\text{ if} ~p=2\] and \[\mathcal H^{n-2p}(B_{p_{\infty}}(1)\setminus R_\epsilon)<\infty, ~\text{if}~ 1\leq p<2.\] Here $R_\epsilon$ consists of points $y$ in $Y$ which satisfy \[{\rm dist}_{GH}(B_y(1), B_0(1))\leq\epsilon\] for the unit ball $ B_0(1) $ in $\mathbb R^n$ and a unit distance ball $B_y(1)$ in some tangent cone $T_yY$. \end{theo} The purpose of the present paper is to extend the above Cheeger-Colding theorem and Cheeger-Colding-Tian theorem in Bakry-\'Emery geometry. 
More precisely, we analyze the structure of the Gromov-Hausdorff limit of a sequence of $n$-dimensional Riemannian manifolds in the class $\mathcal{M}(A,\Lambda, v)$ defined by \begin{align} \mathcal{M} (A,\Lambda,v)=&\{(M,g;p)|~M \text{ is an $n$-dimensional}\notag\\ &\text{complete Riemannian manifold which satisfies} \notag\\ &{\rm Ric}_M(g)+\text{hess}(f)\geq-(n-1)\Lambda^2 g,\notag\\ &{\rm vol}_g(B_{p}(1))\geq v>0,~ \text{and}~ |\nabla f|_g\le A\}.\notag \end{align} Here $f$ is a smooth function on $M$ and $\text{hess}(f)$ denotes the Hessian tensor of $f$ with respect to $g$. ${\rm Ric}_M(g)+\text{hess}(f)$ is called the Bakry-\'Emery Ricci curvature associated to $f$ [BE]. For simplicity, we denote it by ${\rm Ric}_{M,g}^f$ or just ${\rm Ric}_{g}^f$. Clearly, $\mathcal{M}(A,\Lambda, v)$ contains compact Ricci solitons [Ha], [TZh]. We show that both Theorem \ref{thm-cc1} and Theorem \ref{thm-cct} still hold for a sequence in $\mathcal{M}(A,\Lambda, v)$ (cf. Sections 4, 5). As in [CC1], we shall establish various integral comparison results for the gradient and Hessian estimates between appropriate functions and coordinate functions or distance functions on a Riemannian manifold with Bakry-\'Emery Ricci curvature bounded below. We will use $f$-harmonic functions to construct those appropriate functions instead of harmonic functions (cf. Section 2). Another technique is to generalize the segment inequality lemmas in [CC1] to our case of weighted volume form (cf. Lemma \ref{equ-seg}, Lemma \ref{equ-rad}, Lemma \ref{approxi-1}) so that the triangle lemmas in [Ch2] are still true on a Riemannian manifold with almost flat Bakry-\'Emery Ricci curvature (cf. Lemma \ref{cheeger-lemma}, Lemma \ref{cheeger-lemma-2}). These triangle lemmas are crucial in the proofs of the splitting theorem and the metric cone theorem (cf. Theorem \ref{splitting-theorem}, Theorem \ref{existence-metric-cone}). 
We shall point out that various versions of such triangle lemmas were used by Colding and Cheeger-Colding in earlier papers to study the rigidity of Riemannian metrics [Co1], [Co2], [CC1]. Another motivation of this paper is to study the limit space of a sequence of K\"ahler metrics $g_{t_i}$ $(t_i<1)$ arising from solutions of certain complex Monge-Amp\`ere equations for the existence of K\"ahler-Ricci solitons via the continuity method [TZ1], [TZ2]. We show that such metrics naturally belong to $\mathcal{M}(A,\Lambda,v)$. As a consequence, for any sequence $\{g_{t_i}\}$ there exists a subsequence which converges to a metric space with complex codimension of singularities at least one in the Gromov-Hausdorff topology (cf. Theorem \ref{thm-kahler-1}, Section 6). Furthermore, in case $t_i\to 1$, the complex codimension of singularities of the limit space is at least two (cf. Theorem \ref{thm-kahler-2}). The latter corresponds to a sequence of so-called weak almost K\"ahler-Ricci solitons, which is a generalization of a sequence of weak almost K\"ahler-Einstein metrics introduced by Tian-Wang in a recent paper [TW] (cf. Definition \ref{almost-kr-soliton}). In fact, for such a kind of K\"ahler metric sequence, we prove the following result: \begin{theo}\label{thm-kahler-3} Let $(M_i, g_{i})$ be a sequence of weak almost K\"ahler-Ricci solitons. Suppose that there exists a point $p_i$ in each $M_i$ such that \begin{align}{\rm vol}_{M_i}(B_{p_i}(1))\ge v>0.\end{align} Then there exists a subsequence of $(M_i,g_i;p_i)$ which converges to a limit metric space $Y$ in the pointed Gromov-Hausdorff topology. Moreover $\mathcal{S}(Y)=\mathcal{S}_{2n-4}$. In particular, the complex codimension of singularities of $Y$ is at least 2. 
\end{theo} As a corollary of Theorem \ref{thm-kahler-3}, we show that there exists a sequence of weak almost K\"ahler-Ricci solitons on $M$ which converges to a metric space $(M_\infty, g_\infty)$ with complex codimension of the singular set of $(M_\infty, g_\infty)$ at least two in the Gromov-Hausdorff topology if the modified Mabuchi $K$-energy defined in [TZ1] is bounded from below. In a sequel of papers [WZ] and [JWZ], we will further confirm that the regular part of $(M_\infty, g_\infty)$ is in fact a K\"ahler-Ricci soliton while $(M_\infty, g_\infty)$ admits a $Q$-Fano algebraic structure. The organization of the paper is as follows. In Section 1, we first recall an $f$-Laplace comparison result of Wei-Wylie for distance functions (cf. Lemma \ref{lapalace-esti-r}). Then as applications of Lemma \ref{lapalace-esti-r} we construct a cut-off function with bounded gradient and $f$-Laplacian (cf. Lemma \ref{cut-off}). In Section 2, we give various integral estimates for the gradient and Hessian of $f$-harmonic functions. In Section 3 and Section 4, we will prove the splitting theorem (cf. Theorem \ref{splitting-theorem}) and the metric cone theorem (cf. Theorem \ref{existence-metric-cone}), respectively. In Section 5, we give a generalization of Cheeger-Colding-Tian's Theorem \ref{thm-cct} in the setting of Bakry-\'{E}mery geometry (cf. Theorem \ref{dimension-n-4}). In Section 6, we prove Theorem \ref {thm-kahler-1} and Theorem \ref {thm-kahler-3}. Section 7 is an appendix where we explain how to use the technique of conformal transformation in [TZh] to give another proof of Theorem \ref {thm-kahler-1} and Theorem \ref {thm-kahler-2}. Section 8 is another appendix where the relation (\ref{J-invariant}) in Section 6 is proved. \noindent {\bf Acknowledgements.} The authors would like to thank Professor G. Tian for many valuable discussions on this work. They are also grateful to Professor T. 
Colding for his interest in the paper and, in particular, for valuable comments on Lemma \ref{cheeger-lemma} and Lemma \ref{cheeger-lemma-2}. \vskip3mm \section{Distance function comparison and other comparison lemmas} The notion of Bakry-\'Emery Ricci curvature ${\rm Ric}_{M,g}^f$ associated to a smooth function $f$ on a Riemannian manifold $(M,g)$ first appeared in [BE]. Related to conformal geometry, one can introduce a weighted volume form and an $f$-Laplace operator associated to $f$ on $(M,g)$ as follows, \begin{align} d\text{v}^f=e^{-f}d\text{v}~\text{and}~ \Delta^f=\Delta - \langle\nabla f,\nabla\rangle.\notag \end{align} Then $\Delta^f$ is a self-adjoint elliptic operator under the following weighted inner product, \[(u,v)=\int_M uv \,d\text{v}^f,~~\forall ~u,v\in L^2(M).\] That is \[\int_M \Delta^f u \,v \,d\text{v}^f=\int_M \langle\nabla u,\nabla v\rangle d\text{v}^f=\int_M \Delta^f v \,u \,d\text{v}^f.\] The divergence theorem with respect to $\Delta^f$ is \[\int_\Omega \Delta^f u \,d\text{v}^f=\int_{\partial\Omega} \langle\nabla u,n\rangle e^{-f}d\sigma,\] where $\Omega$ is a domain in $M$ with piece-wise smooth boundary, $n$ denotes the outer unit normal vector field on $\partial\Omega$ and $d\sigma$ is the induced area form of $g$ on $\partial\Omega$. Let $r=r(x)=\text{dist}(p,x)$ be a distance function on $(M,g)$. In [WW], Wei-Wylie compute the $f$-Laplacian for $r$ and obtain the following comparison result under the Bakry-\'Emery Ricci curvature condition. 
\begin{lem}([WW])\label{lapalace-esti-r} Let $(M,g)$ be an $n$-dimensional complete Riemannian manifold which satisfies \begin{align}\label{ricci-condition-1} {\rm Ric}_{g}^f\geq-(n-1)\Lambda^2g.\end{align} Then \begin{align}\label{lapalace-r-1} \Delta^f r\leq(n-1+4A)\Lambda\coth\Lambda r, ~\text{ if} ~ |f|\le A,\end{align} and \begin{align} \label{lapalace-r-3}\Delta^f r\leq(n-1)\Lambda\coth\Lambda r+A, ~\text{ if} ~ |\nabla f|\le A. \end{align} \end{lem} As an application of Lemma \ref{lapalace-esti-r}, Wei-Wylie prove the following weighted volume comparison theorem. \begin{theo}([WW])\label{volume-comparison} Let $(M,g)$ be an $n$-dimensional complete Riemannian manifold which satisfies (\ref{ricci-condition-1}). Then for any $0<r\le R$, \begin{align}\label{volume-estimate-1} \frac{{\rm vol}^f(B_p(r))}{{\rm vol}^f(B_p(R))}\geq\frac{{\rm vol}_{\Lambda}^{n+4A}(B(r))}{{\rm vol}_{\Lambda}^{n+4A}(B(R))}, ~\text{ if }~|f|\le A,\end{align} and \begin{align}\label{volume-estimate-2}\frac{{\rm vol}^f(B_p(r))}{{\rm vol}^f(B_p(R))}\geq e^{-AR}\frac{{\rm vol}_{\Lambda}^{n}(B(r))}{{\rm vol}_{\Lambda}^{n}(B(R))}, \text{ if}~ |\nabla f|\le A, \end{align} where ${\rm vol}_{\Lambda}^{n}(B(r))$ denotes the volume of the geodesic ball $B(r)$ with radius $r$ in the $n$-dimensional space form with constant curvature $-\Lambda$. \end{theo} Wei-Wylie's proof of Theorem \ref{volume-comparison} depends on a monotonicity formula for the weighted volume form as follows. 
By choosing polar coordinates with origin at $p$, we write \begin{align} e^{-f}d\text{v}= A^f(s,\theta)ds\wedge d\theta.\notag\end{align} Then \begin{align} \frac{d}{ds}A^f(s,\theta)= A^f(s,\theta) \Delta^f r. \notag \end{align} In case $|\nabla f|\le A$, it follows from (\ref{lapalace-r-3}) that \begin{align}\label{mono-1} \frac{d}{ds}A^f(s,\theta)\le A^f(s,\theta) l_{\Lambda,A}(r), \end{align} where $ l_{\Lambda,A}(r)=(n-1)\Lambda\coth\Lambda r+A.$ Thus if we put \begin{align}\label{L-function} L_{\Lambda,A}(r)=e^{Ar}(\frac{\sinh\Lambda r}{\Lambda})^{n-1}, \end{align} which is a solution of the equation \begin{align}\label{L-equation} \frac{L_{\Lambda,A}'}{L_{\Lambda,A}}=l_{\Lambda,A}, ~\frac{L_{\Lambda,A}(r)}{ r^{n-1}}\to 1~\text{as}~ r\to 0, \end{align} (\ref{mono-1}) is equivalent to the following monotonicity formula, \begin{align}\label{mono-formula} \frac{A^f(b,\theta)}{A^f(a,\theta)}\leq \frac{L_{\Lambda,A}(b)}{L_{\Lambda,A}(a)},~\forall~ b\ge a.\end{align} By a simple computation, we get (\ref{volume-estimate-2}) from (\ref{mono-formula}). Similarly, we can prove (\ref{volume-estimate-1}). Another application of Lemma \ref{lapalace-esti-r} is the following weighted Poincar\'{e} inequality. \begin{lem}\label{Poincare1} Let $(M,g)$ be a complete Riemannian manifold which satisfies \begin{align}\label{curvature-uasual-condition} {\rm Ric}_{g}^f\ge -(n-1)\Lambda^2 g,~\text{and}~|\nabla f|\le A. \end{align} Let $A_p(a,b)=B_p(b)\setminus \overline{B_p(a)}$ be an annulus in $M$. 
Then for any Lipschitz function $h$ in $A_p(a,b)$ with $h|_{\partial A_p(a,b)}=0$, it holds \begin{align}\label{poincare-inequality} \int_{A_p(a,b)}h^2e^{-f}d{\rm v} \leq c(a,b,A,\Lambda)\int_{A_p(a,b)}|\nabla h|^2e^{-f}d{\rm v}. \end{align} \end{lem} \begin{proof} By (\ref{lapalace-r-3}), it is easy to see that \begin{align} \Delta^f r^{-k} &\geq -kr^{-k-1}l_{\Lambda,A}(r)+k(k+1)r^{-k-2}\notag\\ &=kr^{-k-1}(-l_{\Lambda,A}(r)+\frac{k+1}{r}), \end{align} where $k$ is a positive real number. Putting $\frac{k+1}{b}=l_{\Lambda,A}(a)+1$, we have \[\Delta^f r^{-k}\geq c(a,b,\Lambda,A)>0.\] Thus for $h$ with zero boundary value, we get \begin{align} \nonumber c(a,b,\Lambda,A)\int_{A_p(a,b)}h^2e^{-f}d{\rm v}&\leq \int_{A_p(a,b)}(\Delta^f r^{-k}) h^2e^{-f}d{\rm v}&\\ \nonumber &=-2\int_{A_p(a,b)}h\langle \nabla h,\nabla(r^{-k})\rangle e^{-f}d{\rm v}&\\ \nonumber &\leq 2k\int_{A_p(a,b)}h|\nabla h|e^{-f}d{\rm v}&\\ \nonumber &\leq 2k (\int_{A_p(a,b)}h^2e^{-f}d{\rm v})^{\frac{1}{2}}(\int_{A_p(a,b)}|\nabla h|^2e^{-f}d{\rm v})^{\frac{1}{2}}. \end{align} Hence, (\ref{poincare-inequality}) follows from the above immediately. \end{proof} For the $f$-Laplace operator, we have the following Bochner-type identity, \begin{align}\label{bochner-inequ} &\frac{1}{2}\Delta^{f}|\nabla u|^2\notag\\ &=|\text{hess }u|^2+\langle\nabla u,\nabla\Delta^f u\rangle+{\rm Ric}_g^f(\nabla u,\nabla u), ~\forall~u\in C^\infty(M). \end{align} By (\ref{bochner-inequ}) and Lemma \ref{lapalace-esti-r}, we derive the following Li-Yau type gradient estimate for $f$-harmonic functions on $(M,g)$. 
\begin{prop}\label{gradient-esti}
Let $(M,g)$ be a complete Riemannian manifold which satisfies (\ref{curvature-uasual-condition}). Let $u>0$ be an $f$-harmonic function defined on the unit distance ball $B_p(1)\subset (M,g)$, i.e.
$$\triangle^f u=0, ~ \text{in }~B_p(1).$$
Then
\begin{align}
|\nabla u|^2\leq (C_1\Lambda+C_2A^2+C_3)u^2, ~\text{in}~ B_p(1/2),
\end{align}
where the constants $C_i$ $(1\leq i\leq 3)$ depend only on $n$.
\end{prop}

\begin{proof}
The proof is standard as in the case $f=0$ for a harmonic function (cf. [SY]). We let $v=\ln u$. Then
\begin{align}
\Delta^f v&=\Delta v-\langle\nabla f,\nabla v\rangle=\nabla\left(\frac{\nabla u}{u}\right)-\left\langle\nabla f, \frac{\nabla u}{u}\right\rangle\notag\\
&=\frac{\Delta u}{u}-\frac{|\nabla u|^2}{u^2}-\left\langle\nabla f, \frac{\nabla u}{u}\right\rangle=\frac{|\nabla u|^2}{u^2}.
\end{align}
Note that
$$|\text{hess } v|^2\geq\frac{|\Delta v|^2}{n}$$
and
$$|\Delta v|^2\geq\frac{|\Delta^f v|^2}{2}-C_1 A^2Q,$$
where $Q=|\nabla v|^2$. Thus applying (\ref{bochner-inequ}) to $v$, we get
\begin{align}\label{ineq-Q}
\frac{1}{2}\Delta^f Q\geq\frac{Q^2}{2n}-\frac{1}{n}C_1 A^2Q+\langle\nabla v,\nabla Q\rangle-\Lambda^2Q .
\end{align}
Choose a decreasing cut-off function $\eta(t)$ on $t\in [0,\infty)$ such that
\begin{align}
&\eta(t)=1 ~\text{if}~ t\le \frac{1}{2}; ~\eta(t)=0~\text{ if}~ t\ge 1;\notag\\
&-C_2\eta^{\frac{1}{2}}\le \eta^{\prime},~\text{if}~t\ge \frac{1}{2};\notag\\
&|\eta^{\prime\prime}|\leq C_2.\notag
\end{align}
Then if we let $\phi=\eta(r(\cdot,p))$,
$$|\nabla\phi|^2\phi^{-1}\le C_2^2,$$
and by Lemma \ref{lapalace-esti-r},
\begin{align}\label{lapalace-phi}
\Delta^f\phi=\Delta^f\eta(r)\ge - C_3(A+\Lambda).
\end{align}
Hence, by (\ref{ineq-Q}), we obtain
\begin{align}\label{ineq-phiQ}
\Delta^f(\phi Q)&=\Delta(\phi Q)-\langle\nabla f,\nabla(\phi Q)\rangle\notag\\
&= \phi\Delta^f Q+Q\Delta^f\phi+ 2\langle\nabla Q,\nabla \phi\rangle\notag\\
&\ge \phi\left(\frac{Q^2}{n}-\left(\frac{2}{n}C_1 A^2+2\Lambda^2\right)Q\right)-C_3(A+\Lambda)Q\notag\\
&\quad+2\langle\nabla v,\nabla Q\rangle+2\langle\nabla Q,\nabla \phi\rangle.
\end{align}
Suppose that $(Q\phi)(q)=\max_M\{Q\phi\}$ for some $q\in M$. Then at this point, it holds $\nabla(Q\phi)=0$. It follows that
$$\nabla Q=-\frac{Q\nabla \phi}{\phi},$$
and
\begin{align}
|\langle\nabla Q,\nabla\phi\rangle|=\frac{Q}{\phi}|\nabla\phi|^2\leq C_2^2Q.
\notag
\end{align}
Also
\begin{align}
|\langle\nabla Q,\nabla v\rangle|\leq Q^{\frac{3}{2}}\frac{|\nabla\phi|}{\phi}\le C_2Q^{\frac{3}{2}}\phi^{-\frac{1}{2}}.\notag
\end{align}
Therefore, by applying the maximum principle to $\phi Q$ at the point $q$, we get from (\ref{ineq-phiQ}),
\begin{align}
0&\geq\phi\left(\frac{Q^2}{n}-\frac{2}{n}C_1A^2Q- 2C_2Q^{\frac{3}{2}}\phi^{-\frac{1}{2}}\right)\notag\\
&\quad-C_3(\Lambda Q+A)-2C_2Q.\notag
\end{align}
As a consequence, we derive
$$ \phi Q\le (\phi Q)(q)\leq C_4\Lambda+C_5A^2+C_6, ~\text{in}~ B_p(1).$$
This proves the proposition.
\end{proof}

As an application of Proposition \ref{gradient-esti}, we are able to construct a cut-off function with bounded gradient and $f$-Laplacian. Such a function will be used in the next section.

\begin{lem}\label{cut-off}
Under the condition (\ref{curvature-uasual-condition}) in Lemma \ref{Poincare1}, there exists a cut-off function $\phi$ supported in $B_p(2)$ such that

i) $\phi\equiv1$, in $B_p(1)$;

ii)
\begin{align}
|\nabla \phi|, |\Delta^f\phi| \le C(n,\Lambda, A).\notag
\end{align}
\end{lem}

\begin{proof}
We will use an argument from Theorem 6.33 in [CC1]. First we consider a solution of the ODE,
\begin{align}
G''+G'l_{\Lambda,A}=1, ~\text{on}~ [1,2],
\end{align}
with $G(1)=a$ and $G(2)=0$. It is easy to see that there is a number $a=a(n,\Lambda, A)$ such that $G'<0$.
Then by (\ref{lapalace-r-3}), we have
$$\Delta^f G(d(p,\cdot))\geq1.$$
Let $w$ be a solution of the equation,
\begin{align}
\Delta^f w=\frac{1}{a}, ~\text{in}~B_p(2)\setminus \overline{B_p(1)},\notag
\end{align}
with $w=1$ on $\partial B_p(1)$ and $ w=0$ on $\partial B_p(2)$. Thus by the maximum principle, we get
$$w\geq \frac{G(d(\cdot,p))}{a}.$$
Secondly, we choose another function $H$ with $H'>0$ which is a solution of the ODE,
\begin{align}
H''+H'l_{\Lambda,A}=1, ~ \text{ on } [0,\infty),
\end{align}
with $H(0)=0$. Then by (\ref{lapalace-r-3}), we have
$$\Delta^f H(d(x,\cdot))\leq 1,~\text{ for any fixed point} ~ x. $$
Thus by the maximum principle, we get
$$w(y)-\frac{H(d(x,y))}{a}\leq \max\left\{1-\frac{H(d(x,p)-1)}{a},0\right\}$$
for any $y$ in the annulus $A_p(1,2)=B_p(2)\setminus \overline{B_p(1)}$. It follows
$$w(x)\leq \max\left\{1-\frac{H(d(x,p)-1)}{a}, 0\right\},~\forall~ x\in A_p(1,2).$$
Now we choose a number $\eta(n,\Lambda,A)$ such that $\frac{G(1+\eta)}{a}>1-\frac{H(1-\eta)}{a}$ and we define a function $\psi(x)$ on $[0,1]$ with bounded derivative up to second order, which satisfies
\begin{align}
\psi(x)=1, \text{ if } x\geq \frac{G(1+\eta)}{a}\notag
\end{align}
and
\begin{align}
\psi(x)=0, \text{ if } x\leq \max\left\{1-\frac{H(1-\eta)}{a}, 0\right\}.\notag
\end{align}
It is clear that $\phi=\psi\circ w$ is constant near the boundary of $A_p(1,2)$. So we can extend $\phi$ inside $B_p(1)$ by setting $\phi=1$. By Proposition \ref{gradient-esti}, one sees that $|\nabla\phi|$ is bounded by a constant $C(n,\Lambda, A)$ in $B_p(2)$.
Since
$$\Delta^f \phi=\psi''|\nabla w|^2+\psi'\Delta^f w, $$
we also derive that $|\Delta^f\phi| \le C(n,\Lambda, A)$.
\end{proof}

\vskip3mm

\section{$L^2$-Integral estimates for Hessians of functions}

In this section, we establish various integral comparisons of gradient and Hessian between appropriate $f$-harmonic functions and coordinate functions or distance functions. We start with a basic lemma about a distance function along a long approximate line in a manifold.

\begin{lem}\label{coordinate-lemma1}
Let $(M,g)$ be a complete Riemannian manifold which satisfies
\begin{align}\label{flat-ricci-condition}
{\rm Ric}_g^f\geq-\frac{n-1}{R^2} g~\text{and}~|f|\leq A.
\end{align}
Suppose that there are three points $p, q^+, q^-$ in $M$ which satisfy
\begin{align}
d(p,q^+)+d(p,q^-)-d(q^+,q^-)<\epsilon
\end{align}
and
\begin{align}
d(p,q^+),d(p,q^-)> R.
\end{align}
Then for any $q \in B_p(1)$, the following holds,
$$E(q):=d(q,q^+)+d(q,q^-)-d(q^+,q^-) <\Psi\left(\epsilon,\frac{1}{R};A, n\right), $$
where the quantity $\Psi(\epsilon, \frac{1}{R}; A, n)$ means that it goes to zero as $\epsilon, \frac{1}{R}$ go to zero while $A, n$ are fixed.
\end{lem}

\begin{proof}
Let
\begin{align}\label{lap-com}
\tilde l(s)=(n-1+4A)\frac{1}{R}\coth\frac{s}{R}.
\end{align}
For given $t>0$, we construct a function $G=G_t(s)$ on $[0,t]$ which satisfies the ODE,
\begin{align}\label{comparison-function}
G''+\tilde l(s)G'=1, \quad G'(s)<0,
\end{align}
with $G(0)=+\infty$ and $G(t)=0$. Then $G(s)\sim s^{2-n-4A}$ $(s\rightarrow 0)$.
Furthermore, by (\ref{lapalace-r-1}) in Lemma \ref{lapalace-esti-r}, we have
\begin{align}
\Delta^f G(d(x,\cdot))=G'\Delta^f d(x,\cdot)+G''\geq G''+G'\tilde l(s)=1.
\end{align}
By Lemma \ref{lapalace-esti-r},
\begin{align}
\Delta^f E(q)\leq \frac{10(n-1+A)}{R}:=b.
\end{align}
We claim: For any $0<c<1$,
$$E(q)\leq 2c+bG_1(c)+\epsilon,~\text{ if}~ bG_1(c)>\epsilon.$$
Suppose that the claim is not true. Then there exists a point $q_0\in B_p(1)$ such that for some $c$,
$$E(q_0)>2c+bG_1(c)+\epsilon.$$
We consider $u(x)=bG_1(d(q_0,x))-E(x)$ in the annulus $A_{q_0}(c,1)$. Clearly,
$$\Delta^f u\geq 0.$$
Note that we may assume that $p \in A_{q_0}(c,1)$. Otherwise $d(q_0,p)<c$ and $E(q_0)\leq E(p)+2c$, so the claim is true and the proof is complete. On the other hand, it is easy to see that on the inner boundary $\partial B_{q_0}(c)$,
\begin{align}
\nonumber u(x)=bG_1(c)-E(x)\leq bG_1(c)-E(q_0)-2c\leq-\epsilon,
\end{align}
and on the outer boundary $\partial B_{q_0}(1)$,
\begin{align}
\nonumber u(x)=-E(x)\leq 0.
\end{align}
Thus applying the maximum principle, it follows that $ u(p)\le 0$. However,
$$ u(p)=bG_1(d(p,q_0))-E(p)\geq bG_1(c)-\epsilon, $$
which is impossible. Therefore, the claim is true. By choosing $c$ with the order $b^{\frac{1}{n-1+4A}}$ in the above claim, we prove Lemma \ref{coordinate-lemma1}.
\end{proof}

Let $b^+(x)=d(q^+,x)- d(q^+,p)$ and let $h^+$ be an $f$-harmonic function which satisfies
$$\triangle^f h^+=0,~ \text{in} ~B_p(1),$$
with $h^+=b^+$ on $\partial B_p(1)$.
Then

\begin{lem}\label{harmonic-estimate}
Under the conditions in Lemma \ref{coordinate-lemma1} with $|\nabla f|\le A$, we have
\begin{align}\label{c0-h}
\|h^+-b^+\|_{C^0(B_p(1))}<\Psi(1/R,\epsilon; A),
\end{align}
\begin{align}\label{h-gradient}
\frac{1}{{\rm vol}(B_p(1))}\int_{B_p(1)}|\nabla h^+-\nabla b^+|^2 e^{-f}d{\rm v} <\Psi(1/R,\epsilon; A),
\end{align}
\begin{align}\label{hessian-integral}
\frac{1}{{\rm vol}(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})} |\text{hess }h^+|^2e^{-f}d{\rm v} <\Psi(1/R,\epsilon; A).
\end{align}
\end{lem}

\begin{proof}
Choose a point $q$ in $\partial B_p(2)$ and let $g=\varphi(d(q,\cdot))$, where $\varphi$ is a solution of (\ref{comparison-function}) restricted on the interval $[1,3]$. Then
\begin{align}
\Delta^f g=\varphi'\Delta^f r+\varphi''\geq\varphi' \tilde l+\varphi'' =1,~\text{in}~ B_p(1).
\end{align}
It follows that
\begin{align}
\nonumber \Delta^f (h^+-b^++\Psi(1/R,\epsilon; A)g) > 0,~\text{in}~ B_p(1).
\end{align}
Thus by the maximum principle, we get
$$h^+-b^+ < \Psi(1/R,\epsilon; A).$$
On the other hand, we have
$$\Delta^f ( -b^--h^++\Psi(1/R,\epsilon; A)g) > 0,~\text{in}~ B_p(1),$$
where $b^-=d(q^-,x)- d(p,q^{-})$.
Since $b^++b^-$ is small as long as $1/R$ and $\epsilon$ are small by Lemma \ref{coordinate-lemma1}, by the maximum principle, we also get
$$h^+-b^+ > -(b^++b^-)-\Psi(1/R,\epsilon; A) > -\Psi(1/R,\epsilon; A).$$
For the second estimate (\ref{h-gradient}), we see
\begin{align}
& \int_{B_p(1)}|\nabla h^+-\nabla b^+|^2e^{-f}d\text{v} \notag\\
&=\int_{B_p(1)}(h^+-b^+)(\triangle^f b^+-\triangle^f h^+)e^{-f}d\text{v}\notag\\
& <\Psi(1/R,\epsilon; A)\int_{B_p(1)}|\triangle^f b^+|e^{-f}d\text{v},\notag
\end{align}
and
\begin{align}
& \int_{B_p(1)}|\triangle^f b^+|e^{-f}d\text{v}\notag\\
&\leq\left|\int_{B_p(1)}\triangle^f b^+e^{-f}d\text{v}\right|+2e^{A} \sup_{B_p(1)}(\triangle^f b^+)\,{\rm vol}(B_p(1))\notag\\
&\leq e^{A}{\rm vol}({\partial B_p(1)})+C(A){\rm vol}(B_p(1))\notag\\
&\leq C'(A){\rm vol}(B_p(1)).\notag
\end{align}
Here we used (\ref{mono-formula}) at the last inequality. Then (\ref{h-gradient}) follows.

To get (\ref{hessian-integral}), we choose a cut-off function $\varphi$ supported in $B_p(1)$ as constructed in Lemma \ref{cut-off}. Since
\begin{align}
&\nonumber \Delta^f(|\nabla h^+|^2-|\nabla b^+|^2)\notag\\
&=|\text{hess }h^+|^2+{\rm Ric}_g^f(\nabla h^+,\nabla h^+), \notag
\end{align}
multiplying both sides of the above by $\varphi e^{-f}d\text{v}$ and using integration by parts, we get
\begin{align}
&\int_{B_p(1)} \varphi|\text{hess }h^+|^2 e^{-f}d\text{v}\notag\\
&\le \int_{B_p(1)} \Delta^f \varphi (|\nabla h^+|^2-|\nabla b^+|^2) e^{-f}d\text{v}+\frac{n-1}{R^2}\int_{B_p(1)} \varphi|\nabla h^+|^2 e^{-f}d\text{v}.\notag
\end{align}
Note that $|\nabla h^+|$ is locally bounded by Proposition \ref{gradient-esti}, we derive (\ref{hessian-integral}) from (\ref{h-gradient}) immediately.
\end{proof}

Next, we construct an approximate function to compare the square of a distance function with asymptotic integral gradient and Hessian estimates. Such estimates are crucial in the proof of the metric-cone theorem in Section 4. Let $q\in M$ and $h$ be a solution of the following equation,
\begin{align}\label{f-harmonic-radial}
\Delta^f h=n, ~\text{in}~ B_q(b)\setminus \overline{B_q(a)}, ~h|_{\partial B_q(b)}=\frac{b^2}{2}~\text{ and}~ h|_{\partial B_q(a)}=\frac{a^2}{2}.
\end{align}
Let $p=\frac{r(q,\cdot)^2}{2}$. Then

\begin{lem}\label{harmonic-estimate-annual-1}
Let $(M,g)$ be a complete Riemannian manifold which satisfies
$${\rm Ric}^f_g\geq-(n-1)\epsilon^2\Lambda^2g ~\text{and}~|\nabla f|\leq\epsilon A.$$
Let $a<b$. Suppose that
\begin{align}\label{cone-volume-condition-2}
\frac{{\rm vol}^f(\partial B_q(b))}{{\rm vol}^f(\partial B_q(a))}\geq(1-\omega)\frac{L_{\epsilon\Lambda,\epsilon A}(b)}{L_{\epsilon\Lambda,\epsilon A}(a)}
\end{align}
for some $\omega>0$, where $L_{\epsilon\Lambda,\epsilon A}(r)$ is the function defined by (\ref{L-function}) with respect to constants $\epsilon\Lambda$ and $\epsilon A.$ Then
\begin{align}\label{gradient-annual}
\frac{1}{{\rm vol}(A_q(a,b))} \int_{A_q(a,b)}|\nabla p-\nabla h|^2 e^{-f}d\text{v} < \Psi(\omega,\epsilon;\Lambda, A,a,b).
\end{align}
Moreover,
\begin{align}\label{h-p-estimate}
\|h-p\|_{C^0(A_q(a',b'))}< \Psi(\omega,\epsilon;\Lambda, A,a,b,a',b'),
\end{align}
where $a<a'<b'<b$.
\end{lem}

\begin{proof}
Since
$$\Delta^f r\leq(n-1)\epsilon \Lambda\coth(\epsilon \Lambda r)+\epsilon A=l_{\epsilon\Lambda,\epsilon A},$$
we have
\begin{align}\label{lapalace-p}
\Delta^f p=p''+p' \Delta^f r < n+\Psi(\epsilon;\Lambda, A, a, b), ~\text{ in }A(a,b).
\end{align}
Thus we get
\begin{align}\label{lemma-2-5-1}
\frac{1}{{\rm vol}(A_q(a, b))}\int_{A_q(a,b)}\Delta^f p\, e^{-f} d\text{v}< e^{-f(0)}( n+\Psi(\epsilon;\Lambda, A, a, b)).
\end{align}
On the other hand, by the monotonicity formula (\ref{mono-formula}), we have
\begin{align}
\nonumber \frac{\int_a^b L_{\epsilon\Lambda,\epsilon A}(s)ds}{L_{\epsilon\Lambda,\epsilon A}(b)}{\rm vol}^f(\partial B_q(b))\leq{\rm vol}^f(A_q(a, b))\leq \frac{\int_a^b L_{\epsilon\Lambda,\epsilon A}(s)ds}{L_{\epsilon\Lambda,\epsilon A}(a)}{\rm vol}^f(\partial B_q(a)).
\end{align}
It follows by (\ref{cone-volume-condition-2}),
$${\rm vol}^f(A_q(a, b))\leq (1-\omega)^{-1}\frac{\int_a^b L_{\epsilon\Lambda,\epsilon A}(s)ds}{L_{\epsilon\Lambda,\epsilon A}(b)}{\rm vol}^f(\partial B_q(b)).$$
Since
\begin{align}
\nonumber \int_{A_q(a,b)}\Delta^f p\, e^{-f} d\text{v}=b\,{\rm vol}^f(\partial B_q(b)) -a\,{\rm vol}^f(\partial B_q(a)),
\end{align}
we get
\begin{align}
\nonumber &\frac{1}{{\rm vol}^f(A_q(a, b))} \int_{A(a,b)}\Delta^f p\, e^{-f} d\text{v}\\
&\ge (1-\omega)\frac{L_{\epsilon\Lambda,\epsilon A}(b)} {\int_a^b L_{\epsilon\Lambda,\epsilon A}(s)ds} \left( b- a\frac{{\rm vol}^f(\partial B_q(a))}{ {\rm vol}^f(\partial B_q(b))}\right). \notag
\end{align}
Observe that ${\rm vol}^f$ is close to $e^{-f(0)}{\rm vol}$ and $\frac{L_{\epsilon\Lambda,\epsilon A}(s)}{s^{n-1}}$ is close to a constant as $\epsilon$ is small. Hence we derive immediately,
\begin{align}\label{lemma-2-5-2}
\frac{1}{{\rm vol}(A_q(a, b))}\int_{A_q(a,b)}\Delta^f p\, e^{-f} d\text{v}> e^{-f(0)} (n+\Psi(\omega,\epsilon;\Lambda, A, a, b)).
\end{align}
By (\ref{lemma-2-5-1}) and (\ref{lemma-2-5-2}), we have
$$\left|\int_{A_q(a,b)}(\Delta^f p-n) e^{-f} d\text{v}\right|< {\rm vol}(A_q(a, b))\, \Psi(\omega,\epsilon;\Lambda, A, a, b).$$
Then one can follow the argument for the estimate (\ref{h-gradient}) in Lemma \ref{harmonic-estimate} to obtain (\ref{gradient-annual}).
Applying Lemma \ref{Poincare1} to the function $p-h$ together with the estimate (\ref{gradient-annual}), we see that
\begin{align}
\nonumber \frac{1}{{\rm vol}^f(A_q(a,b))}\int_{A_q(a,b)}|p-h|^2 e^{-f}d\text{v} < \Psi(\omega,\epsilon;\Lambda, A,a,b).
\end{align}
Then for any point $x\in A_q(a',b')$, by (\ref{volume-estimate-2}), there is a point $y\in B_x(\eta)$ such that
\begin{align}
|p(y)-h(y)|^2&\leq \frac{{\rm vol}^f(A_q(a,b))}{{\rm vol}^f(B_x(\eta))} \frac{1}{{\rm vol}^f(A_q(a,b))}\int_{A_q(a,b)}|p-h|^2 e^{-f}d\text{v}\notag\\
&< \frac{C(\Lambda, A, b)}{\eta^n}\Psi(\omega,\epsilon;\Lambda, A,a,b).\notag
\end{align}
On the other hand, by Proposition \ref{gradient-esti}, we have
\begin{align}
|(p(x)-h(x))-(p(y)-h(y))| &\leq (\|\nabla h\|_{C^0(A_q(a'-\eta,b'+\eta))}+1)\,\text{dist}(x,y)\notag\\
&\leq C(\Lambda,A,a,b,a'-\eta,b'+\eta)\eta.\notag
\end{align}
Thus we derive
\begin{align}
&|p(x)-h(x)|\notag\\
&< \frac{C(\Lambda, A,b)}{\eta^n}\Psi(\omega,\epsilon;\Lambda, A,a,b)+C(\Lambda,A,a,b,a'-\eta,b'+\eta)\eta.\notag
\end{align}
Choosing $\eta=\Psi^{\frac{1}{n+1}}$, we prove (\ref{h-p-estimate}).
\end{proof}

Furthermore, we have

\begin{lem}\label{harmonic-estimate-annual-2}
Under the condition in Lemma \ref{harmonic-estimate-annual-1}, it holds
\begin{align}
&\frac{1}{{\rm vol}(A_q(a_2,b_2))}\int_{A_q(a_2,b_2)}|\text{hess }h-g|^2e^{-f} d\text{v}\notag\\
&< \Psi(\omega,\epsilon;\Lambda, A,a_1,b_1,a_2,b_2,a,b),
\end{align}
where $a<a_1<a_2<b_2<b_1<b$.
\end{lem}

\begin{proof}
First observe that
\begin{align}
\nonumber &\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}|\text{hess }h-g|^2e^{-f} d\text{v}\\
\nonumber &=\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}|\text{hess }h|^2e^{-f} d\text{v} +\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}(n-2\Delta h)e^{-f}d\text{v}.
\end{align}
Let $\varphi$ be a cut-off function of $A_q(a,b)$ with support in $A_q(a_1,b_1)$ as constructed in Lemma \ref{cut-off} which satisfies,
\begin{align}
& 1) ~\varphi\equiv 1,~\text{ in}~ A_q(a_2,b_2);\notag\\
& 2) ~|\nabla\varphi|, |\triangle^f\varphi|~\text{ is bounded in}~ A_q(a,b).\notag
\end{align}
Then
\begin{align}\label{two-parts}
\nonumber &\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi|\text{hess }h-g|^2e^{-f} d\text{v}\\
\nonumber &=\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi|\text{hess }h|^2e^{-f} d\text{v}\\
&+\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi(n-2\Delta h)e^{-f} d\text{v}.
\end{align}
By the Bochner formula (\ref{bochner-inequ}), we have
\begin{align}
\nonumber &\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi|\text{hess }h|^2e^{-f} d\text{v}\\
\nonumber &<\frac{1}{2{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi\Delta^f|\nabla h|^2e^{-f} d\text{v}+ \Psi(\epsilon;\Lambda,A,a_1,b_1,a_2,b_2,a,b).
\end{align}
It follows by Lemma \ref{harmonic-estimate-annual-1},
\begin{align}
\nonumber &\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi|\text{hess }h|^2e^{-f} d\text{v}\\
\nonumber &<\frac{1}{2{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi\Delta^f|\nabla p|^2e^{-f} d\text{v}+ \Psi(\epsilon,\omega;\Lambda,A,a_1,b_1,a_2,b_2,a,b) \\
\nonumber &=\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi\Delta^f p\, e^{-f} d\text{v}+ \Psi(\epsilon,\omega;\Lambda,A,a_1,b_1,a_2,b_2,a,b).
\end{align}
On the other hand,
\begin{align}
\nonumber &\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi(n-2\Delta h)e^{-f} d\text{v}\\
\nonumber & =\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi(-n-2\langle\nabla f,\nabla h\rangle)e^{-f} d\text{v}\\
\nonumber &=\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}-n\varphi e^{-f} d\text{v}+\Psi(\epsilon,\omega;\Lambda,A,a_1,b_1,a,b).
\end{align}
Hence we derive from (\ref{two-parts}),
\begin{align}
\nonumber &\frac{1}{{\rm vol}(A_q(a_2,b_2))}\int_{A_q(a_2,b_2)}|\text{hess }h-g|^2e^{-f} d\text{v}\\
\nonumber & \leq\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi|\text{hess }h-g|^2e^{-f} d\text{v}\\
\nonumber &<\frac{1}{{\rm vol}(A_q(a,b))}\int_{A_q(a,b)}\varphi(\Delta^f p-n)e^{-f} d\text{v}+ \Psi(\epsilon,\omega;\Lambda,A,a_1,b_1,a_2,b_2,a,b) \\
\nonumber &< \Psi(\epsilon,\omega;\Lambda,A,a_1,b_1,a_2,b_2,a,b).
\end{align}
Here we used (\ref{lapalace-p}) at the last inequality.
\end{proof}

\section{A splitting theorem}

In this section, we prove the splitting theorem of Cheeger-Colding in the Bakry-\'{E}mery geometry [CC1]. Recall that $\gamma(t)$ $(t\in (-\infty, \infty))$ is a line in a metric space $Y$ if
$$\text{dist}(\gamma(t_1),\gamma(t_2))=|t_1-t_2|, ~\forall~t_1,t_2\in (-\infty,\infty).$$

\begin{theo}\label{splitting-theorem}
Let $(M_i,g_i; p_i)$ be a sequence of Riemannian manifolds which satisfy
$${\rm Ric}_{M_i,g_i}^{f_i}\geq{-\epsilon_i^2} g_i,~|f_i|,~|\nabla f_i|\leq A.$$
Let $(Y;y)$ be a limit metric space of $(M_i,g_i; p_i)$ in the pointed Gromov-Hausdorff topology as $\epsilon_i\rightarrow 0$. Suppose that $Y$ contains a line passing through $y$. Then $Y=\mathbb{R}\times X$ for some metric space $X$.
\end{theo}

We will follow the argument in [CC1] to prove Theorem \ref{splitting-theorem}. The proof depends on the following triangle lemma in terms of small integral Hessian of an appropriate function.

\begin{lem}\label{cheeger-lemma}
Let $x,y,z$ be three points in a complete Riemannian manifold $M$. Let $\gamma(s)$ $(s\in [0,a], ~a=d(x,y))$ be a geodesic curve connecting $x,y$ and $\gamma_s(t)$ $(t\in [0,l(s)], ~l(s)=d(z,\gamma (s)))$ a family of geodesic curves connecting $z$ and $\gamma(s)$.
Suppose that $h$ is a smooth function on $M$ which satisfies
\begin{align}
&i) ~ |h(z)-h(x)|<\delta\ll1;\notag\\
& ii)~ \int_{[0,a]}|\nabla h(\gamma(s))-\gamma'(s)|ds<\delta\ll1; \notag\\
&iii)~\int_{[0,a]}\int_{[0,l(s)]}|\text{hess} ~h(\gamma_s(t))|dtds<\delta\ll1.\notag
\end{align}
Then
\begin{align}\label{triangular-equ}
|d(z,x)^2+d(x,y)^2-d(y,z)^2|<\epsilon(\delta)\ll1.
\end{align}
\end{lem}

\begin{proof}
The proof below comes essentially from Lemma 9.16 in [Ch2]. First by the condition ii), we have
\begin{align}
|h(\gamma(s))-h(\gamma(0))-s|=\left|\int_0^s\langle \nabla h(\gamma(s))-\gamma'(s),\gamma'(s)\rangle ds\right|\leq \delta.\notag
\end{align}
Then
$$s= h(\gamma(s))-h(x)+o(1).$$
By the condition i), it follows
\begin{align}
\frac{1}{2} d(x,y)^2&=\int_0^a s\,ds\notag\\
&=\int_0^a (h(\gamma(s))-h(x))ds+o(1)\notag\\
&=\int_0^a (h(\gamma_s(l(s)))-h(\gamma_s(0)))ds+o(1)\notag\\
&=\int_0^{l(s)}\int_0^a \langle\nabla h(\gamma_s(t)),\gamma_s'(t)\rangle dtds+o(1).\notag
\end{align}
On the other hand,
\begin{align}
&| \langle\nabla h(\gamma_s(t)),\gamma_s'(t)\rangle- \langle\nabla h(\gamma_s(l(s))),\gamma_s'(l(s))\rangle|\notag\\
&=\left|\int_t^{l(s)} \text{hess}\,h(\gamma_s'(\tau), \gamma_s'(\tau)) d\tau\right|\notag\\
&\le \int_0^{l(s)} |\text{hess}\,h(\gamma_s'(t), \gamma_s'(t))| dt.\notag
\end{align}
Hence from the condition iii), we get
\begin{align}\label{short-distance}
\frac{1}{2} d(x,y)^2&=\int_0^{l(s)}\int_0^a \langle\nabla h(\gamma_s(l(s))),\gamma_s'(l(s))\rangle dtds+o(1)\notag\\
&=\int_0^a \langle\nabla h(\gamma_s(l(s))),\gamma_s'(l(s))\rangle l(s)ds+o(1)\notag\\
&=\int_0^a \langle\nabla h(\gamma(s)),\gamma_s'(l(s))\rangle l(s)ds+o(1).
\end{align}
Secondly, by the first variation formula of the geodesic curve, we see that
$$ l'(s)=\langle\gamma_s'(l(s)),\gamma'(s)\rangle.$$
Then by the condition ii), we obtain
\begin{align}
&\int_0^a \langle\nabla h(\gamma(s)),\gamma_s'(l(s))\rangle l(s)ds\notag\\
&=\int_0^a l'(s) l(s)ds +o(1)\notag\\
&=\frac{1}{2} (d(y,z)^2-d(z,x)^2).\notag
\end{align}
Therefore, combining (\ref{short-distance}), we derive (\ref{triangular-equ}).
\end{proof}

In order to get the above configuration in Lemma \ref{cheeger-lemma}, we need a segment inequality lemma in terms of the Bakry-\'{E}mery Ricci curvature. In the following, we will always assume that the manifold $(M,g)$ satisfies
\begin{align}\label{be-curvature-condition}
{\rm Ric}^f_g\geq-(n-1)\Lambda^2 g, ~|f|, |\nabla f|\leq A,
\end{align}
and the volume form $d\text{v}$ is replaced by $d\text{v}^f=e^{-f}d\text{v}$.

\begin{lem}\label{equ-seg}
Let $A_1, A_2$ be two subsets of $M$ and $W$ another subset of $M$ such that $\bigcup_{y_1\in A_1,y_2\in A_2}\gamma_{y_1y_2}\subseteq W$, where $\gamma_{y_1y_2}$ is a minimal geodesic curve connecting $y_1$ and $y_2$ in $M$.
Let
$$D=\sup\{d(y_1, y_2)|~y_1\in A_1,y_2\in A_2\}.$$
Then for any smooth function $e$ on $W$, it holds
\begin{align}\label{segment-inequ}
&\int_{A_1\times A_2}\int_0^{d(y_1,y_2)}e(\gamma_{y_1,y_2}(s))ds\notag\\
&\leq c(n,\Lambda,A)D[{\rm vol}^f(A_1)+{\rm vol}^f(A_2)]\int_W e,
\end{align}
where $c(n,\Lambda,A)=\sup_{s,u}\{L_{\Lambda, A}(s)/L_{\Lambda, A}(u)|~0<\frac{s}{2}\leq u\leq s\}$.
\end{lem}

\begin{proof}
Note that
\begin{align}
\nonumber &\int_{A_1\times A_2}\int_0^{d(y_1,y_2)}e(\gamma_{y_1,y_2}(s))ds\\
\nonumber &=\int_{A_1}dy_1\int_{A_2}\int_{\frac{d(y_1,y_2)}{2}}^{d(y_1,y_2)}e(\gamma_{y_1y_2}(s))dsdy_2\\
&\nonumber+\int_{A_2}dy_2\int_{A_1}\int_{\frac{d(y_1,y_2)}{2}}^{d(y_1,y_2)}e(\gamma_{y_1y_2}(s))dsdy_1.
\end{align}
On the other hand, for a fixed $y_1\in A_1$, by using the monotonicity formula (\ref{mono-formula}), we have
\begin{align}
\nonumber &\int_{A_2}\int_{\frac{d(y_1,y_2)}{2}}^{d(y_1,y_2)}e(\gamma_{y_1y_2}(s))dsdy_2\\
\nonumber&=\int_{A_2}\int_{\frac{r}{2}}^{r}e(\gamma_{y_1y_2}(s))A^f(r,\theta)drd\theta ds\\
\nonumber &\leq c(n,\Lambda,A)\int_{A_2}\int_{\frac{r}{2}}^{r}e(\gamma_{y_1y_2}(s))A^f(s,\theta)drd\theta ds\\
\nonumber &\leq c(n,\Lambda,A)D\int_W e.
\end{align}
Similarly,
\begin{align}
\nonumber &\int_{A_1}\int_{\frac{d(y_1,y_2)}{2}}^{d(y_1,y_2)}e(\gamma_{y_1y_2}(s))dsdy_1\\
\nonumber &\leq c(n,\Lambda,A)D\int_W e.
\end{align}
Then (\ref{segment-inequ}) follows from the above two inequalities.
\end{proof}

Using the same argument above, we can prove

\begin{lem}\label{equ-rad}
Given two points $q^-,q$ with $d(q,q^-)\ge 10$ and a smooth function $e$ with support in $B_p(1)$, then for any $B_q(r)\subset B_p(1)$ the following inequality holds,
\begin{align}
\int_{B_q(r)}dy\int_0^{d(q^-,y)}e(\gamma_{q^-y}(s))ds\leq c(\Lambda, A)\int_{B_p(1)}e(y)dy.
\end{align}
\end{lem}

Combining Lemma \ref{equ-seg} and Lemma \ref{equ-rad}, we get another segment inequality lemma as follows.

\begin{lem}\label{approxi-1}
Let $ b^+(q)=d(q,q^+)-d(p,q^+)$ for any $q$ with $d(q,q^+)\geq 10$. Let $h^+$ be a smooth function which satisfies
\begin{align}
\nonumber \int_{B_p(1)}|\nabla h^+-\nabla b^+|\leq \epsilon\, {\rm vol}^f(B_p(1))
\end{align}
and
\begin{align}
\nonumber \int_{B_p(1)}|\text{hess }h^+|\leq \epsilon\, {\rm vol}^f(B_p(1)).
\end{align}
We assume that Lemma \ref{equ-seg} and Lemma \ref{equ-rad} are true.
Then for any two points $q,q' \in B_p(\frac{1}{8})$ and any small number $\eta>0$, there exist $y^*,z^*$ with $d(y^*,q)<\eta, d(z^*,q')<\eta$, and a minimal geodesic line $\gamma(t)$ $(0\leq t\leq l(y^*))$ from $y^*$ to $q^{-}$ with $\gamma(0)=y^*,\gamma(l(y^*))\in \partial B_p(\frac{1}{8})$ such that the following is true: \begin{align}\label{lemma-3.5-1} \int_0^{l(y^*)}|\nabla h^+(s)-\gamma'(s)|ds\leq\epsilon\frac{{\rm vol}^f(B_q(2))}{{\rm vol}^f(B_q(\eta))},\end{align} \begin{align}\label{lemma-3.5-2} \int_0^{l(y^*)} ds\int_0^{d(z^*,\gamma(s))}|{\rm hess }~h^+(\gamma_s(t))|dt \leq\epsilon(\frac{{\rm vol}^f(B_q(2))}{{\rm vol}^f(B_q(\eta))})^2, \end{align} where $\gamma_s(t)$ is the minimal geodesic curve connecting $ \gamma(s)$ and $z^*$. \end{lem} \begin{proof} Choose a cut-off function $\phi=\phi(\text{dist}(p,\cdot))$ with support in $B_p(1)$. Let \begin{align} \nonumber e=\phi|\nabla h^+-\nabla b^+|,e_1=\phi|\text{hess }h^+|, \\ \nonumber e_2(y)=\int_{B_{q'}(\eta)}dz\int_0^{d(y,z)}e_1(\gamma_{yz}(s))ds. \end{align} Then by Lemma \ref{equ-rad}, we have \begin{align}\label{estimate-e} \int_{B_q(\eta)}\int_0^{d(q^-,y)}e(\gamma_{q^-y}(s))dsdy\leq c(A,\Lambda)\int_{B_p(1)}e(y)dy.
\end{align} On the other hand, by Lemma \ref{equ-seg}, one sees \begin{align} \nonumber \int_{B_p(1)}e_2(y)dy&=\int_{B_p(1)} dy\int_{B_{q'}(\eta)}dz\int_0^{d(y,z)}e_1(\gamma_{yz}(s))ds\\ &\leq c_1(\Lambda, A)\text{vol }^f (B_p(1))\int_{B_p(1)}e_1(y)dy.\notag \end{align} Thus by Lemma \ref{equ-rad}, we get \begin{align}\label{estimate-e2} &\int_{B_q(\eta)}\int_0^{d(q^-,y)}e_2(\gamma_{q^-y}(s))dsdy\notag\\ &\leq c_2(\Lambda,A)\int_{B_p(1)}e_2(y)dy\\ \nonumber &\leq \text{vol}^f (B_p(1))c_3(\Lambda, A)\int_{B_p(1)}e_1(y)dy. \end{align} Observe that the left hand side of (\ref{estimate-e2}) is equal to \begin{align} \nonumber \int_{B_{q}(\eta)}dy\int_{B_{q'}(\eta)}dz\int_0^{d(q^{-},y)}\int_0^{d(\gamma_{q^{-}y}(s),z)}e_1(\hat \gamma_s(t))dtds, \end{align} where $\hat\gamma_s(t)$ is the minimal geodesic from $z$ to $\gamma_{q^{-}y}(s)$ with arc-length parameter $t$. Combining (\ref{estimate-e}) and (\ref{estimate-e2}), we find two points $y^*,z^*$ such that both (\ref{lemma-3.5-1}) and (\ref{lemma-3.5-2}) are satisfied. \end{proof} Now we apply Lemma \ref{approxi-1} to prove a local version of Theorem \ref{splitting-theorem}.
\begin{prop}\label{proof-splitting} Let $(M,g)$ be an $n$-dimensional complete Riemannian manifold which satisfies $${\rm Ric}_g^f\geq-\frac{n-1}{R^2}, ~|f|,~|\nabla f|\leq A.$$ Suppose that there exist three points $p,q^+,q^-$ such that \begin{align}\label{splitting-condition-1} d(p,q^{+})+d(p,q^{-})-d(q^{+},q^{-})<\epsilon\end{align} and \begin{align}\label{splitting-condition-2} d(p,q^{+})\geq R, d(p,q^{-}) > R.\end{align} Then there exists a map \begin{align} u:B_p(1/8)\longrightarrow B_{(0, x)}(1/8) \end{align} as a $\Psi(1/R,\epsilon;A,n)$ Gromov-Hausdorff approximation, where $B_{(0, x)}(1/8) \subset\mathbb{R}\times X$ is a $\frac{1}{8}$-radius ball centered at $(0,x) \in \mathbb{R}\times X$ and $X$ is given by the level set $(h^+)^{-1}(0)$ as a metric space measured in $B_p(1)$. \end{prop} \begin{proof} For simplicity, we denote the terms on the right-hand side of (\ref{c0-h}), (\ref{h-gradient}) and (\ref{hessian-integral}) in Lemma \ref{harmonic-estimate} by $\delta=\delta(\epsilon,\frac{1}{R})$. Define a map $u$ on $B_p(1)$ by $u(q)=(x_q,h^+(q))$, where $x_q$ is the nearest point to $q$ in $X$. We are going to prove that $u$ is a $\Psi(1/R,\epsilon; A)$ Gromov-Hausdorff approximation. Since $|\nabla h^{+}|\leq c=c(A)$ in $B_p(\frac{1}{2})$, $$h^+(y)\leq 0,~ \forall~ y \in B_q(\eta), ~ \text{if}~ h^{+}(q)<-c\eta,$$ where $\eta$ is an appropriately small number which will be determined later. We call the region where $h^{+}(q)<-c\eta$ the upper region, the region where $h^{+}(q)>c\eta$ the lower region, and the rest the middle region, respectively. Case 1. Both points $q_1$ and $q_2$ are in the upper region (we may assume that $h^+(q_1)>h^+(q_2)$). Let $q$ be a point in the upper region.
Then by applying Lemma \ref{approxi-1} to $q,x_q$, we get a geodesic from a point $y$ near $q$ to $q^-$ whose direction is almost the same as $\nabla h^+$. Thus this geodesic must intersect $h^+=0.$ Applying Triangle Lemma \ref{cheeger-lemma}, we see that the intersection is near $x_q$. Hence for $q_1$ and $q_2$, we can find $y_1$ and $y_2$ nearby $q_1$ and $q_2$ respectively, such that the two geodesics from $y_1$ and $y_2$ to $q^-$ intersect $X$ at points $x_1$ and $x_2$, respectively. Denote the geodesic from $x_2$ to $y_2$ by $\gamma(s):\gamma(0)=x_2, \gamma(h^+(y_2))=y_2$. Applying Triangle Lemma \ref{cheeger-lemma} to the triples $\{y_1,y_2,\gamma(h^+(y_1))\}, \{x_2,y_1,\gamma(h^+(y_1))\}$ and $\{x_1,x_2,y_1\}$, respectively, we get $$ |d(y_1,y_2)^2-|h^+(y_2)-h^+(y_1)|^2-d(y_1,\gamma(h^+(y_1)))^2|\leq c(n,A)\frac{\delta}{\eta^n},$$ $$|d(y_1,x_2)^2-d(y_1,\gamma(h^+(y_1)))^2-h^+(y_1)^2|\leq c(n,A)\frac{\delta}{\eta^n},$$ and $$|d(y_1,x_2)^2-d(x_1,x_2)^2-h^+(y_1)^2| \leq c(n,A)\frac{\delta}{\eta^n}.$$ Combining the above three relations, we derive \begin{align}\label{dist-appr} |d(q_1,q_2)-d(u(q_1),u(q_2))|\leq c(n,A)\frac{\delta}{\eta^n}<<1 \end{align} as $\delta=o(\eta^n)$. Case 2. $q_1$ is in the middle region and $q_2$ is in the upper region. Note that $x_q$ is near $q$ if $q$ is in the middle region. Then we can find two points $y_1$ and $y_2$ near $q_1$ and $q_2$ respectively, such that Triangle Lemma \ref{cheeger-lemma} holds for the triple $\{y_1,y_2,x_2\}$. Hence for such two points $q_1$ and $q_2$, we get (\ref{dist-appr}) immediately. Case 3. $q_1$ is in the lower region and $q_2$ is in the upper region. As in Case 1, we can get one geodesic from $q^+$ to a point near $q_1$ and another geodesic from $q^-$ to a point near $q_2$, respectively.
Thus we can use the same argument as in Case 1 to obtain (\ref{dist-appr}). Similarly, we can settle the remaining two cases, where both $q_1$ and $q_2$ are in the lower region and where both $q_1$ and $q_2$ are in the middle region. \end{proof} \begin{proof}[Proof of Theorem \ref{splitting-theorem}] Suppose that the line in $Y$ is $\gamma(t)$ and $\gamma(0)=y$. Define a Busemann function $b$ along $\gamma$ by \begin{align} \nonumber b(y)=\lim_{t\rightarrow +\infty}(d(y,\gamma(t))-t). \end{align} Since \begin{align} \nonumber d_{GH}(B_{p_i}(j),B_y(j))\rightarrow 0, ~\text{as}~i\rightarrow \infty, \end{align} for any given integer $j>0$, we may assume that \begin{align} \nonumber d_{GH}(B_{p_i}(j),B_y(j))<\frac{1}{j}, \epsilon_i<\frac{n-1}{j^2}\text{ for }i=i(j) \text{ large enough}. \end{align} Choose a Gromov-Hausdorff approximation from $B_y(j)$ to $B_{p_i}(j)$ so that the images of the endpoints $\gamma(j)$ and $\gamma(-j)$ of the line in $B_y(j)$ together with $p_i$ satisfy the conditions (\ref{splitting-condition-1}) and (\ref{splitting-condition-2}) in Proposition \ref{proof-splitting}. Then we see that there exist a metric space $X_j$ and a Gromov-Hausdorff approximation $u_j:B_{p_i}(1)\to B_{0\times x_j}(1)$ such that \begin{align} \nonumber d_{GH}( B_{p_i}(1), u_j(B_{p_i}(1))) <\Psi(\frac{1}{j}). \end{align} As a consequence, there exists a map $\hat u_j:B_{y}(1)\to B_{0\times x_j}(1)$ such that \begin{align} \nonumber d_{GH}(B_y(1),\hat u_j(B_y(1)))<\Psi. \end{align} This implies that the projections of the $\mathbb{R}$ component from the spaces $\mathbb{R}\times X_j$ are all close to the Busemann function $b$ along the given line in $Y$ for $j>>1$, so they are almost the same.
Hence, $\{ X_j\}$ is a Cauchy sequence in the Gromov-Hausdorff topology with a limit $X$. It follows that $B_y(1)=B_{0\times x}(1)$, where $x$ is the limit point of $\{x_j\}$ in $X$. Since the number $1$ can be replaced by any positive number, we finish the proof of the theorem. \end{proof} \vskip3mm \section{Existence of metric cone} In this section, we prove an analogue of Theorem \ref{thm-cc1} in the Bakry-\'Emery geometry. Namely, we prove the existence of a metric cone structure for a tangent space on the limit space of a sequence in $\mathcal{M} (A,v,\Lambda)$. Recall \begin{defi} For a metric space $(Y, d)$, the limit of $(Y, \epsilon_i^{-2 }d;y)$ in the Gromov-Hausdorff topology as $\epsilon_i\to0$ is called a tangent cone of $Y$ at $y$ (if it exists). We denote it by $T_yY$. \end{defi} \begin{defi}\label{def-metric-cone} Given a metric space $X$, the space $\mathbb{R}^+\times X$ with the metric defined by \begin{align} \nonumber &d((r_1,x_1),(r_2,x_2))=\sqrt{r_1^2+r_2^2-2r_1r_2\cos d(x_1,x_2)}, \text{ if }d(x_1,x_2)\leq \pi,\\ \nonumber &d((r_1,x_1),(r_2,x_2))=r_1+r_2, \text{ if }d(x_1,x_2)\geq \pi \end{align} is called a metric cone over $X$. We usually denote it by $C(X)$ with the metric $\mathbb{R}^+\times_rX$. \end{defi} The main theorem of this section can be stated as follows. \begin{theo}\label{existence-metric-cone} Let $\{(M_i,g_i;p_i)\}$ be a sequence of manifolds in $\mathcal{M}(A,v,\Lambda)$. Then there exists a subsequence of $\{(M_i,g_i;p_i)\}$ which converges to a metric space $(Y; y)$ in the pointed Gromov-Hausdorff topology. Moreover, for each $z\in (Y;y)$, each tangent cone $T_zY$ is a metric cone over another metric space whose diameter is less than $\pi$. \end{theo} The proof of Theorem \ref{existence-metric-cone} is similar to that of Splitting Theorem \ref{splitting-theorem}.
We need another triangle lemma to estimate the distance. \begin{lem}\label{cheeger-lemma-2} Let $x,y$ be two points on a minimal geodesic from $p$ and denote the part of the geodesic curve from $x$ to $y$ by $\gamma(s)$. Let $\gamma_s(t)$ be a family of geodesic curves connecting $z$ and $\gamma(s)$ as in Lemma \ref{cheeger-lemma}. Suppose that there is a smooth function $h$ on $M$ which satisfies \begin{align}&i)~ |h(z)-h(x)-\frac{r(z)^2-r(x)^2}{2}|<\delta<<1;\notag\\ &ii)~ \int_{[0,a]}|\nabla h(\gamma(s))-r(\gamma(s))\gamma'(s)|<\delta<<1;\notag\\ & iii)~\int_{[0,a]}\int_{[0,l(s)]}|{\rm hess }~h-g|dtds <\delta<<1.\notag \end{align} Here $r(\cdot)=\text{dist}(p,\cdot)$. Then \begin{align}\label{cosine} &d(z,y)^2r(x)-d(x,z)^2r(y)\\ \nonumber &+ r(z)^2(r(y)-r(x))-r(x)r(y)(r(y)-r(x)) <\epsilon(\delta). \end{align} \end{lem} \begin{proof} The proof is similar to that of Lemma \ref{cheeger-lemma}.
First, we have \begin{align} &|h(\gamma(s))-h(\gamma(0))-\frac{(s+r(x))^2}{2}+\frac{r^2(x)}{2}|\notag\\ &=|\int_0^s\langle \nabla h(\gamma(s))-(s+r(x))\gamma'(s),\gamma'(s)\rangle|\leq \delta.\notag \end{align} Then $$\label{vertical} h(\gamma_s(l(s)))= h(\gamma(s))=h(x)+\frac{(s+r(x))^2}{2}-\frac{r^2(x)}{2}+o(1).$$ Since \begin{align} l(s) h'(\gamma_s(0))&=h(\gamma_s(l(s)))-h(z)-\frac{l^2(s)}{2}\notag\\ &-\int_0^a \int_0^{l(s)} (\text{hess }h(\gamma_s'(t),\gamma_s'(t)) -g(\gamma_s'(t),\gamma_s'(t))) dtds,\notag \end{align} from the conditions iii) and i), we get \begin{align}l(s)h'(\gamma_s(0))&=\frac{(s+r(x))^2}{2}-\frac{r^2(x)}{2}+h(x)-h(z)-\frac{l^2(s)}{2}+o(1)\notag\\ &=\frac{(s+r(x))^2}{2}-\frac{r^2(z)}{2}-\frac{l^2(s)}{2}+o(1).\notag \end{align} Consequently, we obtain \begin{align} l(s)h'(\gamma_s(l(s)))&=\frac{(r(x)+s)^2-r^2(z)}{2}+\frac{l^2(s)}{2}\notag\\ &+ l(s)\int_0^{l(s)} (\text{hess }h(\gamma_s'(t),\gamma_s'(t)) -g(\gamma_s'(t),\gamma_s'(t))) dt+ o(1).\notag \end{align} Hence we derive \begin{align}\label{short-distance-2} &\int_0^a(\frac{2l(s)h'(\gamma_s(l(s)))}{(s+r(x))^2}-\frac{l^2(s)}{(s+r(x))^2}) ds\notag\\ & =a+\frac{r^2(z)}{r(x)+a}-\frac{r^2(z)}{r(x)}+o(1).
\end{align} Secondly, by the first variation formula, $$ l'(s)=\langle\gamma_s'(l(s)),\gamma'(s)\rangle, $$ we get from condition ii), \begin{align} \int_0^a(\frac{l^2(s)}{s+r(x)})' ds&=\int_0^a(\frac{2l(s)l'(s)}{s+r(x)}-\frac{l^2(s)}{(s+r(x))^2}) ds\notag\\ &=\int_0^a(\frac{2l(s) (s+r(x))\langle \gamma_s'(l(s)),\gamma'(s)\rangle}{(s+r(x))^2}-\frac{l^2(s)}{(s+r(x))^2})ds \notag\\ &=\int_0^a(\frac{2l(s)\langle\gamma_s'(l(s)),\nabla h(\gamma(s))\rangle}{(s+r(x))^2}-\frac{l^2(s)}{(s+r(x))^2})ds+o(1)\notag\\ &=\int_0^a (\frac{2l(s)h'(\gamma_s(l(s)))}{(s+r(x))^2} - \frac{l^2(s)}{(s+r(x))^2}) ds +o(1)\notag. \end{align} Therefore, by combining (\ref{short-distance-2}), we get (\ref{cosine}) immediately. \end{proof} It is easy to see that the left-hand side of (\ref{cosine}) is zero in a metric cone $C(X)$ if $x,y$ lie in a radial direction. We need a few more lemmas to prove Theorem \ref{existence-metric-cone}.
\begin{lem}\label{distance-appro-3} Given $\eta>0$, there exists $\omega=\omega(a,b,\eta, A,\Lambda)$ such that the following is true: if \begin{align}\label{ricci-condition-2} {\rm Ric}_{g}^f\geq-(n-1)\Lambda^2 g~\text{and}~|\nabla f|\leq A\end{align} and \begin{align}\label{volume-condition-2} \frac{{\rm vol}^f(\partial B_p(b))}{{\rm vol}^f(\partial B_p(a))}\geq(1-\omega)\frac{L_{\Lambda, A}(b)}{L_{\Lambda, A}(a)}, \end{align} then for any point $q$ on $\partial B_p(a)$, there exists $q'$ on $\partial B_p(b)$ such that $$d(q,q')\leq b-a+\eta.$$ \end{lem} \begin{proof} Suppose that the conclusion fails to hold for some $\eta$ and $q_1\in \partial B_p(a)$. Then for any point in $B_{q_1}(\frac{\eta}{3})$, there is no point $q$ on $\partial B_p(b)$ such that $d(q_1,q)\leq b-a+\frac{\eta}{3}$. Thus for any $r<\frac{\eta}{3}$, any minimal geodesic from $p$ to $\partial B_p(b)$ does not intersect $B_{q_1}(\frac{\eta}{3})\cap \partial B_p(a+r)$.
Since \begin{align} {\rm vol}^f(B_{q_1}(\frac{\eta}{3}))\geq \frac{L_{\Lambda, A}(\frac{\eta}{3})}{L_{\Lambda, A}(2b)}{\rm vol}^f(A_p(a,b)), \notag\end{align} by the coarea formula, there exists some $\frac{\eta}{4}<r<\frac{\eta}{3}$ such that \begin{align} {\rm vol}^f( B_{q_1}(\frac{\eta}{3})\cap \partial B_p(a+r))\geq \frac{1}{\eta}\frac{L_{\Lambda, A}(\frac{\eta}{3})}{L_{\Lambda, A}(2b)}{\rm vol}^f(A_p(a,b)).\notag \end{align} Using the monotonicity formula (\ref{mono-formula}), we get \begin{align} {\rm vol}^f(\partial B_p(b)) &\leq {\rm vol}^f(\partial B_p(a+r)\setminus B_{q_1}(\frac{\eta}{3}))\frac{L_{\Lambda, A}(b)}{L_{\Lambda, A}(a+r)}\notag\\ &\leq ({\rm vol}^f(\partial B_p(a+r))-\frac{1}{\eta}\frac{L_{\Lambda, A}(\frac{\eta}{3})}{L_{\Lambda, A}(2b)}{\rm vol}^f(A_p(a,b)))\frac{L_{\Lambda, A}(b)}{L_{\Lambda, A}(a+r)}.\notag \end{align} It follows that \begin{align} {\rm vol}^f(\partial B_p(b))&\le (1+\delta' (\eta,b,a))^{-1} {\rm vol}^f(\partial B_p(a+r) ) \frac{L_{\Lambda, A}(b)}{L_{\Lambda, A}(a+r)}\notag\\ &\le (1+\delta' (\eta,b,a))^{-1} {\rm vol}^f(\partial B_p(a) ) \frac{L_{\Lambda, A}(b)}{L_{\Lambda, A}(a)}.\notag \end{align} But this is a contradiction to (\ref{volume-condition-2}) as $\omega<\frac{1}{2}\delta' (\eta,b,a)$. Therefore, the lemma is proved. \end{proof} By applying Theorem 3.6 in [CC1] with the help of Lemma \ref{cheeger-lemma-2} and Lemma \ref{distance-appro-3}, we have the following proposition.
\begin{prop}\label{segment-inequ-2} Given $\eta>0$, there exist $\omega=\omega(a,b,\eta)$ and $\delta=\delta(\eta)$ such that if (\ref{ricci-condition-2}) and (\ref{volume-condition-2}) are satisfied, then there is a length space $X$ such that $$d_{GH}(A_p(a,b),(a,b)\times_rX) <\eta,$$ where $(a,b)\times_rX$ is an annulus in $C(X)$ and the metric of $A_p(a,b)$ is measured in a slightly bigger annulus in $M$. \end{prop} \begin{proof} It suffices to verify the condition for the distance function in Theorem 3.6 in [CC1]. Let $x,y,z,w$ be four points in the annulus $A_p(a,b)$ such that both pairs $\{x,y\}$ and $\{z,w\}$ are in the radial direction from $p$. Then by applying the segment inequality of Lemma \ref{approxi-1} to the function $h$ in Lemma \ref{harmonic-estimate-annual-1} and Lemma \ref{harmonic-estimate-annual-2}, we can find another four points $x_1,y_1,z_1,w_1$ near the four points respectively such that Triangle Lemma \ref{cheeger-lemma-2} holds for the two triples $\{x_1,y_1,z_1\}$ and $\{y_1,z_1,w_1\}$. Now we choose four points $x_2,y_2,z_2,w_2$ in the plane $\mathbb R^2$ such that both triples $ \{O, x_2, y_2\}$ and $\{O,z_2, w_2\}$ are collinear. Moreover, we can require that $$r(x_2)=r(x_1),r(y_2)=r(y_1),r(z_2)=r(z_1),r(w_2)=r(w_1)$$ and $$d(x_1,z_1)=d(x_2,z_2).$$ Thus by applying Triangle Lemma \ref{cheeger-lemma-2} to $\{x_1,y_1,z_1\}$, it is easy to see that \begin{align}\label{dis-1} |d(y_2,z_2)-d(y_1,z_1)|< \Psi. \end{align} Applying Triangle Lemma \ref{cheeger-lemma-2} to $\{y_1,z_1,w_1\}$, we have \begin{align}\label{dis-2} &|d(y_1,z_1)^2r(w_1)+r(w_1)r(z_1)(r(w_1)-r(z_1))\notag\\ &-d(y_1,w_1)^2r(z_1)-r(y_1)^2d(z_1,w_1)|< \Psi. \end{align} Note that the left hand side of (\ref{dis-2}) is zero when the triple $\{y_1,z_1,w_1\}$ is replaced by $\{y_2,z_2,w_2\}$ in the plane.
Since $$|d(z_1,w_1)-(r(w_1)-r(z_1))|< \Psi,$$ we get from (\ref{dis-1}) and (\ref{dis-2}) that \begin{align} \nonumber |d(y_1,w_1)-d(y_2,w_2)|< \Psi. \end{align} On the other hand, $d(y_2,w_2)$ can be written as the following function: \begin{align} d(y_2,w_2)=Q(r(x_2),r(y_2),r(z_2),r(w_2), d(x_2,z_2)). \notag \end{align} Therefore \begin{align} |d(y_1,w_1)-Q(r(x_1),r(y_1),r(z_1),r(w_1), d(x_1,z_1))|< \Psi.\notag \end{align} It follows that \begin{align}\label{distance-function-condition} |d(y,w)-Q(r(x),r(y),r(z),r(w),d(x,z))| <\Psi. \end{align} (\ref{distance-function-condition}) is just the condition for the distance function in Theorem 3.6 in [CC1]. By (\ref{distance-function-condition}) and Lemma \ref{distance-appro-3} we see that the two conditions in Theorem 3.6 in [CC1] are satisfied. Hence as a consequence of this theorem, we obtain Proposition \ref{segment-inequ-2}. In fact, $X$ is a level set of $r^{-1}(a)$ with a $\chi$-intrinsic metric defined by \begin{align} l^\chi(x,y)=\frac{1}{a}\inf\sum_{i=1}^n d(x_{i-1},x_i), \end{align} where the infimum is taken among all the sequences $\{x_i\} \in X$ which satisfy $x_0=x, x_n=y$ and $d(x_{i-1},x_i)\leq \chi$. \end{proof} It remains to verify the condition (\ref{volume-condition-2}) in Lemma \ref{distance-appro-3}. \begin{lem}\label{volume-sphere} Given $0<a<b=a\Omega, \Omega>0$, there exists an integer $N=N(n,\Omega,\Lambda, v,A)$ such that for any sequence of $r_i$ $(1\leq i\leq N)$ with $\Omega r_{i+1}\leq r_i\leq \frac{1}{b}$, the volume condition (\ref{volume-condition-2}) for any manifold $(M,g)\in\mathcal{M}(\Lambda,v,A)$ in Lemma \ref{distance-appro-3} holds for some annulus $A_p(ar_k,br_k)\subset M $ $(1\leq k\leq N)$ with the rescaled metric $\hat g=\frac{g}{r_k}$.
\end{lem} \begin{proof} We only need to give an upper bound of $N$ in the case that the following inequality \begin{align}\label{vol-con} \frac{{\rm vol}_{\hat g}^f(\partial B_p(br_k))}{L_{r_k\Lambda,r_kA}(br_k)}\geq e^{-\omega}\frac{{\rm vol}_{\hat g}^f(\partial B_p(ar_k))}{L_{r_k\Lambda,r_kA}(ar_k)} \end{align} does not hold for any $1\leq k\leq N$. Then by the monotonicity formula (\ref{mono-formula}), we know that \begin{align} \nonumber \frac{{\rm vol}_{\hat g}^f(\partial B_p(br_N))}{L_{r_k\Lambda,r_kA}(br_N)}\leq e^{-N\omega}\frac{{\rm vol}_{\hat g}^f(\partial B_p(br_1))}{L_{r_k\Lambda,r_kA}(br_1)}. \end{align} Thus by the non-collapsing condition the left-hand side has a lower bound $c_1(n, \Lambda,v,A)$, and by Volume Comparison Theorem \ref{volume-comparison} the right-hand side is not greater than $e^{-N\omega}c_2(n, \Lambda, v,A)$. This gives us an upper bound of $N$. Hence, if $N$ is larger than this bound, there must be some $k$ such that (\ref{vol-con}) holds. The lemma is proved. \end{proof} \begin{proof}[Proof of Theorem \ref{existence-metric-cone}] Without loss of generality, we may assume that $z=y$ since each point in $(Y,d;y)$ is a limit of a sequence of volume non-collapsing points in $M_i$. Also we note that the tangent cone $T_yY$ always exists in our case by Gromov's theorem [Gr]. By a contradiction argument, we suppose that $T_yY$ is not a metric cone.
Then it is easy to see that there exist numbers $0<a<b,\eta_0>0$ and a sequence $\{r_i\}$, which tends to $0$, such that for any length space $X$ the annuli $A_y(ar_i,br_i)\subset (Y, \frac{d}{r_i};y)$ satisfy \begin{align}\label{equ-1} d_{GH}(A_y(ar_i,br_i),(ar_i,br_i)\times_rX)>3r_i\eta_0. \end{align} By taking a subsequence we may assume that $\Omega r_{i+1}\leq r_i$ $(\Omega=\frac{b}{a})$ and $r_i$ is smaller than $\delta$ in Lemma \ref{distance-appro-3}. On the other hand, since $Y$ is the limit of $M_i$, we can find an increasing sequence $m_i$ such that for every $j\geq m_i$ \begin{align}\label{anulus-close-3} d_{GH}(A_y(ar_i,br_i),A_{p_j}(ar_i,br_i))<r_i\eta_0. \end{align} Let $\omega$ be a small number as chosen in Proposition \ref{segment-inequ-2} and $N$ an integer such that Lemma \ref{volume-sphere} is true for this $\omega>0$. Thus by (\ref{anulus-close-3}), we see that there exist a subsequence $\{r_{i_k}\} \to 0$ and a sequence $\{j_k\}\to\infty$ such that \begin{align}\label{anulus-close-4} d_{GH}(A_y(ar_{i_k},br_{i_k}),A_{p_{j_k}}(ar_{i_{k}},br_{i_{k}}))<r_{i_k}\eta_0, \end{align} where the annuli $A_{p_{j_{k}}}(ar_{i_k},br_{i_k})$ are chosen as in Lemma \ref{volume-sphere}. Now we can apply Proposition \ref{segment-inequ-2} to show that for each large $k$ there exists a length space $X$ such that \begin{align} \nonumber d_{GH}(A_{p_{j_{k}}}(ar_{i_k},br_{i_k}),(ar_{i_k},br_{i_k})\times_rX)<r_{i_k}\eta_0. \end{align} But this is impossible by (\ref{equ-1}). Therefore, $T_yY$ must be a metric cone. The diameter estimate follows from Splitting Theorem \ref{splitting-theorem}. In fact, if ${\rm diam}(X)>\pi$, there will be two points $p,q$ in $X$ such that $d(p,q)=\pi$. By Theorem \ref{splitting-theorem}, it follows that $C(X)=\mathbb{R}\times Y_1$, where $Y_1$ is also a metric cone, i.e. $Y_1=C(X_1)$.
It is clear that ${\rm diam}(X_1)>\pi$ since ${\rm diam}(X)>\pi$. Thus we can continue to apply Theorem \ref{splitting-theorem} to split off $X_1$. By induction, $ C(X)$ should be a Euclidean space, and consequently $X$ is a standard sphere. But this is impossible by the assumption that ${\rm diam}(X)>\pi$. \end{proof} Following the argument in the proofs of Theorem \ref{existence-metric-cone} and Proposition \ref{segment-inequ-2}, we actually prove the following strong Gromov-Hausdorff approximation to the flat space. \begin{cor}\label{hausddorf-closed} For all $\epsilon>0$, there exist $\delta=\delta(n,\epsilon),\eta=\eta(n,\epsilon)$ such that if \begin{align}\label{small-curvature-2} {\rm Ric}^f_{g}\geq-(n-1)\delta^2 g, ~|\nabla f|\leq\eta \end{align} and \begin{align}\label{volume-condition-3} e^{-f(0)}{\rm vol}^f(B_p(1))\geq(1-\delta){\rm vol}(B_0(1)) \end{align} are satisfied, then \begin{align}\label{conclusion-hausddorf-closed} d_{GH}(B_p(1),B_0(1))<\epsilon. \end{align} \end{cor} \begin{proof} Suppose that the conclusion (\ref{conclusion-hausddorf-closed}) is not true. Then there exist sequences $\{\delta_i\}$ and $\{\eta_i\}$, both of which tend to $0$, and a sequence of manifolds $\{(M_i,g_i)\}$ with the conditions (\ref{small-curvature-2}) and (\ref{volume-condition-3}) such that \begin{align}\label{conclusion-hausddorf-unclosed} d_{GH}(B_{p_i}(1),B_0(1))\ge \epsilon_0>0, \end{align} where $B_{p_i}(1)\subset M_i$. Then following the argument in the proofs of Theorem \ref{existence-metric-cone} and Proposition \ref{segment-inequ-2}, it is not hard to show that $B_{p_i}(1)$ converge to a limit $B_x(1)$ which is a metric ball with radius $1$ in a metric cone $(C(X),d)$ with vertex $x$.
Since the blowing-up space of $B_x(1)$ at $x$ is $C(X)$ itself, we see that there are subsequences $\{j\}$ and $\{i_j\}$, both of which tend to infinity, such that \begin{align} \nonumber (B_{p_{i_j}}(j),j^2g_{i_j},q_{i_j})\rightarrow (C(X),d,x). \end{align} For any $y\in X$, we choose a sequence of points $q_{i_j} \in B_{p_{i_j}}(j) \subset (M_{i_j},j^2g_{i_j})$ which tends to $y$. Then for any given $R>0$, we have \begin{align} \nonumber B_{q_{i_j}}(R)(\subseteq (M_{i_j}, j^2g_{i_j})) \rightarrow B_y(R). \end{align} Since the volume condition (\ref{volume-condition-3}) implies \begin{align} e^{-f(0)}{\rm vol}^f(B_{q_{i_j}}(R))\rightarrow {\rm vol}(B_0(R)), \end{align} by the above argument, $B_y(R)$ is in fact a metric ball with radius $R$ in a metric cone $C(Y)$ with vertex $y$. Note that $R$ is arbitrary. We prove that $C(X)$ is also a cone with vertex at $y$. This shows that there exists a line connecting $x$ and $y$ in $C(X)$. By Splitting Theorem \ref{splitting-theorem}, $C(X)$ can split off a line along the direction $xy$. Since $y \in X$ can be taken in any direction, $C(X)$ must be a Euclidean space. But this is impossible according to (\ref{conclusion-hausddorf-unclosed}). The corollary is proved. \end{proof} \begin{rem} Corollary \ref{hausddorf-closed} is a generalization of Theorem 9.69 in [Ch2] to the Bakry-\'{E}mery geometry. It will be used in Section 5 and Section 6 for the blowing-up analysis. We also note that $e^{-f(0)}{\rm vol}^f(B_p(1))$ is close to ${\rm vol}(B_p(1))$ since $|\nabla f|$ is small enough. Thus the volume condition (\ref{volume-condition-3}) can be replaced by $${\rm vol}(B_p(1))\ge (1-\delta){\rm vol}(B_0(1)).$$ \end{rem} For the rest of this section, we prove Colding's volume convergence theorem in the Bakry-\'{E}mery geometry by using the Hessian estimates in Section 2 [Co3].
\begin{theo}\label{volume-convergence} Let $(M_i^n,g_i)$ be a sequence of Riemannian manifolds which satisfy (\ref{ricci-condition-2}). Suppose that the $M_i$ converge to an $n$-dimensional compact manifold $M$ in the Gromov-Hausdorff topology. Then $$\lim_{i\to \infty}{\rm vol}(M_i,g_i)= {\rm vol}(M).$$ \end{theo} We first prove a local version of Theorem \ref{volume-convergence} as follows. \begin{lem}\label{volume-estimate-4} Given $\epsilon>0$, there exist $R=R(\epsilon,\Lambda,A,n)>1$ and $\delta=\delta(\epsilon,\Lambda,A,n)$ such that if \begin{align} {\rm Ric}^f_{M,g}\geq-(n-1)\frac{\Lambda^2}{R^2} g,|\nabla f|\leq\frac{A}{R}, \end{align} and \begin{align}d_{GH}(B_p(R),B_0(R))<\delta,\end{align} then we have \begin{align} {\rm vol}(B_p(1))> {\rm vol}(B_0(1))-\epsilon. \end{align} \end{lem} \begin{proof} We need to construct a Gromov-Hausdorff approximation map by using the $f$-harmonic functions constructed in Section 2. Choose $ n $ points $q_i$ in $B_p(R)$ which are close to $Re_i$ in $B_0(R)$, respectively. Let $l_i(q)=d(q,q_i)-d(q_i,p)$ and $h_i$ a solution of \begin{align}\Delta^fh_i=0, ~\text{in}~B_1(p), \notag\end{align} with $ h_i=l_i$ on $\partial B_1(p)$. Then by Lemma \ref{harmonic-estimate}, we have \begin{align} \nonumber \frac{1}{\text{vol }(B_p(1))}\int_{B_p(1)}|\text{hess }h_i|^2 <\Psi(1/R,\delta;A). \end{align} By using an argument in [Co3] (cf. Lemma 2.9), it follows that \begin{align}\label{orthorgonal-4} \frac{1}{\text{vol }(B_p(1))}\int_{B_p(1)}|\langle\nabla h_i,\nabla h_j\rangle-\delta_{ij}| <\Psi(1/R,\delta;A). \end{align} Define a map by $h=(h_1,h_2,...,h_n)$.
It is easy to see that the map $h$ is a $\Psi(\frac{1}{R}, \delta; \Lambda)$ Gromov-Hausdorff approximation to $B_p(1)$ by using the estimate (\ref{c0-h}) in Lemma \ref{harmonic-estimate}. Since $h$ maps $\partial B_p(1)$ near $\partial B_0(1)$ with distance less than $\Psi$, by a small modification to $h$ we may assume that \begin{align} \nonumber h:(B_p(1),\partial B_p(1))\longrightarrow(B_0(1-\Psi),\partial B_0(1-\Psi)). \end{align} Next we use a degree argument in [Ch2] to show that the image of $h$ contains $B_0(1-\Psi)$. By using the Vitali covering lemma, there exists a point $x$ in $B_p(\frac{1}{8})$ such that for any $r$ less than $\frac{1}{8}$ it holds \begin{align}\label{almost-hassian-zero-2} \frac{1}{\text{vol }(B_x(r))}\int_{B_x(r)}|\text{hess }h_i|<\Psi \end{align} and \begin{align}\label{orthogonal-2} \frac{1}{\text{vol }(B_x(r))}\int_{B_x(r)}|\langle\nabla h_i,\nabla h_j\rangle-\delta_{ij}|<\Psi. \end{align} Let $\eta=\Psi^{\frac{1}{2n+1}}$.
For any $y$ with $d(x,y)=r<\frac{1}{8}$, applying Lemma \ref{segment-inequ} to $A_1=B_x(\eta r), A_2=B_y(\eta r), e=|\text{hess }h_i|$, we get from (\ref{almost-hassian-zero-2}),
\begin{align}
&\int_{B_x(\eta r)\times B_y(\eta r)}\int_{\gamma_{zw}}|\text{hess }h_i(\gamma',\gamma')|\notag\\
&< r(\text{vol }(B_x(\eta r))+\text{vol }(B_y(\eta r)))\text{vol }(B_x(r)) \Psi\notag.&
\end{align}
It follows that
\begin{align}
&\int_{B_x(\eta r)} [ Q(r,\eta) \int_{ B_y(\eta r)} \int_{\gamma_{zw}} \Sigma_{i=1}^n |\text{hess }h_i(\gamma',\gamma')| + | \langle\nabla h_i,\nabla h_j\rangle-\delta_{ij}|] \notag\\
&<\text{vol } (B_x(\eta r)) \Psi\notag,&
\end{align}
where $Q(r,\eta)=\frac{{\rm vol}B_x(\eta r)}{ r(\text{vol }(B_x(\eta r))+\text{vol }(B_y(\eta r))){\rm vol}B_x(r)}$. Consider
$$ Q(r,\eta) \int_{ B_y(\eta r)} \int_{\gamma_{zw}}\Sigma_{i=1}^n|\text{hess }h_i(\gamma',\gamma')| + | \langle\nabla h_i,\nabla h_j\rangle-\delta_{ij}|$$
as a function of $z\in B_x(\eta r)$. Then one sees that there exists a point $x^*\in B_x(\eta r)$ such that
\begin{align}\label{orthogonal-3}
|\langle\nabla h_i,\nabla h_j\rangle(x^*)-\delta_{ij}|< \Psi
\end{align}
and
\begin{align}\label{hession-small-geodesic}
\Sigma_{i=1}^n\int_{B_y(\eta r)} \int_{\gamma_{x^*w}}|\text{hess }h_i(\gamma',\gamma')| < r\text{vol }(B_x(r))\eta^{-n}\Psi.
\end{align}
Here at the last inequality, we used the volume comparison (\ref{volume-estimate-2}).
Moreover by (\ref{hession-small-geodesic}), we can find a point $y^*\in B_y(\eta r)$ such that
\begin{align}\label{almost-hessian-zero-3}
\Sigma_{i=1}^n\int_{\gamma_{x^*y^*}}|\text{hess }h_i(\gamma',\gamma')| <\eta r.
\end{align}
By a direct calculation with help of (\ref{orthogonal-3}) and (\ref{almost-hessian-zero-3}), we get
\begin{align}\label{dist-appr-1}
(h(x^*)-h(y^*))^2=(1+\Psi^{\frac{1}{2n+1}})r^2.
\end{align}
This shows that $h(x)\neq h(y)$ for any $y$ with $d(y,x)\leq \frac{1}{8}$. On the other hand, for any $y$ with $d(y,x)\geq \frac{1}{8}$, it is clear that $h(x)\neq h(y)$ since $h$ is a $\Psi$ Gromov-Hausdorff approximation. Thus we prove that the pre-image of $h(x)$ is unique. Therefore the degree of $h$ is $1$, and consequently, $B_0(1-\Psi)\subset h(B_p(1))$. The lemma is proved because the volume of $B_p(1)$ is almost the same as that of $h(B_p(1))$ by the fact (\ref{orthorgonal-4}).
\end{proof}

\begin{proof}[Proof of Theorem \ref{volume-convergence}]
Choose finitely many $r_i$-balls $B(q_i, r_i)$ to cover $M$ with $r_i$ small enough to make all balls close to Euclidean balls so that
$$\Sigma _i{\rm vol}(B(q_i, r_i))<(1+\epsilon){\rm vol}(M)$$
for any given $\epsilon>0$. Then for $j$ sufficiently large, $M_j$ can be covered by $B(q_{ji}, r_{ji})$ with $r_{ji}\leq(1+\epsilon)r_i$. Thus by the volume comparison (\ref{volume-estimate-2}), we have
\begin{align}
{\rm vol}(M_j)&\leq \Sigma_i{\rm vol}(B(q_{ji}, r_{ji}))\notag\\
&< (1+\Psi(\delta;\Lambda, A))\Sigma_i{\rm vol}(B(q_{i}, r_{i})).
\end{align}
Here $\delta={\rm max}\{r_i\}$.
Hence we get
$$\lim_{j\rightarrow\infty}{\rm vol}(M_j)\leq {\rm vol}(M).$$
On the other hand, for any $\epsilon>0$, we choose $N$ small enough disjoint balls $B(q_i, r_i)$ in $M$ with $B(q_i, r_i)$ close to Euclidean balls so that
\begin{align}\label{small-ball-volume-2}
(1+\epsilon)\Sigma _i \omega_n r_i^n \ge \Sigma _i{\rm vol}(B(q_i, r_i)) >(1-\epsilon){\rm vol}(M).
\end{align}
Then for a fixed large number $R$, we see that for $j$ large enough there are corresponding disjoint balls $B(q_{ij}, r_i)$ in $M_j$ such that $B(q_{ij}, Rr_i)$ is $\delta(N)$-close to $B(q_i, Rr_i)$ in the Gromov-Hausdorff topology, where $\delta(N)$ is the number determined in Lemma \ref{volume-estimate-4} when $\epsilon$ is replaced by $\frac{\epsilon}{N}$. Applying the above lemma to each ball $B(q_{ij}, Rr_i)$ with rescaled metric $\frac{g_j}{r_i}$, we get from (\ref{small-ball-volume-2}),
$$(1+\epsilon){\rm vol}(M_j)>(1-\epsilon){\rm vol}(M)-(1+\epsilon)\epsilon.$$
Taking $\epsilon$ to $0$ and $N$ to $\infty$, it follows
$$\lim_{j\rightarrow\infty}{\rm vol}(M_j)\ge {\rm vol}(M).$$
The theorem is proved.
\end{proof}

\vskip3mm

\section{Structure of singular set I: Case of Riemannian metrics}

According to Theorem \ref{existence-metric-cone}, we may introduce a notion of $\mathcal S_k$-typed singular point $y$ in the limit space $(Y,d_\infty;p_\infty)$ of a sequence of Riemannian manifolds $\{(M_i,g_i;p_i)\}$ in $\mathcal{M}(A,v,\Lambda)$ as in Definition \ref{singular-type}, if there exists a tangent cone at $y$ which splits off a Euclidean space $\mathbb R^k$ isometrically with dimension at most $k$.
By applying the Metric Cone Theorem \ref{existence-metric-cone} to appropriate tangent cone spaces $T_yY$, we can follow the argument in [CC2] to show that the dimension of $\mathcal{S}_k$ is less than $k$. Moreover, $\mathcal{S}=\mathcal{S}(Y)=\mathcal{S}_{n-2}$, where $\mathcal{S}(Y)=\cup_{i=0}^{n-1}\mathcal{S}_i$. The latter is equivalent to the statement that no tangent cone can be the upper half space, which can be proved by using a topological argument as in the case of Ricci curvature bounded below (cf. Theorem 6.2 in [CC2]). Thus we have

\begin{theo}\label{dimension-k} Let $\{(M_i, g_i; p_i)\}$ be a sequence of Riemannian manifolds in $\mathcal{M}(A,v,\Lambda)$ and let $(Y,d_\infty;p_\infty)$ be its limit in the Gromov-Hausdorff topology. Then $\text{dim } \mathcal{S}_k\leq k$ and $\mathcal{S}(Y)=\mathcal{S}_{n-2}$.
\end{theo}

\begin{rem}\label{volume-covergerce-4} By Theorem $\ref{dimension-k}$, one sees that $\mathcal{H}^n(\mathcal{S})=0$. Thus by Theorem \ref{volume-convergence}, we have
\begin{align}\label{volume-covergerce-5}
\lim_{i\rightarrow\infty}{\rm vol}(M_i)=\mathcal{H}^n(Y).
\end{align}
Moreover, if $B_i(r)\subset M_i$ converge to $B_\infty(r)\subset Y$,
\begin{align}\label{volume-cone}
\lim_{i\rightarrow\infty}{\rm vol}(B_i(r))=\mathcal{H}^n(B_\infty(r)),
\end{align}
where $B_i(r)$ and $B_\infty(r)$ are radius $r$-balls in $M_i$ and $Y$, respectively.
\end{rem}

We define $\epsilon$-regular points in $Y$.

\begin{defi} $y\in (Y;p_\infty)$ is called an $\epsilon$-regular point if there exist an $\epsilon$ and a sequence $\{r_i\}$ such that
$${\rm dist}_{GH}((B_{y}(1),\frac{1}{r_i}d_\infty), B_0(1))<\epsilon,~\text{as}~i\to \infty.$$
Here $B_0(1)$ is the unit ball in $\mathbb{R}^{n}$.
We denote the set of those points by $\mathcal{R}_{\epsilon}$.
\end{defi}

In this section, our main purpose is to prove an analogue of Theorem \ref{thm-cct} in the Bakry-\'Emery geometry.

\begin{theo}\label{dimension-n-4} Let $\{(M_i, g_i; p_i)\}$ be a sequence in $\mathcal{M}(A,v,\Lambda)$ and $(Y;p_\infty)$ its limit as in Theorem \ref{dimension-k}. Suppose that
\begin{align}\label{integral-curvature}
\frac{1}{{\rm vol}(B_{p_i}(2))}\int_{B_{p_i}(2)}|{\rm Rm}|^p<C.
\end{align}
Then for any $\epsilon>0$, the following is true: i)
\begin{align}\label{p-les-2}
\mathcal{H}^{n-2p}(B_{p_\infty}(1)\setminus \mathcal{R}_{2\epsilon})<\infty,~ \text{if}~ 1\leq p<2;
\end{align}
ii)
\begin{align}\label{p=2}
{\rm dim} (B_{p_\infty}(1)\setminus \mathcal{R}_{2\epsilon})\leq n-4,~\text{ if}~ p=2.
\end{align}
\end{theo}

The theorem is a consequence of the following result of $\epsilon$-regularity.

\begin{prop}\label{epsilon-regularity-1} For any $v,\epsilon>0$, there exist three small numbers $\delta=\delta(v,\epsilon,n)$, $\eta=\eta(v,\epsilon,n)$, $\tau=\tau(v,\epsilon,n)$ and a big number $l=l(v,\epsilon,n)$ such that if $(M^n,g)$ satisfies
\begin{align}\label{condition-1-regularity}
{\rm Ric}^f_{M,g} >-(n-1)\tau^2, ~|\nabla f|<\tau, ~{\rm vol }(B_p(1))\geq v,
\end{align}
\begin{align}\label{curvature-int}
\frac{1}{{\rm vol}(B_{p}(3))}\int_{B_p(3)}|{\rm Rm}|<\delta,
\end{align}
and for some metric space $X$,
\begin{align}\label{g-h-close}
{\rm d}_{GH}(B_{p}(l),B_{(0,x)}(l)) <\eta
\end{align}
holds for $k=2$ or $3$, where $(0,x)$ is the vertex in $\mathbb{R}^{n-k}\times C(X)$, then
\begin{align}
{\rm d}_{GH}(B_{p}(1),B_0(1)) <\epsilon.
\end{align}
\end{prop}

To prove Proposition \ref{epsilon-regularity-1}, it suffices to prove that ${\rm vol}(B_{p}(1))$ is close to ${\rm vol}(B_0(1))$ according to Corollary \ref{hausddorf-closed}. The latter is equivalent to showing that ${\rm vol}(B_0(1))$ is close to ${\rm vol}(B_{(0,x)}(1))$ by Remark \ref{volume-covergerce-4}. Thus we shall estimate the volume of the section $X$. In the following, we will use the idea in [CCT] to turn to estimating the volume of a pre-image of $X$ by constructing a Gromov-Hausdorff approximation.

Let $h_i$ $(i=1,...,n-k)$ be $(n-k)$ $f$-harmonic functions on $B_p(5)$ with appropriate boundary values as constructed in the proof of the Splitting Theorem \ref{splitting-theorem} (cf. Proposition \ref{proof-splitting}) and $h$ an approximation of $\frac{r^2}{2}$ as constructed in the proof of the Metric Cone Theorem \ref{existence-metric-cone} (also Lemma \ref{harmonic-estimate-annual-1}, Lemma \ref{harmonic-estimate-annual-2}), which is a solution of
\begin{align}
\nonumber \Delta^f h=n, ~\text{in}~B_p(5),~h|_{\partial(B_p(5))}=\frac{25}{2}.
\end{align}
Let
\begin{align}
\nonumber w_0=2h-\Sigma h_j^2.
\end{align}
Define $w$ to be a solution of
\begin{align}
\nonumber \Delta^f w=2k, ~ w|_{\partial B_p(4)}=w_0.
\end{align}
Then $w$ is almost positive, so it can be transformed to be positive by adding a small number. Set
\begin{align}
\nonumber \mathbf{u}^2=w+\Psi>0.
\end{align}
We recall some estimates for the functions $h_i$, $h$ and $w$:
\begin{align}\label{orthogonal-5}
&\frac{1}{{\rm vol}(B_p(3))}\int_{B_p(3)}\Sigma_i|\text{hess }h_i|^2+\Sigma_{i\neq j}|\langle\nabla h_i,\nabla h_j\rangle|\notag\\
&+\frac{1}{{\rm vol}(B_p(3))}\int_{B_p(3)}\Sigma_i(|\nabla h_i|-1)^2 <\Psi,
\end{align}
\begin{align}
\frac{1}{{\rm vol}(B_p(3))}\int_{B_p(3)}(|\nabla h-\nabla r|^2+|\text{hess }h-g|^2) <\Psi,
\end{align}
\begin{align}\label{hessian-small-4}
\frac{1}{{\rm vol}(B_p(3))}\int_{B_p(3)}|\text{hess }{w_0}-\text{hess }w|^2 <\Psi,
\end{align}
and
\begin{align}
\frac{1}{{\rm vol}(B_p(3))}\int_{B_p(3)}|\nabla w_0-\nabla w|^2 <\Psi.
\end{align}
The first two estimates are proved in Section 2 (cf. Lemma \ref{harmonic-estimate}, Lemma \ref{harmonic-estimate-annual-1}, Lemma \ref{harmonic-estimate-annual-2}). We note that the condition (\ref{cone-volume-condition-2}) in both Lemma \ref{harmonic-estimate-annual-1} and Lemma \ref{harmonic-estimate-annual-2} is satisfied by (\ref{g-h-close}) according to (\ref{volume-cone}) in Remark \ref{volume-covergerce-4}. The others can also be obtained in a similar way.

We define maps $\Phi$ and $\Gamma$ respectively by
\begin{align}
\Phi=(h_j):B_p(4)\longrightarrow \mathbb{R}^{n-k}\notag
\end{align}
and
\begin{align}
\Gamma=(h_j,\mathbf{u}):B_p(4)\longrightarrow \mathbb{R}^{n-k+1}.\notag
\end{align}
Let
\begin{align}
\nonumber V_{\Phi,u}(z)={\rm vol}(\Phi^{-1}(z)\cap U_u),
\end{align}
where $U_u=\Gamma^{-1}(B_0^{n-k}(1)\times[0,u])$ for $u\leq 2$.
Then

\begin{lem}
\begin{align}\label{average-volume-3}
\frac{1}{{\rm vol}(B_0^{n-k}(1))}\int_{B_0^{n-k}(1)}|V_{\Phi,u}(z)-\frac{u^{k}}{k}{\rm vol}(X)|<\Psi.
\end{align}
\end{lem}

\begin{proof}
Set
\begin{align}
\nonumber v_\Phi=\nabla h_1\wedge...\wedge\nabla h_{n-k}.
\end{align}
Then $v_\Phi$ is the Jacobian of $\Phi$ in $B_0^{n-k}(1)$. By (\ref{orthogonal-5}), one can show that it is almost $1$ almost everywhere in $B_0^{n-k}(1)$. In fact, the proof is the same as that of (\ref{orthorgonal-4}). Hence by the coarea formula, we get
\begin{align}\label{level-volume-2}
& \frac{1}{{\rm vol}(B_0^{n-k}(1))} \int_{B_0^{n-k}(1)}V_{\Phi,u}(z)\notag\\
& =\frac{1}{{\rm vol}(B_0^{n-k}(1))}\int_{U_u}|v_\Phi|=\frac{{\rm vol}(U_u)}{{\rm vol}(B_0^{n-k}(1))}+\Psi.
\end{align}
To compute the variation of $V_{\Phi,u}(z)$, we modify $V_{\Phi,u}(z)$ to
\begin{align}
J_{\Phi,u,\delta}=\int_{\Phi^{-1}(z)}\chi_\epsilon(|v_\Phi|^2)\psi_{u,\delta},\notag
\end{align}
where $\psi_{\delta,u}=\xi(\mathbf{u}^2)$ with a cut-off function $\xi$ which satisfies
$$\xi(t)=1, \text{ for } t\in[0,((1-2\delta)u)^2],$$
$$\xi(t)=0 \text{ for } t\in[((1-\delta)u)^2,u^2], $$
and $\chi_\epsilon(t)$ is another cut-off function which satisfies
$$\chi_\epsilon(t)=0, \text{ for } t\in[0,\epsilon],$$
$$\chi_\epsilon(t)=(1-\epsilon)t, \text{ for } t\in[2\epsilon,1-\epsilon],$$
$$\chi_\epsilon(t)=1, \text{ for } t\geq 1,$$
$$|\chi_\epsilon'(t)|\leq 3.$$
A direct computation shows that
\begin{align}\label{hessian-5}
\nonumber \frac{\partial J_{\Phi,u,\delta}}{\partial z_j}=\int_{\Phi^{-1}(z)\cap U_u}\chi_\epsilon'(|v_\Phi|^2)\sum_i a_{i,j}\nabla h_i(|v_\Phi|^2)\psi_{u,\delta},\\
\nonumber +\int_{\Phi^{-1}(z)\cap U_u}\chi_\epsilon(|v_\Phi|^2)\sum_i a_{i,j}tr(\widehat{\text{hess } h_i})\psi_{u,\delta},\\
+\int_{\Phi^{-1}(z)\cap U_u}\chi_\epsilon(|v_\Phi|^2)\sum_i a_{i,j}\langle\nabla \psi_\delta,\nabla h_i\rangle.
\end{align}
Here $a_{i,j}$ is the inverse of $\langle\nabla h_i,\nabla h_j\rangle$ so that $\Phi_*(\sum_i a_{i,j}\nabla h_i)=\frac{\partial}{\partial z_j}$, and $tr(\widehat{\text{Hess }h_i})$ denotes the trace restricted to $\Phi^{-1}(z)$. Using the coarea formula, the integrations of the first two terms on the right side of (\ref{hessian-5}) in $B_0^{n-k}(1)$ can be controlled by the Hessian estimate in (\ref{orthogonal-5}). Moreover, similar to (\ref{orthorgonal-4}), by (\ref{orthogonal-5}) and (\ref{hessian-small-4}), one can show,
\begin{align}
\frac{1}{{\rm vol}(B_p(1))}\int_{B_p(1)}|\langle\nabla \mathbf{u}^2,\nabla h_j\rangle|<\Psi.\notag
\end{align}
Thus the integration of the third term on the right side of (\ref{hessian-5}) in $B^{n-k}(1)$ is also small. Hence we get
\begin{align}
\frac{1}{{\rm vol}(B_0^{n-k}(1))}\int_{B_0^{n-k}(1)}|\nabla J_{\Phi,u,\delta}|< \Psi.\notag
\end{align}
On the other hand, by (\ref{volume-cone}), it is easy to see
\begin{align}
\nonumber |\frac{{\rm vol}(U_u)}{{\rm vol}(B_0^{n-k}(1))}-\frac{u^k}{k}{\rm vol}(X)|<\Psi.
\end{align}
Therefore, we derive (\ref{average-volume-3}) from (\ref{level-volume-2}).
\end{proof}

Similar to (\ref{average-volume-3}), by applying the above argument to the map $\Gamma$, one can also obtain the following estimate,
\begin{align}\label{average-volume-4}
\frac{1}{{\rm vol}(B^{n-k}(1)\times [0,1])}\int_{B^{n-k}(1)\times [0,1]} |V_\Gamma (z,u)-u^{k-1}{\rm vol}(X) |<\Psi,
\end{align}
where $V_\Gamma (z,u)={\rm vol}(\Gamma^{-1}(z,u)).$ A similar proof can also be found in Theorem 2.63 in [CCT], so we omit it. Thus we see

\begin{lem}\label{level-volume} There exists a subset $D_{\epsilon,l}\subseteq B^{n-k}(1)\times[0,1]$, which depends only on $\epsilon, l$, such that
\begin{align}
{\rm vol}(D_{\epsilon,l}) >(1-\Psi){\rm vol}(B^{n-k}(1)\times[0,1])
\end{align}
and
\begin{align}\label{cone-volume-3}
|V_\Gamma (z,u)-u^{k-1}{\rm vol}(X)|<\Psi,~\forall~(z,u)\in D_{\epsilon,l}.
\end{align}
\end{lem}

Next, we use the Bochner identity in terms of the Bakry-\'Emery Ricci curvature to estimate the second fundamental forms of the pre-images of $\Phi,\Gamma$. Let $v_1,v_2,...,v_m$ be $m$ smooth vector fields. Put $v=v_1\wedge v_2\wedge...\wedge v_m$.
We compute
\begin{align}
\nonumber &\Delta^f|v|^2=2\langle\Delta^f v,v\rangle+2|\nabla v|^2
\end{align}
and
\begin{align}
\nonumber &\Delta^f(|v|^2+\eta)^{\frac{1}{2}}=(|v|^2+\eta)^{-\frac{1}{2}}(|\nabla v|^2 -\frac{\langle\nabla v,v\rangle^2}{|v|^2+\eta})\\
&+(|v|^2+\eta)^{-\frac{1}{2}}\langle\Delta^f v,v\rangle,~\forall~\eta>0.\notag
\end{align}
It follows,
\begin{align}\label{second-form-related}
\nonumber &(|v|^2+\eta)^{-\frac{1}{2}}|\pi(\nabla v)|^2\\
&\leq-\frac{|v|}{(|v|^2+\eta)^{\frac{1}{2}}}(I-\pi)\Delta^f v+\Delta^f(|v|^2+\eta)^{\frac{1}{2}},
\end{align}
where $\pi:\wedge^m\text{TM}\to v^{\perp}$ is the complement of the orthogonal projection to $v$. On the other hand, if we choose $v_i=\nabla l_i$ and take the map $F=(l_1,...l_m)$ and $v=v_F$, then
\begin{align}\label{second-estimate-1}
|v_F||\Pi_{F^{-1}(c)}|^2\leq |v_F|^{-1}|\pi(\nabla v_F)|^2,
\end{align}
where $\Pi_{F^{-1}(c)}$ denotes the second fundamental form of the level set $F^{-1}(c)$ in $M$. Hence the quantity $(I-\pi)\Delta^f v_F$ in (\ref{second-form-related}) gives us an estimate for the second fundamental form of the map $F$. To estimate $(I-\pi)\Delta^f v_F,$ we use the following formula,
\begin{align}
\nonumber&\Delta^f \nabla l_i =\nabla \Delta^f l_i+{\rm Ric}^f(\nabla l_i,\cdot).
\end{align}
Note that in our case $\Delta^f l_i$ is constant for the map $F=\Phi$ or $F=\Gamma=(\Phi,\mathbf{u}^2)$.
Then it is easy to see
\begin{align}\label{second-estimate-2}
& (I-\pi)\Delta^f v_F\notag\\
&=2(I-\pi)(\Sigma_{j_1<j_2}\nabla l_1\wedge...\wedge\nabla_{e_s}\nabla l_{j_1}\wedge... \wedge\nabla_{e_s}\nabla l_{j_2}\wedge...\nabla l_m)\notag\\
& +{\rm tr}({\rm Ric}^f),
\end{align}
where ${\rm tr}({\rm Ric}^f)$ is the trace over the space spanned by $\nabla l_i$.

\begin{lem}\label{small-second-form} There exists a subset $E_{\epsilon,l}\subseteq B^{n-k}(1)\times[0,1]$, which depends only on $\epsilon, l$ and satisfies
\begin{align}
{\rm vol}(E_{\epsilon,l})\geq(1-\Psi){\rm vol}(B^{n-k}(1)\times[0,1]),
\end{align}
such that for any $(z,u)\in E_{\epsilon,l}$ it holds
\begin{align}\label{second-flat}
\frac{1}{V_{\Phi,u}(z) }\int_{\Phi^{-1}(z)\cap U_u}|\Pi_{\Phi^{-1}_z}|^2 <\Psi,
\end{align}
\begin{align}\label{second-form-1}
\frac{1}{ V_\Gamma (z,u) }\int_{\Gamma^{-1} (z,u)}|\Pi_{\Phi^{-1}_z}|^2 <\Psi,
\end{align}
\begin{align}\label{second-form-2}
\frac{1}{ V_\Gamma (z,u) } \int_{\Gamma^{-1} (z,u)}|\Pi_{\Gamma^{-1}(z,u)}-u^{-1}g_{\Gamma^{-1}(z,u)}\otimes\nabla u|^2 <\Psi.
\end{align}
\end{lem}

\begin{proof}
Let $\phi$ be a cut-off function with support in $B_p(3)$ as constructed in Lemma \ref{cut-off}. Note that $v_\Phi$ is almost $1$ almost everywhere in $U_u$ by the Hessian estimates in (\ref{orthogonal-5}).
Then by (\ref{second-estimate-2}), we have
\begin{align}
\nonumber &\int_{U_u} (|v|^2+\eta)^{-\frac{1}{2}}|\pi(\nabla v)|^2 e^{-f}d\text{v}\notag\\
&\leq \int_{B_p(3)}|v_\Phi|| (I-\pi)\Delta^f v_\Phi|e^{-f}d\text{v}+\int_{B_p(3)}\phi\Delta^f((|v_\Phi|^2+\eta)^{\frac{1}{2}}-1) e^{-f}d\text{v}\notag \\
& < \Psi+\int_{B_p(3)}|\Delta^f\phi||(|v_\Phi|^2+\eta)^{\frac{1}{2}}-1|e^{-f}d\text{v}.\notag
\end{align}
By (\ref{second-estimate-1}), it follows
\begin{align}\label{curvature-inequality-1}
&\int_{U_u}|v_\Phi||\Pi_{\Phi^{-1}(z)}|^2e^{-f}d\text{v}\notag\\
&\le \lim_{\eta\rightarrow 0}\int_{U_u} (|v|^2+\eta)^{-\frac{1}{2}}|\pi(\nabla v)|^2 e^{-f}d\text{v} < \Psi.
\end{align}
On the other hand, by the coarea formula, we have
\begin{align}
\nonumber \int_{B^{n-k}(1)}\int_{\Phi^{-1}(z)\cap U_u}|\Pi_{\Phi^{-1}(z)}|^2e^{-f}d\text{v} =\int_{U_u}|v_\Phi||\Pi_{\Phi^{-1}(z)}|^2e^{-f}d\text{v}.
\end{align}
Thus (\ref{second-flat}) follows from (\ref{curvature-inequality-1}) immediately. Again by the coarea formula we get (\ref{second-form-1}) from (\ref{second-flat}). (\ref{second-form-2}) can also be obtained by applying the same argument above to the map $\Gamma$ (cf. Theorem 3.7 in [CCT]).
\end{proof}

\begin{proof}[Completion of Proof of Proposition \ref{epsilon-regularity-1}]
We will finish the proof of Proposition \ref{epsilon-regularity-1} by applying the Gauss-Bonnet formula to an appropriate level set of $\Gamma$.
In the case $k=2$, by Lemma \ref{level-volume}, we see that there exists $(z,u)$ ($u$ is close to $1$) such that
\begin{align}
\nonumber |2\pi t-\frac{1}{u} V_\Gamma (z,u)|<\Psi,\notag
\end{align}
where $t$ is the radius of $X$. Note that $X$ is a circle here. On the other hand, applying the Gauss-Bonnet formula to $\Phi^{-1}(z)\cap U_u$, we have
\begin{align}
\nonumber \int_{\Gamma^{-1} (z,u)}H+\int_{\Phi^{-1}(z)\cap U_u} K=2\pi\chi( \Phi^{-1}(z)\cap U_u ),
\end{align}
where $K$ and $H$ are the Gauss curvature and mean curvature of $\Phi^{-1}(z)\cap U_u$ and $\Gamma^{-1} (z,u)$, respectively. By (\ref{second-flat}) and (\ref{curvature-int}) together with the Gauss-Codazzi equation, we see that
\begin{align}
\nonumber |\int_{ \Phi^{-1}(z)\cap U_u } K| <\Psi.
\end{align}
Also we get from (\ref{second-form-2}),
\begin{align}
\nonumber |\int_{\Gamma^{-1} (z,u)}H -\frac{1}{u}V_\Gamma (z,u)|<\Psi.
\end{align}
Thus $t$ is close to $\chi( \Phi^{-1}(z)\cap U_u )$ which is an integer. The non-collapsing condition implies that $\chi( \Phi^{-1}(z)\cap U_u )$ is not zero. So $t > 1-\Psi$. As a consequence, the volume of the ball $B(1)\subset \mathbb{R}^{n-1}\times C(X)$ is close to that of a unit flat ball. Hence by Remark \ref{volume-covergerce-4}, we see that ${\rm vol}(B_p(1))$ is close to ${\rm vol}(B_0(1))$. Therefore, we prove that $B_p(1)$ is close to $B_0(1)$ by Corollary \ref{hausddorf-closed}.

In the case $k=3$, we see that there exists $(z,u)$ in Lemma \ref{level-volume} such that
\begin{align}\label{area-2}
| V_\Gamma (z,u)-{\rm vol} (X)| <\Psi,
\end{align}
as $u$ is close to 1.
On the other hand, by the Gauss-Bonnet formula, we have
\begin{align}\label{gauss-bonnet}
\int_{ \Gamma^{-1} (z,u)}K=2\pi\chi( \Gamma^{-1} (z,u)).
\end{align}
Since by (\ref{second-form-1}) and (\ref{curvature-int}) together with the Gauss-Codazzi equation,
\begin{align}\label{curvature-int-sub}
\int_{ \Gamma^{-1} (z,u)}|R_{\Phi^{-1}(z)}|< \Psi,\notag
\end{align}
where $R_{\Phi^{-1}(z)}$ is the curvature tensor of the submanifold $\Phi^{-1}(z)$, (\ref{second-form-2}) implies that
\begin{align}
\nonumber |\int_{ \Gamma^{-1} (z,u)}K-V_\Gamma (z,u)| <\Psi.
\end{align}
By (\ref{gauss-bonnet}), it follows
$$ V_\Gamma (z,u) > 4\pi-\Psi,$$
since the Euler number is even. Thus by (\ref{area-2}), we get
$${\rm vol}(B_p(1))> {\rm vol}(B(1))-\Psi > {\rm vol}(B_0(1))-\Psi.$$
As a consequence, the volume of $B_p(1)$ is close to that of $B_0(1)$. Therefore, we also prove that $B_p(1)$ is close to $B_0(1)$ by Corollary \ref{hausddorf-closed}.
\end{proof}

\begin{proof}[Proof of Theorem \ref{dimension-n-4}]
First we define a distribution $|\widetilde{\rm Rm}|^p$ ($p\in [1,2]$) on $B_{p_\infty}(2)$ by
$$\int_{B_{p_\infty}(2)} | \widetilde{\rm Rm}|^p~ h = \overline{ \lim_i}\int_{B_{p_i}(2)}|{\rm Rm}(g_i)|^p h( \Psi_i(\cdot)),$$
where $\Psi_i: B_{p_i}(2) \to B_{p_\infty}(2)$ is a sequence of Gromov-Hausdorff approximations and $h\in C^0(B_{p_\infty}(2))$ with ${\rm supp}(h)\subset B_{p_\infty}(2)$.
Then $| \widetilde{\rm Rm}|^p $ induces a measure $\mu$ on $B_{p_\infty}(2)$ by
$$\mu(E)=\sup_h \{\int_{B_{p_\infty}(2)} | \widetilde{\rm Rm}|^p~ h|~ 0\le h\le 1, ~h\in C^0(B_{p_\infty}(2))~{\rm and} ~{\rm supp}(h)\subset E\},$$
where $E\subset B_{p_\infty}(2)$ is any closed subset. In particular, $\mu(B_{p_\infty}(\frac{3}{2}))<\infty$.

Let $\epsilon$ be a small number and $\delta=\delta(\epsilon)$ the constant determined in Proposition \ref{epsilon-regularity-1}. Let $\delta'= (C_0^{-1}\delta(\epsilon))^p$, where the constant $C_0$ will be determined later. Define a subset in $B_{p_\infty}(2)\subset M_\infty$ for $\theta\leq\tau$ by
\begin{align}
Q(\theta)=\{q\in B_{p_\infty}(1)|~\frac{ \mu (B_q(s))}{\text{vol}(B_q(s))} \geq\delta' s^{-2p}, ~ \exists ~s\leq \theta \}.
\end{align}
We prove

\begin{claim}\label{claim-sing-set}
\begin{align}
B_{p_\infty}(1)\subseteq \mathcal{R}_{3\epsilon}\cup Q(\theta)\cup \mathcal{S}_{n-4}.
\end{align}
\end{claim}

Suppose that the claim is not true. Then there exist a point $z\notin\mathcal{R}_{3\epsilon}\cup Q(\theta)\cup \mathcal{S}_{n-4}$ and a tangent cone $T_zY$ which is $\mathbb{R}^{n-k}\times C(X)$ for $k=2$ or $3$ and ${\rm d}_{GH}(B_{z_\infty}(1),B_0(1))>3\epsilon$, where $z_\infty\cong z$. Thus there is a sequence $r_i$ approaching $0$ such that $(Y,\frac{d}{r_i};z)\rightarrow T_zY$ in the Gromov-Hausdorff topology.
Hence for large enough $i$, we have
\begin{align}
{\rm d}_{GH}(B_z(r_i),B_0(r_i))\geq 3\epsilon r_i, \notag
\end{align}
\begin{align}
{\rm d}_{GH}(B_z(lr_i),B_{(0,x)}(lr_i))\leq \frac{1}{2} r_i\eta, \notag
\end{align}
and
\begin{align}
\frac{ \mu (B_z(4r_i))}{\text{vol}(B_z(4r_i))} < \delta' (4r_i)^{-2p},\notag
\end{align}
where $\eta=\eta(\epsilon)\ll1$ and $l=l(\epsilon)\gg1$ are both determined in Proposition \ref{epsilon-regularity-1}. For fixed $i$ in the above inequalities, we take $j$ large enough and choose a point $z_j\in M_j\to z$ such that
\begin{align}\label{equ-con-1}
{\rm d}_{GH}(B_{z_j}(r_i),B_0(r_i))\geq2\epsilon r_i,
\end{align}
\begin{align}\label{l-ball-closed}
{\rm d}_{GH}(B_{z_j}(lr_i),B_{(0,x)}(lr_i))\leq r_i\eta,
\end{align}
and
\begin{align}\label{small-p-energy}
\frac{1}{\text{vol}(B_{z_j}(4r_i))} \int_{B_{z_j}(3r_i)}|{\rm Rm}(g_i)|^p < 2 \delta' (4r_i)^{-2p}.
\end{align}
By the volume comparison, we get from (\ref{small-p-energy}),
\begin{align}
\frac{1}{\text{vol}(B_{z_j}(3r_i))} \int_{B_{z_j}(3r_i)}|{\rm Rm}(g_i)|^p < C_0' \delta' (3r_i)^{-2p},\notag
\end{align}
where $C_0=C_0(A,v,\Lambda)$. The H\"older inequality implies
\begin{align}\label{curvaure-comparison}
\frac{1}{\text{vol}(B_{z_j}(3r_i))} \int_{B_{z_j}(3r_i)}|{\rm Rm}(g_i)| < C_0 ( \delta' )^{\frac{1}{p}}(3r_i)^{-2}=\delta(\epsilon) (3r_i)^{-2}.
\end{align}
Thus applying Proposition \ref{epsilon-regularity-1} to the manifold $M_j$ with rescaled metric $\frac{g_i}{ r_i}$ together with conditions (\ref{equ-con-1}), (\ref{l-ball-closed}) and (\ref{curvaure-comparison}), we obtain
\begin{align}
{\rm d}_{GH}(B_{z_j}(r_i),B_0(r_i))\leq\epsilon r_i.\notag
\end{align}
But this is impossible by (\ref{equ-con-1}). The claim is proved.

By Claim \ref{claim-sing-set}, $B_{p_\infty}(1)\setminus \mathcal{R}_{2\epsilon}\subseteq Q(\theta)\cup \mathcal{S}_{n-4}$. Now we estimate $\mathcal{H}^{n-2p}(Q(\theta))$. By the Vitali covering lemma, for any $r>0$ there is a collection of disjoint balls $B_{q_j}(s_j) \subset Q(\theta)$ ($s_j\le r$) such that $\bigcup B_{q_j}(5s_j)\supseteq Q(\theta)$ with the property
$$\frac{1}{{\rm vol}(B_{q_j}(s_j))} \mu(B_{q_j}(s_j)) \geq\delta' s_j^{-2p}, $$
where $q_j\in Q(\theta)$. By the volume comparison, it follows
\begin{align}
\Sigma s_j^{n-2p}\leq \frac{c(\Lambda,v,A) \mu(B_{p_\infty}(\frac{3}{2}))}{\delta}.
\end{align}
Taking $r\to 0$, we get
\begin{align}
\mathcal{H}^{n-2p}(Q(\theta))<\infty.\notag
\end{align}
Hence (\ref{p-les-2}) and (\ref{p=2}) follow from the above estimate immediately.
\end{proof}

\vskip3mm

\section{Structure of singular set II: Case of K\"ahler metrics}

In this section, we study the limit space of a sequence of K\"ahler metrics arising from solutions of certain complex Monge-Amp\`ere equations for the existence of K\"ahler-Ricci solitons on a Fano manifold via the continuity method [TZ1], [TZ2]. We assume that $(M,g)$ is a compact K\"ahler manifold with positive first Chern class $c_1(M)>0$ (namely, $M$ is Fano), and $\omega_g$ is the K\"ahler form of $g$ in $2\pi c_1(M)$.
Then there exists a Ricci potential $h$ of the metric $g$ such that
\begin{align}
{\rm Ric}(g)-\omega_g=\sqrt{-1}\partial\overline\partial h,~ \int_M e^h\omega_g^n=\int_M\omega_g^n=V.\notag
\end{align}
In [TZ1], Tian and Zhu considered a family of complex Monge-Amp\`ere equations for K\"ahler potentials $\phi$ on $M$,
\begin{align}\label{ma-equ}
\text{det}(g_{i\overline j}+\phi_{i\overline j})=\text{det}(g_{i\overline j})e^{h-\theta_X-X(\phi)-t\phi},
\end{align}
where $t\in [0,1]$ is a parameter and $\theta_X$ is a real-valued potential of a reductive holomorphic vector field $X$ on $M$ which is defined by
\begin{align}
\bar{\partial}\theta_X=i_X\omega_{g}, ~ \int_M e^{\theta_X}\omega_g^n=V,\notag
\end{align}
according to the choice of the $K_X$-invariant metric $g$. The equations (\ref{ma-equ}) are equivalent to
\begin{align}\label{ricci-equ}
{\rm Ric}(\omega_{\phi})-L_X\omega_{\phi}=t\omega_{\phi}+(1-t)\omega_g.
\end{align}
Thus $\omega_{\phi}$ will define a K\"ahler-Ricci soliton if $\phi$ is a solution of (\ref{ma-equ}) at $t=1$. It was proved that the set $I$ of $t$ for which (\ref{ma-equ}) is solvable is open [TZ1]. In other words, there exists $T\le 1$ such that $I=[0,T)$. (\ref{ricci-equ}) implies
\begin{align}\label{curvature-condition-kahler}
{\rm Ric}(\omega_{\phi})+\sqrt{-1}\partial\overline\partial(-\theta_X(\phi))\ge t\omega_{\phi},
\end{align}
where $\theta_X(\phi)=\theta_X +X(\phi)$ is a potential of $X$ associated to $\omega_\phi$, which is uniformly bounded [Zh].
\begin{lem}\label{lemma-partial-theta}
$|\overline\partial(\theta_X+X(\phi))|=|X|_{\omega_{\phi}}$ and $\Delta_{\overline\partial}(\theta_X(\phi))$ are both uniformly bounded by $C(M,\omega, X)$, where $\Delta_{\overline\partial}=\frac{1}{2}\Delta$ is the $\overline\partial$-Laplace operator associated to $\omega_{\phi}$.
\end{lem}

\begin{proof}
We will use the maximum principle to prove the lemma. First we recall that $\theta_X(\phi)$ satisfies the identity [Fu],
$$\Delta_{\overline\partial}[\theta_X(\phi)]+\theta_X(\phi)+X(h)=0,$$
where $h$ is a Ricci potential of the K\"ahler form $\omega_{\phi}$ at $t$. Note that
$$h =\theta_X(\phi)+(t-1)\phi$$
by (\ref{ricci-equ}). Thus $\theta_X(\phi)$ satisfies
\begin{align}\label{lapalace-theta}
\Delta_{\overline\partial}[\theta_X(\phi)]+ |\overline\partial \theta_X(\phi)|^2+\theta_X(\phi)=(1-t)X(\phi).
\end{align}
By the Bochner formula, one sees
\begin{align}
&\Delta_{\overline\partial} (|\overline\partial \theta_X(\phi)|^2)\notag\\
&=|\nabla\overline\nabla\theta_X(\phi)|^2+2\text{re}( \langle\overline\partial \theta_X(\phi),\overline\partial\Delta_{\overline\partial}\theta_X(\phi)\rangle)+{\rm Ric}(\overline\partial \theta_X(\phi),\overline\partial \theta_X(\phi)).\notag
\end{align}
It follows that
\begin{align}
&(\Delta_{\overline\partial} +X)(|\overline\partial \theta_X(\phi)|^2)\notag\\
&=|\nabla\overline\nabla \theta_X(\phi)|^2+2\text{re}( \langle\overline\partial \theta_X(\phi),\overline\partial(\Delta_{\overline\partial}\theta_X(\phi)+|\overline\partial \theta_X(\phi)|^2)\rangle)\notag\\
&+ ({\rm Ric}-\nabla\overline\nabla\theta_X(\phi))(\overline\partial \theta_X(\phi), \overline\partial \theta_X(\phi)).\notag
\end{align}
Thus by (\ref{lapalace-theta}), we get
\begin{align}\label{lapalace-partial-theta}
(\Delta_{\overline\partial} +X)(|\overline\partial \theta_X(\phi)|^2)= |\nabla\overline\nabla \theta_X(\phi)|^2-t|\overline\partial \theta_X(\phi)|^2 -(1-t)|X|_{g}^2.
\end{align}
Note that
$$ |\nabla\overline\nabla \theta_X(\phi)|^2\ge \frac{(\Delta_{\overline\partial}\theta_X(\phi))^2}{n} \ge \frac{(|\overline\partial\theta_X(\phi)|^2-C_1)^2}{n}, $$
where $C_1=\max_M\{|\theta_X(\phi)-(1-t)X(\phi)|\}.$ Applying the maximum principle to $|\overline\partial \theta_X(\phi)|^2$ in (\ref{lapalace-partial-theta}), we derive at a maximum point of $|\overline\partial \theta_X(\phi)|^2$,
\begin{align}
0\geq \frac{1}{n}(|\overline\partial\theta_X(\phi)|^2-C_1)^2 -t|\overline\partial\theta_X(\phi)|^2-C_2.
\end{align}
Therefore, the gradient estimate of $\theta_X(\phi)$ follows from the above inequality immediately. By (\ref{lapalace-theta}), we also get the $\overline\partial$-Laplace estimate of $\theta_X(\phi)$.
\end{proof}

By Lemma \ref{lemma-partial-theta} and Theorem \ref{dimension-k}, we prove

\begin{theo}\label{thm-kahler-1}
For any sequence of K\"ahler metrics $g_{t_i}$ associated to solutions $\phi_{t_i}$ of equations (\ref{ma-equ}) at $t=t_i\in I$, there exists a subsequence which converges to a limit metric space $Y$ in the Gromov-Hausdorff topology. Moreover, $\mathcal{S}(Y)=\mathcal{S}_{2n-2}$. In particular, the complex codimension of singularities of $Y$ is at least 1.
\end{theo}

\begin{proof}
It suffices to verify that
\begin{align}\label{volume-lower-bound}
{\rm vol}_{g_t}(B_{p}(1))\geq v>0,~\forall~p\in M.
\end{align}
But this is just a consequence of an application of Volume Comparison Theorem \ref{volume-comparison}, since the diameter of $g_t$ is uniformly bounded by a result of Mabuchi [Ma].
\end{proof}

In the special case $t_i\to 1$ when $I=[0,1)$ in Theorem \ref{thm-kahler-1}, we can strengthen Theorem \ref{thm-kahler-1} as follows.

\begin{theo}\label{thm-kahler-2}
Let $g_{t_i}$ be a sequence of K\"ahler metrics in Theorem \ref{thm-kahler-1} with $t_i\to 1$. Then $\mathcal{S}(Y)=\mathcal{S}_{2n-4}$. In particular, the complex codimension of singularities of $Y$ is at least 2.
\end{theo}

$I=[0,1)$ can be guaranteed when the modified Mabuchi $K$-energy is bounded below and $X$ is the soliton holomorphic vector field determined by the modified Futaki invariant [TZ2]. This can be proved following an argument by Futaki for the study of almost K\"ahler-Einstein metrics under the assumption that the Mabuchi $K$-energy is bounded below on a Fano manifold [Fu]. Thus as a corollary of Theorem \ref{thm-kahler-2}, we have

\begin{cor}
Suppose that the modified $K$-energy is bounded below on a Fano manifold. Then there exists a subsequence of weak almost K\"ahler-Ricci solitons on $M$ which converges to a limit metric space $Y$ in the Gromov-Hausdorff topology. Moreover, the complex codimension of singularities of $Y$ is at least 2.
\end{cor}

\begin{rem}
In case $X=0$, the modified Mabuchi $K$-energy is just the Mabuchi $K$-energy. In this case, the $K$-energy being bounded from below is equivalent to the Fano manifold being $K$-semistable by a recent work of Li [Li].
\end{rem}

It is useful to introduce a more general sequence of K\"ahler metrics than the one in Theorem \ref{thm-kahler-2}, inspired by a recent work of Wang and Tian [WT].
\begin{defi}\label{almost-kr-soliton}
We call a sequence of K\"ahler metrics $(M_i, J_i, g_i)$ weak almost K\"ahler-Ricci solitons if there are uniform constants $\Lambda$ and $A$ such that
\begin{align}
& i)~ {\rm Ric}(g_i)+ \nabla\overline\nabla f_i\ge -\Lambda^2 g_i,~\nabla\nabla f_i=0;\notag\\
&ii)~ \|\overline\partial f_i\|_{g_i}\le A;\notag\\
&iii) ~\lim_{i\to\infty}\|{\rm Ric}(g_i)-g_i+\nabla\overline\nabla f_i\|_{L^1(g_i)}= 0.\notag
\end{align}
Here $f_i$ are some smooth functions and $\bar\partial f_i$ define reductive holomorphic vector fields on the Fano manifolds $(M_i,J_i)$.
\end{defi}

\begin{lem}\label{condtion-almost-kr-soliton}
Let $\{g_{t_i}\}$ be a sequence of K\"ahler metrics in Theorem \ref{thm-kahler-1} with $t_i\to 1$. Then $\{g_{t_i}\}$ is a sequence of weak almost K\"ahler-Ricci solitons on $M$.
\end{lem}

\begin{proof}
By Lemma \ref{lemma-partial-theta}, it suffices to check condition iii) in Definition \ref{almost-kr-soliton}. In fact, we have
\begin{align}
&\int_M|{\rm Ric}(\omega_\phi)-\sqrt{-1}\partial\bar{\partial}\theta_X(\phi)-\omega_\phi|\notag\\
&\le\int_M|{\rm Ric}(\omega_\phi)-\sqrt{-1}\partial\bar{\partial}\theta_X(\phi)-t\omega_\phi |+n(1-t){\rm vol}(M)\notag\\
&=\int_M({\rm Ric}(\omega_\phi)-\sqrt{-1}\partial\bar{\partial}\theta_X(\phi)-t\omega_\phi)\wedge \frac{\omega_\phi^{n-1}}{(n-1)!}+n(1-t){\rm vol}(M)\notag\\
&=2n(1-t)\text{Vol}(M)\to 0\notag.
\end{align}
\end{proof}

We now begin to prove Theorem \ref{thm-kahler-3}, proceeding as in the proof of Theorem \ref{dimension-n-4}.
We need the following $\epsilon$-regularity result for the tangent cone.

\begin{lem}\label{epsilon-regularity}
For any $\mu_0,\epsilon>0$, there exist small numbers $\delta=\delta(v,\epsilon,n)$, $\eta=\eta (v,\epsilon, n)$, $\tau=\tau(v,\epsilon,n)$ and a big number $l=l(v,\epsilon,n)$ such that if a K\"{a}hler manifold $(M^n,g)$ satisfies
\begin{align}
&i)~ {\rm Ric}^f_M(g) >-(n-1)\tau^2 g, ~\nabla\nabla f=0,\notag\\
&ii)~ {\rm vol}_g(B_p(1))\geq \mu_0,\notag\\
&iii)~ |\nabla f|< \tau,\notag\\
&iv)~ \frac{1}{{\rm vol}(B_p(2))}\int_{B_p(2)}|{\rm Ric}(g)+\nabla\overline\nabla f|dV_g <\delta,\notag\\
&v)~ {\rm d}_{GH}(B_p(l),B_{(0,x)}(l))<\eta,\notag
\end{align}
where $B_{(0,x)}(l)$ is an $l$-radius ball in the cone $\mathbb{R}^{2n-2}\times C(X)$ centered at the vertex $(0,x)$ for some metric space $X$, then
\begin{align}
{\rm d}_{GH}(B_p(1),B(1))<\epsilon.
\end{align}
\end{lem}

\begin{proof}
The proof of Lemma \ref{epsilon-regularity} is a modification of that of Proposition \ref{epsilon-regularity-1}. Note that $X$ is a circle of radius $t$ in the present case. It suffices to show that $t$ is close to $2\pi$ by Lemma \ref{hausddorf-closed}. Let $\Phi=(h_1,...,h_{2n-2})$ and $\Gamma=(\Phi, \mathbf{u})$ be the two maps constructed in Proposition \ref{epsilon-regularity-1}. By Proposition \ref{J-invariant-property} in Appendix 2, we may also assume
\begin{align}\label{J-invariant}
\int_{B_{p}(3)}|\nabla h_{n-1+i}-\mathbf{J}\nabla h_{i}|^2< \Psi(\tau, \epsilon,\frac{1}{l};v).
\end{align}
We shall compute the differential characteristic $\widehat{c_{1,\nabla}}$ of the tangent bundle $(TM,\nabla)$ restricted on $\Gamma^{-1}(z,u)=\Phi^{-1}(z) \cap U_u$ with fixed $z$ (cf. [Ch3]), where $\nabla$ is the Levi-Civita connection on $TM$ and $(z,u)$ is a regular point of $\Gamma$ such that both Lemma \ref{level-volume} and Lemma \ref{small-second-form} hold. It is easy to see that by the coarea formula and the condition iv), the set
\begin{align}
\nonumber D=\{ z|~& \Phi^{-1}(z)\cap U_u ~\text {is a regular surface in }~M~\text{and}\notag\\
&\int_{\Phi^{-1}(z)\cap U_u}|{\rm Ric}(g)+\nabla\overline\nabla f| <c\delta\}
\end{align}
has positive volume in $\mathbb R^{2n-2}$ for some constant $c$ which depends only on $n$. For each $z\in D$, we have the estimate
\begin{align}\label{small-class-1}
&|\int_{\Phi^{-1}(z)\cap U_u}\text{Ric }(\omega_g)|\notag\\
&\le \int_{\Phi^{-1}(z)\cap U_u} |{\rm Ric}(g)+\nabla\overline\nabla f| +|\int_{\Phi^{-1}(z)\cap U_u} \sqrt{-1} \partial\overline\partial f |\notag \\
&\leq c\delta+\int_{ \Gamma^{-1}(z,u)}|\nabla f|\leq c\delta+{\rm vol}(\Gamma^{-1}(z,u))\tau.
\end{align}
Since
\begin{align}
\int_{ \Gamma^{-1}(z,u)}\widehat{c_{1,\nabla}}=\int_{\Phi^{-1}(z)\cap U_u}\text{Ric }(\omega_g), ~ \text{mod } \mathbb{Z},\notag
\end{align}
we get
\begin{align}\label{small-class-2}
\int_{ \Gamma^{-1}(z,u)}\widehat{c_{1,\nabla}}=\Psi, ~ \text{mod } \mathbb{Z}.
\end{align}
To compute the left term of (\ref{small-class-2}), we will decompose the tangent bundle $(TM,\nabla)$ over $\Gamma^{-1}(z,u)$ as follows.
By our construction of the map ${\mathfrak G}ammamma$, using the coarea formula, we may assume that \baregin{align} & i)~ \sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u) }|\lambdaambdangle\nuablabla h_i,\nuablabla h_j\ranglegle-\deltaelta_{ij}|< {\mathfrak P}si, \nuotag\\ &ii)~\sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u)}|\tauext{hess }h_i|< {\mathfrak P}si, \nuotag\\ &iii)~\sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u) }|\lambdaambdangle\nuablabla \mathbf u^2,\nuablabla h_j\ranglegle| < {\mathfrak P}si, \nuotag\\ &iv)\sqrt{-1}nt_{ {\mathfrak G}ammamma^{-1}(z,u)}|\nuablabla\lambdaambdangle\nuablabla \mathbf u^2,\nuablabla h_j\ranglegle|< {\mathfrak P}si.\nuotag \e_2and{align} Since ${\mathfrak G}ammamma^{-1}(z,u)$ is one dimensional manifold with bounded length, the conditions i- ii) and iii-iv) imply $$|\lambdaambdangle\nuablabla h_i,\nuablabla h_j\ranglegle-\deltaelta_{ij}|~\tauext{and} ~|\lambdaambdangle\nuablabla \mathbf u^2,\nuablabla h_j\ranglegle|$$ are both small on ${\mathfrak G}ammamma^{-1}(z,u)$, respectively. Moreover, applying the coarea formula to (\ref{J-invariant}) together with the above condition ii), we also get \baregin{align} \nuonumber |\nuablabla h_{n-1+i}- \mathbf J\nuablabla h_i|< {\mathfrak P}si. \e_2and{align} Hence by using the Gram-Schmidt process, we obtain $(2n-1)$ orthogonal sections of $TM$ over ${\mathfrak G}ammamma^{-1}(z,u)$, $$e_i,\mathbf{J}(e_i)~(1\lambdaeq i\lambdaeq n-1), \mathbf{N}$$ from sections $\nuablabla h_i$ $(1\lambdaeq i\lambdaeq n-1)$, $\nuablabla \mathbf{u}$. Denote $\mathbb{E}$ to be the sub-bundle spanning by $e_i,\mathbf{J}(e_i)$ and decompose $TM$ into \baregin{align} TM=\mathbb{E}\Omegaegaegaplus\mathbb{E}^\partialerp \e_2and{align} where $\mathbb{E}^\partialerp$ is the orthogonal complement of $\mathbb{E}$. 
We introduce a Whitney sum connection $\nuablabla'$ on $TM$ over ${\mathfrak G}ammamma^{-1}(z,u)$ by combining two projection connections on $\mathbb{E}$ and $\mathbb{E}^\partialerp$, which are both induced by $\nuablabla$. Then by the condition ii), it is easy to show \baregin{align}\lambdaambdabel{small -connection-1} \sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u)}|\nuablabla-\nuablabla'|< {\mathfrak P}si, \e_2and{align} where $\nuablabla-\nuablabla'$ is regarded as a 1-form on $\tauext{End}(TM)$. Also we can introduce another connection $\nuablabla''$ which is flat on $\mathbb{E}$. Namely, $\nuablabla''$ satisfies $$\nuablabla''(e_i)=\nuablabla''(\mathbf{J}(e_i))=0.$$ Similar to (\ref{small -connection-1}), we have \baregin{align}\lambdaambdabel{small -connection-2} \sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u)}|\nuablabla''-\nuablabla'|< {\mathfrak P}si. \e_2and{align} Therefore, combining (\ref{small -connection-1}) and (\ref{small -connection-2}), we derive $$|(\widehat{c_{1,\nuablabla''}}-\widehat{c_{1,\nuablabla}})({\mathfrak G}ammamma^{-1}(z,u))|<<1.$$ On the other hand, by the flatness of $\nuablabla''$ on $\mathbb{E}$ over ${\mathfrak G}ammamma^{-1}(z,u)$, the quantity $2\partiali \widehat{c_{1,\nuablabla''}} ({\mathfrak G}ammamma^{-1}(z,u))$ is just equal to the holonomy of the connection around ${\mathfrak G}ammamma^{-1}(z,u)$ (measured by angle), \baregin{align} 2\partiali \widehat{c_{1,\nuablabla''}} ({\mathfrak G}ammamma^{-1}(z,u))=\sqrt{-1}nt_{{\mathfrak G}ammamma^{-1}(z,u)}\lambdaambdangle \nuablabla''_X \mathbf{N}, \mathbf J\mathbf N\ranglegle, \e_2and{align} where $X$ is the unit tangent vector of ${\mathfrak G}ammamma^{-1}(z,u)$. Thus by the choice of $\mathbf{N}$ together with (\ref{small -connection-1}), (\ref {small -connection-2}) and (\ref{second-form-2}), we see that the angle is close to the length of ${\mathfrak G}ammamma^{-1}(z,u)$. 
By (\ref{small-class-2}), it follows that ${\mathfrak f}rac{{\rm vol}({\mathfrak G}ammamma^{-1}(z,u))}{2\partiali}$ is close to zero modulo integers. Hence, the non-collapsing of $ B_{(0,x)}(1)$ implies that ${\rm vol}({\mathfrak G}ammamma^{-1}(z,u))$ is close to $2\partiali$. Consequently, we prove that $t$ is close to $2\partiali$ by (\ref{cone-volume-3}) in Lemma \ref{level-volume}. \e_2and{proof} \baregin{proof}[Proof of Theorem \ref{thm-kahler-3}] By Volume Comparison Theorem \ref {volume-comparison}, for any $r\lambdae 1$, we have $${\rm vol}_{g_i}({\rm vol}(B_p(r))\mathfrak ge \lambdaambdambda_0r^n,~{\mathfrak f}orall ~p\sqrt{-1}n ~M_i,$$ where $\lambdaambdambda_0$ depends only on the constants $\Lambdaambdambda, A, v$ in Definition \ref{almost-kr-soliton}. Thus by Gromov's compactness theorem [Gr], there exists a subsequence of $(M_i,g_i;p_i)$ which converge to a metric space $Y_\sqrt{-1}nfty$ in the pointed Gromov-Hausdorff topology. In the remaining, we show that $\mathcal{S}(Y_\sqrt{-1}nfty)=\mathcal{S}_{2n-4}$. We will use the argument by contradiction. On the contrary, for a ball $B_y(1)\sigmagmaubset Y$, by Proposition \ref{prop-even-dim} in Appendix 2, there exists a point $z\sqrt{-1}n S\cap B_y(1)\nusubseteqq S_{2n-4}$ and there exists a sequence $\{r_i\}~(r_i\tauo 0)$ such that $(Y,{\mathfrak f}rac{d}{r_i^2};z)$ converge a tangent cone $T_zY=\mathbb{R}^{2n-2}\tauimes C(X)$. 
This implies that exists an $\e_2apsilonsilon>0$ such that the unit metric ball $B_{z_\sqrt{-1}nfty}(1)\sigmagmaubset T_zY$ centered at $z_\sqrt{-1}nfty\cong z$ satisfies \baregin{align} {\rm d}_{GH}(B_{z_\sqrt{-1}nfty}(1),B(1))>2\e_2apsilonsilon,\e_2and{align} and for any $l>>1$ and $\e_2apsilonsilon<<1$ one can choose sufficiently large numbers $i$ and $k$ such that \baregin{align}\lambdaambdabel{assumption-thm-kahler-3} &{\rm d}_{GH}({\mathfrak h}at B_{z_k}(1),B(1))>\e_2apsilonsilon,\\ &{\rm d}_{GH}({\mathfrak h}at B_{z_k}(l), B_{(0,x)}(l))< \e_2ata\nuotag, \e_2and{align} where $z_k\sqrt{-1}n M_k\tauo z\sqrt{-1}n Y$ as $k\tauo \sqrt{-1}nfty$, and $ {\mathfrak h}at B_{z_k}(1)$ and ${\mathfrak h}at B_{z_k}(l)$ are two balls with radius $1$ and $l$ respectively in $(M_k,{\mathfrak f}rac{g_k}{r_i^2})=( M_k,{\mathfrak h}at g_k)$ . On the other hand, by using Volume Comparison Theorem \ref{volume-comparison}, for fixed $i$, we can choose large enough $k$ such that \baregin{align} {\mathfrak f}rac{r_i^2}{{\rm vol}( B_{z_k}(2r_i))}\sqrt{-1}nt_{ B_{z_k}(2r_i)}|{\rm Ric}(g_k)-g_k+\nuablabla\Omegaegaegaverline\nuablabla f_k| d\tauext{v}_{g_k}<{\mathfrak f}rac{1}{2} \deltaelta.\nuotag \e_2and{align} Since \baregin{align} {\mathfrak f}rac{r_i^2}{{\rm vol}( B_{z_k}(2r_i))}\sqrt{-1}nt_{ B_{z_k}(2r_i)}|g_k|d\tauext{v}_{g_k}\lambdaeq c(n,C)r_i^2\nuotag, \e_2and{align} we have \baregin{align} {\mathfrak f}rac{1}{{\rm vol}( {\mathfrak h}at B_{z_k}(2))}\sqrt{-1}nt_{{\mathfrak h}at B_{z_k}(2)}|{\rm Ric}({\mathfrak h}at g_k)+\nuablabla\Omegaegaegaverline\nuablabla f_k| d\tauext{v}_{{\mathfrak h}at g_k}< \deltaelta. \e_2and{align} Hence, for large $k$, $( M_k,{\mathfrak h}at g_k)$ satisfies the conditions i-v) in Lemma \ref{epsilon-regularity}, and consequently, we get $${\rm d}_{GH}({\mathfrak h}at B_{z_k}(1),B(1))< \e_2apsilonsilon,$$ which is a contradiction to (\ref{assumption-thm-kahler-3}). The theorem is proved. 
\e_2and{proof} Theorem \ref{thm-kahler-2} follows from Theorem \ref{thm-kahler-3} with the help of Lemma \ref{condtion-almost-kr-soliton} and the relation (\ref{volume-lower-bound}). \vskip3mm \sigmagmaection{Appendix 1} This appendix is a discussion about how to use the technique of conformal transformation from [TZh] to prove Theorem \ref {thm-kahler-1} and Theorem \ref {thm-kahler-2} in Section 6. We would like to emphasis on the different situation after the change of Ricci curvature by the conformal transformation. First, Theorem \ref {thm-kahler-1} can be proved by using the conformal technique. In fact, by the formula of Ricci curvature for conformal metric $e^{2u}g$, \baregin{align}\lambdaambdabel{conformal-curvature} &\tauext{Ric }(e^{2u}g)\nuotag\\ &=\tauext{Ric }(g)-(n-2)(\tauext{hess }u-du\Omegaegaegatimes du)+({\mathfrak D}elta u+(n-2)|\nuablabla u|^2)g, \e_2and{align} the condition $\tauext{Ric }^f_M(g)\mathfrak geq -C$ implies that Ricci curvature $\tauext{Ric }(e^{-{\mathfrak f}rac{2f}{n-2}}g)$ of conformal metric $e^{-{\mathfrak f}rac{2f}{n-2}}g$ is bounded below if both $\nuablabla f$ and ${\mathfrak D}elta f$ are bounded. Thus by Lemma \ref{lemma-partial-theta}, we see that $$\tauext{Ric }(e^{{\mathfrak f}rac{2\tauhetaeta_X(\partialhi_t)}{n-2}}g_{t})$$ is uniformly bounded below. Hence, Theorem \ref {thm-kahler-1} follows from Theorem 6.2 in [CC2] immediately. Secondly, following the proof of Theorem 5.4 in [Ch3], Lemma \ref{epsilon-regularity} with an additional condition $ \tauext{vi)}~ |{\mathfrak D}elta f|<\tauau$ can be proved by using the conformal change of the bundle metric. We note that the condition vi) can be guaranteed for the K\"ahler manifolds $(M,g_t)$ in Theorem \ref {thm-kahler-2} with blowing-up metrics. Thus by (\ref{conformal-curvature}), the Ricci curvature of blowing-up metric of $e^{{\mathfrak f}rac{2\tauhetaeta_X(\partialhi_t)}{n-2}}g_{t}$ is almost positive. 
For a K\"{a}hler manifold $(M,g,\mathbf{J})$, the $(1,0)$-type Hermitian connection $\nuablabla$ on the holomorphic bundle $(TM,h)$ is same as the Levi-Civita connection, where $h$ is the Hermitian metric corresponding to $g$. Then $c_{1,\nuablabla}$ of $(TM,h)$ is the same as the Ricci form of $g$. If we choose a Hermitian metric $e^{\partialsi}g$ for a smooth function $\partialsi$, then $$\tauilde{\nuablabla}=\nuablabla+\partialartialrtial \partialsi$$ is the corresponding $(1,0)$-type Hermitian connection. It follows \baregin{align}F^{\tauilde{\nuablabla}}=F^\nuablabla+d\partialartialrtial \partialsi\nuotag\e_2and{align} and \baregin{align}\lambdaambdabel{equ-conf} \sigmagmaqrt{-1}tr(F^{\tauilde{\nuablabla}})=\sigmagmaqrt{-1}tr(F^\nuablabla)-n\sigmagmaqrt{-1}\partialartialrtial\barar{\partialartialrtial}\partialsi, \e_2and{align} where $F^\nuablabla$ ($F^{\tauilde{\nuablabla}}$) denotes the curvature of the connection $\nuablabla$ ($\tauilde \nuablabla$) on $TM$. Thus by putting $\partialsi=-{\mathfrak f}rac{2\partiali}{n}f$ and using (\ref{equ-conf}), we have \baregin{align}\lambdaambdabel{modified-small-connection} \widehat{c_{1,\tauilde{\nuablabla}}}( {\mathfrak G}ammamma^{-1}(z,u) )=\sqrt{-1}nt_{ {\mathfrak G}ammamma^{-1}(z,u)}|\tauext{Ric }(\Omegaegaegamega_g)+\sigmagmaqrt{-1}\partialartialrtial\barar{\partialartialrtial}f|, ~ \tauext{mod } \mathbb{Z}, \e_2and{align} where the map ${\mathfrak G}ammamma$ is defined as in Section 5 and Section 6 for the conformal metric $\tauilde g=e^{-{\mathfrak f}rac{2f}{n-2}}g$. Thus $\widehat{c_{1,\tauilde{\nuablabla}}}({\mathfrak G}ammamma^{-1}(z,u))$ is small modulo integers. 
Moreover, by Theorem 3.7 in [CCT] (compared to Lemma \ref{small-second-form} in Section 5) , it holds \baregin{align}\lambdaambdabel{second-form-conformal-metric} {\mathfrak f}rac{1}{ V_{\mathfrak G}ammamma (z,u) } \sqrt{-1}nt_{{\mathfrak G}ammamma^{-1} (z,u)}|{\mathfrak P}i_{{\mathfrak G}ammamma^{-1}(z,u)}-u^{-1}\tauilde g_{{\mathfrak G}ammamma^{-1}(z,u)}\Omegaegaegatimes\nuablabla u|^2<{\mathfrak P}si. \e_2and{align} On the other hand, since the Ricci curvature of $\tauilde g$ is almost positive, for the connection $\tauilde\nuablabla$, we can follow the argument in proof of Theorem 5.4 [Ch3] to show that the quantity $2\partiali \widehat{c_{1,\tauilde\nuablabla}} ({\mathfrak G}ammamma^{-1}(z,u))$ is close to a holonomy of another perturbation connection $\tauilde\nuablabla''$ of $\tauilde\nuablabla$ around ${\mathfrak G}ammamma^{-1}(z,u)$ (also see the argument in proof of Lemma \ref{epsilon-regularity}). The late is close to $$\sqrt{-1}nt_{{\mathfrak G}ammamma^{-1} (z,u)}{\mathfrak P}i_{{\mathfrak G}ammamma^{-1}(z,u)}.$$ Thus combining (\ref{modified-small-connection}) and (\ref{second-form-conformal-metric}), we get $$ |\widehat{c_{1,\tauilde\nuablabla}}({\mathfrak G}ammamma^{-1}(z,u))-{\mathfrak f}rac{{\rm vol}({\mathfrak G}ammamma^{-1}(z,u))}{2\partiali}|<{\mathfrak P}si.$$ It follows that the diameter of section $X$ in two dimensional cone $C(X)$ with rescaled cone metric is close to $2\partiali$. Thus the Gromov-Hausdorff distance between $B_p(1)$ and $B_{(0,x)}(1)$ both with rescaled metrics is close to zero. By Theorem 9.69 in [Co3], we prove Lemma \ref{epsilon-regularity} with the additional condition vi). Theorem \ref {thm-kahler-2} follows from applying Lemma \ref{epsilon-regularity} to the sequence $\{(M,g_t)\}$ $(t\tauo 1)$ with blowing-up metrics, for details to see the proof of Theorem \ref {thm-kahler-3} in the end of Section 6. \vskip3mm \sigmagmaection{Appendix 2} In this appendix, we prove (\ref{J-invariant}) in Section 6. 
We need several lemmas. First, as an application of Lemma \ref{harmonic-estimate-annual-2}, we have \baregin{lem} \lambdaambdabel{almost-gradient-vector} Under the conditions of Lemma \ref{harmonic-estimate-annual-1}, for a vector field $X$ on $A_p(a,b)$ which satisfies \baregin{align}\lambdaambdabel{conditions} |X|_{C^0(A_p(a,b))}\lambdaeq D , {\mathfrak f}rac{1}{{\rm vol }^f(A_p(a,b))}\sqrt{-1}nt_{A_p(a,b)}|\nuablabla X|^2 d\tauext{v}^f< \deltaelta, \e_2and{align} there exists a $f$-harmonic function $\tauhetaeta$ defined in $A_p(a_2,b_2)$ such that \baregin{align}\lambdaambdabel{gradient-est-app} {\mathfrak f}rac{1}{{\rm vol }^f(A_p(a_2,b_2))}\sqrt{-1}nt_{A_p(a_2,b_2)}|\nuablabla\tauhetaeta-X|^2 d\tauext{v}^f < {\mathfrak P}si(\e_2apsilonsilon,\Omegaegaegamega, \deltaelta;A,a_1,b_1,a_2,a,b),\e_2and{align} and \baregin{align}&{\mathfrak f}rac{1}{{\rm vol }^fA_p(a_3,b_3)}\sqrt{-1}nt_{A_p(a_3,b_3)}|{\rm hess }~\tauhetaeta|^2 d{\rm v}^f\nuotag\\ &<{\mathfrak P}si(\e_2apsilonsilon,\Omegaegaegamega, \deltaelta;A,a_1,b_1,a_2,b_2,a_3,b_3,a,b)\lambdaambdabel{hessian-est}, \e_2and{align} where $A_p(a_3,b_3)$ is an even smaller annulus in $A_p(a_2,b_2)$. \e_2and{lem} \baregin{proof} Let $h$ be the $f$-harmonic function constructed in (\ref{f-harmonic-radial}) in Section 2 and $\tauhetaeta_1=\lambdaambdangle X,\nuablabla h\ranglegle$. 
Then \baregin{align} \nuonumber \nuablabla \tauhetaeta_1=\lambdaambdangle\nuablabla X, \nuablabla h\ranglegle+\lambdaambdangle X,{\rm hess }~h\ranglegle, \e_2and{align} It follows \baregin{align} \nuonumber &\sqrt{-1}nt_{A_p(a_2,b_2)}|\nuablabla\tauhetaeta_1-X|^2d\tauext{v}^f\\ &\lambdaeq 2\sqrt{-1}nt_{A_p(a_2,b_2)}(\lambdaambdangle\nuablabla X, \nuablabla h\ranglegle^2d{\rm v}^f+\lambdaambdangle X,{\rm hess }~h-g\ranglegle^2)d{\rm v}^f.\nuotag \e_2and{align} Thus by (\ref{conditions}) and Lemma \ref{harmonic-estimate-annual-2}, we get \baregin{align}\lambdaambdabel{gradient-est} {\mathfrak f}rac{1}{{\rm vol }^f(A_p(a_2,b_2))}\sqrt{-1}nt_{A_p(a_2,b_2)}|\nuablabla\tauhetaeta_1-X|^2 d{\rm v}^f < {\mathfrak P}si.\e_2and{align} Let $\tauhetaeta$ be a solution of equation, \baregin{align} {\mathfrak D}elta^f\tauhetaeta=0,~{ \rm in}~A_p(a_2,b_2), \e_2and{align} with $\tauhetaeta=\tauhetaeta_1$ ~on $\partialartialrtial A_p(a_2,b_2)$. Then \baregin{align} &\sqrt{-1}nt_{A_p(a_2,b_2)}(\lambdaambdangle \nuablabla\tauhetaeta-\nuablabla\tauhetaeta_1,X\ranglegle+(\tauhetaeta-\tauhetaeta_1){\rm div } X)d{\rm v}^f\nuotag\\ &=\sqrt{-1}nt_{A_p(a_2,b_2)}{\rm div } ((\tauhetaeta-\tauhetaeta_1)X)d{\rm v}^f= \sqrt{-1}nt_{A_p(a_2,b_2)} (\tauhetaeta-\tauhetaeta_1)\lambdaambdangle \nuablabla f, X\ranglegle d{\rm v}^f.\nuotag \e_2and{align} It follows \baregin{align}\lambdaambdabel{divergence} \sqrt{-1}nt_{A_p(a_2,b_2)}\lambdaambdangle \nuablabla\tauhetaeta-\nuablabla\tauhetaeta_1,X\ranglegle d{\rm v}^f< {\mathfrak P}si. 
\e_2and{align} On the other hand, since \baregin{align} \nuonumber \sqrt{-1}nt_{A_p(a_2,b_2)}\lambdaambdangle \nuablabla\tauhetaeta_1-\nuablabla \tauhetaeta, \nuablabla\tauhetaeta\ranglegle d{\rm v}^f=\sqrt{-1}nt_{A_p(a_2,b_2)}(\tauhetaeta-\tauhetaeta_1){\mathfrak D}elta^f\tauhetaeta d{\rm v}^f=0, \e_2and{align} we have \baregin{align} \sqrt{-1}nt_{A_p(a_2,b_2)} |\nuablabla \tauhetaeta|^2 d{\rm v}^f=\sqrt{-1}nt_{A_p(a_2,b_2)} \lambdaambdangle \nuablabla\tauhetaeta,\nuablabla\tauhetaeta_1\ranglegle d\tauext{v}^f.\nuotag \e_2and{align} By the H\"older inequality, we get $$\sqrt{-1}nt_{A_p(a_2,b_2)} |\nuablabla \tauhetaeta|^2 d{\rm v}^f\lambdae \sqrt{-1}nt_{A_p(a_2,b_2)} |\nuablabla \tauhetaeta_1|^2 d{\rm v}^f<C.$$ Hence, \baregin{align} \nuonumber & \sqrt{-1}nt_{A_P(a_2,b_2)}\lambdaambdangle \nuablabla \tauhetaeta-X\ranglegle^2d{\rm v}^f\\ &=\sqrt{-1}nt_{A_p(a_2,b_2)}(|\nuablabla \tauhetaeta|^2+|X|^2-2\lambdaambdangle \nuablabla\tauhetaeta ,X\ranglegle)d{\rm v}^f\nuotag\\ \nuonumber &=\sqrt{-1}nt_{A_p(a_2,b_2)}(\lambdaambdangle\nuablabla \tauhetaeta, \nuablabla\tauhetaeta_1\ranglegle+|X|^2-2\lambdaambdangle \nuablabla\tauhetaeta ,X\ranglegle)d{\rm v}^f&\\ &=\sqrt{-1}nt_{A_p(a_2,b_2)}(\lambdaambdangle \nuablabla\tauhetaeta_1-X,\nuablabla\tauhetaeta\ranglegle+\lambdaambdangle X,X-\nuablabla\tauhetaeta_1\ranglegle+\lambdaambdangle X,\nuablabla\tauhetaeta_1-\nuablabla \tauhetaeta\ranglegle)d{\rm v}^f.& \e_2and{align} Therefore, combining (\ref{conditions}) and (\ref{divergence}), we derive (\ref{gradient-est-app}) immediately. To get (\ref{hessian-est}), we choose a cut-off function which is $\partialhi$ supported in $A_p(a_2,b_2)$ with bounded gradient and $f$-Lapalace as in Lemma \ref{cut-off} in Section 1. 
Then by the Bochner identity, we have \baregin{align} \sqrt{-1}nt_{A_p(a_2,b_2)}{\mathfrak f}rac{1}{2}\partialhi{\mathfrak D}elta^f|\nuablabla \tauhetaeta|^2d{\rm v}^f=\sqrt{-1}nt_{A_p(a_2,b_2)}\partialhi(|{\rm hess }~\tauhetaeta|^2+{\rm Ric }(\nuablabla \tauhetaeta,\nuablabla \tauhetaeta)) d{\rm v}^f.\nuotag \e_2and{align} Since \baregin{align} \sqrt{-1}nt_{A_p(a_2,b_2)}{\mathfrak f}rac{1}{2}\partialhi{\mathfrak D}elta^f|X|^2d{\rm v}^f =-\sqrt{-1}nt_{A_p(a_2,b_2)}\lambdaambdangle\nuablabla \partialhi,\lambdaambdangle X,\nuablabla X\ranglegle\ranglegle d{\rm v}^f,\nuotag \e_2and{align} we obtain \baregin{align} \nuonumber \sqrt{-1}nt_{A_p(a_2,b_2)}\partialhi(|{\rm hess }\tauhetaeta|^2 d{\rm v}^f& < \sqrt{-1}nt_{A_p(a_2,b_2)}{\mathfrak f}rac{1}{2}\partialhi{\mathfrak D}elta^f(|\nuablabla \tauhetaeta|^2-|X|^2)d{\rm v}^f\\ &+{\mathfrak P}si(\e_2apsilonsilon,\Omegaegaegamega, \deltaelta;A,a_1,b_1,a_2,b_2,a_3,b_3,a,b).& \e_2and{align} Therefore, using integration by parts, we derive (\ref{hessian-est}) from (\ref{gradient-est-app}). \e_2and{proof} Next, we generalize Proposition \ref{proof-splitting} to the case without the assumption of the existence of an almost line. \baregin{lem}\lambdaambdabel{split-integral} Let $(M,g)$ be a Riemannian manifold which satisfies (\ref{be-curvature-condition}). Let $h^+$ be a $f$-harmonic function which satisfies \baregin{align}\lambdaambdabel{gradient-C^0-app}|\nuablabla h^+|\lambdaeq c(n,\Lambdaambdambda,A), \e_2and{align} \baregin{align} \lambdaambdabel{gradient-condition-app} {\mathfrak f}rac{1}{\rm {vol}^f (B_p(1))}|\sqrt{-1}nt_{B_p(1)}|\nuablabla h^+|^2-1| d{\rm v}^f< \deltaelta, \e_2and{align} \baregin{align}\lambdaambdabel{hessian-condition-app}{\mathfrak f}rac{1}{{\rm vol}^f (B_p(1))}\sqrt{-1}nt_{B_p(1)}|{\rm hess }~h^+|^2 d\tauext{v}^f < \deltaelta. 
\end{align}
Then there exists a $\Psi(\delta; A,\Lambda,n)$ Gromov-Hausdorff approximation from $B_p(\frac{1}{8})$ to $B_{(0\times x)}(\frac{1}{8})\subset\mathbb{R}\times X$.
\end{lem}
The proof of Lemma \ref{split-integral} depends on the following fundamental lemma, which is in fact a consequence of Theorem 16.32 and Lemma 8.17 in [Ch1].
\begin{lem}\label{level-set-function}
Under the condition (\ref{be-curvature-condition}), for an $f$-harmonic function $h^+$ which satisfies (\ref{gradient-C^0-app}), (\ref{gradient-condition-app}) and (\ref{hessian-condition-app}) in $B_p(1)$, there exists a Lipschitz function $\rho$ in $B_p(\frac{1}{4})$ such that $|h^+-\rho|< \Psi$ and
\begin{align}\label{app-dis}
||\rho(z)-t|-d(z,\rho^{-1}(t))|< \Psi.
\end{align}
\end{lem}
\begin{proof}
First, we notice that the following Poincar\'{e} inequality holds for any $C^1$-function $h$,
\begin{align}\label{Poincare}
&\frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))}\int_{B_p(\frac{1}{2})}|h-a|^2 d{\rm v}^f\notag\\
&\leq c(n,\Lambda, A)\frac{1}{{\rm vol}^f(B_p(1))}\int_{B_p(1)}|\nabla h|^2 d{\rm v}^f,
\end{align}
where
$$a=\frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))}\int_{B_p(\frac{1}{2})}h d{\rm v}^f.$$
This is in fact a consequence of Lemma \ref{segment-inequ} by applying the function $e$ to $|\nabla h|^2$, because
\begin{align}
& \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})}|h(x)-a|^2 d{\rm v}^f\notag\\
&= \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int _{B_p(\frac{1}{2})} d{\rm v}_x^f[ \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})}(h(x)-h(y))d {\rm v}_y^f]^2 \notag\\
& \leq
\frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})} \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})}(h(x)-h(y))^2 d {\rm v}_x^f d{\rm v}_y^f\notag\\
&\leq \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})} \frac{1}{{\rm vol}^f(B_p(\frac{1}{2}))} \int_{B_p(\frac{1}{2})}\int_0^{d(x,y)}|\nabla h(\gamma(s))|^2 ds\, d{\rm v}_x^f d {\rm v}_y^f\notag\\
&\leq c(n,\Lambda, A) \frac{1}{{\rm vol}^f(B_p(1))} \int_{B_p(1)}|\nabla h|^2 d{\rm v}^f.\notag
\end{align}
Thus by taking $h=|\nabla h^+|^2$, we get from (\ref{gradient-C^0-app})-(\ref{hessian-condition-app}),
\begin{align}\label{gradient-condition-2-app}
\frac{1}{{\rm vol}^f (B_p(\frac{1}{2}))}\int_{B_p(\frac{1}{2})}||\nabla h^+|^2-1| d{\rm v}^f< \Psi.
\end{align}
Next we apply Theorem 16.32 in [Ch1] to $h^+$ with the conditions (\ref{gradient-C^0-app}), (\ref{gradient-condition-app}) and (\ref{gradient-condition-2-app}).
It suffices to check a doubling condition for the measure $d{\rm v}^f$ and an $(\epsilon,\delta)$-inequality.
The $(\epsilon,\delta)$-inequality says: for any $\epsilon,\delta>0$ and two points $x,y\in M$ with $d(x,y)=r$, there exist $C_{\epsilon,\delta}$ and another two points $x', y'$ with ${\rm d}(x', x)\leq \delta r $ and $d(y', y)\leq \delta r $, respectively, such that
\begin{align}
&F_{\phi,\epsilon}(z_1',z_2')\leq \frac{C_{\epsilon,\delta}r}{{\rm vol}^f(B_{z_1}((1+\delta)(1+2\epsilon)r))}\int_{B_{z_1}((1+\delta)(1+2\epsilon)r)}\phi d{\rm v}^f,
\end{align}
where
$$F_{\phi,\epsilon}(x,y)=\inf\int_0^l \phi(c(s))ds, ~\forall ~\phi(\ge 0)\in ~C^0(M),$$
and the infimum is taken among all curves from $x$ to $y$ with length $l\leq (1+\epsilon){\rm d}(x,y)$.
The doubling condition follows from Volume Comparison Theorem \ref{volume-comparison}, and the $(\epsilon,\delta)$-inequality follows from Volume Comparison Theorem \ref{volume-comparison} and the segment inequality in Lemma \ref{equ-seg}.
Thus we can construct a Lipschitz function $\rho$ from $h^+$ such that $|h^+-\rho|\leq \Psi$. Moreover, by Lemma 8.17 in [Ch1], we get (\ref{app-dis}).
\end{proof}
\begin{proof}[Proof of Lemma \ref{split-integral}]
As in the proof of Proposition \ref{proof-splitting}, we define $X=(h^+)^{-1}(0)$ and the map $u$ by
\begin{align} \nonumber
u(q)=(h^+(q),x_q),
\end{align}
where $x_q$ is the nearest point in $X$ to $q$. To show that $u$ is a Gromov-Hausdorff approximation, we shall use Lemma \ref{cheeger-lemma}. In fact, by (\ref{app-dis}) in Lemma \ref{level-set-function}, we see
\begin{align}\label{gradient-instead}
||h^+(z)-t|-{\rm d}(z,(h^+)^{-1}(t))|< \Psi.
\end{align}
Then, using (\ref{gradient-instead}) instead of (\ref{triangular-equ}), Lemma \ref{cheeger-lemma} is still true since (\ref{hessian-condition-app}) holds [C2]. Hence the proof in Proposition \ref{proof-splitting} works for Lemma \ref{split-integral}.
\end{proof}
Now we begin to prove (\ref{J-invariant}) in Section 6. Let $(M,g)$ be a K\"{a}hler manifold which satisfies (\ref{condition-1-regularity}). Let $B_p(l)\subset M$ and $B_{(0\times x)}(l)\subset \mathbb{R}^{2n-2}\times X$ be two $l$-radius distance balls as in Section 6. Then
\begin{prop}\label{J-invariant-property}
Suppose that
\begin{align}\label{cone-condition-app}
{\rm d}_{GH}(B_p(l),B_{(0\times x)}(l)) <\eta.
\end{align}
Then either $B_p(\frac{1}{8})$ is close to a Euclidean ball in the Gromov-Hausdorff topology or, for a suitable choice of the orthogonal coordinates in $\mathbb{R}^{2n-2}$, the map $\Phi=(h_1,...,h_{2n-1})$ constructed in Section 5 satisfies
\begin{align}\label{almost-complex}
\frac{1}{{\rm vol}^f B_p(1)}\int_{B_p(1)}|\nabla h_{n-1+i}-\mathbf{J}\nabla h_i|^2 d{\rm v}^f <\Psi(\tau,\eta,\frac{1}{l};v).
\end{align}
\end{prop}
\begin{proof}
Roughly speaking, if the space spanned by $\nabla h_i$ is not almost $\mathbf{J}$-invariant, we can find a vector field nearly perpendicular to these $\nabla h_i$, and it satisfies the condition (\ref{conditions}) in Lemma \ref{almost-gradient-vector}. Then by Lemma \ref{split-integral}, $B_p(1)$ will be almost split off along a new line. This implies that $B_p(\frac{1}{8})$ is close to a Euclidean ball.
Let $V$ be a $(4n-4)$-dimensional linear space spanned by $\nabla h_i,\mathbf{J}\nabla h_i$ with the $L^2$-inner product,
$$(b_i,b_j)_{L^2}=\int_{B_p(1)} \langle b_i, b_j\rangle d\text{v}.$$
Then $\mathbf{J}$ induces a complex structure on $V$ such that the inner product is $\mathbf{J}$-invariant.
We introduce a distance in the Grassmannian $G(2n,k)$ as follows,
\begin{align}
{\rm d}(\Lambda_1,\Lambda_2)^2=\sum_j \|{\rm pr}_{\Lambda_2}^{\perp}(e_j)\|_{L^2}^2
\end{align}
for any two $k$-dimensional subspaces $\Lambda_1,\Lambda_2$ in $\mathbb{R}^{2n}$, where $e_j$ is a unit orthogonal basis of $\Lambda_1$ and ${\rm pr}_{\Lambda_2}^{\perp}$ is the complement of the orthogonal projection to $\Lambda_2$.
First we suppose that
$${\rm d}(W,\mathbf{J} W)^2 < \Psi,$$
where $W={\rm span}\{\nabla h_i|i=1,2,...,2n-2\}$. Then by the Gram-Schmidt process, one can find a unit orthogonal basis $w_i$ of $W$ such that
$$\|\mathbf{J}w_i-w_{n-1+i}\|_{L^2} < \Psi.$$
This is equivalent to saying that there exists a matrix $a_{ij}\in GL(2n-2,\mathbb{R})$ which is nearly orthogonal such that
$$w_i=\sum_j a_{ij}\nabla h_j. $$
Thus by changing an orthogonal basis in $\mathbb{R}^{2n-2}$, (\ref{almost-complex}) will be true.
Secondly, we suppose that
$${\rm d}(W,\mathbf{J}W) >\delta_0.$$
This implies that there exists some $i$ such that
$$ \|{\rm pr}_W^\perp(\mathbf{J}\nabla h_i)\|_{L^2}=\|\mathbf{J}\nabla h_i-{\rm pr}_W(\mathbf{J}\nabla h_i) \|_{L^2}> \frac{\delta_0}{2n}.$$
Let
\begin{align}\label{vector}
X=\frac{{\rm pr}_W^\perp(\mathbf{J}\nabla h_i)}{\|{\rm pr}_W^{\perp}(\mathbf{J}\nabla h_i)\|_{L^2}}.
\end{align}
Then $X$ is perpendicular to $W$ with $\|X\|_{L^2}=1$ and it satisfies the condition (\ref{conditions}) in Lemma \ref{almost-gradient-vector}.
Thus we see that there exists an $f$-harmonic function $\theta$ which satisfies the conditions (\ref{gradient-C^0-app}), (\ref{gradient-condition-app}) and (\ref{hessian-condition-app}) in Lemma \ref{split-integral}. As a consequence, $B_p(\frac{1}{8})$ will almost split off along a new line associated to the coordinate function $\theta$. Since $X\in W^{\perp}$, $B_p(\frac{1}{8})$ in fact almost splits off $\mathbb{R}^{2n-1}$. But the latter implies that $B_p(\frac{1}{8})$ is close to a Euclidean ball in the Gromov-Hausdorff topology, by using a topological argument as in Theorem 6.2 in [CC2] or by the following Proposition \ref{prop-even-dim} for K\"ahler manifolds.
\end{proof}
\begin{prop}\label{prop-even-dim}
Let $Y$ be a limit space of a sequence of K\"ahler manifolds in Theorem \ref{dimension-k}. Then
$$\mathcal{S}(Y)=\mathcal{S}_{2k+1}=\mathcal{S}_{2k}.$$
\end{prop}
\begin{proof}
It suffices to show that if a tangent cone $T_yY$ at a point $y\in Y$ can split off $\mathbb{R}^{2k+1}$, then $T_yY$ can split off $\mathbb{R}^{2k+2}$. Let $h_i$ be $2k+1$ $f$-harmonic functions which approximate $2k+1$ distance functions with different directions as constructed in Section 2 and Section 3.
Then as in the proof of Proposition \ref{J-invariant-property}, we consider a linear space $V=\text{span}\{\nabla h_i,\mathbf{J}\nabla h_i\}$ with the $L^2$-inner product. Since the dimension of $W=\text{span}\{\nabla h_i\}$ is odd, we have
$${\rm d}(W,\mathbf{J}W)\geq 1.$$
Thus $T_yY$ will split off a new line. The proposition is proved.
\end{proof}
\vskip3mm
\begin{thebibliography}{99}
\bibitem{BE}[BE] Bakry, D. and Emery, M., Diffusions hypercontractives, In S\'{e}minaire de probabilit\'{e}s, XIX, 1983/84, Lecture Notes in Math., vol. 1123, 177-206, Springer, Berlin, 1985.
\bibitem{CC1}[CC1] Cheeger, J. and Colding, T., Lower bounds on Ricci curvature and almost rigidity of warped products, Ann. of Math. 144 (1996), 189-237.
\bibitem{CC2}[CC2] Cheeger, J. and Colding, T., On the structure of spaces with Ricci curvature bounded below I, J. Differential Geom. 45 (1997), 406-480.
\bibitem{CC3}[CC3] Cheeger, J. and Colding, T., On the structure of spaces with Ricci curvature bounded below II, J. Differential Geom. 54 (2000), 13-35.
\bibitem{Ch1}[Ch1] Cheeger, J., Differentiability of Lipschitz functions on metric measure spaces, GAFA Vol. 9 (1999), 428-517.
\bibitem{Ch2}[Ch2] Cheeger, J., Degeneration of Riemannian metrics under Ricci curvature bounds, Scuola Normale Superiore, Pisa (2001).
\bibitem{Ch3}[Ch3] Cheeger, J., Integral bounds on curvature, elliptic estimates and rectifiability of singular sets, GAFA Vol. 13 (2003), 20-72.
\bibitem{Co1}[Co1] Colding, T., Shape of manifolds with positive Ricci curvature, Invent. Math. 124 (1996), 175-191.
\bibitem{Co2}[Co2] Colding, T., Large manifolds with positive Ricci curvature, Invent. Math. 124 (1996), 193-214.
\bibitem{Co3}[Co3] Colding, T., Ricci curvature and volume convergence, Ann. of Math. 145 (1997), 477-504.
\bibitem{CCT}[CCT] Cheeger, J., Colding, T.
and Tian, G., On the singularities of spaces with bounded Ricci curvature, GAFA Vol. 12 (2002), 873-914.
\bibitem{Fu}[Fu] Futaki, A., K\"ahler-Einstein metrics and integral invariants, Lecture Notes in Math., vol. 1314 (1988), Springer-Verlag, Berlin, New-York.
\bibitem{Gr}[Gr] Gromov, M., Structures m\'{e}triques pour les vari\'{e}t\'{e}s riemanniennes. Edited by J. Lafontaine and P. Pansu. Textes Math\'{e}matiques, 1. CEDIC, Paris, 1981.
\bibitem{Ha}[Ha] Hamilton, R. S., The formation of singularities in the Ricci flow, Surv. Diff. Geom., vol. 2 (1995), 7-136, International Press.
\bibitem{JWZ}[JWZ] Jiang, W., Wang, F. and Zhu, X., Bergman kernels and algebraic structure of limit spaces for a sequence of almost K\"{a}hler-Ricci solitons, preprint, 2013.
\bibitem{Li}[Li] Li, C., Yau-Tian-Donaldson correspondence for $K$-semistable Fano manifolds, arXiv: math.DG/1302.6681v2.
\bibitem{Ma}[Ma] Mabuchi, T., Multiplier hermitian structures on K\"{a}hler manifolds, Nagoya Math. J. 170 (2003), 73-115.
\bibitem{SY}[SY] Schoen, R. and Yau, S.T., Lectures on Differential Geometry, Conf. Proc. and Lecture Notes in Geometry and Topology, vol. 1 (1994), International Press.
\bibitem{TZ1}[TZ1] Tian, G. and Zhu, X.H., Uniqueness of K\"{a}hler-Ricci solitons, Acta Math., 184 (2000), 271-305.
\bibitem{TZ2}[TZ2] Tian, G. and Zhu, X.H., A new holomorphic invariant and uniqueness of K\"{a}hler-Ricci solitons, Comment. Math. Helv. 77 (2002), 297-325.
\bibitem{TZh}[TZh] Tian, G. and Zhang, Z., Degeneration of K\"{a}hler-Ricci solitons, Intern. Math. Res. Notices, 2012, 957-985.
\bibitem{TW}[TW] Tian, G. and Wang, B., On the structure of almost Einstein manifolds, J. Amer. Math. Soc. 28 (2015), no. 4, 1169-1209.
\bibitem{WW}[WW] Wei, G. and Wylie, W., Comparison geometry for Bakry-Emery Ricci curvature, J. Differential Geom. 83 (2009), 337-405.
\bibitem{WZ}[WZ] Wang, F. and Zhu, X.H., Fano manifolds with weak almost K\"ahler-Ricci solitons, Int. Math. Res. Not., 9 (2015), 2437-2464.
\bibitem{Zh}[Zh] Zhu, X.H., K\"ahler-Ricci soliton type equations on compact complex manifolds with $C_1(M)>0$, J. Geom. Anal., 10 (2000), 759-774.
\end{thebibliography}
\end{document}
\begin{document} \vspace*{2em} \begin{center} {\LARGE\bfseries Ineffective descent of genus one curves} \\[1em] Wouter Zomervrucht \\ \today \end{center} \begin{quote} \small {\bfseries Abstract.} Raynaud proved in 1968 that étale descent of genus one curves is not effective in general. In this paper we provide an alternative, simplified construction of this phenomenon. Our counterexample is fully explicit. \end{quote} \section{Introduction} \label{sec:intro} Let $\U = \{U_i : i \in I\}$ be a cover of a scheme $S$, in some topology. A \emph{descent datum of schemes} relative to $\U$ consists of schemes $X_i$ over $U_i$ for all $i \in I$ and isomorphisms $\phi_{ji} \colon X_i \ftimes{U_i} U_{ij} \to X_j \ftimes{U_j} U_{ij}$ over $U_{ij}$ for all $i,j \in I$, satisfying the cocycle condition $\phi_{ki} = \phi_{kj} \phi_{ji}$ on $U_{ijk}$. In favorable situations the descent datum is \emph{effective}, i.e. descends to a scheme $X$ over $S$. For instance, if $\U$ is a Zariski open cover, descent data are better known as gluing data and always effective. In larger topologies, such as the étale topology, ineffective descent data occur. An example can be found in \cite[03FN]{bib:stacks}. A natural next step is to determine classes of schemes for which étale descent is effective. In this paper we consider the case of genus one curves. First, let us once and for all fix our notion of a (relative) curve. \begin{definition} \label{def:curve} Let $S$ be a scheme. A \emph{curve of genus $g$} over $S$ is a proper smooth scheme $X/S$ of relative dimension one, whose geometric fibers are connected curves of genus $g$. \end{definition} For genus one curves, the result is negative. \begin{theorem} \label{thm:genus_one} There exist ineffective étale descent data of genus one curves. \end{theorem} This theorem is due to Raynaud \cite[XIII 3.2]{bib:raynaud}. The aim of the current paper is to provide a simplified counterexample. 
In the case of genus $g \neq 1$ curves, it is well-known that étale, or even fpqc, descent is effective. Indeed, outside genus one the canonical bundle (or its dual) is ample, and the descent comes from descent of quasi-coherent sheaves. See e.g. \cite[4.39]{bib:vistoli} for more details. On genus one curves the canonical bundle is fiberwise trivial and the argument does not apply. Note however that for \emph{elliptic curves} the zero section provides an ample line bundle, and fpqc descent is again effective. Theorem \ref{thm:genus_one} can also be interpreted as follows. Let $\F$ be the fibered category over $\Sch$ that assigns to a scheme $S$ the groupoid $\F(S)$ of genus one curves over $S$. Then $\F$ is not an étale stack. Instead one works with the fibered category $\M_1$ where $\M_1(S)$ is the groupoid of proper smooth algebraic spaces over $S$ whose geometric fibers are genus one curves. Now $\M_1$ is an étale (even fppf) stack; it is the fppf stackification of $\F$. \noindent \textbf{Organization.} The next section reduces the proof of theorem \ref{thm:genus_one} to one concerning torsors under elliptic curves. Section \ref{sec:construction} contains the actual construction. In section \ref{sec:raynaud} we compare it with the original counterexample by Raynaud. \noindent \textbf{Acknowledgements.} The research in this paper is part of the author's master's thesis. I would like to thank Lenny Taelman and Bas Edixhoven for their valuable contributions. \section{Torsors} \label{sec:torsors} Attached to a genus one curve $X/S$ is its Jacobian $E = \Jac(X/S)$. It is an elliptic curve over $S$, endowed with a natural action on $X$ that makes $X$ into an étale $E$-torsor. Now let $\U$ be an étale cover of $S$. A descent datum of genus one curves relative to $\U$ descends to a (not necessarily representable) sheaf of sets $X$ on $S$. Also, by functoriality of $\Jac$ we obtain a descent datum of elliptic curves relative to $\U$. 
The latter descends to an elliptic curve $E/S$. Again the natural action of $E$ on $X$ makes $X$ into an étale $E$-torsor. Conversely, any étale $E$-torsor gives rise to a descent datum of genus one curves relative to some étale cover. So the problem at hand is really to find a non-representable torsor under some elliptic curve. The following theorem from \cite[XIII 2.6]{bib:raynaud} will be useful. \begin{theorem} \label{thm:raynaud} Let $S$ be a local scheme and $E/S$ an elliptic curve. \begin{itemize} \item If $S$ is normal, an étale $E$-torsor is representable if and only if it has finite order in $\HH^1(S,E)$. \item If $S$ is regular, all étale $E$-torsors are representable. \end{itemize} \end{theorem} Here and further on, $\HH^1(S,E)$ always denotes sheaf cohomology on $(\Sch/S)_\et$. Recall that \emph{normal} means all local rings of $S$ are integrally closed domains. \section{Construction} \label{sec:construction} We shall now construct a noetherian normal local scheme $S$, an elliptic curve $\tilde{E}/S$, and a class $\tau \in \HH^1(S,\tilde{E})$ of infinite order. Necessarily $S$ is irregular, so not of dimension $0$ or $1$. Let $k$ be a field of characteristic not $2$. Let $E/k$ be an elliptic curve with a given embedding in $\PP^2$. Let $C \subset \AA^3$ be the affine cone over $E$. Let $s \in C$ be the top of the cone, and $S$ the localization of $C$ at $s$. It is normal by Serre's criterion \cite[23.8]{bib:matsumura} since $S$ is a complete intersection with its singularity in codimension $2$. \begin{lemma} \label{lem:cover} There exists a finite étale cover $\pi \colon S' \to S$ of degree $2$ whose fiber over $s$ consists of two distinct $k$-rational points $s_0,s_1$, such that $E(S' \setminus \{s_1\}) = E(S' \setminus \{s_0\}) = E(k)$. \end{lemma} \begin{proof} Let $E \subset \PP^2$ be given by some cubic $f \in k[x,y,z]$. The ring of $S$ is the integral domain $R = k[x,y,z]_{(x,y,z)} / (f)$. 
Since $2$ is invertible in $k$, a degree $2$ finite étale cover of $S$ may be constructed by adjoining to $R$ the square root of a unit $u \in R^\times$. Set $R' = R[t]/(t^2-u)$ and $S' = \Spec{R'}$. Then $\pi \colon S' \to S$ is split above $s$ if $u(s) \in \kappa(s)$ is a square. We choose $u = 1+x$. For the second part it suffices to prove that all $k$-morphisms $\alpha \colon S' \setminus \{s_1\} \to E$ are constant. Take a point $(a:b:c) \in E(k)$. Let $L \subset C$ be the corresponding ray, and $\eta \in S$ the generic point of $L$. As long as $a$ is non-zero, $u$ is not a square at $\eta$. Then the fiber of $\pi$ at $\eta$ is a single point $\eta'$ with rational function field. Therefore, $\alpha$ must send $\eta'$ to some closed point $p \in E$. By continuity we have $\alpha(s_0) = p$ as well. After passage to an algebraic closure of $k$, the points $\eta'$ as above lie dense in $S' \setminus \{s_1\}$. So $\alpha$ maps a dense subset of $S' \setminus \{s_1\}$ to $p$. Since $p$ is closed, $\alpha$ is constant. \end{proof} \begin{remark} \label{rmk:char} The preceding lemma is still true in characteristic $2$, where one may construct a suitable cover by means of an Artin--Schreier extension. Therefore, the restriction on $k$ is not necessary; it is imposed only to simplify the exposition. \end{remark} Write $U = S \setminus \{s\}$, $U_0 = S' \setminus \{s_1\}$, $U_1 = S' \setminus \{s_0\}$, and $U_{01} = U_0 \cap U_1$. Then $\U = \{U_0,U_1\}$ is an open cover of $S'$. The associated first Čech cohomology is given by the Mayer--Vietoris exact sequence \begin{equation} \label{eq:cech} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E(S') \arrow{r} & E(U_0) \times E(U_1) \arrow{r} & E(U_{01}) \arrow{r} & \HH^1(\U,E) \arrow{r} & 0. \end{tikzcd} \end{equation} By construction we have $E(S') = E(U_0) = E(U_1) = E(k)$. 
So \eqref{eq:cech} reduces to \begin{displaymath} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E(k) \arrow{r} & E(U_{01}) \arrow{r} & \HH^1(\U,E) \arrow{r} & 0. \end{tikzcd} \end{displaymath} The Galois group $G = \Aut(S'/S)$ acts on this sequence: the involution $\sigma \in G$ acts on $E(k)$ by inversion, on $E(U_{01})$ by $a \mapsto -\sigma^*a$, and on $\HH^1(\U,E)$ by $[X] \mapsto [\sigma^{-1}X]$. In fact this action comes from the natural $G$-action on \eqref{eq:cech}. Taking anti-invariants yields \begin{displaymath} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E(k) \arrow{r} & E(U) \arrow{r} & \HH^1(\U,E)^{-\sigma} \end{tikzcd} \end{displaymath} where for any $G$-module $M$ we denote by $M^{-\sigma} = \{m \in M : \sigma m = -m\}$ its subgroup of anti-invariants. Let $\tilde{E} = S' \otimes_{\AUT(S'/S)} E_S$ be the $-1$-twist of $E_S$ along $\pi$. In other words, $\tilde{E}$ is the quotient of $S' \times E_S$ by $\AUT(S'/S)$, where $\sigma$ acts as $(x,a) \mapsto (\sigma x,-a)$. Then $\tilde{E}$ is an elliptic curve over $S$ with a canonical $S'$-isomorphism $E_{S'} \cong \tilde{E}_{S'}$. \begin{lemma} \label{lem:cohom} There is a natural map $\HH^1(S',E)^{-\sigma} \to \HH^1(S,\tilde{E})$ whose kernel is $2$-torsion. \end{lemma} \begin{proof} Let $A = \pi_* E_{S'}$ be the Weil restriction of $E_{S'}$ to $S$. By \cite[VIII 5.6]{bib:sga4-2} the pushforward map $\pi_* \colon \HH^1(S',E) \to \HH^1(S,A)$ is an isomorphism. It is also equivariant for the natural $G$-action on $\HH^1(S,A)$, so $\pi_*$ restricts to an isomorphism $\HH^1(S',E)^{-\sigma} \to \HH^1(S,A)^{-\sigma}$. We have $\tilde{E} = A^{-\sigma}$, or more precisely $\tilde{E}(T) = A(T)^{-\sigma}$ for all schemes $T/S$. Consider the map $\id - \sigma \colon A \to \tilde{E}$ and the inclusion $\tilde{E} \to A$. The induced composition \begin{displaymath} \begin{tikzcd}[column sep=small] \HH^1(S,A) \arrow{r} & \HH^1(S,\tilde{E}) \arrow{r} & \HH^1(S,A). 
\end{tikzcd} \end{displaymath} is again $\id - \sigma$. Restricted to $\HH^1(S,A)^{-\sigma}$ this is simply multiplication by $2$. Hence the kernel of $\HH^1(S,A)^{-\sigma} \to \HH^1(S,\tilde{E})$ is $2$-torsion. \end{proof} The map $U \to C \setminus \{s\} \to E$ is an element of $E(U)$, no multiple of which is constant. Via the injection $E(U)/E(k) \to \HH^1(S',E)^{-\sigma}$ and lemma \ref{lem:cohom} we obtain a non-torsion class $\tau \in \HH^1(S,\tilde{E})$. The corresponding $\tilde{E}$-torsor on $S$ is not representable by theorem \ref{thm:raynaud}. As we have explained in section \ref{sec:torsors}, this proves theorem \ref{thm:genus_one}. \section{Raynaud's approach} \label{sec:raynaud} In this section we briefly compare our construction with that by Raynaud in \cite[XIII 3.2]{bib:raynaud}. We sketch his approach. Let $R$ be a discrete valuation ring in which $2$ is invertible. (As before, the characteristic condition is imposed only to simplify the exposition.) Let $E/R$ be an elliptic curve, and let $V \subset E$ be the complement of the zero section. \begin{lemma} \label{lem:normal} There exist a normal scheme $Z$ over $R$ and an $R$-morphism $f \colon V \to Z$ that is an isomorphism on the generic fiber and constant on the special fiber. \end{lemma} \begin{proof} (This proof is different from Raynaud's in \cite[XIII 3.2 b]{bib:raynaud}.) Embedding $E$ in the projective plane, $V$ is isomorphic to the spectrum of $R[x,y] / (y^2 - x^3 - ax^2 - bx -c)$ for suitable $a,b,c \in R$. Let $t \in R$ be a uniformizer. Let $Z$ be the spectrum of $R[u,v] / (v^2 - u^3 - t^2au^2 - t^4bu - t^6c)$ and define $f \colon V \to Z$ on rings by $u \mapsto t^2x$, $v \mapsto t^3y$. Then $Z$ is regular in codimension $1$, hence normal \cite[23.8]{bib:matsumura}. On the generic fiber, $t$ is a unit so $f$ is an isomorphism. On the special fiber $f$ is constant with image $(0,0)$. \end{proof} Let $s \in Z$ be the image of $f$ on the special fiber. 
Let $S$ be the localization of $Z$ at $s$. It is normal by construction. We may choose $R$ such that it admits a connected finite étale cover $\Spec{R'} \to \Spec{R}$ of degree $2$ that is split on the special fiber. Set $S' = S_{R'}$, then $\pi \colon S' \to S$ is finite étale of degree $2$ as well. Let $s_0,s_1$ be the lifts of $s$ to $S'$. Write $U = S \setminus \{s\}$, $U_0 = S' \setminus \{s_1\}$, $U_1 = S' \setminus \{s_0\}$, and $U_{01} = U_0 \cap U_1$. \begin{lemma} \label{lem:maps} $E(U_0) = E(U_1) = E(R')$. \end{lemma} \begin{proof} Throughout the proof, primes indicate base change along $R \to R'$, e.g. $E' = E_{R'}$. By symmetry, it suffices to prove that all $R'$-morphisms $\alpha \colon U_0 \to E'$ factor over $\Spec{R'}$. Note that $\alpha$ extends to an $R'$-morphism $W \to E'$ for some open $W \subseteq Z'$ containing $U_0$. Consider the $R'$-rational map \begin{displaymath} \begin{tikzcd}[column sep=small] h \colon E' \arrow[dashed]{r} & V' \arrow{r} & Z' \arrow[dashed]{r} & W \arrow{r} & E'. \end{tikzcd} \end{displaymath} On the generic fiber, $h$ is a rational map of elliptic curves over a field, hence extends to a morphism. Since $E'$ is the Néron model of its generic fiber \cite[1.2.8]{bib:neron}, $h$ actually extends to an $R'$-morphism $E' \to E'$. Note that $h$ is constant on the fiber over the closed point of $\Spec{R'}$ under $s_0$. Hence $h$ comes from $E(R')$ by rigidity \cite[6.1]{bib:git} because $\Spec{R'}$ is connected. \end{proof} Let $\U$ be the open cover $\{U_0,U_1\}$ of $S'$. The Mayer--Vietoris sequence \eqref{eq:cech} now reduces to \begin{displaymath} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E(R') \arrow{r} & E(U_{01}) \arrow{r} & \HH^1(\U,E) \arrow{r} & 0 \end{tikzcd} \end{displaymath} and taking anti-invariants for the action of the involution $\sigma \in \Aut(S'/S)$ yields \begin{displaymath} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E(R) \arrow{r} & E(U) \arrow{r} & \HH^1(\U,E)^{-\sigma}. 
\end{tikzcd} \end{displaymath} Let $\tilde{E}$ be the $-1$-twist of $E_S$ along $\pi$. As in lemma \ref{lem:cohom} we have a map $\HH^1(S',E)^{-\sigma} \to \HH^1(S,\tilde{E})$ with $2$-torsion kernel. Let $\eta$ be the generic point of $\Spec{R}$. We have a map $U_\eta \to Z_\eta \to E_\eta$ by inverting $f_\eta$. Since $U$ is normal of dimension $1$ and $E$ is proper, it extends uniquely to a map $U \to E$. This map is an element of $E(U)$, no multiple of which comes from $E(R)$. We obtain a non-torsion class $\tau \in \HH^1(S,\tilde{E})$. By theorem \ref{thm:raynaud} it corresponds to a non-representable $\tilde{E}$-torsor on $S$, proving theorem \ref{thm:genus_one}. \begin{remark} \label{rem:raynaud} It is worthwhile to observe that Raynaud proves slightly less. He constructs a non-torsion element in $\HH^1(S',E)$ as above. Let $A = \pi_*E_{S'}$ be the Weil restriction of $E_{S'}$ to $S$. The pushforward map $\pi_* \colon \HH^1(S',E) \to \HH^1(S,A)$ is an isomorphism by \cite[VIII 5.6]{bib:sga4-2}, so we find a non-torsion element $\gamma \in \HH^1(S,A)$. Consider the short exact sequence \begin{displaymath} \begin{tikzcd}[column sep=small] 0 \arrow{r} & E_S \arrow{r} & A \arrow{r} & \tilde{E} \arrow{r} & 0 \end{tikzcd} \end{displaymath} of abelian schemes over $S$. In the long exact sequence of cohomology, either $\gamma$ maps to a non-torsion class in $\HH^1(S,\tilde{E})$, or $\gamma$ lifts to a non-torsion class in $\HH^1(S,E)$. This proves that there exists a non-representable torsor under either $\tilde{E}$ or $E_S$, hence theorem \ref{thm:genus_one}. However, this shorter proof does not permit us to explicitly write down a counterexample. \end{remark} \end{document}
\begin{document} \title{Population-calibrated multiple imputation for a binary/categorical covariate in categorical regression models} \begin{abstract} Multiple imputation (MI) has become popular for analyses with missing data in medical research. The standard implementation of MI is based on the assumption of data being missing at random (MAR). However, for missing data generated by missing not at random (MNAR) mechanisms, MI performed assuming MAR might not be satisfactory. For an incomplete variable in a given dataset, its corresponding population marginal distribution might also be available in an external data source. We show how this information can be readily utilised in the imputation model to calibrate inference to the population, by incorporating an appropriately calculated offset termed the `calibrated-$\delta$ adjustment'. We describe the derivation of this offset from the population distribution of the incomplete variable and show how in applications it can be used to closely (and often exactly) match the post-imputation distribution to the population level. Through analytic and simulation studies, we show that our proposed calibrated-$\delta$ adjustment MI method can give the same inference as standard MI when data are MAR, and can produce more accurate inference under two general MNAR missingness mechanisms. The method is used to impute missing ethnicity data in a type 2 diabetes prevalence case study using UK primary care electronic health records, where it results in scientifically relevant changes in inference for non-White ethnic groups compared to standard MI. Calibrated-$\delta$ adjustment MI represents a pragmatic approach for utilising available population-level information in a sensitivity analysis to explore potential departure from the MAR assumption. 
\end{abstract} \section{Introduction}\label{sec1} Multiple imputation (MI) \cite{Rubin1987} has increasingly become a popular tool for analyses with missing data in medical research \cite{Sterne2009, Klebanoff2008}; the method is now incorporated in many standard statistical software packages.\cite{StataCorp2015a, VanBuuren2011, Yuan2011} In MI, several completed datasets are created and in each, missing data are replaced with values drawn from an imputation model which is the Bayesian posterior predictive distribution of the missing data, given the observed data. Each completed dataset is then analysed using the substantive analysis model that would have been used had there been no missing data. This process generates several sets of parameter estimates, which are then combined into a single set of results using Rubin's rules.\cite{Rubin1987, Barnard1999} Given congenial specification of the imputation model, Rubin's rules provide estimates of standard errors and confidence intervals that correctly reflect the uncertainty introduced by missing data. \par The standard implementation of MI in widely available software packages provides valid inference under the assumption that missing values are missing completely at random (MCAR) or missing at random (MAR). However, in many applied settings, it is possible that the unseen data are missing not at random (MNAR). For example, in primary care, individuals with more frequent blood pressure readings may, on average, have higher blood pressure compared to the rest of the primary care population. Although MI can be used when data are MNAR, imputation becomes more difficult because a model for the missing data mechanism needs to be specified, which describes how missingness depends on both observed and unobserved quantities. 
This implies that in practice, it is necessary to define a model for either the association between the probability of observing a variable and its unseen values (selection models) \cite{Little-Rubin2002}; or the difference in the distribution of subjects with and without missing data (pattern-mixture models).\cite{Little1993, Little1994} Due to the potential complexity of modelling the missingness mechanism under MNAR, analyses assuming MNAR are relatively infrequently performed and reported in the applied literature. Instead, in practice, researchers more often try to enhance the plausibility of the MAR assumption as much as possible by including many variables in the imputation model.\cite{White2011, Collins2001} \par The extra model specification requirement in MI for MNAR data raises several issues. Firstly, the underlying MAR and MNAR mechanisms are not verifiable from the observed data alone. Secondly, there can be an infinite number of possible MNAR models for any dataset, and it is very rare to know which of these models is appropriate for the missingness mechanism. However, for an incomplete variable in a given dataset, its corresponding population marginal distribution might be available from an external data source, such as a population census or survey. If our study sample in truth comes from such a population, it is sensible to feed this population information into the imputation model, in order to calibrate inference to the population. \par In this paper, we propose a version of MI for an incomplete binary/categorical variable, termed \textit{calibrated-$\delta$ adjustment MI}, which exploits such external information. In this approach, the population distribution of the incomplete variable can be used to calculate an adjustment in the imputation model's intercept, which is used in MI such that the post-imputation distribution much more closely (and often exactly) matches the population distribution. 
The idea of the calibrated-$\delta$ adjustment is motivated by van Buuren et al.'s $\delta$ adjustment (offset) approach in MI.\cite{VanBuuren1999} However, while values of $\delta$ are often chosen arbitrarily (and independently of covariates in the imputation model) in van Buuren et al.'s approach, the incomplete variable's population distribution is used to derive the value of $\delta$ in calibrated-$\delta$ adjustment MI. We show that our proposed method gives equivalent inference to standard MI when data are MAR, and can produce unbiased inference under two general MNAR mechanisms. \par From a practical point of view, the development of calibrated-$\delta$ adjustment MI is motivated by the issue of incomplete recording of ethnicity data in UK primary care electronic health records. Routine recording of ethnicity has been incorporated at the general practice level in the UK, and the variable is therefore available in many large primary care databases. However, research addressing ethnicity has been constrained by the low level of recording.\cite{Kumarapeli2006, Aspinall2007, Mathur2013b} Studies often handle missing data in ethnicity by either dropping ethnicity from the analysis \cite{Osborn2015}, performing a complete record analysis (i.e. excluding individuals with missing data), or single imputation of missing values with the White ethnic group \cite{Hippisley-Cox2008}; these methods will generally lead to biased estimates of association and standard errors.\cite{Sterne2009} In addition, the probability that ethnicity is recorded in primary care may well vary systematically by ethnic groups, even after adjusting for other variables.\cite{Mathur2013b} This implies a potential MNAR mechanism for ethnicity, and as a result, standard MI might fail to give valid inference for the underlying population.
Since the population marginal distribution of ethnicity is available in the UK census data, the plausibility of the MAR assumption for ethnicity in UK primary care data can be assessed by using standard MI to handle missing data, and comparing the resulting ethnicity distribution to that in the census. In earlier work, we explored departures from the MAR assumption for other incomplete heath indicators by comparing the results with external nationally representative datasets.\cite{Marston2010, Marston2014} As an example of this, Marston et al. (2014) reported that if smoking status is missing for a patient then he or she is typically either an ex-smoker or non-smoker, and accordingly proposed only allowing imputed data to take one of these two values \cite{Marston2014}. The method we describe here supersedes this ad-hoc approach, providing a way to incorporate population distribution information into MI. \par This paper focuses on missing data in an incomplete binary/categorical covariate in an analysis model, where the outcome variable and other covariates are all binary/categorical and fully observed. The remainder of this paper is structured as follows. \Cref{sec2} works through a simple example analytically to describe the derivation of the calibrated-$\delta$ adjustment. In \cref{sec3}, we formally introduce the procedure of calibrated-$\delta$ adjustment MI and evaluate the performance of the method in simulation studies. \Cref{sec4} illustrates the use of this MI method in a case study which uses electronic health records to examine the association between ethnicity and the prevalence of type 2 diabetes diagnoses in UK primary care. We conclude the paper with a discussion in \cref{sec5}. 
\section{Analytic study -- bias in a $2\times 2$ contingency table} \label{sec2} In this section, we present the development of calibrated-$\delta$ adjustment MI in a simple setting of a $2 \times 2$ contingency table and describe the derivation of the calibrated-$\delta$ adjustment. \par Suppose it is of interest to study the association between a binary variable $x$ taking values $j=0,1$ and a binary outcome $y$ taking values $k=0,1$, whose full-data distribution is given in Table \ref{tab:analytic_study_full_data}. The full-data distribution is assumed to be identical to the population distribution, such that the population marginal distribution of $x$ is given by $p_{j}^{\text{pop}} = \frac{n_{j+}}{n_{++}}$. The data generating model is \begin{equation*} \text{logit}\left[p\left(y=1\mid x\right)\right] = \beta_{0} + \beta_{x}x, \end{equation*} whose parameters can be written in terms of cell counts, $\beta_{0} = \text{ln}\left(\frac{n_{01}}{n_{00}}\right)$ and $\beta_{x} = \text{ln}\left(\frac{n_{11}n_{00}}{n_{01}n_{10}}\right)$. \begin{table}[b!] 
\renewcommand{\arraystretch}{1.1} \centering \caption{Analytic study: distribution of $x$ and $y$ and selection models for missingness in $x$.} \begin{subtable}[t]{\linewidth} \centering \subcaption{Distribution in the full data of size $n$.} \label{tab:analytic_study_full_data} \begin{tabular}{lccc} \toprule & $y=0$ & $y=1$ & $\sum_{j=0}^{1}x$ \\ \midrule $x=0$ & $n_{00}$ & $n_{01}$ & $n_{0+}$ \\ $x=1$ & $n_{10}$ & $n_{11}$ & $n_{1+}$ \\ \midrule $\sum_{k=0}^{1} y$ & $n_{+0}$ & $n_{+1}$ & $n_{++}$ \\ \bottomrule \end{tabular} \end{subtable} \vskip 15pt \begin{subtable}[t]{\linewidth} \centering \subcaption{Distribution among subjects with observed $x$ ($y$ is fully observed).} \label{tab:analytic_study_r=1} \centering \begin{tabular}{lcccc} \toprule & $y=0 \mid r=1$ & $y=1 \mid r=1$ & $\sum_{j=0}^{1} x\mid r=1$ & Population \\ \midrule $x=0 \mid r=1$ & $n_{00}^{\text{obs}}$ & $n_{01}^{\text{obs}}$ & $n_{0+}^{\text{obs}}$ & $n_{0+}$ \\ $x=1 \mid r=1$ & $n_{10}^{\text{obs}}$ & $n_{11}^{\text{obs}}$ & $n_{1+}^{\text{obs}}$ & $n_{1+}$ \\ \midrule $\sum_{k=0}^{1}y\mid r=1$ & $n_{+0}^{\text{obs}}$ & $n_{+1}^{\text{obs}}$ & $n_{++}^{\text{obs}}$ & \\ $\sum_{k=0}^{1}y\mid r=0$ & $n_{+0}^{\text{mis}}$ & $n_{+1}^{\text{mis}}$ & $n_{++}^{\text{mis}}$ & \\ \bottomrule \end{tabular} \end{subtable} \vskip 15pt \begin{subtable}[t]{\linewidth} \centering \subcaption{Models for missingness in $x$.} \label{tab:analytic_study_selections} \begin{tabular}{lcc} \toprule \begin{tabular}[l]{@{}l@{}}Linear predictor of selection model\\ $\text{logit}\left[p\left(r=1 \mid x, y\right)\right]$\end{tabular} & \begin{tabular}[c]{@{}c@{}}Selection probability\\ $p\left(r_{jk} = 1\right)$\end{tabular} & \begin{tabular}[c]{@{}c@{}}Label\end{tabular} \\ \midrule $\alpha_{0}$ & $p_{r}$ & M1 \\ $\alpha_{0} + \alpha_{y}y$ & $p_{r_{k}}$ & M2 \\ $\alpha_{0} + \alpha_{x}x$ & $p_{r_{j}}$ & M3 \\ $\alpha_{0} + \alpha_{x}x + \alpha_{y}y$ & $p_{r_{jk}}$ & M4 \\ \bottomrule \end{tabular} \end{subtable} \\ \fnote{Note:
$r$: response indicator of $x$; $j$ and $k$: index categories of $x$ and $y$, respectively; $j, k$ take values $0/1$.} \end{table} \par In addition, suppose that $y$ is fully observed, while some data in $x$ are set to missing (i.e. the sample contains no individuals with missing $y$ and observed $x$, Table \ref{tab:analytic_study_r=1}). Let $r$ be the response indicator taking values $1$ if $x$ is observed and $0$ if $x$ is missing. Four different missingness mechanisms considered for $x$ and the corresponding selection models are presented in Table \ref{tab:analytic_study_selections}. Observed cell counts, $n_{jk}^{\text{obs}}$, can be written as a product of the full-data cell counts, $n_{jk}$, and the cell-wise probability of observing $x$, $p_{r_{jk}}$, such that $n_{jk}^{\text{obs}} = n_{jk}p_{r_{jk}}$. \par To perform standard MI of missing values in $x$, an imputation model \begin{equation} \label{eq:standard_imp_model} \text{logit}\left[p\left(x=1\mid y\right)\right] = \theta_{0} + \theta_{y}y, \end{equation} is fitted to the $n_{++}^{\text{obs}}$ complete records (Table \ref{tab:analytic_study_r=1}) to obtain the $\theta$ parameter estimates, where \begin{equation*} \theta_{0}^{\text{obs}} = \text{ln}\left(\frac{n_{10}^{\text{obs}}}{n_{00}^{\text{obs}}}\right); \quad \theta_{y}^{\text{obs}} = \text{ln}\left(\frac{n_{11}^{\text{obs}}n_{00}^{\text{obs}}}{n_{01}^{\text{obs}}n_{10}^{\text{obs}}}\right). \end{equation*} When $x$ is MCAR or MAR conditional on $y$, we can obtain an unbiased estimate of the association between $x$ and $y$ in the missing data by fitting the above logistic regression imputation model to the complete records. No adjustment is needed in the intercept of the imputation model, and standard MI provides unbiased estimates of the marginal distribution of $x$ as well as the association between $x$ and $y$. We focus on two general MNAR mechanisms described below. 
\subsection{$x$ is MNAR dependent on $x$} Under this missingness mechanism, the posited model for the response indicator $r$ of $x$ is given by \begin{equation} \label{eq:selection_mnar_x} \text{logit}\left[p\left(r=1\mid x\right)\right] = \alpha_{0} + \alpha_{x}x, \end{equation} and the corresponding probabilities of observing $x$ are \begin{equation*} p\left(r=1 \mid x=j\right) = p_{r_{j}} = \text{expit}\left(\alpha_{0} + \alpha_{x}x\right); \quad j=0,1. \end{equation*} For imputation model \eqref{eq:standard_imp_model}, the log odds ratios of $x=1$ for $y=1$ compared to $y=0$ in the observed and missing data are \begin{align*} \left[\theta_{y} \mid r=1\right] &= \theta_{y}^{\text{obs}} = \text{ln}\left(\frac{n_{00}p_{r_{0}}n_{11}p_{r_{1}}}{n_{01}p_{r_{0}}n_{10}p_{r_{1}}}\right) = \text{ln}\left(\frac{n_{00}n_{11}}{n_{01}n_{10}}\right); \\ \left[\theta_{y} \mid r=0\right] &= \theta_{y}^{\text{mis}} = \text{ln}\left(\frac{n_{00}\left(1-p_{r_{0}}\right)n_{11}\left(1-p_{r_{1}}\right)}{n_{01}\left(1-p_{r_{0}}\right)n_{10}\left(1-p_{r_{1}}\right)}\right) = \text{ln}\left(\frac{n_{00}n_{11}}{n_{01}n_{10}}\right), \end{align*} respectively. Hence, $\theta_{y}^{\text{obs}} = \theta_{y}^{\text{mis}}$, which are also the same as the log odds ratio $\theta_{y}$ in the full data (i.e. before values in $x$ are set to missing). The log odds of $x=1$ for $y=0$ in the observed and missing data are given by \begin{align*} \left[\theta_{0} \mid r=1\right] &= \theta_{0}^{\text{obs}} = \text{ln}\left(\frac{n_{10}p_{r_{1}}}{n_{00}p_{r_{0}}}\right); \\ \left[\theta_{0} \mid r=0\right] &= \theta_{0}^{\text{mis}} = \text{ln}\left(\frac{n_{10}\left(1-p_{r_{1}}\right)}{n_{00}\left(1-p_{r_{0}}\right)}\right), \end{align*} respectively. 
This implies that the correct adjustment in the imputation model's intercept should be \begin{align*} \theta_{0}^{\text{mis}} - \theta_{0}^{\text{obs}} &= \text{ln}\left(\frac{\left(1-p_{r_{1}}\right)p_{r_{0}}}{\left(1-p_{r_{0}}\right)p_{r_{1}}}\right) \\ &= \text{ln}\left(\frac{\text{exp}\left(\alpha_{0}\right)}{\text{exp}\left(\alpha_{0}+\alpha_{x}\right)}\right) \\ &= -\alpha_{x}, \end{align*} which is minus the log odds ratio of observing $x$ for $x = 1$ compared to $x = 0$ in \eqref{eq:selection_mnar_x}. \subsection{$x$ is MNAR dependent on $x$ and $y$} Under this missingness mechanism, the posited model for the response indicator $r$ of $x$ is given by \begin{equation} \label{eq:selection_mnar_x_y} \text{logit}\left[p\left(r=1 \mid x,y\right)\right] = \alpha_{0} + \alpha_{x}x + \alpha_{y}y, \end{equation} and the corresponding probabilities of observing $x$ are \begin{equation*} p\left(r=1 \mid x=j, y=k\right) = p_{r_{jk}} = \text{expit}\left(\alpha_{0} + \alpha_{x}x + \alpha_{y}y\right); \quad j,k = 0,1. \end{equation*} For imputation model \eqref{eq:standard_imp_model}, the log odds ratios of $x=1$ for $y=1$ compared to $y=0$ in the observed and missing data are \begin{align} \theta_{y}^{\text{obs}} &= \text{ln}\left(\frac{n_{00}p_{r_{00}}n_{11}p_{r_{11}}}{n_{01}p_{r_{01}}n_{10}p_{r_{10}}}\right); \label{eq:thetay_obs_mnar_x_y}\\ \theta_{y}^{\text{mis}} &= \text{ln}\left(\frac{n_{00}\left(1-p_{r_{00}}\right)n_{11}\left(1-p_{r_{11}}\right)}{n_{01}\left(1-p_{r_{01}}\right)n_{10}\left(1-p_{r_{10}}\right)}\right). 
\label{eq:thetay_mis_mnar_x_y} \end{align} Again, it can be shown from \eqref{eq:thetay_obs_mnar_x_y} and \eqref{eq:thetay_mis_mnar_x_y} that $\theta_{y}^{\text{obs}} = \theta_{y}^{\text{mis}}$, since \begin{align*} \theta_{y}^{\text{mis}} - \theta_{y}^{\text{obs}} &= \text{ln}\left(\frac{\left(1-p_{r_{00}}\right)\left(1-p_{r_{11}}\right)p_{r_{01}}p_{r_{10}}}{\left(1-p_{r_{01}}\right)\left(1-p_{r_{10}}\right)p_{r_{00}}p_{r_{11}}}\right)\\ &= \text{ln}\left(\frac{\text{exp}\left(\alpha_{0} + \alpha_{x}\right)\text{exp}\left(\alpha_{0} + \alpha_{y}\right)}{\text{exp}\left(\alpha_{0}\right)\text{exp}\left(\alpha_{0} + \alpha_{x} + \alpha_{y}\right)}\right) \\ &= 0. \end{align*} The log odds of $x=1$ for $y=0$ in the observed and missing data are given by \begin{align*} \theta_{0}^{\text{obs}} = \text{ln}\left(\frac{n_{10}p_{r_{10}}}{n_{00}p_{r_{00}}}\right); \\ \theta_{0}^{\text{mis}} = \text{ln}\left(\frac{n_{10}\left(1-p_{r_{10}}\right)}{n_{00}\left(1-p_{r_{00}}\right)}\right), \end{align*} which implies that the correct adjustment in the imputation model's intercept should be \begin{align*} \theta_{0}^{\text{mis}} - \theta_{0}^{\text{obs}} &= \text{ln}\left(\frac{\left(1-p_{r_{10}}\right)p_{r_{00}}}{\left(1-p_{r_{00}}\right)p_{r_{10}}}\right) \\ &= \text{ln}\left(\frac{\text{exp}\left(\alpha_{0}\right)}{\text{exp}\left(\alpha_{0}+\alpha_{x}\right)}\right) \\ &= -\alpha_{x}, \end{align*} which is again minus the log odds ratio of observing $x$ in \eqref{eq:selection_mnar_x_y}. \subsection{Derivation of the calibrated-$\delta$ adjustment} The analytic calculations above confirm that in a $2\times 2$ contingency table setting, appropriately adjusting the intercept of the imputation model for the covariate $x$ can sufficiently correct bias introduced by MNAR mechanisms under which missingness in $x$ depends on either its values or both its values and the outcome (M3 and M4). 
The population distribution of $x$ can be used to calculate the correct adjustment in the imputation model's intercept. This adjustment is referred to as the \textit{calibrated-$\delta$ adjustment} to clarify its relationship to van Buuren et al.'s $\delta$ adjustment.\cite{VanBuuren1999} \par The probability of $x=1$ can be written in terms of the conditional probabilities among subjects with observed and missing $x$ \begin{equation*} p\left(x=1\right) = p\left(x=1 \mid r=1\right)p\left(r=1\right) + p\left(x=1 \mid r=0\right)p\left(r=0\right), \end{equation*} where $p\left(x=1\right)$ is the population proportion; $p\left(x=1 \mid r=1\right)$, $p\left(r=1\right)$, and $p\left(r=0\right)$ can be obtained from the observed data. Thus, $p\left(x=1 \mid r=0\right)$ can be solved for as \begin{equation} \label{eq:p_x=1_partitioned2} p\left(x=1 \mid r=0\right) = \frac{p\left(x=1\right) - p\left(x=1 \mid r=1\right)p\left(r=1\right)}{p\left(r=0\right)}. \end{equation} Note that $p\left(x=1 \mid r=0\right)$ can be further written as \begin{align} p\left(x=1 \mid r=0\right) &= \sum_{k=0}^{1}p\left(x=1 \mid y=k, r=0\right)p\left(y=k \mid r=0\right) \nonumber\\ &= \sum_{k=0}^{1}\text{expit}\left(\theta_{0}^{\text{mis}} + \theta_{y}^{\text{mis}} I\left[y=k\right]\right)\frac{n_{+k}^{\text{mis}}}{n_{++}^{\text{mis}}} \nonumber\\ &= \frac{1}{n_{++}^{\text{mis}}}\sum_{k=0}^{1}\text{expit}\left(\theta_{0}^{\text{mis}} + \theta_{y}^{\text{mis}} I\left[y=k\right]\right)n_{+k}^{\text{mis}}, \label{eq:p_x=1_r=0} \end{align} where $I\left[A\right]$ is an indicator function taking values 1 if $A$ is true and 0 otherwise.
It is shown earlier that when $x$ is MNAR dependent on either the values of $x$ or both $x$ and $y$, $\theta_{y}^{\text{obs}}=\theta_{y}^{\text{mis}}$; \eqref{eq:p_x=1_r=0} is therefore equal to \begin{align*} p\left(x=1 \mid r=0\right) &= \frac{1}{n_{++}^{\text{mis}}}\sum_{k=0}^{1}\text{expit}\left(\theta_{0}^{\text{mis}} + \theta_{y}^{\text{obs}} I\left[y=k\right]\right)n_{+k}^{\text{mis}} \\ &= \frac{1}{n_{++}^{\text{mis}}}\sum_{k=0}^{1}\text{expit}\left(\left(\theta_{0}^{\text{obs}} + \delta\right) + \theta_{y}^{\text{obs}} I\left[y=k\right]\right)n_{+k}^{\text{mis}} \\ &= \frac{1}{n^{\text{mis}}}\sum_{i=1}^{n^{\text{mis}}}\text{expit}\left(\left(\theta_{0}^{\text{obs}} + \delta \right) + \theta_{y}^{\text{obs}}y_{i}\right), \end{align*} where $\delta$ is the adjustment factor in the intercept of the imputation model for $x$. The value of the calibrated-$\delta$ adjustment can be obtained numerically from \eqref{eq:p_x=1_partitioned2} and \eqref{eq:p_x=1_r=0} using interval bisection \cite{Russ1980,Burden2011} (or any other root-finding method). \par When the population marginal distribution of the incomplete covariate $x$ is available, a natural alternative to adjusting the intercept of the imputation model based on this information is to weight the complete records in the imputation model (which we term `weighted multiple imputation'), in order to match the post-imputation distribution of $x$ to the population. In the supporting information section we explore two such weighting approaches, marginal and conditional weighted MI; we show analytically that while these methods can provide more accurate results compared to standard MI under certain MNAR mechanisms, they do not provide a general solution as does calibrated-$\delta$ adjustment MI.
\section{Simulation studies}\label{sec3} This section presents univariate simulation studies to evaluate performance measures of the calibrated-$\delta$ adjustment MI method for an incomplete binary covariate $x$, when the fully observed outcome variable $y$ is also binary. The term `univariate' is used here to refer to the setting where missingness occurs in a single covariate. The aims of these simulation studies are (i) to examine finite-sample properties of calibrated-$\delta$ adjustment MI including bias in parameter estimates, efficiency in terms of the empirical and average model standard errors (SE), and coverage of $95\%$ confidence intervals (CI); and (ii) to compare the method with standard MI and complete record analysis (CRA) under various missingness mechanisms for $x$. \subsection{When the population distribution is `known'} \label{subsec3.1} Below we consider the setting where the population distribution of the incomplete variable is obtained from a population census or equivalent, i.e. it is `known'. The uncertainty associated with having to estimate the population distribution is explored in \cref{subsec3.2}. \subsubsection{Method} \label{subsubsec3.1.1} Similar to the analytic study presented in \cref{sec2}, the analysis model in this simulation study is a logistic regression model for a fully observed binary outcome $y$ on an incomplete binary covariate $x$. Calibrated-$\delta$ adjustment MI is compared to standard MI and CRA under four missingness mechanisms of increasing complexity. The data generating mechanism and analysis procedures are as follows.
\begin{enumerate} \item Simulate $n=5\,000$ complete values of the binary $0/1$ covariate $x$ and binary $0/1$ outcome $y$ from the following models \begin{align} &x \sim \text{Bernoulli}\left(p_{x}^{\text{pop}} = 0.7\right); \nonumber\\ & \text{logit}\left[p\left(y=1 \mid x\right)\right] = \beta_{0} + \beta_{x}x, \label{eq:moi_sim1} \end{align} where $\beta_{0}$ and $\beta_{x}$ are arbitrarily set to $\text{ln}\left(0.5\right)$ and $\text{ln}\left(1.5\right)$, respectively. The same values of the $\beta$ parameters are used throughout to make bias comparable across all simulation settings. This sample size is chosen to minimise the issue of small-sample bias associated with the logistic regression \cite{Nemes2009}; \item Simulate a binary indicator of response $r$ of $x$ from each of the selection models M1--M4 (Table \ref{tab:analytic_study_selections}). Values of $1.5$ and $-1.5$ are chosen for $\alpha_{y}$ and $\alpha_{x}$ in M2 and M3, respectively, to reflect strong odds ratios (OR) of observing $x$ (OR $= 4.5$ and $0.2$, respectively). For M4, $\alpha_{y} = 1.5$ and $\alpha_{x} = -1.5$ are chosen as bias in the three MI methods under evaluation is likely to be apparent with these coefficients predicting missingness in $x$. For all selection models, $\alpha_{0}$ is altered to achieve approximately $45\%$ missing $x$. 
For M1, $\alpha_{0}$ is calculated directly as $\text{ln}\left(\frac{0.55}{0.45}\right)$; for M2--M4, $\alpha_{0} = -0.2; 1.35$; and $0.75$ appear to work well; \item For $i=1 \ldots 5\,000$, set $x_{i}$ to missing if $r_{i}=0$; \item Impute missing values in $x$ $M=50$ times using standard MI and calibrated-$\delta$ adjustment MI in turn; \item In each MI method, fit the analysis model \eqref{eq:moi_sim1} to each completed dataset and combine the results using Rubin's rules.\cite{Rubin1987, Barnard1999} \end{enumerate} Steps 1--5 are repeated $S=2\,000$ times under each of the four selection models M1--M4, so the same set of simulated independent datasets is used to compare the three MI methods under the same missingness scenario, but a different set of datasets is generated for each missingness scenario.\cite{Burton2006} The parameters of interest are $\beta_{0}$ and $\beta_{x}$, although in practice $\beta_{x}$ is usually of more interest. Bias, efficiency of $\hat{\beta}_{0}$ and $\hat{\beta}_{x}$ in terms of the empirical standard errors, and coverage of 95\% CIs are calculated over $2\,000$ repetitions for each combination of simulation settings,\cite{White2010a} with analyses of full data (i.e. before any values in $x$ are set to missing) and complete records also provided for comparison. 
\par All simulations are performed in Stata 14 \cite{StataCorp2015b}; \texttt{mi impute logit} is used for standard MI, the community-contributed command \texttt{uvis logit} \cite{Royston2004} for calibrated-$\delta$ adjustment MI, and \texttt{mi estimate: logit} for fitting the analysis model to the completed datasets and combining the results using Rubin's rules.\cite{Rubin1987, Barnard1999} Simulated datasets are analysed using the community-contributed command \texttt{simsum}.\cite{White2010a} \par Based on the analytic calculations presented in \cref{sec2}, we propose the following procedure for imputing missing values in the covariate $x$ using calibrated-$\delta$ adjustment MI. \begin{enumerate} \item Fit a logistic regression imputation model for $x$ conditional on $y$ to the complete records to obtain the maximum likelihood estimates of the imputation model's parameters $\hat{\boldsymbol{\theta}}$ and their asymptotic sampling variance $\widehat{\boldsymbol{U}}$; \item Draw new parameters $\tilde{\boldsymbol{\theta}}$ from the large-sample normal approximation $N(\widehat{\boldsymbol{\theta}}, \widehat{\boldsymbol{U}})$ of their posterior distribution, assuming non-informative priors; \item Draw a new probability of observing $x$, $\tilde{p}_{r}$, from the normal approximation $N\left(\hat{p}_{r}, \frac{\hat{p}_{r}\left(1-\hat{p}_{r}\right)}{n}\right)$, where $\hat{p}_{r}$ is the sample proportion of the response indicator of $x$, $\hat{p}_{r} = \frac{n_{++}^{\text{obs}}}{n_{++}}$; \item Draw a new probability of observed $x=1$, $\tilde{p}_{x}$, from the normal approximation $N\left(\hat{p}_{x}, \frac{\hat{p}_{x}\left(1-\hat{p}_{x}\right)}{n}\right)$, where $\hat{p}_{x}$ is the observed proportion of $x=1$, $\hat{p}_{x} = \frac{n_{1+}^{\text{obs}}}{n_{++}^{\text{obs}}}$; \item Derive the value of the calibrated-$\delta$ adjustment from the equation \begin{equation*} \frac{1}{n^{\text{mis}}}\sum_{i=1}^{n^{\text{mis}}}\text{expit}\left(\left(\tilde{\theta}_{0} +
\delta\right) + \tilde{\theta}_{y}y_{i}\right) = \frac{p_{x}^{\text{pop}} - \tilde{p}_{x}}{\tilde{p}_{r}}, \end{equation*} where $p_{x}^{\text{pop}}$ is the probability of $x=1$ in the population; \item Fit the logistic regression imputation model for $x$ conditional on $y$ (in step 1) to the complete records with the intercept adjustment fixed to $\delta$ to obtain the maximum likelihood estimates of the imputation models' parameters $\hat{\boldsymbol{\theta}}$ and their asymptotic sampling variance $\widehat{\boldsymbol{U}}$; \item Draw new parameters $\dot{\boldsymbol{\theta}}$ from the large-sample normal approximation $N(\widehat{\boldsymbol{\theta}}, \widehat{\boldsymbol{U}})$ of their posterior distribution, assuming non-informative priors; \item Draw imputed values for $x$ from the above logistic regression imputation model, using the newly drawn parameters $\dot{\boldsymbol{\theta}}$ and calibrated-$\delta$ adjustment. \end{enumerate} \subsubsection{Results} \label{subsubsec3.1.2} Results of the simulation study are summarised graphically in Figure \ref{fig:sim_base}. Full data and CRA both give the results that the theory predicts. Analysis of full data is always unbiased with coverage close to the $95\%$ level and the smallest standard errors of all methods. CRA is unbiased under M1 and M3 as expected,\cite{White2010b} but bias is observed under the other two missingness mechanisms. Coverage is correspondingly low when bias is present, and efficiency is lower than that in full data. \par Under M1, when $x$ is MCAR, all methods appear unbiased, with comparable empirical and average model standard errors and correct coverage. This is as expected. \par Under M2, when $x$ is MAR conditional on $y$, CRA is severely biased in the estimate of $\beta_{0}$ and the corresponding coverage of 95\% CIs falls to 0. However, the method provides an unbiased estimate of $\beta_{x}$ with correct coverage. 
This result is specific to this simulation set-up, where the probability of being a complete record depends on the outcome, and the analysis model is a logistic regression. This mimics case-control sampling, where the log odds of the logistic regression is biased in case-control studies but the log odds ratio is not.\cite{White2010b, Bartlett2015} The outcome--covariate association can therefore be estimated consistently among the complete records. Standard MI and calibrated-$\delta$ adjustment MI are unbiased for both parameter estimates. Standard MI yields comparable empirical and average model standard errors and coverage attains the nominal level. In calibrated-$\delta$ adjustment MI, empirical standard errors are slightly smaller than the average model counterparts, leading to a minimal increase in coverage. \par \begin{figure} \caption{Simulation study: bias in point estimates, empirical and average model SE, and coverage of 95\% CIs under different missingness mechanism for $x$.} \label{fig:sim_base} \end{figure} Under M3, when $x$ is MNAR dependent on $x$, CRA yields unbiased estimates of both parameters. Standard MI is biased in the estimate of $\beta_{0}$ but provides an unbiased estimate of $\beta_{x}$ due to the symmetry property of the odds ratios. Generally, in logistic regression with an incomplete covariate $x$, when the missingness mechanism is such that both standard MI and CRA are unbiased, standard MI tends not to be more efficient than CRA in estimating $\beta_{x}$.\cite{White2010b} This is because without auxiliary variables in the imputation model, standard MI does not carry any extra information on the odds ratio compared to CRA. This is seen in the simulation results for $\beta_{x}$ under models M1--M3. Under M3, calibrated-$\delta$ adjustment MI is also unbiased in both parameter estimates. 
Given that all three methods are unbiased for $\beta_{x}$ under M3, there is a small gain in efficiency in the estimate of $\beta_{x}$ in calibrated-$\delta$ adjustment MI, as the empirical standard error for this parameter is slightly smaller than that in CRA. Under this missingness mechanism, empirical and average model standard errors are comparable across methods; for methods that are unbiased, their corresponding coverage of $95\%$ CIs generally attains the nominal level. \par Under M4, when $x$ is MNAR dependent on $x$ and $y$, standard MI and CRA are again biased in both parameter estimates, leading to coverage close to or equal to 0. In contrast, calibrated-$\delta$ adjustment MI produces unbiased estimates of both parameters. In this method, empirical standard errors are again slightly smaller than the average model counterparts (as seen previously under M2), which leads to coverage slightly exceeding the $95\%$ level. \subsection{When the population distribution is estimated with uncertainty} \label{subsec3.2} So far, the population distribution of the incomplete covariate that is used to derive the calibrated-$\delta$ adjustment is assumed to be obtained from a population census or equivalent. In other words, it is assumed that there is no uncertainty associated with estimating the reference distribution, and hence, the adjustment. In calibrated-$\delta$ adjustment MI, the extra uncertainty in estimating the calibrated-$\delta$ adjustment can be ignored when the population distribution of the incomplete covariate comes from a census or equivalent and is therefore treated as known. However, since MI is a Bayesian procedure in which all sources of uncertainty are modelled, if the population distribution of the incomplete covariate is itself estimated with uncertainty, this uncertainty needs to be accounted for in the derivation of the calibrated-$\delta$ adjustment across imputations.
\par When the population distribution of the incomplete covariate is not `known' and is estimated, a natural approach for incorporating this extra uncertainty would be to draw values of the population proportions from their distribution and calculate the calibrated-$\delta$ adjustment using these draws, so that this uncertainty is reflected in the MI variance estimation. This additional step is expected to have an effect on the between-imputation variance of Rubin's variance estimator. \par An extension of the simulation study presented in \cref{subsec3.1} is conducted to explore this setting. \subsubsection{Method} \label{subsubsec3.2.1} This extended simulation study of a fully observed binary outcome $y$ and a partially observed binary covariate $x$ follows the same method described in \cref{subsubsec3.1.1}, except that two variations of the population proportions of $x$ are evaluated in the imputation step of calibrated-$\delta$ adjustment MI. The reference distribution is assumed to either come from a census or equivalent (case 1), or be estimated in an external dataset of larger size (case 2) or smaller size (case 3) than the study sample. \par Suppose that in an external dataset of size $n^{\text{ex}}$ which comes from the same population as the study sample, the sample proportion $\hat{p}_{x}^{\text{pop}}$ provides an unbiased estimate of the population proportion $p_{x}^{\text{pop}}$. Assuming that the sampling distribution of the sample proportions is approximately normal, its standard error is given by \begin{equation*} \text{SE}\left(\hat{p}_{x}^{\text{pop}}\right) = \sqrt{\frac{\hat{p}_{x}^{\text{pop}}\left(1-\hat{p}_{x}^{\text{pop}}\right)}{n^{\text{ex}}}}. \end{equation*} The data generating mechanism and analysis procedures are as follows.
\begin{enumerate} \item For cases 2 and 3, the following two steps are performed to incorporate the sampling behaviour of $\hat{p}_{x}^{\text{pop}}$, which is estimated in an external dataset of size $n^{\text{ex}}$, into the data generating mechanism in repeated simulations. \begin{enumerate}[label=\alph*.] \item Simulate $n^{\text{ex}} = 10\,000$ (case 2) or $1\,000$ (case 3) complete values of the binary $0/1$ covariate $x$ from the model \begin{equation*} x \sim \text{Bernoulli}\left(p_{x}^{\text{pop}} = 0.7\right); \end{equation*} \item Obtain the sample proportion $\hat{p}_{x}^{\text{pop}}$ of $x$, which is an unbiased estimate of the population proportion $p_{x}^{\text{pop}}$; \end{enumerate} \item Simulate $n=5\,000$ complete values of the binary $0/1$ covariate $x$ and binary $0/1$ covariate $y$ from the models \begin{align} &x \sim \text{Bernoulli}\left(p_{x}^{\text{pop}} = 0.7\right); \nonumber \\ &\text{logit}\left[p\left(y=1 \mid x\right)\right] = \beta_{0} + \beta_{x}x \label{eq:moi_sim2}, \end{align} where $\beta_{0}$ and $\beta_{x}$ are arbitrarily set to $\text{ln}\left(0.5\right)$ and $\text{ln}\left(1.5\right)$, respectively. The same values of the $\beta$ coefficients are used throughout to make bias comparable across all simulation settings; \item Simulate a binary indicator of response $r$ of $x$ from each of the selection models M1--M4 (Table \ref{tab:analytic_study_selections}). Values of $1.5$ and $-1.5$ are chosen for $\alpha_{y}$ and $\alpha_{x}$ in M2 and M3, respectively. For M4, $\alpha_{y} = 1.5$ and $\alpha_{x} = -1.5$ are used. In all selection models, $\alpha_{0}$ is altered to achieve approximately $45\%$ missing $x$. 
For M1, $\alpha_{0}$ is calculated directly as $\text{ln}\left(\frac{0.55}{0.45}\right)$; for M2--M4, $\alpha_{0} = -0.2; 1.35$; and $0.75$ are used; \item For $i=1, \ldots, 5\,000$, set $x_{i}$ to missing if $r_{i} = 0$; \item Impute missing values in $x$ $M=10$ times using standard MI and calibrated-$\delta$ adjustment MI in turn. For cases 2 and 3, calibrated-$\delta$ adjustment MI is performed as follows. \begin{enumerate}[label=\alph*.] \item Draw a value $\tilde{p}_{x}^{\text{pop}}$ from the normal approximation $N\left(\hat{p}_{x}^{\text{pop}}, \frac{\hat{p}_{x}^{\text{pop}}\left(1-\hat{p}_{x}^{\text{pop}}\right)}{n^{\text{ex}}}\right)$, with values of $n^{\text{ex}} = 10\,000$ (case 2) and $1\,000$ (case 3). This is done by first taking a draw $\tilde{z}$ from the standard normal distribution, $\tilde{z} \sim N\left(0,1\right)$, followed by drawing $\tilde{p}_{x}^{\text{pop}} = \hat{p}_{x}^{\text{pop}} + \tilde{z}\sqrt{\frac{\hat{p}_{x}^{\text{pop}}\left(1-\hat{p}_{x}^{\text{pop}}\right)}{n^{\text{ex}}}}$; \item Derive the calibrated-$\delta$ adjustment and perform MI according to the algorithm set out in \cref{subsubsec3.1.1}, using $\tilde{p}_{x}^{\text{pop}}$ as the reference proportion; \end{enumerate} \item For each MI method, fit the analysis model \eqref{eq:moi_sim2} to each completed dataset and combine the results using Rubin's rules.\cite{Rubin1987, Barnard1999} \end{enumerate} Step 5 is designed to mimic the full Bayesian sampling process, which is always the aim in proper (or Rubin's) MI. Again, steps 1--6 are repeated $S=2\,000$ times under each of the four selection models M1--M4, so the same set of simulated independent datasets is used to compare the two MI methods under the same missingness scenario, but a different set of datasets is generated for each missingness scenario.\cite{Burton2006} The parameters of interest are $\beta_{0}$ and $\beta_{x}$. 
Bias in $\hat{\beta}_{0}$ and $\hat{\beta}_{x}$, efficiency in terms of the empirical and average model standard errors, and coverage of $95\%$ CIs are calculated over $2\,000$ repetitions for each combination of simulation settings,\cite{White2010a} with analyses of full data and complete records also provided for comparison. \par All simulations are performed in Stata 14 \cite{StataCorp2015b} with \texttt{mi impute logit} for standard MI, the community-contributed command \texttt{uvis logit} \cite{Royston2004} for calibrated-$\delta$ adjustment MI, and \texttt{mi estimate: logit} for fitting the analysis model to the completed datasets and combining the results using Rubin's rules \cite{Rubin1987, Barnard1999}; simulated datasets are analysed using the community-contributed command \texttt{simsum}.\cite{White2010a} \subsubsection{Results} \label{subsubsec3.2.2} Results of the extended simulation study are presented in Figure \ref{fig:sim_var}. Bias in point estimates is similar when $p_{x}^{\text{pop}}$ is invariant or estimated in a large external dataset (cases 1 and 2, respectively). Bias slightly increases, particularly under M2 and M4, when $p_{x}^{\text{pop}}$ is estimated in a small external dataset with higher variance (case 3). \begin{figure} \caption{Extended simulation study: bias in point estimates, empirical and average model SE, and coverage of 95\% CIs under different missingness mechanism for $x$; the population distribution of $x$ is assumed to be invariant (case 1) or estimated in an external dataset of size 10\,000 (case 2) or 1\,000 (case 3).} \label{fig:sim_var} \end{figure} \par Empirical and average model standard errors are comparable and remain stable for calibrated-$\delta$ adjustment MI across the three cases under M1 and M3. Under M2 and M4, the discrepancy previously seen between the empirical and average model standard errors in calibrated-$\delta$ adjustment MI (\cref{subsubsec3.1.2}) decreases in case 3 compared to cases 1 and 2. 
When there is increased uncertainty in estimating the population proportions of $x$ (case 3 compared to case 1), there is also a marked increase in both the empirical and average model standard errors in calibrated-$\delta$ adjustment MI. This extra uncertainty is reflected in the variation of the point estimates across the simulation repetitions according to how the simulation is set up, and is also acknowledged by an increase in the between-imputation variance component of Rubin's variance estimator (results for between-imputation variances not shown). \par In line with results seen for the standard errors, coverage attains the nominal level for calibrated-$\delta$ adjustment MI under M1 and M3. Under M2 and M4, since the empirical standard errors are closer to the average model standard errors in case 3 compared to case 1, the slight over-coverage of 95\% CIs seen in case 1 seems to disappear in case 3. \section{Case study -- ethnicity and the prevalence of type 2 diabetes diagnoses in The Health Improvement Network primary care database} \label{sec4} This case study is conducted to illustrate the use of calibrated-$\delta$ adjustment MI for handling missing data in ethnicity in UK primary care electronic health records, when ethnicity is included as a covariate in the analysis model. In particular, this is a cross-sectional study which examines the association between ethnicity and the prevalence of type 2 diabetes diagnoses in a large UK primary care database in 2013. Prevalence of type 2 diabetes is chosen as the outcome variable to illustrate the application of the calibrated-$\delta$ adjustment MI method as developed and evaluated in \cref{sec2,sec3}. \subsection{The Health Improvement Network database} \label{subsec4.1} The Health Improvement Network (THIN) \cite{IMSHealth2015} is one of the largest databases in the UK to collect information on patient demographics, disease symptoms and diagnoses, and prescribed medications in primary care. 
THIN contains anonymised electronic health records from over 550 general practices across the UK, with more than 12 million patients contributing data. The database is broadly generalisable to the UK population in terms of demographics and crude prevalences of major health conditions.\cite{Blak2011, Bourke2004} \par Information is recorded during routine patient consultations with General Practitioners (GP) from when the patients register to general practices contributing data to THIN to when they die or transfer out. Symptoms and diagnoses of disease are recorded using Read codes, a hierarchical coding system.\cite{Chisholm1990, Dave2009} THIN also provides information on referrals made to secondary care and anonymised free text information. Patient demographics include information on year of birth, sex, and social deprivation status measured in quintiles of the Townsend deprivation score.\cite{Townsend1988} \par The acceptable mortality reporting (AMR) \cite{Maguire2009} and the acceptable computer usage (ACU) \cite{Horsfall2013} dates are jointly used for data quality assurance in THIN. The AMR date is the date after which the practice is deemed to be reporting a rate of all-cause mortality sufficiently similar to that expected for a practice with the same demographics, based on data from the Office for National Statistics (ONS).\cite{Maguire2009} The ACU date is designed to exclude the transition period between the practice switching from paper-based records to complete computerisation; it is defined as the date from which the practice is consistently recording on average at least two drug prescriptions, one medical record and one additional health record per patient per year.\cite{Horsfall2013} \par Use of THIN for scientific research was approved by the NHS South-East Multi-Centre Research Ethics in 2003. 
Scientific approval to undertake this study was obtained from IQVIA World Publications Scientific Review Committee in September 2017 (SRC Reference Number: 17THIN083). \subsection{Study sample} \label{subsec4.2} All individuals who are permanently registered with general practices in London contributing data to THIN are considered for inclusion in the study sample. This sample is chosen since it is not only more practical to perform MI on a smaller dataset, but also because London is the most ethnically diverse region in the UK, and hence incorrect assignment of ethnicity from imputing missing data with the White ethnic group is expected to be more apparent compared to other regions. \par For each individual, a start date is defined as the latest of: date of birth, ACU and AMR dates,\cite{Maguire2009, Horsfall2013} and registration date. Similarly, an end date is defined as the earliest of: date of death, date of transfer out of practice, and date of last data collection from the practice. Point prevalence of type 2 diabetes on 01 January 2013 is calculated, since THIN is a dynamic database in which individuals register with and leave their general practices at different times. Individuals are selected into the study sample if they are actively registered to THIN practices in London on 01 January 2013, and in addition they need to have been registered with the same general practices for at least 12 months by this date. This criterion is introduced to ensure that there is enough time for the individuals to have their type 2 diabetes diagnoses recorded in their electronic health data, after registration with their general practices. \subsection{Outcome variable and main covariate} \label{subsec4.3} The recording of diabetes diagnoses and management in THIN is comprehensive and therefore there are several ways an individual may be identified as diabetic. For this study, an algorithm developed by Sharma et al. 
\cite{Sharma2016a} is used to identify individuals with diabetes mellitus, as well as to distinguish between type 1 and type 2 diabetes. According to this algorithm, individuals are identified as having diabetes if they have at least two of the following records: a diagnostic code for diabetes, supporting evidence of diabetes (e.g. screening for diabetic retinopathy), or prescribed treatment for diabetes. In this study, the first record of any of these three is considered as the date of diagnosis. In addition to identifying individuals with diabetes, the algorithm also distinguishes between type 1 and type 2 diabetes based on individuals' age at diagnosis, types of treatment and timing of the diabetes diagnosis. \cite{Sharma2016a, Sharma2016b} After the study sample is selected using the method described in \cref{subsec4.2}, prevalent cases of type 2 diabetes are defined as individuals who have a diagnosis of type 2 diabetes on or before 01 January 2013. \par Ethnicity is typically recorded in THIN using the Read code system \cite{Chisholm1990}; it can also be recorded using free text entries. A list containing Read codes related to ethnicity is developed using a published method.\cite{Dave2009} The majority of ethnicity records are identified by searching both the medical and additional health data files for Read codes in the ethnicity code list. Minimal additional information is found by searching the pre-anonymised free text as well as other free text linked to ethnicity-related Read codes. Ethnicity is then coded into the five-level ONS classification as White, Mixed, Asian, Black, and Other ethnic groups.\cite{OfficeforNationalStatistics2012} Subsequently, the Mixed and Other ethnic groups are combined due to the small counts and heterogeneity in these two groups. Searching for ethnicity-related Read codes reveals that there is a small number of individuals with multiple inconsistent records of ethnicity. 
For these individuals, it can not be determined with certainty whether their ethnicity is in fact one of the recorded categories or if all the recorded categories are incorrect. Therefore, their ethnicity is set to missing for simplicity, since the issue of inconsistency in ethnicity recording is not the focus of this study. \subsection{Statistical analysis} \label{subsec4.4} The analysis model in this study is a logistic regression model for a binary indicator of whether an individual has a diagnosis of type 2 diabetes on or before 01 January 2013, conditional on the individual's age in 2013, sex, Townsend deprivation score (five quintiles, from the least to the most deprived), and ethnic group (White, Asian, Black, Mixed/Other). Age is analysed in $10$-year age groups for individuals aged 0--79 years, and all individuals aged 80 years and above are grouped into the $80+$ category. Ethnicity information is extracted and categorised as described in \cref{subsec4.3}. Since this study is conducted to illustrate the application of calibrated-$\delta$ adjustment MI in a univariate missing data setting where missing data occurs in a single covariate (ethnicity), individuals with incomplete information on age, sex, and deprivation status were excluded from the analysis. \par Missing values in ethnicity are handled by (i) a CRA, (ii) single imputation with the White ethnic group, (iii) standard MI, and (iv) calibrated-$\delta$ adjustment MI using the 2011 ONS census distribution of ethnicity in London \cite{OfficeforNationalStatistics2012} as the reference distribution. For MI of ethnicity, a multinomial logistic regression imputation model is constructed for ethnicity using all variables in the analysis model, including individuals' age group in 2013, sex, and quintiles of the Townsend score. In MI, the outcome variable must be explicitly included in the imputation model for the incomplete covariate. 
\cite{Sterne2009} Since the analysis model is a logistic regression model, the type 2 diabetes indicator is also included as a covariate in the imputation model for ethnicity. \par In this study, ethnicity is analysed as a four-level categorical variable. Therefore, the calibrated-$\delta$ adjustment MI method for handling missing data in an incomplete binary covariate discussed in \cref{sec2,sec3} can be generalised for handling missing values in ethnicity as a categorical covariate. The overall proportion of the $j$th level of ethnicity, $j=1, \ldots, 4$ can be written as \begin{equation} \label{eq:eth_partition} p\left(\text{eth} = j\right) = p\left(\text{eth} = j \mid r = 1\right) p\left(r=1\right) + p\left(\text{eth} = j \mid r = 0\right) p\left(r=0\right), \end{equation} where $p\left(\text{eth} = j\right)$ is available in the census; $p\left(\text{eth} = j \mid r = 1\right)$, $p\left(r=1\right)$, and $p\left(r=0\right)$ can be obtained in the observed data. \par A multinomial logistic regression imputation model for ethnicity conditional on age group (40--49 years old as the base level), sex (male as the base level), Townsend score (quintile 1 as the base level), and the binary indicator of type 2 diabetes (no diagnosis as the base level) is fitted to the observed data. 
Setting the first level of ethnicity (White, $j = 1$) as the base level to identify the model, the probability of the $j$th level of ethnicity in the observed data, $j=2, \ldots, 4$, can be written in terms of the observed-data linear predictors, $\text{linpred}_{j}^{\text{obs}}$, which are estimated from the multinomial logistic regression model for ethnicity as \begin{equation} \label{eq:pethj_r1} p\left(\text{eth}=j \mid r=1\right) = \frac{1}{n^{\text{obs}}} \sum_{i=1}^{n^{\text{obs}}}\frac{\exp\left(\text{linpred}_{ij}^{\text{obs}}\right)}{1+\sum_{k=2}^{4}\exp\left(\text{linpred}_{ik}^{\text{obs}}\right)}, \end{equation} where $i$ indexes individuals in the dataset, and \begin{align} \label{eq:linpred_obs} \text{linpred}_{ij}^{\text{obs}} &= \theta_{j0}^{\text{obs}} + \sum_{a=10}^{30} \theta_{j\text{age}_{a}}^{\text{obs}}I\left[\text{age}_{ij}=a\right] + \sum_{a=50}^{80} \theta_{j\text{age}_{a}}^{\text{obs}}I\left[\text{age}_{ij}=a\right] + \theta_{j\text{sex}}^{\text{obs}}I\left[\text{sex}_{ij} = \text{female}\right] \nonumber \\ & + \sum_{t=2}^{5}\theta_{j\text{town}_{t}}^{\text{obs}}I\left[\text{Townsend}_{ij} = t\right] + \theta_{j\text{t2d}}^{\text{obs}}I\left[\text{type 2 diabetes}_{ij} = \text{yes}\right]. \end{align} \par Following the methods outlined in \cref{sec3}, since covariates in the imputation model for ethnicity are all binary or categorical, the relative risk ratios are the same among those with ethnicity observed and missing. 
The linear predictors in the missing data, $\text{linpred}_{j}^{\text{mis}}$, can therefore be written as \begin{align} \label{eq:linpred_mis} \text{linpred}_{ij}^{\text{mis}} &= \left(\theta_{j0}^{\text{obs}} + \delta_{j0}\right)+ \sum_{a=10}^{30} \theta_{j\text{age}_{a}}^{\text{obs}}I\left[\text{age}_{ij}=a\right] + \sum_{a=50}^{80} \theta_{j\text{age}_{a}}^{\text{obs}}I\left[\text{age}_{ij}=a\right] + \theta_{j\text{sex}}^{\text{obs}}I\left[\text{sex}_{ij} = \text{female}\right] \nonumber \\ & + \sum_{t=2}^{5}\theta_{j\text{town}_{t}}^{\text{obs}}I\left[\text{Townsend}_{ij} = t\right] + \theta_{j\text{t2d}}^{\text{obs}}I\left[\text{type 2 diabetes}_{ij} = \text{yes}\right], \end{align} where $\delta_{j0}$ is the level-$j$ intercept adjustment in the multinomial logistic regression imputation model for ethnicity. Hence, the probability of the $j$th level of ethnicity in the missing data, $j=2, \ldots, 4$, is given by \begin{equation} \label{eq:pethj_r0} p\left(\text{eth}=j \mid r=0\right) = \frac{1}{n^{\text{mis}}} \sum_{i=1}^{n^{\text{mis}}}\frac{\exp\left(\text{linpred}_{ij}^{\text{mis}}\right)}{1+\sum_{k=2}^{4}\exp\left(\text{linpred}_{ik}^{\text{mis}}\right)}. \end{equation} \par From \cref{eq:eth_partition,eq:pethj_r1,eq:linpred_obs,eq:pethj_r0,eq:linpred_mis}, to implement calibrated-$\delta$ adjustment MI, we need to find the solutions $\delta_{j0}$, $j=2, \ldots, 4$, of a system of three non-linear equations for the three categories of ethnicity. The solutions of this system of equations can be obtained simultaneously using the Stata base command \texttt{nl} \cite{StataCorp2015b} and defining a function evaluator program. Once the values of the calibrated-$\delta$ adjustments are obtained, the imputation is performed using the same procedure as outlined in \cref{subsec3.1}. \par Both MI methods are performed using $M=30$ imputations, and Rubin's rules \cite{Rubin1987, Barnard1999} are used to obtain estimates of association and standard errors. 
All analyses are conducted using Stata 14, \cite{StataCorp2015b} where \texttt{mi impute mlogit} is used for standard MI, the community-contributed command \texttt{uvis mlogit} \cite{Royston2004} for calibrated-$\delta$ adjustment MI, and \texttt{mi estimate: logit} for performing the main analysis in the completed datasets and obtaining the final results using Rubin's rules.\cite{Rubin1987, Barnard1999} \subsection{Results} \label{subsec4.5} Figure \ref{fig:flowchart} depicts a flowchart of the selection criteria used to obtain the relevant sample for this study. In total, data from 13\,532\,630 individuals are extracted from THIN, of which 2\,137\,874 (15.8\%) individuals are not permanently registered, 293 (less than 0.1\%) individuals do not have their year of birth recorded, 1\,308 (less than 0.1\%) individuals have missing sex, 1\,376\,098 (10.2\%) individuals have invalid or missing Townsend score, and 2\,160\,435 (16.0\%) have their start date after their end date. Applying the selection criteria results in 9\,065\,617 (70.0\%) individuals who are eligible for inclusion in this study. In this eligible sample, there are 1\,090\,248 (8.1\%) individuals who are registered to THIN general practices in London, of whom 470\,863 (3.5\%) individuals are actively registered on 01 January 2013. Finally, $n=404\,318 \left(3.0\%\right)$ individuals have at least 12 months of follow-up by 01 January 2013 and make up the sample for this study. Table \ref{tab:example2_vars} presents a summary of variables considered in this study. The sample comprises $51\%$ women; the majority of individuals in the sample (approximately $80\%$) are below 60 years of age; there are slightly more than $70\%$ of the individuals with quintiles of the Townsend score of 3 and above; and $5.5\%$ of the individuals have a diagnosis of type 2 diabetes on or before 01 January 2013. 
\par Ethnicity is recorded for $309\,684 \left(76.6\%\right)$ and missing for $94\,634 \left(23.4\%\right)$ individuals (Table \ref{tab:dist_etht2d}). Among individuals with ethnicity recorded, the estimated proportion of the White ethnic group is higher, and the non-White ethnic groups lower compared to the corresponding ethnic breakdown in the 2011 ONS census data for London (Table \ref{tab:dist_etht2d}). Single imputation with the White ethnic group further overestimates the White group and underestimates the other non-White groups, under the assumption that the ethnicity distribution in THIN should match that in the census (Table \ref{tab:dist_etht2d}). \begin{figure} \caption{Case study: flowchart of selection criteria for THIN sample.} \label{fig:flowchart} \end{figure} \begin{table}[ht!] \renewcommand{\arraystretch}{1.1} \setlength{\tabcolsep}{5pt} \centering \caption{Case study: summary of variables in the analysis; $n = 404\,318$.} \begin{subtable}{\linewidth} \centering \subcaption{Distribution of age group, sex, Townsend deprivation score, and type 2 diabetes diagnoses.} \label{tab:example2_vars} \begin{tabular}{lcc} \toprule Variable & Frequency & \% \\ \midrule \textit{Age group (years)} & & \\ 0--9 & 41\,601 & 10.29 \\ 10--19 & 45\,664 & 11.29 \\ 20--29 & 50\,065 & 12.38 \\ 30--39 & 65\,695 & 16.25 \\ 40--49 & 64\,837 & 16.04 \\ 50--59 & 53\,272 & 13.18 \\ 60--69 & 39\,427 & 9.75 \\ 70--79 & 25\,348 & 6.27 \\ 80+ & 18\,409 & 4.55 \\ \textit{Sex} & & \\ Male & 198\,301 & 49.05 \\ Female & 206\,017 & 50.95 \\ \textit{Townsend score} & & \\ Quintile 1 (least deprived) & 48\,934 & 12.10 \\ Quintile 2 & 64\,788 & 16.02 \\ Quintile 3 & 101\,305 & 25.06 \\ Quintile 4 & 102\,626 & 25.38 \\ Quintile 5 (most deprived) & 86\,665 & 21.43 \\ \textit{Type 2 diabetes} & 22\,100 & 5.47 \\ \bottomrule \end{tabular} \end{subtable} \vskip 15pt \begin{subtable}{\linewidth} \centering \subcaption{Distribution of ethnicity when missing values are included, excluded, and imputed with 
the White ethnic group.} \label{tab:dist_etht2d} \begin{tabular}{lcccccc} \toprule Ethnicity & Frequency & \begin{tabular}[c]{@{}c@{}}\%\\ including \\ missing\end{tabular} & \begin{tabular}[c]{@{}c@{}}\%\\ excluding \\ missing\end{tabular} & \begin{tabular}[c]{@{}c@{}}Frequency\\ missing\\ imputed\\ with White\end{tabular} & \begin{tabular}[c]{@{}c@{}}\%\\ missing\\ imputed\\ with White\end{tabular} & \begin{tabular}[c]{@{}c@{}}\%\\ 2011 ONS \\ census \\ London\end{tabular} \\ \midrule White & 224\,403 & 55.50 & 72.46 & 319\,037 & 78.91 & 59.8 \\ Asian & 35\,027 & 8.66 & 11.31 & 35\,027 & 8.66 & 18.8 \\ Black & 30\,771 & 7.61 & 9.94 & 30\,771 & 7.61 & 13.3 \\ Other & 19\,483 & 4.82 & 6.29 & 19\,483 & 4.82 & 8.4 \\ Missing & 94\,634 & 23.41 & & & & \\\midrule $\sum$ including missing & 404\,318 & & & & & \\ $\sum$ excluding missing & 309\,684 & \\ \bottomrule \end{tabular} \end{subtable} \end{table} Figure \ref{fig:dist_eth} shows the distribution of four-level ethnicity after missing values in ethnicity are handled by the various methods for missing data. CRA, single imputation with the White ethnic group, and standard MI overestimate the White group while underestimating the other non-White ethnic proportions, compared to the corresponding census statistics. In calibrated-$\delta$ adjustment MI, the majority of missing values in ethnicity are imputed with the Asian and Black groups. This method recovers the ethnic breakdown in the census as expected, since the census distribution is used as the reference. \begin{figure} \caption{Case study: distribution of four-level ethnicity in different methods for handling missing ethnicity data, compared to the 2011 ONS census distribution for London (horizontal black lines).} \label{fig:dist_eth} \end{figure} \par Figure \ref{fig:combine_methodf} and Table \ref{tab:case_study_or} present estimated odds ratios of type 2 diabetes diagnosis and $95\%$ CIs for age group, sex, Townsend score, and ethnicity in the analysis model. 
Age 40--49 years, male, quintile 1, and the White ethnic group are selected as base levels for age group, sex, Townsend score, and ethnicity, respectively. $M = 30$ imputations produce Monte Carlo errors for point estimates of less than 10\% of the estimated standard errors for all parameters. The relative efficiency versus an infinite number of imputations is above 0.988 for all parameter estimates and MI methods. Overall, the odds of being diagnosed with type 2 diabetes increase relatively smoothly with older age groups and higher quintiles of the Townsend score; are lower in women compared to men; and are higher in the Asian, Black, and Mixed/Other ethnic groups compared to the White group in all methods for handling missing data in ethnicity. \par Compared to the other three methods under consideration, calibrated-$\delta$ adjustment MI produces comparable estimated odds ratios for the younger age groups, and smaller estimated odds ratios for the older ($60+$) age groups. Calibrated-$\delta$ adjustment MI leads to slightly higher estimated odds ratio for women compared to CRA, single imputation with the White ethnic group, and standard MI; this increase is towards the null. All missing data methods produce odds ratios that increase with more deprived quintiles of the Townsend score. Calibrated-$\delta$ adjustment MI yields similar estimated odds ratios compared to the other methods for the first three quintiles of the Townsend score, and higher estimates for the top two quintiles. \par The most noticeable differences in point estimates associated with the prevalence of type 2 diabetes diagnoses are seen in the estimated odds ratios for ethnicity. 
CRA, single imputation, and standard MI again return similar results, in which the odds of having a diagnosis of type 2 diabetes are around 3.6 times higher in the Asian ethnic group compared to the White group, and individuals in the Black ethnic group are about 2.3 times more likely to receive a diagnosis of type 2 diabetes compared to those of White ethnic background. Single imputation with the White ethnic group slightly increases the estimated odds ratios for the non-White groups. This is because explanatory analyses conducted to examine predictors of both ethnicity and missingness in ethnicity suggest that individuals with missing ethnicity are, on average, less likely to have a diagnosis of type 2 diabetes (OR of observing ethnicity for type 2 diabetes (adjusted for age group, sex, Townsend score) = 1.39, $95\%$ CI 1.34 to 1.44, full results not shown). Replacing missing values with the White ethnic group means that this group will contain a lower percentage of type 2 diabetes diagnoses, which implies that the estimated odds ratios for the non-White ethnic groups will increase. Compared to CRA, single imputation with the White ethnic group, and standard MI, calibrated-$\delta$ adjustment MI leads to a reduction in the estimated odds ratios for the non-White ethnic groups (Figure \ref{fig:combine_methodf} and Table \ref{tab:case_study_or}). For these groups, the $95\%$ CIs of the ethnicity point estimates in calibrated-$\delta$ adjustment MI do not cross that of the other methods. 
\begin{figure} \caption{Case study: estimated odds ratio of type 2 diabetes diagnosis for age group (base level: 40-49 years), sex (base level: male), social deprivation status (base level: quintile 1 of the Townsend score), and ethnicity (base level: White) in different methods for handling missing ethnicity data.} \label{fig:combine_methodf} \end{figure} \par Fraction of missing information (FMI) \cite{White2011} for the estimates of association between ethnicity and the prevalence of type 2 diabetes diagnoses was 0.132 (Monte Carlo standard error (MCSE) $=0.003$); 0.193 (MCSE $= 0.05$); 0.230 (MCSE $= 0.066$) for Asian, Black, and Mixed/Other ethnic group, respectively in standard MI. The corresponding quantities for these three groups in calibrated-$\delta$ adjustment MI are 0.283 (MCSE $=0.052$); 0.245 (MCSE $=0.045$); 0.327 (MCSE $=0.051$). Calibrated-$\delta$ adjustment MI appears to have higher FMI compared to standard MI. This could be explained by the fact that non-White ethnic groups, which are under-represented in the observed data, are imputed more often in calibrated-$\delta$ adjustment MI than in standard MI. Therefore, the between-imputation variance relies on more imputed values in the non-White ethnic groups and less frequently imputed values in the White group, which leads to the non-White proportion estimates being more variable across the completed datasets. 
\begin{landscape} \begin{table} \centering \caption{Case study: adjusted ORs and $95\%$ CIs from a multivariable logistic regression model for the prevalence of type 2 diabetes diagnoses, conditional on age group in 2013, sex, Townsend deprivation score, and ethnic group in different methods for handling missing data in ethnicity, $n=404\,318$.} \label{tab:case_study_or} \begin{tabular}{lcccccccc} \toprule & \multicolumn{2}{c}{CRA} & \multicolumn{2}{c}{Single imputation with White} & \multicolumn{2}{c}{Standard MI} & \multicolumn{2}{c}{Calibrated-$\delta$ adjustment MI} \\ \cmidrule(l{2pt}r{2pt}){2-3} \cmidrule(l{2pt}r{2pt}){4-5} \cmidrule(l{2pt}r{2pt}){6-7} \cmidrule(l{2pt}r{2pt}){8-9} & OR & 95\% CI & OR & 95\% CI & OR & 95\% CI & OR & 95\% CI \\ \midrule \textit{Age group (years)} & & & & & & & & \\ 0-9 & 0.010 & 0.006 to 0.016 & 0.010 & 0.006 to 0.016 & 0.010 & 0.006 to 0.016 & 0.010 & 0.006 to 0.017 \\ 10-19 & 0.022 & 0.016 to 0.032 & 0.026 & 0.020 to 0.035 & 0.025 & 0.019 to 0.033 & 0.025 & 0.019 to 0.033 \\ 20-29 & 0.120 & 0.103 to 0.139 & 0.122 & 0.107 to 0.139 & 0.120 & 0.106 to 0.137 & 0.122 & 0.107 to 0.139 \\ 30-39 & 0.308 & 0.283 to 0.336 & 0.316 & 0.292 to 0.342 & 0.320 & 0.296 to 0.347 & 0.330 & 0.305 to 0.357 \\ 40-49 & 1 & & 1 & & 1 & & 1 & \\ 50-59 & 2.641 & 2.495 to 2.796 & 2.605 & 2.474 to 2.743 & 2.604 & 2.473 to 2.742 & 2.516 & 2.390 to 2.649 \\ 60-69 & 5.255 & 4.968 to 5.559 & 5.190 & 4.933 to 5.46 & 5.309 & 5.044 to 5.587 & 4.928 & 4.685 to 5.184 \\ 70-79 & 7.662 & 7.230 to 8.120 & 7.748 & 7.352 to 8.166 & 7.984 & 7.573 to 8.417 & 7.484 & 7.102 to 7.886 \\ 80+ & 8.154 & 7.655 to 8.685 & 8.003 & 7.560 to 8.472 & 8.379 & 7.910 to 8.876 & 7.596 & 7.175 to 8.043 \\ \textit{Sex} & & & & & & & & \\ Male & 1 & & 1 & & 1 & & 1 & \\ Female & 0.727 & 0.704 to 0.751 & 0.752 & 0.731 to 0.774 & 0.760 & 0.738 to 0.782 & 0.773 & 0.751 to 0.796 \\ \textit{Townsend score} & & & & & & & & \\ Quintile 1 (least deprived) & 1 & & 1 & & 1 & & 1 & \\ Quintile 2 & 
1.125 & 1.057 to 1.196 & 1.121 & 1.060 to 1.185 & 1.115 & 1.054 to 1.179 & 1.119 & 1.058 to 1.183 \\ Quintile 3 & 1.217 & 1.149 to 1.288 & 1.242 & 1.180 to 1.307 & 1.208 & 1.147 to 1.272 & 1.249 & 1.187 to 1.316 \\ Quintile 4 & 1.376 & 1.300 to 1.457 & 1.420 & 1.349 to 1.496 & 1.381 & 1.312 to 1.455 & 1.474 & 1.400 to 1.553 \\ Quintile 5 (most deprived) & 1.693 & 1.596 to 1.796 & 1.783 & 1.691 to 1.879 & 1.708 & 1.619 to 1.802 & 1.864 & 1.768 to 1.966 \\ \textit{Ethnic group} & & & & & & & & \\ White & 1 & & 1 & & 1 & & 1 & \\ Asian & 3.588 & 3.431 to 3.753 & 3.629 & 3.474 to 3.789 & 3.577 & 3.425 to 3.735 & 2.355 & 2.259 to 2.456 \\ Black & 2.253 & 2.135 to 2.378 & 2.257 & 2.142 to 2.379 & 2.254 & 2.136 to 2.379 & 1.638 & 1.555 to 1.725 \\ Mixed/Other & 1.606 & 1.486 to 1.736 & 1.617 & 1.497 to 1.746 & 1.615 & 1.491 to 1.749 & 1.174 & 1.085 to 1.270 \\ \bottomrule \end{tabular} \end{table} \end{landscape} \section{Discussion} \label{sec5} Our proposed calibrated-$\delta$ adjustment MI method for missing data in a binary/categorical covariate involves utilising population-level information about the incomplete covariate to generate a calibrated-$\delta$ adjustment, which is then used in the intercept of the imputation model in order to improve the analysis of data suspected to be MNAR. The development of this method was motivated by van Buuren et al.'s \cite{VanBuuren1999} $\delta$ (offset) approach in MI, but where $\delta$ is derived based on external information instead of chosen arbitrarily or based on expert's belief (which is arguably not arbitrary, but can be subjective). 
Direct linkage to external data has also increasingly been used for the analysis of missing data generated by a MNAR mechanism.\cite{Cornish2015} However, external linked data might not always be available, or the linkage might not be possible, whereas our proposed calibrated-$\delta$ adjustment MI method does not require records from the same individuals to be directly linked between the datasets. \par Under the MNAR assumption of missing data, MI results rely on subtle, untestable assumptions, and may depend heavily on the particular way the missingness mechanism is modelled. This issue emphasises the central role of sensitivity analysis, which explores how inference may vary under different missingness mechanisms.\cite{Kenward2007} MI offers flexibility for sensitivity analysis, since the imputation model can be tuned to incorporate possible departures from the MAR assumption.\cite{Kenward2007, White2011} Unfortunately, a sensitivity analysis is often not performed or reported sufficiently in practice,\cite{Wood2004, HayatiRezvan2015} a tendency abetted by the practical constraints of many applied projects. When the population-level information about the incomplete covariate is available, our proposed calibrated-$\delta$ adjustment MI method provides a useful tool for performing a single, calibrated sensitivity analysis to assess the impact of potential departures from the MAR assumption. \par The analytic study of a $2\times2$ contingency table with a binary outcome variable $y$ and a binary covariate $x$ gave insights into how the method works, and will work for more general contingency table settings with one incomplete variable. The analytic study explored the appropriate derivation of the calibrated-$\delta$ adjustment under increasingly complex missingness mechanisms. 
We showed that when data in $x$ were MNAR dependent on $x$ or both $x$ and $y$, appropriately adjusting the intercept of the imputation model sufficiently corrected bias in the analysis model's parameter estimates. Based on this setting, simulation studies were conducted to explore scenarios when the population distribution of $x$ was either invariant (i.e. `known') or estimated in an external dataset with uncertainty. Calibrated-$\delta$ adjustment MI was shown to perform as well as standard MI in terms of bias when data were MAR. Further, calibrated-$\delta$ adjustment MI also produced unbiased parameter estimates with good coverage, and was preferred to standard MI under the two general MNAR mechanisms being evaluated. \par In the analytic and simulation studies, we did not consider the MNAR selection model where the probability of observing $x$ depends on $x$, $y$, and their interaction. We suspect that calibrated-$\delta$ adjustment MI with a single intercept adjustment calculated based on the marginal distribution of $x$ alone will not fully correct bias introduced by this missingness mechanism; and that an additional sensitivity parameter for the $x$--$y$ association is needed. Information about the population distribution of $x$ conditional on $y$ might be required to produce unbiased estimates when the probability of observing $x$ given $x$ differs across the levels of $y$. However, such information might not always be available in practice. Similarly, when the outcome variable $y$ is continuous, a second sensitivity parameter for the covariate--outcome association in the imputation model is needed; we will explore this setting in another paper. 
\par In the case study which examined the association between ethnicity and the prevalence of type 2 diabetes diagnoses in THIN, calibrated-$\delta$ adjustment MI using information from census data yielded a more plausible estimate of the ethnicity distribution compared to CRA, single imputation of missing values with the White ethnic group, and standard MI. Subsequently, estimates of association for the non-White ethnic groups produced by calibrated-$\delta$ adjustment MI were lower than that in the other methods. Previously, it was found that ethnicity was more likely to be recorded for individuals with a diagnosis of type 2 diabetes. By imputing missing values with the non-White ethnic groups more frequently, calibrated-$\delta$ adjustment MI led to a decrease in the percentage of prevalent type 2 diabetes cases among these groups, which we thought was the primary reason explaining the lower odds ratios compared to the other methods. In addition, it was also possible that the explanatory power of ethnicity for type 2 diabetes was partially diluted by the stronger effect of deprivation status, which compensated for the reduction in the odds ratios for ethnicity. The odds ratios for Townsend deprivation score were higher in calibrated-$\delta$ adjustment MI compared to CRA for the top two quintiles. These findings seemed to suggest that some effect of ethnicity was absorbed in Townsend score in calibrated-$\delta$ adjustment MI, where deprivation status explained some of the effect which might otherwise have been explained by ethnicity. This could be attributed to a possibility that individuals of Asian and Black ethnic groups, whose ethnicity was not recorded, were more likely to belong to the more deprived quintiles of the Townsend score. 
\par Given the missingness mechanisms considered thus far for the development of calibrated-$\delta$ adjustment MI in \cref{sec2,sec3}, results in the case study suggested a potential departure from the MAR assumption for missingness in ethnicity. This was because, conditional on the outcome variable type 2 diabetes and other fully observed variables included in the analysis model, standard MI did not yield a distribution of ethnicity that was comparable to the census ethnic breakdown. Ethnicity was also not likely to be MNAR dependent only on the values of ethnicity, since the point estimates in CRA and standard MI were broadly comparable. Results from the exploratory analyses examining the associations between covariates in the imputation model for ethnicity and missingness in ethnicity among the complete records suggested that age group, sex, Townsend score, and type 2 diabetes were factors likely to be associated with whether ethnicity was recorded. This finding indicated that ethnicity was likely to be MNAR depending on the ethnic groups, fully observed outcome variable (type 2 diabetes diagnoses), as well as other fully observed covariates in the analysis model (age group, sex, and deprivation status). \par The major strength of calibrated-$\delta$ adjustment MI is its flexibility to be adapted to impute variables in a given dataset whose distributions might be available in some external data. Here we used census data for ethnicity in primary care electronic health records, but information obtained from other nationally representative datasets (such as the Health Survey for England \cite{UKDataService}) could similarly be used to impute missing data in other health indicators routinely recorded in primary care such as smoking status or alcohol consumption. 
In such instances, the variability associated with estimating the reference distribution used for calibration needs to be accounted for in calibrated-$\delta$ adjustment MI as illustrated in \cref{subsec3.2}, although this source of uncertainty might be negligible depending on the size of the external dataset. \par Throughout this paper, we restricted our development of calibrated-$\delta$ adjustment MI to the case of a single partially observed covariate. However, we believe this approach can be extended for handling missing data in more than one variable. Multivariate imputation by chained equations (MICE) \cite{VanBuuren1999, VanBuuren2011} is a popular procedure for performing MI of multivariate missing data, and is commonly implemented under the MAR assumption.\cite{Marston2010, Marston2014} MICE is an iterative procedure which requires the specification of an imputation model for each incomplete variable, conditional on all other variables. Our proposed univariate calibrated-$\delta$ adjustment MI method can, in principle, be embedded into MICE to impute certain MNAR variables whose distributions are available externally, while the standard MI method can be used for the imputation of other variables assuming data are MAR. Under the MICE framework, when there are several MNAR variables to be imputed, information from more than one external data source can potentially be drawn on and utilised in calibrated-$\delta$ adjustment MI for these variables. \par Finally, returning to the analytic and simulation studies, we did not consider the setting where both the outcome variable $y$ and the covariate $x$ are incomplete. When $y$ is MNAR dependent on its values and in addition to the population information on $x$ we can obtain the marginal distribution of $y$ from an external dataset, then this information can be used in calibrated-$\delta$ adjustment MI for $y$ when $y$ is imputed in the MICE algorithm. 
If $y$ is MAR then there must be some artificial mechanism whereby the dataset is divided into two subsets; one where $y$ is MAR dependent on the observed values of $x$ and another one where $x$ is MNAR dependent on its values. In this setting, our proposed MI method should work for $x$ when it is imputed in the MICE algorithm. The more complex missingness settings involving several incomplete covariates are the subject of ongoing work and will be reported in the future. \section*{Conflict of interest} The authors declare no potential conflicts of interest. \section*{Supporting information} \appendix \counterwithin{table}{section} \counterwithin{figure}{section} \section{Weighted multiple imputation for a binary/categorical covariate} The procedure of weighted multiple imputation is as follows. In the imputation step, weights derived from the population marginal distribution of the incomplete variable are attached to the complete records, and a weighted (multinomial) logistic regression model is fitted to the complete records to obtain the maximum likelihood estimates of the imputation model's parameters $\widehat{\boldsymbol{\theta}}$ and their asymptotic sampling variance $\widehat{\boldsymbol{U}}$. New parameters are then drawn from the large-sample normal approximation $N(\widehat{\boldsymbol{\theta}}, \widehat{\boldsymbol{U}})$ of its posterior distribution, assuming non-informative priors. Finally, imputed values are drawn from the (multinomial) logistic regression using these new parameters. Note that \textit{no weights} are used when fitting the substantive scientific model to the imputed data. 
\subsection{Derivation of the marginal weights} The idea of augmenting the standard MI method with weights is related to the technique of post-stratification weighting, which is commonly used in survey non-responses when the population distributions are known.\cite{Raghunathan2015} To post-stratify the sample, weights are calculated to bring the sample distribution in line with the population. Suppose that in a survey, one of the variables measured is ethnicity, which is categorised into four groups (White, Black, Asian, and Other). If the population distribution of ethnicity is available, the distribution of ethnicity among survey respondents can be compared with the population distribution. Suppose that a proportion $p^{\text{obs}} = 0.8$ of the survey respondents give their ethnicity as White, whereas the population has $p^{\text{pop}} = 0.6$ in this category. The White category is over-represented in the survey respondents, but can be made representative of the population by assigning to the responses a post-stratification weight $w^{\text{ps}} < 1$, such that \begin{equation*} w^{\text{ps}} = 1/(p^{\text{obs}}/p^{\text{pop}}) = 1/(0.8/0.6)=0.75. \end{equation*} In adapting this idea to MI, we need to address the complication arising because the \textit{completed} data obtained after MI consist of both observed and imputed (missing) data. Naive use of post-stratification weights in MI will recover the correct population distribution in the imputed data. However, since the observed data remain the same, the distribution in the completed data will not be matched to that in the population. Therefore, some \textit{compensation} for the lack of representativeness in the observed data is needed in the imputed data so that the correct population distribution can be recovered after imputation. Continuing with the survey example, suppose that we survey $200$ individuals, $100$ of whom respond with their ethnicity. 
A proportion $p^{\text{obs}} = 0.8$ of these $100$ responses are in the White group. If the population proportion of this group is $p^{\text{pop}} = 0.6$, we would expect to have $120$ White individuals in the survey sample. This implies that among the $100$ individuals with missing ethnicity, we need to impute ethnicity of $40$ individuals as White, i.e. the proportion of the White category required in the missing data, $p^{\text{req}}$, is equal to $0.4$. To make the completed (observed and imputed) data of this category representative of the population, we need to weight respondents of this category in the imputation model by \begin{equation*} 1/(p^{\text{obs}}/p^{\text{req}}) = 1/(0.8/0.4) = 0.5, \end{equation*} \noindent which is smaller than the corresponding naive post-stratification weight above, since it compensates for the over-representation among the survey respondents of White ethnicity. \par More generally, suppose that we seek to collect a $J$-level variable $x$ in a sample of size $n$, resulting in $x$ being observed for $n^{\text{obs}}$ subjects and missing for $n^{\text{mis}}$ subjects, $n^{\text{obs}}+n^{\text{mis}}=n$. Let $p_{j}^{\text{obs}}$ and $p_{j}^{\text{req}}$ denote the level-$j$ proportions of $x$ in the observed and imputed data respectively, such that $p_{j}^{\text{obs}}n^{\text{obs}}= n_{j}^{\text{obs}}$, and $p_{j}^{\text{req}}n^{\text{mis}}= n_{j}^{\text{req}}$, where $j = 1, \ldots, J$. Let $p_{j}^{\text{pop}}$ denote the level-$j$ proportion of $x$ in the population, which is assumed to be known. The aim here is to find $p_{j}^{\text{req}}$ for each level of $x$ such that the number of subjects in the completed data after imputation is equal to the expected number implied by the corresponding population proportion, i.e. $n_{j}^{\text{obs}}+n_{j}^{\text{req}}=p_{j}^{\text{pop}}n$. 
The level-$j$ proportion of $x$ required in the imputed data, $p_{j}^{\text{req}}$, is given by \begin{equation*} p_{j}^{\text{req}}=\frac{p_{j}^{\text{pop}}n - p_{j}^{\text{obs}} n^{\text{obs}}}{n^{\text{mis}}}. \end{equation*} \noindent Therefore, the weight for group $j$, which we refer to as the `marginal weight' and denote by $w_{j}^{\text{m}}$, is \begin{equation*} w_{j}^{\text{m}}=1/(p_{j}^{\text{obs}}/p_{j}^{\text{req}}). \end{equation*} \subsection{Derivation of the conditional weights} The marginal weights introduced above only depend on the population distribution of the incomplete variable. However, if there are (fully observed) covariates in the imputation model, the associations between these variables and the incomplete variable distribution are not reflected in such weights. We therefore adjust the marginal weights to obtain another set of weights, termed the `conditional weights', which account for covariates in the imputation model. These weights are derived using the marginal distribution of the incomplete variable obtained after having estimated the parameters of an imputation model assuming MAR in the complete records. Suppose that an imputation model is fitted to the complete records, and the corresponding predicted probabilities of the incomplete variable (averaged over the covariates) are obtained and applied to the missing data. Let ${p}_{j}^{\text{pred}}$ denote the resulting predicted level-$j$ proportion of $x$ in the completed data; then the level-$j$ proportion required in the imputed data is given by \begin{equation*} p_{j}^{\text{req}} = \frac{p_{j}^{\text{pop}} n - {p}_{j}^{\text{pred}} n^{\text{obs}}}{n^{\text{mis}}}, \end{equation*} \noindent and the conditional weight for group $j$, denoted by $w_{j}^{\text{c}}$, is \begin{equation*} w_{j}^{\text{c}}=1/({p}_{j}^{\text{pred}}/p_{j}^{\text{req}}). 
\end{equation*} \par In this approach, the effects of covariates in the imputation model are reflected in the predicted probabilities ${p}_{j}^{\text{pred}}$, which are then used to derive the conditional weights for weighted MI. \section{Analytic study -- bias in a $2 \times 2$ contingency table} \label{app2} In the $2\times 2$ contingency table of a complete binary outcome variable $y$ and an incomplete binary covariate $x$ (\cref{sec2}), we calculate analytic bias in the analysis model's parameter estimates (defined as $\hat{\beta} - \beta$) after missing values in $x$ are handled by (i) a CRA, (ii) standard MI, (iii) marginal weighted MI, and (iv) conditional weighted MI. The analytic calculations are then verified by simulating a full-data sample with $n=10\,000$ observations of $x$ and $y$ from the following model \begin{align*} & x \sim \text{Bernoulli}\left(p_{x}^{\text{pop}} = 0.7\right);\\ &\text{logit}\left[p\left(y=1 \mid x\right)\right] = \beta_{0} +\beta_{x}x, \end{align*} where $\beta_{0}=\text{ln}\left(0.5\right)$ and $\beta_{x}=\text{ln}\left(1.5\right)$. Missing values in $x$ are generated using selection models M1--M4 with a range of values for the selection parameters $\alpha$ (Table \ref{tab:anstudy_selectionparam}). \begin{table}[t!] 
\renewcommand{\arraystretch}{1} \setlength{\tabcolsep}{5pt} \centering \caption{Analytic study: values of selection parameters for generating missingness in $x$ used in simulations conducted to verify analytic calculations.} \label{tab:anstudy_selectionparam} \begin{tabular}{cccccc} \toprule \multirow{2}{*}{\begin{tabular}[c]{@{}c@{}}Missingness \\ model\end{tabular}} & \multirow{2}{*}{\begin{tabular}[c]{@{}c@{}}Linear predictor of selection model\\ $\text{logit}\left[p\left(r=1 \mid x, y\right)\right]$\end{tabular}} & \multicolumn{3}{c}{Selection parameter} & \multirow{2}{*}{\begin{tabular}[c]{@{}l@{}}\% missing $x$\end{tabular}} \\\cmidrule(l{2pt}r{2pt}){3-3}\cmidrule(l{2pt}r{2pt}){4-4}\cmidrule(l{2pt}r{2pt}){5-5} & & $\alpha_{0}$ & $\alpha_{x}$ & $\alpha_{y}$ & \\ \midrule M1 & $\alpha_{0}$ & $\left[-3,3\right]$ & & & $5$--$95$ \\ M2 & $\alpha_{0} + \alpha_{y}y$ & $\left[-3,3\right]$ & & $\left[-3,3\right]$ & $3$--$97$ \\ M3 & $\alpha_{0} + \alpha_{x}x$ & $\left[-3,3\right]$ & $\left[-3,3\right]$ & & $2$--$98$ \\ M4 & $\alpha_{0} + \alpha_{x}x + \alpha_{y}y$ & $0.5$ & $\left[-3,3\right]$ & $\left[-3,3\right]$ & $9$--$84$ \\ \bottomrule \end{tabular} \fnote{Note: $r$: response indicator of $x$.} \end{table} \par \Cref{fig:biascal_mary,fig:biascal_mnarx,fig:biascal_mnarxy} present the analytic bias in CRA, standard MI, marginal and conditional weighted MI under MAR and MNAR mechanisms with the various values of the selection parameters. When $x$ is MCAR (M1), all methods provide unbiased parameter estimates, as suggested by the calculations (results not shown). \par When $x$ is MAR conditional on $y$ (M2, Figure \ref{fig:biascal_mary}), standard MI and conditional weighted MI are unbiased, while bias is observed for CRA in $\beta_{0}$, and for marginal weighted MI in both parameter estimates. This bias is due to the marginal weights not accounting for the association between $x$ and $y$ in the imputation model for $x$. 
As a result, marginal weights do not successfully recover the correct distribution of $x$ after MI. \par Both parameter estimates are unbiased in marginal weighted MI when $x$ is MNAR dependent on $x$ (M3, Figure \ref{fig:biascal_mnarx}), while standard MI leads to noticeable bias in the estimate of $\beta_{0}$. Bias in conditional weighted MI is small and occurs for extreme values of the selection parameters. Since missingness in $x$ does not depend on $y$ under M3, CRA is unbiased in both parameter estimates as the theory predicts. \par Under the last missingness mechanism when $x$ is MNAR dependent on both $x$ and $y$ (M4, Figure \ref{fig:biascal_mnarxy}), none of the methods result in unbiased parameter estimates. However, bias appears to be the smallest in conditional weighted MI. Although bias is present in both standard MI and marginal weighted MI, the magnitude of bias is smaller in marginal weighted MI compared to standard MI. Under this missingness mechanism, conditional weighted MI can be regarded as a hybrid of marginal weighted MI and standard MI. The conditional weights correct for some bias introduced by $x$ in the selection model in a similar manner to the marginal weights under M3; the method also alleviates some residual bias similarly to standard MI under M2. \par Overall, these results suggest that under the missingness mechanisms considered in this paper, calibrated-$\delta$ adjustment MI provides a more general solution for accommodating missing data in $x$ and is therefore the preferred method compared to standard MI and marginal and conditional weighted MI. 
\begin{figure} \caption{Analytic study: analytic bias when $x$ is MAR conditional on $y$ (M2).} \label{fig:biascal_mary} \end{figure} \begin{figure} \caption{Analytic study: analytic bias when $x$ is MNAR dependent on $x$ (M3).} \label{fig:biascal_mnarx} \end{figure} \begin{figure} \caption{Analytic study: analytic bias when $x$ is MNAR dependent on $x$ and $y$ (M4).} \label{fig:biascal_mnarxy} \end{figure} \end{document}
\begin{document} \title{\TheTitle} \begin{abstract} We study the homogenization of the Poisson equation with reaction term and of the eigenvalue problem associated to the generator of multiscale Langevin dynamics. Our analysis extends the theory of two-scale convergence to the case of weighted Sobolev spaces in unbounded domains. We provide convergence results for the solution of the multiscale problems above to their homogenized surrogate. A series of numerical examples corroborates our analysis. \end{abstract} \textbf{AMS subject classifications.} 35B27, 35P20, 46E35, 47A75, 60H10. \textbf{Key words.} Langevin equation, infinitesimal generator, homogenization, eigenvalue problem, two-scale convergence, weighted Sobolev spaces. \section{Introduction} Multiscale diffusion processes are a powerful tool for modeling chemical reactions with species reacting at different speeds \cite{LeS16}, the evolution of markets with macro/micro structures \cite{ZMP05,AMZ06,AiJ14}, and phenomena in oceanography and atmospheric sciences \cite{CoP09,YMV19}. In all these scenarios, it is relevant to extract single-scale surrogates, which are effective for modeling the slowest component of the system, which often governs its macroscopic behavior. We consider in this paper multiscale models of the overdamped-Langevin kind, whose solution $X_t^\varepsilon$ satisfies for a potential $V^\varepsilon\colon \mathbb{R}^d \to \mathbb{R}$ and a $d$-dimensional Brownian motion $W_t$ the stochastic differential equation (SDE) \begin{equation} \label{eq:Langevin_ms} \mathrm{d} X_t^\varepsilon = -\nabla V^\varepsilon(X_t^\varepsilon) \,\mathrm{d} t + \sqrt{2\sigma} \,\mathrm{d} W_t, \end{equation} where $\sigma > 0$ is a diffusion coefficient. 
Under assumptions on the potential $V^\varepsilon$ of scale separation, periodicity and dissipativity, which we will make more precise in the following, there exists in this case a homogenized process $X_t^0$, which, for a potential $V^0\colon \mathbb{R}^d \to \mathbb{R}$ and a symmetric positive definite diffusion matrix $\Sigma \in \mathbb{R}^{d \times d}$, solves the stochastic differential equation \begin{equation} \label{eq:Langevin_hom} \mathrm{d} X_t^0 = -\nabla V^0\left(X_t^0\right) \,\mathrm{d} t + \sqrt{2\Sigma} \,\mathrm{d} W_t. \end{equation} The process $X_t^0$ is indeed a surrogate for the slow-scale component of the system \eqref{eq:Langevin_ms}, and in particular $X_t^\varepsilon \to X_t^0$ in a weak sense. We remark that this multiscale model has been frequently an object of study in the field of parameter estimation due to its numerous applications and the fact that its surrogate dynamics admits a closed form expression which is easy to determine. Among the multiple examples we mention \cite{PaS07,PPS09,PPS12,AGP21,APZ21,GaZ21}, where the aim is fitting a coarse-grained model from data originating from the multiscale equation. In this paper, we consider the infinitesimal generator $\mathcal L^\varepsilon$ of \eqref{eq:Langevin_ms} and study first the partial differential equation (PDE) \begin{equation} \label{eq:intro_poisson} - \mathcal L^\varepsilon u^\varepsilon + \eta u^\varepsilon = f, \end{equation} where $\eta > 0$, and then the eigenvalue problem \begin{equation} \label{eq:intro_eigen} - \mathcal L^\varepsilon\phi^\varepsilon = \lambda^\varepsilon \phi^\varepsilon. \end{equation} We analyze the homogenization of problems \eqref{eq:intro_poisson} and \eqref{eq:intro_eigen} providing asymptotic results for their solutions in the limit of vanishing $\varepsilon$. In particular, we show that they converge to the solutions of the corresponding problems for the generator $\mathcal L^0$ of the homogenized diffusion \eqref{eq:Langevin_hom}. 
We remark that these equations are defined on the whole space $\mathbb{R}^d$, and this leads us to the introduction of weighted Sobolev spaces where the weight function is the invariant density of the homogenized process \eqref{eq:Langevin_hom}. The proof of the convergence results relies on the theory of two-scale convergence, which we extend to the case of weighted Sobolev spaces in order to make it fit into our framework. The Poisson problem for elliptic operators corresponding to infinitesimal generators of diffusion processes has been thoroughly investigated in \cite{PaV01,PaV03,PaV05}, where the authors prove the existence and uniqueness of the solution in suitable weighted Sobolev spaces and its continuity with respect to parameters in the equations. Moreover, the Poisson problem for an extended generator defined in terms of an appropriate version of the Dynkin formula is analyzed in \cite{VeK11}. Regarding the study of the homogenization of the eigenvalue problem for elliptic operators, several results exist in the context of bounded domains \cite{Kes79a,Kes79b,ACP04}, and additional first-order corrections for the eigenvalues of the homogenized generator are provided in \cite{MoV97}. Our theoretical analysis is based on the notion of two-scale convergence, which was initially introduced in \cite{Ngu89} and then studied in greater detail in \cite{All92,All94}. Our contribution to this field consists in the extension of this theory from Lebesgue spaces in bounded domains to the more general case of weighted Sobolev spaces in unbounded domains. We also mention that a further motivation to study the asymptotic properties of the multiscale eigenvalue problem is that it provides the framework for various spectral methods for the estimation of unknown parameters or the computation of numerical solutions, which have been proposed in literature. 
Two different inference procedures relying on the eigenvalues and eigenfunctions of the generator of the dynamics are presented in \cite{KeS99,CrV06} for single-scale problems and then extended to multiscale diffusions in \cite{APZ21,CrV11}, respectively. Moreover, spectral methods are also employed to solve multiscale SDEs at the diffusive time scale in \cite{APV16}, where an alternative approach to the heterogeneous multiscale method based on the spectral properties of the generator of the coarse-grained model is proposed. The main contribution of our work, in addition to the extension of the theory of two-scale convergence to weighted Sobolev spaces in unbounded domains, is the homogenization of the Poisson equation with reaction term \eqref{eq:intro_poisson} and of the eigenvalue problem \eqref{eq:intro_eigen} for the generator of multiscale Langevin dynamics. In particular, we show: \begin{itemize}[leftmargin=0.5cm] \item strong convergence in $L^2$ sense and weak convergence in $H^1$ sense of the solution of the multiscale equation \eqref{eq:intro_poisson} to the solution of the corresponding homogenized problem; \item convergence of the eigenvalues of the multiscale generator to the corresponding eigenvalues of the homogenized generator; \item strong convergence in $L^2$ sense and weak convergence in $H^1$ sense of the eigenvectors of the multiscale generator to the corresponding eigenvectors of the homogenized generator. \end{itemize} \paragraph{Notation.} Let $\rho \colon \mathbb{R}^d \to \mathbb{R}$ be a probability density function. Then, the following functional spaces will be employed throughout the paper. \begin{itemize}[leftmargin=0.5cm] \item $L^2_\rho(\mathbb{R}^d)$ is the space of measurable functions $u \colon \mathbb{R}^d \to \mathbb{R}$ such that \begin{equation} \norm{u}_{L^2_\rho(\mathbb{R}^d)} \coloneqq \left( \int_{\mathbb{R}^d} u(x)^2 \rho(x) \,\mathrm{d} x \right)^{1/2} < \infty. 
\end{equation} \item $L^2_\rho(\mathbb{R}^d \times Y)$ is the space of measurable functions $u \colon \mathbb{R}^d \times Y \to \mathbb{R}$ such that \begin{equation} \norm{u}_{L^2_\rho(\mathbb{R}^d \times Y)} \coloneqq \left( \int_{\mathbb{R}^d} \int_Y u(x,y)^2 \rho(x) \,\mathrm{d} y \,\mathrm{d} x \right)^{1/2} < \infty. \end{equation} \item $H^1_\rho(\mathbb{R}^d)$ is the space of measurable weakly differentiable functions $u \colon \mathbb{R}^d \to \mathbb{R}$ such that \begin{equation} \norm{u}_{H^1_\rho(\mathbb{R}^d)} \coloneqq \left( \int_{\mathbb{R}^d} u(x)^2 \rho(x) \,\mathrm{d} x + \int_{\mathbb{R}^d} \abs{\nabla u(x)}^2 \rho(x) \,\mathrm{d} x \right)^{1/2} < \infty. \end{equation} \item $C^k_{\mathrm{per}}(Y)$ with $k\in[0,\infty]$ is the subspace of $C^k(\mathbb{R}^d)$ of $Y-$periodic functions. \item $H^1_{\mathrm{per}}(Y)$ is the closure of $C^\infty_{\mathrm{per}}(Y)$ with respect to the norm in $H^1(Y)$. \item $\mathcal W_{\mathrm{per}}(Y)$ is the quotient space $H^1_{\mathrm{per}}(Y) / \mathbb{R}$ and it is endowed with the norm \begin{equation} \norm{u}_{\mathcal W_{\mathrm{per}}(Y)} = \norm{\nabla u}_{L^2(Y)}. \end{equation} \item $L^2_\rho(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is the space of measurable functions $u \colon x \mapsto u(x) \in C^0_{\mathrm{per}}(Y)$ such that $\norm{u(x)}_{L^\infty(\mathbb{R}^d)} \in L^2_\rho(\mathbb{R}^d)$ and it is endowed with the norm \begin{equation} \norm{u}_{L^2_\rho(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))} = \left( \int_{\mathbb{R}^d} \sup_{y \in Y} \abs{u(x,y)}^2 \rho(x) \,\mathrm{d} x \right)^{1/2}. 
\end{equation} \item $L^2_\rho(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ is the space of measurable functions $u \colon x \mapsto u(x) \in \mathcal W_{\mathrm{per}}(Y)$ such that $\norm{u(x)}_{\mathcal W_{\mathrm{per}}(Y)} \in L^2_\rho(\mathbb{R}^d)$ and it is endowed with the norm \begin{equation} \norm{u}_{L^2_\rho(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))} = \left( \int_{\mathbb{R}^d} \int_Y \abs{\nabla_y u(x,y)}^2 \rho(x) \,\mathrm{d} y \,\mathrm{d} x \right)^{1/2}. \end{equation} \end{itemize} \paragraph{Outline.} The remainder of the paper is organized as follows. In \cref{sec:setting} we introduce the multiscale Langevin dynamics and the weighted Sobolev spaces which are employed in the analysis. Then, in \cref{sec:poisson,sec:eigen} we study the homogenization of the Poisson problem with reaction term and of the eigenvalue problem for the generator, respectively. Finally, in \cref{sec:experiments} we present numerical examples which confirm our theoretical findings. \section{Problem setting} \label{sec:setting} In this section we present the main properties of the class of diffusions under investigation. Let us consider the $d$-dimensional multiscale overdamped Langevin equation \eqref{eq:Langevin_ms} and assume that the potential $V^\varepsilon$ admits a clear separation between the fastest and the slowest scale. In particular, let \begin{equation} \label{eq:potential_def} V^\varepsilon(x) = V(x) + p \left( \frac{x}{\varepsilon} \right), \end{equation} where $V \colon \mathbb{R}^d \to \mathbb{R}$ and $p \colon \mathbb{R}^d \to \mathbb{R}$ are respectively the slow and the fast components of the potential. Moreover, we consider the same dissipative framework of \cite[Assumption 3.1]{PaS07}, i.e., we work under the following assumption. 
\begin{assumption} \label{ass:dissipativity} The potentials $p$ and $V$ satisfy: \begin{enumerate} \item $p \in C^\infty(\mathbb{R}^d)$ is $L$-periodic in all directions for some $L > 0$; \item $V \in C^\infty(\mathbb{R}^d)$ is polynomially bounded from above and bounded from below, and there exist $a,b > 0$ such that \begin{equation} - \nabla V(x) \cdot x \le a - b\abs{x}^2. \end{equation} \end{enumerate} \end{assumption} Then, substituting the potential \eqref{eq:potential_def} into equation \eqref{eq:Langevin_ms} we have \begin{equation} \label{eq:SDE_ms} \,\mathrm{d} X_t^\varepsilon = - \nabla V(X_t^\varepsilon) \,\mathrm{d} t - \frac1\varepsilon \nabla p \left( \frac{X_t^\varepsilon}\varepsilon \right) \,\mathrm{d} t + \sqrt{2 \sigma} \,\mathrm{d} W_t, \end{equation} which is the model that we will consider from now on. Employing the theory of homogenization (see, e.g., \cite[Chapter 3]{BLP78} or \cite[Chapter 18]{PaS08}), we deduce the existence of the homogenized SDE \begin{equation} \label{eq:SDE_hom} \,\mathrm{d} X_t^0 = - K \nabla V(X_t^0) \,\mathrm{d} t + \sqrt{2 \Sigma} \,\mathrm{d} W_t, \end{equation} whose solution $X_t^0$ is the limit in law of the solution $X_t^\varepsilon$ of equation \eqref{eq:SDE_ms} as random variables in $\mathcal C^0([0,T]; \mathbb{R}^d)$.
The new diffusion coefficient is given by $\Sigma = K \sigma \in \mathbb{R}^{d \times d}$, where the symmetric positive-definite matrix $K$ has the explicit formula \begin{equation} \label{eq:K_def} K = \int_Y (I + \nabla \Phi(y)) \mu(y) \,\mathrm{d} y = \int_Y (I + \nabla \Phi(y)) (I + \nabla \Phi(y))^\top \mu(y) \,\mathrm{d} y, \end{equation} with $Y = [0,L]^d$ and \begin{equation} \label{eq:mu_def} \mu(y) = \frac{1}{C_\mu} e^{- \frac1\sigma p(y)} \qquad \text{with} \qquad C_\mu = \int_Y e^{- \frac1\sigma p(y)} \,\mathrm{d} y, \end{equation} and where $\Phi \colon Y \to \mathbb{R}^d$ is the unique solution of the $d$-dimensional cell problem \begin{equation} \label{eq:Phi_equation} - \sigma \Delta \Phi(y) + \nabla \Phi(y) \nabla p(y) = - \nabla p(y), \qquad y \in Y, \end{equation} endowed with periodic boundary conditions, which satisfies the constraint in $\mathbb{R}^d$ \begin{equation} \int_Y \Phi(y) \mu(y) \,\mathrm{d} y = 0. \end{equation} Let us remark that for an $\mathbb{R}^d$-valued function $\Phi$, we denote by $\nabla \Phi$ and $\Delta \Phi$ the Jacobian matrix and the component-wise Laplacian, respectively. Under \cref{ass:dissipativity}, it has been shown in \cite{PaS07} that the processes $X_t^\varepsilon$ and $X_t^0$ are geometrically ergodic with unique invariant distributions $\rho^\varepsilon$ and $\rho^0$, respectively, given by \begin{equation} \label{eq:rho_ms} \rho^\varepsilon(x) = \frac{1}{C_{\rho^\varepsilon}} e^{- \frac1\sigma \left( V(x) + p \left( \frac x \varepsilon \right) \right)} \qquad \text{with} \qquad C_{\rho^\varepsilon} = \int_{\mathbb{R}^d} e^{- \frac1\sigma \left( V(x) + p \left( \frac x \varepsilon \right) \right)} \,\mathrm{d} x, \end{equation} and \begin{equation} \label{eq:rho_hom} \rho^0(x) = \frac{1}{C_{\rho^0}} e^{- \frac1d \operatorname{tr}(\Sigma^{-1}K) V(x)} \qquad \text{with} \qquad C_{\rho^0} = \int_{\mathbb{R}^d} e^{- \frac1d \operatorname{tr}(\Sigma^{-1}K) V(x)} \,\mathrm{d} x. 
\end{equation} Notice that $\operatorname{tr}(\Sigma^{-1}K)/d = 1 / \sigma$ since $\Sigma = K\sigma$ and that $\rho^\varepsilon \rightharpoonup \rho^0$ in $L^1(\mathbb{R}^d)$ by \cite[Proposition 5.2]{PaS07}. We finally introduce the generators $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$ of the multiscale process \eqref{eq:SDE_ms} and its homogenized counterpart \eqref{eq:SDE_hom}, respectively, which are defined for all $u \in \mathcal C^2(\mathbb{R}^d)$ as \begin{equation} \label{eq:generator_ms} \mathcal{L}^\varepsilon u(x) = - \left(\nabla V(x) + \frac1\varepsilon \nabla p \left( \frac x \varepsilon \right) \right) \cdot \nabla u(x) + \sigma \Delta u(x), \end{equation} and \begin{equation} \label{eq:generator_hom} \mathcal{L}^0 u(x) = - K \nabla V(x) \cdot \nabla u(x) + \Sigma : \nabla^2 u(x), \end{equation} where $:$ denotes the Frobenius inner product and $\nabla^2$ the Hessian matrix. Since the process $X_t^\varepsilon$ is close in a weak sense to the process $X_t^0$ as $\varepsilon \to 0$, we then expect that also the generators $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$ behave similarly when the multiscale parameter vanishes. \subsection{Preliminary results} In this section we introduce the main functional spaces which will be employed in the following analysis and we study their relations. Let us consider the weighted Sobolev spaces $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$, $L^2_{\rho^0}(\mathbb{R}^d)$, $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and $H^1_{\rho^0}(\mathbb{R}^d)$, where the weight functions are the invariant measures defined in \eqref{eq:rho_ms} and \eqref{eq:rho_hom}. First, we show that the weighted Lebesgue spaces $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$ and $L^2_{\rho^0}(\mathbb{R}^d)$ describe the same space of functions but they are endowed with different norms.
\begin{lemma} \label{lem:equivalence_L2} Under \cref{ass:dissipativity}, there exist two constants $C_{\mathrm{low}}, C_{\mathrm{up}} > 0$ independent of $\varepsilon$ such that \begin{equation} C_{\mathrm{low}} \norm{u}_{L^2_{\rho^0}(\mathbb{R}^d)} \le \norm{u}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \le C_{\mathrm{up}} \norm{u}_{L^2_{\rho^0}(\mathbb{R}^d)}. \end{equation} In particular, the injections $I_{L^2_{\rho^\varepsilon}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^0}(\mathbb{R}^d)}$ and $I_{L^2_{\rho^0}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$ are continuous. \end{lemma} \begin{proof} Since $p \in C^\infty(\mathbb{R}^d)$ is $Y$-periodic, then there exists a constant $M > 0$ such that $\abs{p(y)} \le M$ for all $y \in \mathbb{R}^d$. Therefore, we have \begin{equation} 0 < e^{-\frac{M}{\sigma}} \le e^{- \frac1\sigma p \left( \frac x \varepsilon \right)} \le e^{\frac{M}{\sigma}}, \end{equation} which implies \begin{equation} e^{-\frac{M}{\sigma}} \norm{u}_{L^2_{\rho^0}(\mathbb{R}^d)} \le \norm{u}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \le e^{\frac{M}{\sigma}} \norm{u}_{L^2_{\rho^0}(\mathbb{R}^d)}. \end{equation} Finally, defining $C_{\mathrm{low}} \coloneqq e^{-\frac{M}{\sigma}}$ and $C_{\mathrm{up}} \coloneqq e^{\frac{M}{\sigma}}$ we obtain the desired result. \end{proof} An analogous result holds true also for the weighted Sobolev spaces $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and $H^1_{\rho^0}(\mathbb{R}^d)$ and follows directly from \cref{lem:equivalence_L2}. \begin{corollary} \label{cor:equivalence_H1} Under \cref{ass:dissipativity}, there exist two constants $C_{\mathrm{low}}, C_{\mathrm{up}} > 0$ independent of $\varepsilon$ such that \begin{equation} C_{\mathrm{low}} \norm{u}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \norm{u}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le C_{\mathrm{up}} \norm{u}_{H^1_{\rho^0}(\mathbb{R}^d)}. 
\end{equation} In particular, the injections $I_{H^1_{\rho^\varepsilon}(\mathbb{R}^d) \hookrightarrow H^1_{\rho^0}(\mathbb{R}^d)}$ and $I_{H^1_{\rho^0}(\mathbb{R}^d) \hookrightarrow H^1_{\rho^\varepsilon}(\mathbb{R}^d)}$ are continuous. \end{corollary} Let us now consider the injections $I_{H^1_{\rho^0}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^0}(\mathbb{R}^d)}$ and $I_{H^1_{\rho^\varepsilon}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$, which are continuous since by definition we have \begin{equation} \norm{u}_{L^2_{\rho^0}(\mathbb{R}^d)} \le \norm{u}_{H^1_{\rho^0}(\mathbb{R}^d)} \qquad \text{and} \qquad \norm{u}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \le \norm{u}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}. \end{equation} We remark that these injections are not compact in general, in contrast to classical unweighted Sobolev spaces on bounded and regular domains, where the compactness is always guaranteed by the Rellich--Kondrachov theorem \cite[Theorem 5.7.1]{Eva98}. Hence, in order to ensure the compactness of the injections $I_{H^1_{\rho^0}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^0}(\mathbb{R}^d)}$ and $I_{H^1_{\rho^\varepsilon}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$ we make the following additional assumption. \begin{assumption} \label{ass:compactness} The slow-scale potential $V$ satisfies \begin{equation} \lim_{\abs{x} \to +\infty} \left( \frac14 \abs{\nabla V(x)}^2 - \frac12 \Delta V(x) \right) = +\infty \qquad \text{and} \qquad \lim_{\abs{x} \to +\infty} \abs{\nabla V(x)} = +\infty.
\end{equation} \end{assumption} Then, as shown in \cite[Proposition A.4]{APV16}, it follows that the injection $I_{H^1_{\rho^0}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^0}(\mathbb{R}^d)}$ is compact and the measure $\rho^0$ satisfies the Poincaré inequality for all $u \in H^1_{\rho^0}(\mathbb{R}^d)$ and for a constant $C_P > 0$ \begin{equation} \label{eq:poincare_0} \int_{\mathbb{R}^d} (u(x) - \bar u^0)^2 \rho^0(x) \,\mathrm{d} x \le C_P \int_{\mathbb{R}^d} \abs{\nabla u(x)}^2 \rho^0(x) \,\mathrm{d} x, \end{equation} where $\bar u^0 = \int_{\mathbb{R}^d} u(x) \rho^0(x) \,\mathrm{d} x$. \begin{remark} \cref{ass:compactness} is not the only sufficient condition to ensure the compactness of the injection $I_{H^1_{\rho^0}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^0}(\mathbb{R}^d)}$. Two other necessary and sufficient conditions are presented in Proposition 1.3 and Lemma 2.2 in \cite{Gan10}. In particular, it is required that the potential $V$ is such that either the Schrödinger operator \begin{equation} \mathcal S = - \Delta + \frac14 \abs{\nabla \rho^0}^2 - \frac12 \Delta \rho^0, \end{equation} or the operator \begin{equation} \mathcal P = - \Delta + \nabla \rho^0 \cdot \nabla, \end{equation} has compact resolvent. Moreover, another sufficient condition is given in \cite[Theorem 3.1]{Hoo81}, where it is proved that the potentials of the form $V = \abs{x}^{2p}$ with $p$ integer greater than zero satisfy the condition. \end{remark} Given \cref{ass:compactness} and using \cite[Proposition A.4]{APV16}, we can now prove that the same compactness result holds true also for the spaces $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$. 
\begin{lemma} \label{lem:injection_compact_e} Under \cref{ass:dissipativity,ass:compactness}, the injection $I_{H^1_{\rho^\varepsilon}(\mathbb{R}^d) \hookrightarrow L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$ is a compact operator and the measure $\rho^\varepsilon$ satisfies the Poincaré inequality for all $u \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and for a constant $\widetilde C_P > 0$ \begin{equation} \label{eq:poincare_e} \int_{\mathbb{R}^d} (u(x) - \bar u^\varepsilon)^2 \rho^\varepsilon(x) \,\mathrm{d} x \le \widetilde C_P \int_{\mathbb{R}^d} \abs{\nabla u(x)}^2 \rho^\varepsilon(x) \,\mathrm{d} x, \end{equation} where $\bar u^\varepsilon = \int_{\mathbb{R}^d} u(x) \rho^\varepsilon(x) \,\mathrm{d} x$. \end{lemma} \begin{proof} Let $V^\varepsilon$ be defined in \eqref{eq:potential_def}. Then, due to \cref{ass:dissipativity} there exists a constant $M>0$ such that $\abs{p(y)} \le M$, $\abs{\nabla p(y)} \le M$ and $\abs{\Delta p(y)} \le M$ for all $y \in Y$, which implies that for any $\varepsilon>0$ the potential $V^\varepsilon$ satisfies \cref{ass:compactness}. Therefore, following the same argument of \cite[Proposition A.4]{APV16} we obtain the desired result. \end{proof} \section{Poisson equation with reaction term} \label{sec:poisson} In this section we study the problem for the multiscale generator \begin{equation} \label{eq:PDE_ms} - \mathcal{L}^\varepsilon u^\varepsilon + \eta u^\varepsilon = f, \end{equation} with $f \in L^2_{\rho^\varepsilon}(\mathbb{R}^d)$ and where the reaction term with coefficient $\eta > 0$ is added in order to ensure the well-posedness of the problem, and we analyze its homogenization. 
In particular, we show that the solution $u^\varepsilon$ converges in some sense which will be specified later to the solution $u^0$ of the Poisson problem for the homogenized generator with reaction term \begin{equation} \label{eq:PDE_hom} - \mathcal{L}^0 u^0 + \eta u^0 = f, \end{equation} where, in view of \cref{lem:equivalence_L2}, $f$ is now seen as a function of $L^2_{\rho^0}(\mathbb{R}^d)$. \begin{remark} We decided to study the Poisson equation with reaction term with coefficient $\eta > 0$ so that, as we will see later, the bilinear form of the corresponding weak formulation is coercive. This guarantees the well-posedness of the problem without additional conditions on the solution and on the right-hand side, which would be otherwise needed if the bilinear form was only weakly coercive as in the case $\eta = 0$. Moreover, this PDE will be useful in the study of the homogenization of the eigenvalue problem for the generator, which is the focus of \cref{sec:eigen} and the main purpose of this work. \end{remark} \subsection{Weak formulation} We first write the weak formulation of problems \eqref{eq:PDE_ms} and \eqref{eq:PDE_hom} and, applying the Lax--Milgram lemma, we prove that they admit a unique solution respectively in the spaces $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and $H^1_{\rho^0}(\mathbb{R}^d)$. Since the proof is analogous for both the cases, we present the details only in the multiscale setting. Letting $\psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ be a test function, multiplying equation \eqref{eq:PDE_ms} by $\psi(x) \rho^\varepsilon(x)$ and integrating over $\mathbb{R}^d$ and by parts we obtain \begin{equation} \sigma \int_{\mathbb{R}^d} \nabla u^\varepsilon(x) \cdot \nabla \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x + \eta \int_{\mathbb{R}^d} u^\varepsilon(x) \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x = \int_{\mathbb{R}^d} f(x) \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x. 
\end{equation} Therefore, the weak formulation of problem \eqref{eq:PDE_ms} reads: \begin{equation} \label{eq:weakPDE_ms} \text{find } u^\varepsilon \in H^1_{\rho^\varepsilon}(\mathbb{R}^d) \text{ such that } B^\varepsilon(u^\varepsilon, \psi) = F^\varepsilon(\psi) \text{ for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \end{equation} where $B^\varepsilon \colon H^1_{\rho^\varepsilon}(\mathbb{R}^d) \times H^1_{\rho^\varepsilon}(\mathbb{R}^d) \to \mathbb{R}$ and $F^\varepsilon \colon H^1_{\rho^\varepsilon}(\mathbb{R}^d) \to \mathbb{R}$ are defined as \begin{equation} \label{eq:Be_def} \begin{aligned} B^\varepsilon(\varphi, \psi) &= \sigma \int_{\mathbb{R}^d} \nabla \varphi(x) \cdot \nabla \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x + \eta \int_{\mathbb{R}^d} \varphi(x) \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x, \\ F^\varepsilon(\psi) &= \int_{\mathbb{R}^d} f(x) \psi(x) \rho^\varepsilon(x) \,\mathrm{d} x. \end{aligned} \end{equation} Similarly, the weak formulation of problem \eqref{eq:PDE_hom} reads: \begin{equation} \label{eq:weakPDE_hom} \text{find } u^0 \in H^1_{\rho^0}(\mathbb{R}^d) \text{ such that } B^0(u^0, \psi) = F^0(\psi) \text{ for all } \psi \in H^1_{\rho^0}(\mathbb{R}^d), \end{equation} where $B^0 \colon H^1_{\rho^0}(\mathbb{R}^d) \times H^1_{\rho^0}(\mathbb{R}^d) \to \mathbb{R}$ and $F^0 \colon H^1_{\rho^0}(\mathbb{R}^d) \to \mathbb{R}$ are defined as \begin{equation} \label{eq:B0_def} \begin{aligned} B^0(\varphi, \psi) &= \int_{\mathbb{R}^d} \Sigma \nabla \varphi(x) \cdot \nabla \psi(x) \rho^0(x) \,\mathrm{d} x + \eta \int_{\mathbb{R}^d} \varphi(x) \psi(x) \rho^0(x) \,\mathrm{d} x, \\ F^0(\psi) &= \int_{\mathbb{R}^d} f(x) \psi(x) \rho^0(x) \,\mathrm{d} x. \end{aligned} \end{equation} Then, the well-posedness of the two problems is given by the following lemmas. 
\begin{lemma} \label{lem:weakPDE_ms} Problem \eqref{eq:weakPDE_ms} has a unique solution $u^\varepsilon \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ which satisfies \begin{equation} \label{eq:estimatePDE_ms} \norm{u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le \frac{1}{\min \{ \sigma, \eta \}} \norm{f}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}. \end{equation} \end{lemma} \begin{proof} The existence and uniqueness of the solution follow from the Lax--Milgram lemma once we show the continuity and coercivity of $B^\varepsilon$ and the continuity of $F^\varepsilon$ defined in \eqref{eq:Be_def}. Applying the Cauchy--Schwarz inequality we obtain \begin{equation} \abs{B^\varepsilon(\varphi, \psi)} \le 2 \max \{ \sigma, \eta \} \norm{\varphi}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \norm{\psi}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} and \begin{equation} \label{eq:Fe_continuous} \abs{F^\varepsilon(\psi)} \le \norm{f}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \norm{\psi}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} which prove the continuity of $B^\varepsilon$ and $F^\varepsilon$. Moreover, we also have \begin{equation} \label{eq:Be_coercive} B^\varepsilon(\psi, \psi) \ge \min \{ \sigma, \eta \} \norm{\psi}^2_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} which gives the coercivity of $B^\varepsilon$. Finally, due to inequalities \eqref{eq:Fe_continuous} and \eqref{eq:Be_coercive} we deduce \begin{equation} \min \{ \sigma, \eta \} \norm{u^\varepsilon}^2_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le B^\varepsilon(u^\varepsilon, u^\varepsilon) = F^\varepsilon(u^\varepsilon) \le \norm{f}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \norm{u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \end{equation} which implies estimate \eqref{eq:estimatePDE_ms} and concludes the proof. 
\end{proof} \begin{lemma} \label{lem:weakPDE_hom} Problem \eqref{eq:weakPDE_hom} has a unique solution $u^0 \in H^1_{\rho^0}(\mathbb{R}^d)$ which satisfies \begin{equation} \label{eq:estimatePDE_hom} \norm{u^0}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \frac{1}{\min \{ \lambda_{\mathrm{min}}(\Sigma), \eta \}} \norm{f}_{L^2_{\rho^0}(\mathbb{R}^d)}, \end{equation} where $\lambda_{\mathrm{min}}(\Sigma) > 0$ is the minimum eigenvalue of the matrix $\Sigma$. \end{lemma} We omit the proof of \cref{lem:weakPDE_hom} since it follows the same argument of \cref{lem:weakPDE_ms}. \subsection{Two-scale convergence} We now focus on the homogenization of problem \eqref{eq:weakPDE_ms} and our strategy is based on the two-scale convergence method outlined in \cite[Chapter 9]{CiD99}. We remark that we extend this theory to the case of weighted Sobolev spaces in unbounded domains, hence also the definition of two-scale convergence has to be adapted and it is given in \cref{def:2scale_convergence}. We first introduce some preliminary results, and in the last part of this section we prove the main convergence theorem. \begin{definition} \label{def:2scale_convergence} A sequence of functions $\{ \varphi^\varepsilon \}$ in $L^2_{\rho^0}(\mathbb{R}^d)$ is said to \emph{two-scale converge} to the limit $\varphi^0 \in L^2_{\rho^0}(\mathbb{R}^d \times Y)$ if for any function $\psi \in L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ it holds \begin{equation} \lim_{\varepsilon \to 0} \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x = \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \varphi^0(x,y) \psi(x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x. \end{equation} We then write $\varphi^\varepsilon \rightsquigarrow \varphi^0$. \end{definition} \begin{remark} \label{rem:2scale_implies_weak} From \cref{def:2scale_convergence} it follows that two-scale convergence implies weak convergence. 
In fact, choosing $\psi$ independent of $y$ we obtain \begin{equation} \varphi^\varepsilon \rightharpoonup \frac{1}{\abs{Y}} \int_Y \varphi^0(\cdot, y) \,\mathrm{d} y \qquad \text{in } L^2_{\rho^0}(\mathbb{R}^d), \end{equation} and if also the two-scale limit is independent of $y$ then $\varphi^\varepsilon \rightharpoonup \varphi^0$ in $L^2_{\rho^0}(\mathbb{R}^d)$. \end{remark} The following lemmas are technical results which will be useful in the proof of next theorems. The former studies the properties of the space $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ and the latter is a convergence result for two-scale functions in the same space. \begin{lemma} \label{lem:separable_dense} The space $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is separable and dense in $L^2_{\rho^0}(\mathbb{R}^d \times Y)$. \end{lemma} \begin{proof} Since the space $C^0_{\mathrm{per}}(Y)$ is separable, then by \cite[Proposition 3.55]{CiD99} it follows that the space $L^2(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is separable. Moreover, $L^2(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is isomorphic to $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ through the isomorphism \begin{equation} T \colon L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y)) \to L^2(\mathbb{R}^d; C^0_{\mathrm{per}}(Y)), \qquad u \mapsto T(u) = \sqrt{\rho^0} u, \end{equation} and thus the space $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is separable as well. Concerning the density result, since $\mathcal D(Y)$ is dense in $L^2(Y)$, then $L^2_{\rho^0}(\mathbb{R}^d; \mathcal D(Y))$ is dense in $L^2_{\rho^0}(\mathbb{R}^d; L^2(Y))$. Finally, the property that $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ is dense in $L^2_{\rho^0}(\mathbb{R}^d \times Y)$ follows from the inclusion $L^2_{\rho^0}(\mathbb{R}^d; \mathcal D(Y)) \subset L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$ and the fact that $L^2_{\rho^0}(\mathbb{R}^d; L^2(Y)) = L^2_{\rho^0}(\mathbb{R}^d \times Y)$. 
\end{proof} \begin{lemma} Let $\psi \in L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$. Then \begin{equation} \label{eq:formula_twoscale} \lim_{\varepsilon \to 0} \int_{\mathbb{R}^d} \psi \left( x, \frac{x}{\varepsilon} \right)^2 \rho^0(x) \,\mathrm{d} x = \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \psi(x,y)^2 \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x. \end{equation} \end{lemma} \begin{proof} The proof follows the same steps of the proof of Lemma 5.2 in \cite{All92}, where the spaces $L^1(\Omega)$ and $L^1(\Omega; C^0_{\mathrm{per}}(Y))$ are replaced by $L^2_{\rho^0}(\mathbb{R}^d)$ and $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$, respectively. Accordingly, the integrals $\int_{\Omega} v(x) \,\mathrm{d} x$ for a function $v = v(x)$ are replaced by $\int_{\mathbb{R}^d} v(x) \rho^0(x) \,\mathrm{d} x$. \end{proof} The following propositions are compactness results in the spaces $L^2_{\rho^0}(\mathbb{R}^d)$ and $H^1_{\rho^0}(\mathbb{R}^d)$, respectively, which highlight the importance of the notion of two-scale convergence and thus justify the introduction of \cref{def:2scale_convergence}. The proof of \cref{pro:subsequence_2scale_grad} is based on the proof of Theorem 9.9 in \cite{CiD99}. \begin{proposition} \label{pro:subsequence_2scale} Let $\{ \varphi^\varepsilon \}$ be a bounded sequence in $L^2_{\rho^0}(\mathbb{R}^d)$. Then, there exist a subsequence $\{ \varphi^{\varepsilon'} \}$ and a function $\varphi^0 \in L^2_{\rho^0}(\mathbb{R}^d \times Y)$ such that \begin{equation} \varphi^{\varepsilon'} \rightsquigarrow \varphi^0. 
\end{equation} \end{proposition} \begin{proof} The proof follows the same steps of the proof of Theorem 9.7 in \cite{CiD99}, where Proposition 3.61, equation (9.2) and the spaces $L^2(\Omega)$, $L^2(\Omega \times Y)$, $L^2(\Omega; C^0_{\mathrm{per}}(Y))$ are replaced by \cref{lem:separable_dense}, equation \eqref{eq:formula_twoscale} and $L^2_{\rho^0}(\mathbb{R}^d)$, $L^2_{\rho^0}(\mathbb{R}^d \times Y)$, $L^2_{\rho^0}(\mathbb{R}^d; C^0_{\mathrm{per}}(Y))$, respectively. Accordingly, the integrals $\int_{\Omega} v(x) \,\mathrm{d} x$ for a function $v = v(x)$ are replaced by $\int_{\mathbb{R}^d} v(x) \rho^0(x) \,\mathrm{d} x$. \end{proof} \begin{proposition} \label{pro:subsequence_2scale_grad} Let $\{ \varphi^\varepsilon \}$ be a sequence of functions in $H^1_{\rho^0}(\mathbb{R}^d)$ such that \begin{equation} \label{eq:convergence_assumption} \varphi^\varepsilon \rightharpoonup \varphi^0 \qquad \text{in } H^1_{\rho^0}(\mathbb{R}^d). \end{equation} Then, $\varphi^\varepsilon \rightsquigarrow \varphi^0$ and there exist a subsequence $\{ \varphi^{\varepsilon'} \}$ and $\varphi_1 \in L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ such that \begin{equation} \nabla \varphi^{\varepsilon'} \rightsquigarrow \nabla \varphi^0 + \nabla_y \varphi_1. \end{equation} \end{proposition} \begin{proof} By \cref{pro:subsequence_2scale}, there exists a subsequence (still denoted by $\varepsilon$) such that \begin{equation} \label{eq:2scale_convergence_f_df} \varphi^\varepsilon \rightsquigarrow \varphi \in L^2_{\rho^0}(\mathbb{R}^d \times Y) \qquad \text{and} \qquad \nabla \varphi^\varepsilon \rightsquigarrow \Xi \in (L^2_{\rho^0}(\mathbb{R}^d \times Y))^d.
\end{equation} Letting $\psi \in (\mathcal D(\mathbb{R}^d; C^\infty_{\mathrm{per}}(Y)))^d$ and integrating by parts we have \begin{equation} \begin{aligned} \int_{\mathbb{R}^d} \nabla \varphi^\varepsilon(x) \cdot \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x &= - \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \left[ \operatorname{div}_x \psi \left( x, \frac{x}{\varepsilon} \right) + \frac{1}{\varepsilon} \operatorname{div}_y \psi \left( x, \frac{x}{\varepsilon} \right) \right] \rho^0(x) \,\mathrm{d} x \\ &\quad + \frac1\sigma \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \psi \left( x, \frac{x}{\varepsilon} \right) \cdot \nabla V(x) \rho^0(x) \,\mathrm{d} x, \end{aligned} \end{equation} which implies \begin{equation} \begin{aligned} \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \operatorname{div}_y \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x &= \varepsilon \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \left[ \frac1\sigma \psi \left( x, \frac{x}{\varepsilon} \right) \cdot \nabla V(x) - \operatorname{div}_x \psi \left( x, \frac{x}{\varepsilon} \right) \right] \rho^0(x) \,\mathrm{d} x \\ &\quad - \varepsilon \int_{\mathbb{R}^d} \nabla \varphi^\varepsilon(x) \cdot \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x. \end{aligned} \end{equation} Passing to the limit as $\varepsilon \to 0$ and due to \eqref{eq:2scale_convergence_f_df} we obtain \begin{equation} \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \varphi(x,y) \operatorname{div}_y \psi (x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = 0, \end{equation} which yields for all $\psi \in (\mathcal D(\mathbb{R}^d \times Y))^d$ \begin{equation} \int_{\mathbb{R}^d} \int_Y \nabla_y \varphi(x,y) \cdot \psi(x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = 0. \end{equation} Hence, by \cite[Theorem 1.44]{CiD99} and since $\rho^0(x) > 0$ for all $x \in \mathbb{R}^d$ we get \begin{equation} \nabla_y \varphi = 0 \qquad \text{a.e.\ on } \mathbb{R}^d \times Y.
\end{equation} Therefore, from \cite[Proposition 3.38]{CiD99} with $\Omega$ replaced by $Y$ and $x$ fixed we deduce that $\varphi$ does not depend on $y$ and due to \cref{rem:2scale_implies_weak} and assumption \eqref{eq:convergence_assumption} this implies that $\varphi = \varphi^0 \in H^1_{\rho^0}(\mathbb{R}^d)$. Let now $\psi \in (\mathcal D(\mathbb{R}^d; C^\infty_{\mathrm{per}}(Y)))^d$ be such that $\operatorname{div}_y \psi = 0$. Integrating by parts and by \eqref{eq:2scale_convergence_f_df} we obtain \begin{equation} \begin{aligned} \lim_{\varepsilon \to 0} \int_{\mathbb{R}^d} \nabla \varphi^\varepsilon(x) \cdot \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x &= \lim_{\varepsilon \to 0} \int_{\mathbb{R}^d} \varphi^\varepsilon(x) \left[ \frac1\sigma \psi \left( x, \frac{x}{\varepsilon} \right) \cdot \nabla V(x) - \operatorname{div}_x \psi \left( x, \frac{x}{\varepsilon} \right) \right] \rho^0(x) \,\mathrm{d} x \\ &= \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \varphi^0(x) \left[ \frac1\sigma \psi(x,y) \cdot \nabla V(x) - \operatorname{div}_x \psi(x,y) \right] \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &= \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \nabla \varphi^0(x) \cdot \psi(x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x.
\end{aligned} \end{equation} Due to \eqref{eq:2scale_convergence_f_df} we also have \begin{equation} \lim_{\varepsilon \to 0} \int_{\mathbb{R}^d} \nabla \varphi^\varepsilon(x) \cdot \psi \left( x, \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x = \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \Xi(x,y) \cdot \psi(x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x, \end{equation} and defining $\widetilde \psi(x,y) = \sqrt{\rho^0(x)} \psi(x,y)$ it follows that \begin{equation} \int_{\mathbb{R}^d} \int_Y \sqrt{\rho^0(x)} \left[ \Xi(x,y) - \nabla \varphi^0(x) \right] \cdot \widetilde \psi(x,y) \,\mathrm{d} y \,\mathrm{d} x = 0, \end{equation} for all $\widetilde \psi \in (\mathcal D(\mathbb{R}^d; C^\infty_{\mathrm{per}}(Y)))^d$ such that $\operatorname{div}_y \widetilde \psi = 0$. Therefore, by a classical result (see, e.g., \cite{GiR86,Tem79}) there exists a unique function $\widetilde \varphi_1 \in L^2(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ such that \begin{equation} \left( \Xi(x,y) - \nabla \varphi^0(x) \right) \sqrt{\rho^0(x)} = \nabla_y \widetilde \varphi_1(x,y). \end{equation} Finally, defining $\varphi_1 \in L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ as $\varphi_1(x,y) = \widetilde \varphi_1(x,y) / \sqrt{\rho^0(x)}$ gives the desired result. \end{proof} \subsection{Homogenization} We are now ready to state and prove the homogenization of problem \eqref{eq:PDE_ms} employing the two-scale convergence methodology introduced in the previous section. The proof of the next theorem is inspired by \cite[Section 9.3]{CiD99}. \begin{theorem} \label{thm:homogenization_pde} Let $u^\varepsilon$ and $u^0$ be respectively the unique solutions of problems \eqref{eq:weakPDE_ms} and \eqref{eq:weakPDE_hom}. Then, under \cref{ass:dissipativity,ass:compactness} and as $\varepsilon \to 0$ \begin{enumerate} \item $u^\varepsilon \to u^0$ in $L^2_{\rho^0}(\mathbb{R}^d)$, \item $u^\varepsilon \rightharpoonup u^0$ in $H^1_{\rho^0}(\mathbb{R}^d)$.
\end{enumerate} \end{theorem} \begin{proof} By \cref{lem:equivalence_L2,cor:equivalence_H1,lem:weakPDE_ms} we have \begin{equation} \norm{u^\varepsilon}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \frac{1}{C_{\mathrm{low}}} \norm{u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le \frac{1}{C_{\mathrm{low}} \min \{ \sigma, \eta \}} \norm{f}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \le \frac{C_{\mathrm{up}}}{C_{\mathrm{low}} \min \{ \sigma, \eta \}} \norm{f}_{L^2_{\rho^0}(\mathbb{R}^d)}, \end{equation} which implies that the sequence $\{ u^\varepsilon \}$ is bounded in $H^1_{\rho^0}(\mathbb{R}^d)$. Then, there exist $\widetilde u \in H^1_{\rho^0}(\mathbb{R}^d)$ and a subsequence (still denoted by $\varepsilon$) such that \begin{equation} u^\varepsilon \rightharpoonup \widetilde u \quad \text{in } H^1_{\rho^0}(\mathbb{R}^d) \qquad \text{and} \qquad u^\varepsilon \to \widetilde u \quad \text{in } L^2_{\rho^0}(\mathbb{R}^d). \end{equation} Due to \cref{pro:subsequence_2scale_grad} there exists $u_1 \in L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ such that, up to a subsequence \begin{equation} u^\varepsilon \rightsquigarrow \widetilde u \qquad \text{and} \qquad \nabla u^\varepsilon \rightsquigarrow \nabla \widetilde u + \nabla_y u_1. \end{equation} We now want to prove that $\widetilde u$ is the unique solution of problem \eqref{eq:weakPDE_hom}, i.e., $\widetilde u = u^0$. Let $\psi_0 \in \mathcal D(\mathbb{R}^d)$ and $\psi_1 \in \mathcal D(\mathbb{R}^d; C^\infty_{\mathrm{per}}(Y))$ and note that $\psi_0(\cdot) + \varepsilon \psi_1 \left( \cdot, \frac\cdot\varepsilon \right) \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and thus it can be chosen as a test function in \eqref{eq:weakPDE_ms}.
We then have \begin{equation} \label{eq:expansion_weak} \begin{aligned} &\sigma \int_{\mathbb{R}^d} \nabla u^\varepsilon(x) \cdot \left( \nabla \psi_0(x) + \varepsilon \nabla_x \psi_1 \left( x, \frac{x}{\varepsilon} \right) + \nabla_y \psi_1 \left( x, \frac{x}{\varepsilon} \right) \right) \rho^\varepsilon(x) \,\mathrm{d} x \\ &\hspace{1.5cm}+ \eta \int_{\mathbb{R}^d} u^\varepsilon(x) \left( \psi_0(x) + \varepsilon \psi_1 \left( x, \frac{x}{\varepsilon} \right) \right) \rho^\varepsilon(x) \,\mathrm{d} x = \int_{\mathbb{R}^d} f(x) \left( \psi_0(x) + \varepsilon \psi_1 \left( x, \frac{x}{\varepsilon} \right) \right) \rho^\varepsilon(x) \,\mathrm{d} x, \end{aligned} \end{equation} and noting that \begin{equation} \label{eq:relation_distributions} \rho^\varepsilon(x) = \frac{C_\mu C_{\rho^0}}{C_{\rho^\varepsilon}} \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x), \end{equation} where $\mu$ is defined in \eqref{eq:mu_def}, equation \eqref{eq:expansion_weak} can be rewritten as \begin{equation} \label{eq:decomposition_weak} I_{1,1}^\varepsilon + I_{1,2}^\varepsilon + \varepsilon \left( I_{2,1}^\varepsilon + I_{2,2}^\varepsilon \right) = J_1^\varepsilon + \varepsilon J_2^\varepsilon, \end{equation} where \begin{equation} \begin{aligned} I_{1,1}^\varepsilon &\coloneqq \sigma \int_{\mathbb{R}^d} \nabla u^\varepsilon(x) \cdot \left( \nabla \psi_0(x) + \nabla_y \psi_1 \left( x, \frac{x}{\varepsilon} \right) \right) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x, \\ I_{1,2}^\varepsilon &\coloneqq \eta \int_{\mathbb{R}^d} u^\varepsilon(x) \psi_0(x) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x, \\ I_{2,1}^\varepsilon &\coloneqq \sigma \int_{\mathbb{R}^d} \nabla u^\varepsilon(x) \cdot \nabla_x \psi_1 \left( x, \frac{x}{\varepsilon} \right) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x, \\ I_{2,2}^\varepsilon &\coloneqq \eta \int_{\mathbb{R}^d} u^\varepsilon(x) \psi_1 \left( x, \frac{x}{\varepsilon} \right) \mu \left( 
\frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x, \\ J_1^\varepsilon &\coloneqq \int_{\mathbb{R}^d} f(x) \psi_0(x) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x, \\ J_2^\varepsilon &\coloneqq \int_{\mathbb{R}^d} f(x) \psi_1 \left( x, \frac{x}{\varepsilon} \right) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x. \end{aligned} \end{equation} Passing to the limit as $\varepsilon \to 0$ in equation \eqref{eq:decomposition_weak} and by two-scale convergence we obtain \begin{equation} \begin{aligned} \lim_{\varepsilon \to 0} I_{1,1}^\varepsilon &= \frac{\sigma}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \left( \nabla \widetilde u(x) + \nabla_y u_1(x,y) \right) \cdot \left( \nabla \psi_0(x) + \nabla_y \psi_1 (x,y) \right) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x, \\ \lim_{\varepsilon \to 0} I_{1,2}^\varepsilon &= \frac{\eta}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \widetilde u(x) \psi_0(x) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = \frac{\eta}{\abs{Y}} \int_{\mathbb{R}^d} \widetilde u(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x, \\ \lim_{\varepsilon \to 0} I_{2,1}^\varepsilon &= \frac{\sigma}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \left( \nabla \widetilde u(x) + \nabla_y u_1(x,y) \right) \cdot \nabla_x \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x, \\ \lim_{\varepsilon \to 0} I_{2,2}^\varepsilon &= \frac{\eta}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y \widetilde u(x) \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x, \\ \lim_{\varepsilon \to 0} J_1^\varepsilon &= \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y f(x) \psi_0(x) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} f(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x, \\ \lim_{\varepsilon \to 0} J_2^\varepsilon &= \frac{1}{\abs{Y}} \int_{\mathbb{R}^d} \int_Y f(x) \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x, \end{aligned} \end{equation} which yield \begin{equation} \label{eq:decomposition_weak_limit} \begin{aligned} \sigma 
\int_{\mathbb{R}^d} \int_Y \left( \nabla \widetilde u(x) + \nabla_y u_1(x,y) \right) \cdot & \left( \nabla \psi_0(x) + \nabla_y \psi_1(x,y) \right) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &+ \eta \int_{\mathbb{R}^d} \widetilde u(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x = \int_{\mathbb{R}^d} f(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x. \end{aligned} \end{equation} We now show that equation \eqref{eq:decomposition_weak_limit} is a variational equation in the functional space \begin{equation} \mathcal H = H^1_{\rho^0}(\mathbb{R}^d) \times L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y)), \end{equation} endowed with the norm \begin{equation} \norm{\Psi}_{\mathcal H} = \left( \norm{\psi_0}_{H^1_{\rho^0}(\mathbb{R}^d)}^2 + \norm{\psi_1}_{L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))}^2 \right)^{1/2}, \qquad \text{for all } \Psi = (\psi_0, \psi_1) \in \mathcal H, \end{equation} and that the hypotheses of the Lax--Milgram lemma are satisfied. Let $a \colon \mathcal H \times \mathcal H \to \mathbb{R}$ be the bilinear form defined for any $\Xi = (\xi_0, \xi_1) \in \mathcal H$ and $\Psi = (\psi_0, \psi_1) \in \mathcal H$ by \begin{equation} \begin{aligned} a(\Xi, \Psi) &= \sigma \int_{\mathbb{R}^d} \int_Y \left( \nabla \xi_0(x) + \nabla_y \xi_1(x,y) \right) \cdot \left( \nabla \psi_0(x) + \nabla_y \psi_1(x,y) \right) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &\quad + \eta \int_{\mathbb{R}^d} \xi_0(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x, \end{aligned} \end{equation} and let $F \colon \mathcal H \to \mathbb{R}$ be the linear functional defined by \begin{equation} F(\Psi) = \int_{\mathbb{R}^d} f(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x. \end{equation} Notice that due to the definition of $\mu$ in \eqref{eq:mu_def} and the hypotheses on $p$ in \cref{ass:dissipativity}(i) there exist two constants $C_1,C_2 > 0$ such that $0 < C_1 \le \abs{\mu(y)} \le C_2$ for all $y \in Y$. 
It follows that $a$ and $F$ are continuous; in fact, applying the Cauchy--Schwarz inequality we get \begin{equation} \abs{a(\Xi, \Psi)} \le \left( 2\sigma(1+C_2) + \eta \right) \norm{\Xi}_{\mathcal H} \norm{\Psi}_{\mathcal H}, \end{equation} and \begin{equation} \abs{F(\Psi)} \le \norm{f}_{L^2_{\rho^0}(\mathbb{R}^d)} \norm{\Psi}_{\mathcal H}. \end{equation} Moreover, we also have \begin{equation} \begin{aligned} &a(\Psi, \Psi) \ge C_1 \sigma \int_{\mathbb{R}^d} \int_Y \abs{\nabla \psi_0(x) + \nabla_y \psi_1(x,y)}^2 \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x + \eta \int_{\mathbb{R}^d} \psi_0(x)^2 \rho^0(x) \,\mathrm{d} x \\ &\quad = C_1 \sigma \abs{Y} \int_{\mathbb{R}^d} \abs{\nabla \psi_0(x)}^2 \rho^0(x) \,\mathrm{d} x + \eta \int_{\mathbb{R}^d} \psi_0(x)^2 \rho^0(x) \,\mathrm{d} x + C_1 \sigma \int_{\mathbb{R}^d} \int_Y \abs{\nabla_y \psi_1(x,y)}^2 \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &\quad \ge \min \{ C_1 \sigma \abs{Y}, \eta, C_1 \sigma \} \norm{\Psi}_{\mathcal H}^2, \end{aligned} \end{equation} which shows that $a$ is coercive and where we used the fact that due to the periodicity of $\psi_1(x,\cdot)$ in $Y$ for all $x\in \mathbb{R}^d$ \begin{equation} \begin{aligned} \int_{\mathbb{R}^d} \int_Y \nabla \psi_0(x) \cdot \nabla_y \psi_1(x,y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x &= \int_{\mathbb{R}^d} \int_Y \operatorname{div}_y \left( \nabla \psi_0(x) \psi_1(x,y) \right) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &= \int_{\mathbb{R}^d} \int_{\partial Y} \psi_1(x,y) \nabla \psi_0(x) \cdot \mathbf{n}_y \rho^0(x) \,\mathrm{d} \gamma_y \,\mathrm{d} x = 0, \end{aligned} \end{equation} where $\mathbf{n}_y$ denotes the outward unit normal vector to $\partial Y$. Therefore, the Lax--Milgram lemma gives the existence and uniqueness of the solution $U = (\widetilde{u}, u_1) \in \mathcal H$ of equation \eqref{eq:decomposition_weak_limit} for any $\Psi = (\psi_0, \psi_1) \in \mathcal H$. 
Then, notice that the components of the unique solution $U$ must satisfy \begin{equation} \widetilde u(x) = u^0(x) \qquad \text{and} \qquad \nabla_y u_1(x,y) = (\nabla \Phi(y))^\top \nabla u^0(x), \end{equation} where $u^0$ is the unique solution of problem \eqref{eq:weakPDE_hom} and $\Phi$ solves equation \eqref{eq:Phi_equation}. In fact, replacing $U$ into \eqref{eq:decomposition_weak_limit} we obtain \begin{equation} \label{eq:almost_homogenized} \begin{aligned} &\left( \int_{\mathbb{R}^d} \sigma \left( \int_Y (I + \nabla \Phi(y)^\top) \mu(y) \,\mathrm{d} y \right) \nabla u^0(x) \cdot \nabla \psi_0(x) \rho^0(x) \,\mathrm{d} x \right) + \eta \int_{\mathbb{R}^d} u^0(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x \\ &\hspace{1.5cm} + \sigma \int_{\mathbb{R}^d} \int_Y \left( I + \nabla \Phi(y)^\top \right) \nabla u^0(x) \cdot \nabla_y \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = \int_{\mathbb{R}^d} f(x) \psi_0(x) \rho^0(x) \,\mathrm{d} x, \end{aligned} \end{equation} and, due to definition \eqref{eq:K_def} and problem \eqref{eq:weakPDE_hom}, equation \eqref{eq:almost_homogenized} holds true for all $\Psi = (\psi_0,\psi_1) \in \mathcal H$ if we show that for any $\psi_1 \in L^2_{\rho^0}(\mathbb{R}^d; \mathcal W_{\mathrm{per}}(Y))$ \begin{equation} I \coloneqq \sigma \int_{\mathbb{R}^d} \int_Y \left( I + \nabla \Phi(y)^\top \right) \nabla u^0(x) \cdot \nabla_y \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x = 0. 
\end{equation} Integrating by parts and by definition of $\mu$ in \eqref{eq:mu_def} we indeed have \begin{equation} \begin{aligned} I &= \sigma \int_{\mathbb{R}^d} \int_{\partial Y} \left( I + \nabla \Phi(y)^\top \right) \nabla u^0(x) \psi_1(x,y) \mu(y) \rho^0(x) \cdot \mathbf{n}_y \,\mathrm{d} \gamma_y \,\mathrm{d} x \\ &\quad - \int_{\mathbb{R}^d} \int_Y (\sigma \Delta \Phi(y) - \nabla \Phi(y) \nabla p(y) - \nabla p(y)) \cdot \nabla u^0 (x) \psi_1(x,y) \mu(y) \rho^0(x) \,\mathrm{d} y \,\mathrm{d} x \\ &= 0, \end{aligned} \end{equation} where the last equality is given by \eqref{eq:Phi_equation} and the periodicity of the functions $\Phi, \psi_1(x,\cdot)$ and $\mu$ in $Y$. We have thus proved that the only admissible limit for the subsequence of $\{ u^\varepsilon \}$ is the solution $u^0$ of problem \eqref{eq:weakPDE_hom}, which implies that the whole sequence $\{ u^\varepsilon \}$ converges to $u^0$ and completes the proof. \end{proof} The previous result can be generalized to the case where also the right-hand side depends on the multiscale parameter $\varepsilon$. \begin{corollary} \label{cor:homogenization_pde_rhse} Let $\{ f^\varepsilon \}$ be a sequence in $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$ such that $f^\varepsilon \to f^0$ in $L^2_{\rho^0}(\mathbb{R}^d)$ and let $u^\varepsilon$ be the unique solution of problem \begin{equation} \label{eq:problem_ee} B^\varepsilon(u^\varepsilon, \psi) = \inprod{f^\varepsilon}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \end{equation} where $\inprod{\cdot}{\cdot}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$ denotes the inner product in $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$. 
Then, under \cref{ass:dissipativity,ass:compactness} and as $\varepsilon \to 0$ \begin{equation} u^\varepsilon \rightharpoonup u^0 \text{ in } H^1_{\rho^0}(\mathbb{R}^d) \qquad \text{and} \qquad u^\varepsilon \to u^0 \text{ in } L^2_{\rho^0}(\mathbb{R}^d), \end{equation} where $u^0$ is the unique solution of the problem \begin{equation} B^0(u^0, \psi) = \inprod{f^0}{\psi}_{L^2_{\rho^0}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^0}(\mathbb{R}^d), \end{equation} where $\inprod{\cdot}{\cdot}_{L^2_{\rho^0}(\mathbb{R}^d)}$ denotes the inner product in $L^2_{\rho^0}(\mathbb{R}^d)$. \end{corollary} \begin{proof} Let $\widetilde u^\varepsilon$ be the solution of problem \begin{equation} \label{eq:problem_e0} B^\varepsilon(\widetilde u^\varepsilon, \psi) = \inprod{f^0}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \end{equation} and notice that by \cref{thm:homogenization_pde} and as $\varepsilon \to 0$ \begin{equation} \label{eq:convergences_etilde} \widetilde u^\varepsilon \rightharpoonup u^0 \text{ in } H^1_{\rho^0}(\mathbb{R}^d) \qquad \text{and} \qquad \widetilde u^\varepsilon \to u^0 \text{ in } L^2_{\rho^0}(\mathbb{R}^d). \end{equation} Consider now the difference between problems \eqref{eq:problem_ee} and \eqref{eq:problem_e0} \begin{equation} B^\varepsilon(u^\varepsilon - \widetilde u^\varepsilon, \psi) = \inprod{f^\varepsilon - f^0}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} and choose $\psi = u^\varepsilon - \widetilde u^\varepsilon$. 
Since $B^\varepsilon$ is coercive by \eqref{eq:Be_coercive} and using the Cauchy--Schwarz inequality we have \begin{equation} \begin{aligned} \min \{ \sigma, \eta \} \norm{u^\varepsilon - \widetilde u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}^2 &\le B^\varepsilon(u^\varepsilon - \widetilde u^\varepsilon, u^\varepsilon - \widetilde u^\varepsilon) \\ &= \inprod{f^\varepsilon - f^0}{u^\varepsilon - \widetilde u^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \\ &\le \norm{f^\varepsilon - f^0}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \norm{u^\varepsilon - \widetilde u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{aligned} \end{equation} which implies \begin{equation} \norm{u^\varepsilon - \widetilde u^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le \frac{1}{\min \{ \sigma, \eta \}} \norm{f^\varepsilon - f^0}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} and employing \cref{lem:equivalence_L2,cor:equivalence_H1} we obtain \begin{equation} \norm{u^\varepsilon - \widetilde u^\varepsilon}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \frac{C_{\mathrm{up}}}{C_{\mathrm{low}} \min \{ \sigma, \eta \}} \norm{f^\varepsilon - f^0}_{L^2_{\rho^0}(\mathbb{R}^d)}. \end{equation} Therefore, since $f^\varepsilon \to f^0$ in $L^2_{\rho^0}(\mathbb{R}^d)$ we deduce that $u^\varepsilon - \widetilde u^\varepsilon \to 0$ in $H^1_{\rho^0}(\mathbb{R}^d)$, which together with the limits in \eqref{eq:convergences_etilde} gives the desired result. \end{proof} Finally, the next theorem is a corrector result which justifies the first two terms in the asymptotic expansion of the solution $u^\varepsilon$ of \eqref{eq:weakPDE_ms} \begin{equation} \label{eq:asyptotic_expansion} u^\varepsilon(x) = u^0(x) + \varepsilon u_1 \left( x, \frac{x}{\varepsilon} \right) + \varepsilon^2 u_2 \left( x, \frac{x}{\varepsilon} \right) + \dots, \end{equation} which is usually employed in homogenization theory. 
\begin{theorem} \label{thm:corrector} Let $u^\varepsilon$ and $u^0$ be respectively the unique solutions of problems \eqref{eq:weakPDE_ms} and \eqref{eq:weakPDE_hom}. Then, under \cref{ass:dissipativity,ass:compactness} \begin{equation} \lim_{\varepsilon \to 0} \norm{u^\varepsilon - u^0 - \varepsilon u_1 \left( \cdot, \frac{\cdot}{\varepsilon}\right) }_{H^1_{\rho^0}(\mathbb{R}^d)} = 0, \end{equation} where $u_1(x,y) = \Phi(y) \cdot \nabla u^0(x)$ and $\Phi$ is the solution of \eqref{eq:Phi_equation}. \end{theorem} \begin{proof} Let us first recall that from the proof of \cref{thm:homogenization_pde} we know that as $\varepsilon \to 0$ \begin{equation} \label{eq:2conv} u^\varepsilon \rightsquigarrow u^0 \qquad \text{and} \qquad \nabla u^\varepsilon \rightsquigarrow \nabla u^0 + \nabla_y u_1. \end{equation} Let $z^\varepsilon$ be defined as \begin{equation} z^\varepsilon(x) \coloneqq u^\varepsilon(x) - u^0(x) - \varepsilon u_1 \left( x, \frac{x}{\varepsilon}\right), \end{equation} and let $\bar z^\varepsilon$ be its mean with respect to the invariant distribution $\rho^0$, i.e., \begin{equation} \bar z^\varepsilon \coloneqq \int_{\mathbb{R}^d} z^\varepsilon(x) \rho^0(x) \,\mathrm{d} x. \end{equation} Then, applying the Poincaré inequality \eqref{eq:poincare_0} we obtain \begin{equation} \label{eq:bound_z} \begin{aligned} \norm{z^\varepsilon}_{H^1_{\rho^0}(\mathbb{R}^d)}^2 &= \norm{z^\varepsilon}_{L^2_{\rho^0}(\mathbb{R}^d)}^2 + \norm{\nabla z^\varepsilon}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}^2 \\ &= \norm{z^\varepsilon - \bar z^\varepsilon}_{L^2_{\rho^0}(\mathbb{R}^d)}^2 + (\bar z^\varepsilon)^2 + \norm{\nabla z^\varepsilon}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}^2 \\ &\le (\bar z^\varepsilon)^2 + (C_P+1)\norm{\nabla z^\varepsilon}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}^2, \end{aligned} \end{equation} and we now study the two terms in the right-hand side separately. 
First, by the two-scale convergence \eqref{eq:2conv} and the fact that $\Phi$ is bounded by \cite[Lemma 5.5]{PaS07} we have \begin{equation} \label{eq:limit_zbar} \lim_{\varepsilon \to 0} \bar z^\varepsilon = \lim_{\varepsilon \to 0} \left( \int_{\mathbb{R}^d} u^\varepsilon(x) \rho^0(x) \,\mathrm{d} x - \int_{\mathbb{R}^d} u^0(x) \rho^0(x) \,\mathrm{d} x - \varepsilon \int_{\mathbb{R}^d} \Phi \left( \frac{x}{\varepsilon} \right) \cdot \nabla u^0(x) \rho^0(x) \,\mathrm{d} x \right) = 0. \end{equation} We then consider the second term in the right-hand side of \eqref{eq:bound_z} and using \cref{lem:equivalence_L2} we have \begin{equation} \begin{aligned} \norm{\nabla z^\varepsilon}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}^2 &\le \frac{1}{C_{\mathrm{low}}^2} \norm{\nabla z^\varepsilon}_{(L^2_{\rho^\varepsilon}(\mathbb{R}^d))^d}^2 \\ &= \frac{1}{C_{\mathrm{low}}^2} \int_{\mathbb{R}^d} \abs{ \nabla u^\varepsilon(x) - \left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x) - \varepsilon \nabla^2 u^0(x) \Phi \left( \frac{x}{\varepsilon} \right)}^2 \rho^\varepsilon(x) \,\mathrm{d} x \\ &\le \frac{2}{C_{\mathrm{low}}^2} (I_1^\varepsilon + I_2^\varepsilon), \end{aligned} \end{equation} where \begin{equation} \begin{aligned} I_1^\varepsilon &\coloneqq \varepsilon^2 \int_{\mathbb{R}^d} \abs{ \nabla^2 u^0(x) \Phi \left( \frac{x}{\varepsilon} \right)}^2 \rho^\varepsilon(x) \,\mathrm{d} x, \\ I_2^\varepsilon &\coloneqq \int_{\mathbb{R}^d} \abs{ \nabla u^\varepsilon(x) - \left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x)}^2 \rho^\varepsilon(x) \,\mathrm{d} x. 
\end{aligned} \end{equation} Since $\Phi$ and $\mu$ are bounded, due to equation \eqref{eq:relation_distributions} and noting that \begin{equation} \label{eq:limit_coefficients} \lim_{\varepsilon \to 0} \frac{C_\mu C_{\rho^0}}{C_{\rho^\varepsilon}} = \abs{Y}, \end{equation} we obtain \begin{equation} \label{eq:limitI1_0} \lim_{\varepsilon \to 0} I_1^\varepsilon = \lim_{\varepsilon \to 0} \varepsilon^2 \frac{C_\mu C_{\rho^0}}{C_{\rho^\varepsilon}} \int_{\mathbb{R}^d} \abs{\nabla^2 u^0(x) \Phi \left( \frac{x}{\varepsilon} \right)}^2 \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x = 0. \end{equation} Moreover, since $u^\varepsilon$ solves problem \eqref{eq:weakPDE_ms} we have \begin{equation} \begin{aligned} \sigma I_2^\varepsilon &= \int_{\mathbb{R}^d} f(x) u^\varepsilon(x) \rho^\varepsilon(x) \,\mathrm{d} x + \sigma \int_{\mathbb{R}^d} \abs{\left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x)}^2 \rho^\varepsilon(x) \,\mathrm{d} x \\ &- 2 \sigma \int_{\mathbb{R}^d} \left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x) \cdot \nabla u^\varepsilon(x) \rho^\varepsilon(x) \,\mathrm{d} x - \eta \int_{\mathbb{R}^d} u^\varepsilon(x)^2 \rho^\varepsilon(x) \,\mathrm{d} x, \end{aligned} \end{equation} which by equation \eqref{eq:relation_distributions} yields \begin{equation} \begin{aligned} \frac{\sigma C_{\rho^\varepsilon}}{C_\mu C_{\rho^0}} I_2^\varepsilon &= \int_{\mathbb{R}^d} f(x) u^\varepsilon(x) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x + \sigma \int_{\mathbb{R}^d} \abs{\left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x)}^2 \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x \\ &- 2 \sigma \int_{\mathbb{R}^d} \left( I + \nabla \Phi \left( \frac{x}{\varepsilon} \right)^\top \right) \nabla u^0(x) \cdot \nabla u^\varepsilon(x) \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x - \eta \int_{\mathbb{R}^d} 
u^\varepsilon(x)^2 \mu \left( \frac{x}{\varepsilon} \right) \rho^0(x) \,\mathrm{d} x. \end{aligned} \end{equation} Passing to the limit as $\varepsilon \to 0$, due to the two-scale convergence \eqref{eq:2conv}, equation \eqref{eq:limit_coefficients} and the definition of $K$ in \eqref{eq:K_def} we have \begin{equation} \lim_{\varepsilon \to 0} \sigma I_2^\varepsilon = \int_{\mathbb{R}^d} f(x) u^0(x) \rho^0(x) \,\mathrm{d} x - \int_{\mathbb{R}^d} \Sigma \nabla u^0(x) \cdot \nabla u^0(x) \rho^0(x) \,\mathrm{d} x - \eta \int_{\mathbb{R}^d} u^0(x)^2 \rho^0(x) \,\mathrm{d} x = 0, \end{equation} where the last equality follows from the fact that $u^0$ is the solution of problem \eqref{eq:weakPDE_hom}, and which together with \eqref{eq:limitI1_0} implies \begin{equation} \label{eq:limit_zprime} \lim_{\varepsilon \to 0} \norm{\nabla z^\varepsilon}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}^2 = 0. \end{equation} Finally, bound \eqref{eq:bound_z} and limits \eqref{eq:limit_zbar} and \eqref{eq:limit_zprime} imply the desired result. \end{proof} \section{Eigenvalue problem} \label{sec:eigen} In this section we study the homogenization of the eigenvalue problem for the multiscale generator $\mathcal{L}^\varepsilon$. Let $(\lambda^\varepsilon, \phi^\varepsilon)$ be a couple eigenvalue-eigenvector of $\mathcal{L}^\varepsilon$ which solves \begin{equation} \label{eq:eigen_ms} - \mathcal{L}^\varepsilon \phi^\varepsilon = \lambda^\varepsilon \phi^\varepsilon, \end{equation} and let $(\lambda^0, \phi^0)$ be a couple eigenvalue-eigenvector of $\mathcal{L}^0$ which solves \begin{equation} \label{eq:eigen_hom} - \mathcal{L}^0 \phi^0 = \lambda^0 \phi^0. \end{equation} We first show that the spectra of the generators $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$ are discrete and afterwards we prove the convergence of the eigenvalues and the eigenfunctions of the former to the eigenvalues and the eigenfunctions of the latter as the multiscale parameter $\varepsilon$ vanishes. 
\begin{lemma} \label{lem:spectrum_ms} Let $\mathcal{L}^\varepsilon$ be the generator defined in \eqref{eq:generator_ms}. Under \cref{ass:dissipativity,ass:compactness}, there exists a sequence of couples eigenvalue-eigenvector $\{ (\lambda_n^\varepsilon, \phi_n^\varepsilon) \}_{n\in\mathbb{N}}$ which solve \eqref{eq:eigen_ms}. Moreover, the eigenvalues satisfy \begin{equation} 0 = \lambda_0^\varepsilon < \lambda_1^\varepsilon < \lambda_2^\varepsilon < \dots < \lambda_n^\varepsilon < \dots \nearrow + \infty, \end{equation} and the eigenfunctions belong to $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ with $\phi_0^\varepsilon \equiv 1$ and \begin{equation} \label{eq:normH_eigenvector} \norm{\phi_n^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} = \sqrt{1 + \frac{\lambda_n^\varepsilon}{\sigma}}, \end{equation} and form an orthonormal basis of $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$. \end{lemma} \begin{proof} By \cref{lem:injection_compact_e} and in particular the Poincaré inequality \eqref{eq:poincare_e}, the generator $\mathcal{L}^\varepsilon$ has a spectral gap. Therefore, by \cite[Section 4.7]{Pav14} $-\mathcal{L}^\varepsilon$ is a non-negative self-adjoint operator in $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$ with discrete spectrum. Hence, the eigenvalues are real, non-negative, simple and can be ordered as \begin{equation} 0 = \lambda_0^\varepsilon < \lambda_1^\varepsilon < \lambda_2^\varepsilon < \dots < \lambda_n^\varepsilon < \dots \nearrow + \infty. \end{equation} Notice that $\lambda_0^\varepsilon = 0$ and $\phi_0^\varepsilon \equiv 1$. Moreover, using the unitary transformation which maps the generator to a Schrödinger operator, it follows that the eigenfunctions $\{ \phi_n^\varepsilon \}_{n=0}^\infty$ span $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$ and can be normalized such that they form an orthonormal basis (see, e.g., \cite{ReS75,HiS96}). 
It now only remains to show that the eigenfunctions belong to $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ and the equality \eqref{eq:normH_eigenvector}. Let us consider problem \eqref{eq:weakPDE_ms}, which has a unique solution due to \cref{lem:weakPDE_ms} and let us denote by $\mathcal S_\eta^\varepsilon \colon L^2_{\rho^\varepsilon}(\mathbb{R}^d) \to H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ the operator which maps the right-hand side $f$ to the solution $u^\varepsilon$, i.e., $\mathcal S_\eta^\varepsilon f = u^\varepsilon$. A couple $(\lambda_n^\varepsilon, \phi_n^\varepsilon)$ satisfies for all $\psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ \begin{equation} \label{eq:weak_form_eigen} B^\varepsilon(\phi_n^\varepsilon, \psi) = \inprod{(\lambda_n^\varepsilon + \eta) \phi_n^\varepsilon}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \end{equation} where $B^\varepsilon$ is defined in \eqref{eq:Be_def} and $\inprod{\cdot}{\cdot}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}$ denotes the inner product in $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$, and hence \begin{equation} \mathcal S_\eta^\varepsilon \phi_n^\varepsilon = \frac{1}{\lambda_n^\varepsilon + \eta} \phi_n^\varepsilon, \end{equation} which shows that $\phi_n^\varepsilon$ is also an eigenfunction of $\mathcal S_\eta^\varepsilon$ with corresponding eigenvalue $1/(\lambda_n^\varepsilon + \eta)$ and therefore $\phi_n^\varepsilon \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$. Finally, choosing $\psi = \phi_n^\varepsilon$ in \eqref{eq:weak_form_eigen} and since $\norm{\phi_n^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} = 1$ we deduce that \begin{equation} \norm{\nabla \phi_n^\varepsilon}_{(L^2_{\rho^\varepsilon}(\mathbb{R}^d))^d}^2 = \frac{\lambda_n^\varepsilon}{\sigma}, \end{equation} which yields equation \eqref{eq:normH_eigenvector} and concludes the proof. 
\end{proof} An analogous result holds true also for the homogenized generator $\mathcal{L}^0$, for which we omit the details since the proof is similar to the proof of the previous lemma. \begin{lemma} \label{lem:spectrum_hom} Let $\mathcal{L}^0$ be the generator defined in \eqref{eq:generator_hom}. Under \cref{ass:dissipativity,ass:compactness}, there exists a sequence of couples eigenvalue-eigenvector $\{ (\lambda_n^0, \phi_n^0) \}_{n\in\mathbb{N}}$ which solve \eqref{eq:eigen_hom}. Moreover, the eigenvalues satisfy \begin{equation} 0 = \lambda_0^0 < \lambda_1^0 < \lambda_2^0 < \dots < \lambda_n^0 < \dots \nearrow + \infty, \end{equation} and the eigenfunctions belong to $H^1_{\rho^0}(\mathbb{R}^d)$ with $\phi_0^0 \equiv 1$ and \begin{equation} \sqrt{1 + \frac{\lambda_n^0}{\lambda_{\mathrm{max}}(\Sigma)}} \le \norm{\phi_n^0}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \sqrt{1 + \frac{\lambda_n^0}{\lambda_{\mathrm{min}}(\Sigma)}}, \end{equation} and form an orthonormal basis of $L^2_{\rho^0}(\mathbb{R}^d)$. \end{lemma} We remark that the eigenvalues and the eigenfunctions of the generators $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$ can be computed employing the Rayleigh quotients $R^\varepsilon$ and $R^0$, respectively, which are defined as \begin{equation} \label{eq:Rayleigh_def} \begin{aligned} R^\varepsilon(\psi) &= \sigma \frac{\norm{\nabla \psi}_{(L^2_{\rho^\varepsilon}(\mathbb{R}^d))^d}^2}{\norm{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}^2} \qquad &\text{for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \quad \psi \neq 0, \\ R^0(\psi) &= \frac{\inprod{\Sigma \nabla \psi}{\nabla \psi}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}}{\norm{\psi}_{L^2_{\rho^0}(\mathbb{R}^d)}^2} \qquad &\text{for all } \psi \in H^1_{\rho^0}(\mathbb{R}^d), \quad \psi \neq 0. 
\end{aligned} \end{equation} Let $E^\varepsilon_n$ be the finite dimensional subspace of $H^1_{\rho^\varepsilon}(\mathbb{R}^d)$ spanned by the first $n+1$ eigenfunctions $\{ \phi_0^\varepsilon, \phi_1^\varepsilon, \dots, \phi_n^\varepsilon \}$ and let $E^0_n$ be the finite dimensional subspace of $H^1_{\rho^0}(\mathbb{R}^d)$ spanned by the first $n+1$ eigenfunctions $\{ \phi_0^0, \phi_1^0, \dots, \phi_n^0 \}$. Then, the ``minimax principle'' (see, e.g., \cite{CoH62,StF73}) gives the characterization for the $n$-th eigenvalue \begin{equation} \label{eq:minimax_principle} \begin{aligned} \lambda^\varepsilon_n &= R^\varepsilon(\phi_n^\varepsilon) = \max_{\psi \in E^\varepsilon_n} R^\varepsilon(\psi) = \min_{\psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \psi \perp E^\varepsilon_{n-1}} R^\varepsilon(\psi) = \min_{W \in D^\varepsilon_n} \max_{\psi \in W} R^\varepsilon(\psi), \\ \lambda^0_n &= R^0(\phi_n^0) = \max_{\psi \in E^0_n} R^0(\psi) = \min_{\psi \in H^1_{\rho^0}(\mathbb{R}^d), \psi \perp E^0_{n-1}} R^0(\psi) = \min_{W \in D^0_n} \max_{\psi \in W} R^0(\psi), \end{aligned} \end{equation} where \begin{equation} \begin{aligned} D^\varepsilon_n &= \{ W \subset H^1_{\rho^\varepsilon}(\mathbb{R}^d) \colon \dim W = n \}, \\ D^0_n &= \{ W \subset H^1_{\rho^0}(\mathbb{R}^d) \colon \dim W = n \}. \end{aligned} \end{equation} We can now state and prove the homogenization of the spectrum of the multiscale generator, whose proof is inspired by the proof of Theorem 2.1 in \cite{Kes79a}. \begin{theorem} \label{thm:homogenization_eigen} Let $(\lambda_n^\varepsilon, \phi_n^\varepsilon)$ and $(\lambda_n^0, \phi_n^0)$ be ordered couples eigenvalue-eigenfunction of the generators $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$, respectively, with $\norm{\phi_n^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} = 1$ and $\norm{\phi_n^0}_{L^2_{\rho^0}(\mathbb{R}^d)} = 1$. 
Then, under \cref{ass:dissipativity,ass:compactness} and choosing the sign of $\phi_n^\varepsilon$ such that $\inprod{\phi_n^\varepsilon}{\phi_n^0}_{L^2_{\rho^0}(\mathbb{R}^d)} > 0$, it holds for all $n \in \mathbb{N}$ and as $\varepsilon \to 0$ \begin{enumerate} \item $\lambda_n^\varepsilon \to \lambda_n^0$, \item $\phi_n^\varepsilon \to \phi_n^0$ in $L^2_{\rho^0}(\mathbb{R}^d)$, \item $\phi_n^\varepsilon \rightharpoonup \phi_n^0$ in $H^1_{\rho^0}(\mathbb{R}^d)$. \end{enumerate} \end{theorem} \begin{proof} The proof is divided into several steps. \\ \textbf{Step 1}: \emph{Boundedness of eigenvalues and eigenfunctions.} \\ Let $\psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d)$, which due to \cref{cor:equivalence_H1} belongs to $H^1_{\rho^0}(\mathbb{R}^d)$ as well. Employing \cref{lem:equivalence_L2} we have \begin{equation} \frac{C_{\mathrm{low}} \norm{\nabla \psi}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}}{C_{\mathrm{up}} \norm{\psi}_{L^2_{\rho^0}(\mathbb{R}^d)}} \le \frac{\norm{\nabla \psi}_{(L^2_{\rho^\varepsilon}(\mathbb{R}^d))^d}}{\norm{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}} \le \frac{C_{\mathrm{up}} \norm{\nabla \psi}_{(L^2_{\rho^0}(\mathbb{R}^d))^d}}{C_{\mathrm{low}} \norm{\psi}_{L^2_{\rho^0}(\mathbb{R}^d)}}, \end{equation} which by the definitions of the Rayleigh quotients in \eqref{eq:Rayleigh_def} implies \begin{equation} \frac{C_{\mathrm{low}}^2}{\lambda_{\mathrm{max}}(K) C_{\mathrm{up}}^2} R^0(\psi) \le R^\varepsilon(\psi) \le \frac{C_{\mathrm{up}}^2}{\lambda_{\mathrm{min}}(K) C_{\mathrm{low}}^2} R^0(\psi), \end{equation} where $K$ is defined in \eqref{eq:K_def}. 
Then, applying the ``minimax principle'' in \eqref{eq:minimax_principle} we obtain for all $n \in \mathbb{N}$ \begin{equation} \frac{C_{\mathrm{low}}^2}{\lambda_{\mathrm{max}}(K) C_{\mathrm{up}}^2} \lambda^0_n \le \lambda^\varepsilon_n \le \frac{C_{\mathrm{up}}^2}{\lambda_{\mathrm{min}}(K) C_{\mathrm{low}}^2} \lambda^0_n, \end{equation} which shows that the sequence of eigenvalues $\{ \lambda^\varepsilon_n \}$ is bounded for all $n \in \mathbb{N}$. Moreover, due to equation \eqref{eq:normH_eigenvector} and \cref{cor:equivalence_H1} we deduce that also the sequence of eigenfunctions $\{ \phi^\varepsilon_n \}$ is bounded in $H^1_{\rho^0}(\mathbb{R}^d)$, in fact we have \begin{equation} \norm{\phi_n^\varepsilon}_{H^1_{\rho^0}(\mathbb{R}^d)} \le \frac{1}{C_{\mathrm{low}}} \norm{\phi_n^\varepsilon}_{H^1_{\rho^\varepsilon}(\mathbb{R}^d)} \le \frac{1}{C_{\mathrm{low}}} \sqrt{1 + \frac{C_{\mathrm{up}}^2}{\lambda_{\mathrm{min}}(\Sigma) C_{\mathrm{low}}^2} \lambda^0_n}. \end{equation} \textbf{Step 2}: \emph{Extraction of a subsequence.} \\ Due to Step 1 we can extract a subsequence $\varepsilon'$ of $\varepsilon$ such that $\{ \lambda^{\varepsilon'}_0 \}$ is convergent and $\{ \phi^{\varepsilon'}_0 \}$ is weakly convergent in $H^1_{\rho^0}(\mathbb{R}^d)$ and strongly convergent in $L^2_{\rho^0}(\mathbb{R}^d)$ and a further subsequence $\varepsilon''$ of $\varepsilon'$ such that $\{ \lambda^{\varepsilon''}_0 \}$ and $\{ \lambda^{\varepsilon''}_1 \}$ are convergent and $\{ \phi^{\varepsilon''}_0 \}$ and $\{ \phi^{\varepsilon''}_1 \}$ are weakly convergent in $H^1_{\rho^0}(\mathbb{R}^d)$ and strongly convergent in $L^2_{\rho^0}(\mathbb{R}^d)$. 
Repeating this procedure for all $n \in \mathbb{N}$ and choosing the standard diagonal subsequence we can find a subsequence, which is still denoted by $\varepsilon$, such that for all $n \in \mathbb{N}$ \begin{equation} \lambda_n^\varepsilon \to \widetilde \lambda_n, \qquad \phi_n^\varepsilon \rightharpoonup \widetilde \phi_n \text{ in } H^1_{\rho^0}(\mathbb{R}^d), \qquad \phi_n^\varepsilon \to \widetilde \phi_n \text{ in } L^2_{\rho^0}(\mathbb{R}^d), \end{equation} where $\widetilde \lambda_n \in \mathbb{R}$ and $\widetilde \phi_n \in H^1_{\rho^0}(\mathbb{R}^d)$. From now on we will always consider this final subsequence, if not stated differently. \\ \textbf{Step 3}: \emph{Identification of the limits.} \\ An eigenvalue--eigenfunction pair $(\lambda_n^\varepsilon, \phi_n^\varepsilon)$ of the multiscale generator $\mathcal{L}^\varepsilon$ solves the problem \begin{equation} B^\varepsilon(\phi_n^\varepsilon, \psi) = \inprod{(\lambda_n^\varepsilon + \eta )\phi_n^\varepsilon}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \end{equation} where $B^\varepsilon$ is defined in \eqref{eq:Be_def} and by Step 2 \begin{equation} (\lambda_n^\varepsilon + \eta) \phi_n^\varepsilon \to (\widetilde \lambda_n + \eta) \widetilde \phi_n \text{ in } L^2_{\rho^0}(\mathbb{R}^d). \end{equation} Hence, by \cref{cor:homogenization_pde_rhse} and the uniqueness of the limit it follows that the pair $(\widetilde \lambda_n, \widetilde \phi_n)$ solves the problem \begin{equation} B^0(\widetilde \phi_n, \psi) = \inprod{(\widetilde \lambda_n + \eta ) \widetilde \phi_n}{\psi}_{L^2_{\rho^0}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^0}(\mathbb{R}^d), \end{equation} where $B^0$ is defined in \eqref{eq:B0_def} and therefore it is an eigenvalue--eigenfunction pair of the homogenized generator $\mathcal{L}^0$. 
\\ \textbf{Step 4}: \emph{Ordering of the limits.} \\ We now show that the sequence of limits $\{ \widetilde \lambda_n \}_{n \in \mathbb{N}}$ is such that $\widetilde \lambda_0 < \widetilde \lambda_1 < \widetilde \lambda_2 < \cdots < \widetilde \lambda_n < \cdots$. First, due to \cref{lem:spectrum_ms} we know that $\lambda_0^\varepsilon < \lambda_1^\varepsilon < \lambda_2^\varepsilon < \cdots < \lambda_n^\varepsilon < \cdots$, hence their limits must satisfy $\widetilde \lambda_0 \le \widetilde \lambda_1 \le \widetilde \lambda_2 \le \cdots \le \widetilde \lambda_n \le \cdots$. Let us now assume by contradiction that there exist $l,m \in \mathbb{N}$ with $l \neq m$ such that $\widetilde \lambda_l = \widetilde \lambda_m \eqqcolon \widetilde \lambda$. Since the eigenfunctions $\phi_l^\varepsilon$ and $\phi_m^\varepsilon$ corresponding to the eigenvalues $\lambda_l^\varepsilon$ and $\lambda_m^\varepsilon$ are orthogonal in $L^2_{\rho^\varepsilon}(\mathbb{R}^d)$, we have \begin{equation} \inprod{\phi_l^\varepsilon}{\phi_m^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} = 0, \end{equation} and passing to the limit as $\varepsilon$ vanishes we obtain \begin{equation} \label{eq:eigenvectors_orthogonal} \inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^0}(\mathbb{R}^d)} = 0. 
\end{equation} In fact, we have \begin{equation} \label{eq:convergence_inner_product_1} \begin{aligned} &\abs{\inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^0}(\mathbb{R}^d)} - \inprod{\phi_l^\varepsilon}{\phi_m^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}} \le \\ &\hspace{2cm} \abs{\inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^0}(\mathbb{R}^d)} - \inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}} + \abs{\inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} - \inprod{\phi_l^\varepsilon}{\phi_m^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}}, \end{aligned} \end{equation} where the first term in the right hand side vanishes due to the convergence of the measure with density $\rho^\varepsilon$ towards the measure with density $\rho^0$ and the second term tends to zero due to the convergence of the eigenvectors and because by Cauchy--Schwarz inequality and \cref{lem:equivalence_L2} we have \begin{equation} \label{eq:convergence_inner_product_2} \abs{\inprod{\widetilde \phi_l}{\widetilde \phi_m}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} - \inprod{\phi_l^\varepsilon}{\phi_m^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}} \le \norm{\widetilde \phi_l \widetilde \phi_m - \phi_l^\varepsilon \phi_m^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \le C_{\mathrm{up}} \norm{\widetilde \phi_l \widetilde \phi_m - \phi_l^\varepsilon \phi_m^\varepsilon}_{L^2_{\rho^0}(\mathbb{R}^d)}. \end{equation} Therefore, equality \eqref{eq:eigenvectors_orthogonal} implies that the eigenvectors $\widetilde \phi_l$ and $\widetilde \phi_m$ corresponding to the eigenvalue $\widetilde \lambda$ are linearly independent and hence $\widetilde \lambda$ is not a simple eigenvalue, which is impossible due to \cref{lem:spectrum_hom}. 
\\ \textbf{Step 5}: \emph{Entire spectrum.} \\ We now prove that there is no eigenvalue of the homogenized generator $\mathcal{L}^0$ other than those in the sequence $\{ \widetilde \lambda_n \}_{n \in \mathbb{N}}$. Let us assume by contradiction that $\{ \widetilde \lambda_n \}_{n \in \mathbb{N}}$ is a proper subsequence of $\{ \lambda_n^0 \}_{n \in \mathbb{N}}$, i.e., that there exists an eigenvalue $\lambda \in \mathbb{R}$ of the homogenized generator $\mathcal{L}^0$ such that $\lambda \neq \widetilde \lambda_n$ for all $n \in \mathbb{N}$, and let $\phi \in H^1_{\rho^0}(\mathbb{R}^d)$ be its corresponding normalized eigenfunction, which due to \cref{lem:spectrum_hom} satisfies \begin{equation} \inprod{\phi}{\widetilde \phi_n}_{L^2_{\rho^0}(\mathbb{R}^d)} = 0, \qquad \text{for all } n \in \mathbb{N}. \end{equation} Then, there exists $m \in \mathbb{N}$ such that $\lambda < \widetilde \lambda_{m+1}$. Let $\varphi^\varepsilon$ be the solution of the problem \begin{equation} \label{eq:phie_def} B^\varepsilon(\varphi^\varepsilon, \psi) = (\lambda + \eta) \inprod{\phi}{\psi}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}, \qquad \text{for all } \psi \in H^1_{\rho^\varepsilon}(\mathbb{R}^d), \end{equation} and notice that due to \cref{thm:homogenization_pde} \begin{equation} \label{eq:convergence_phie} \varphi^\varepsilon \rightharpoonup \phi \text{ in } H^1_{\rho^0}(\mathbb{R}^d) \qquad \text{and} \qquad \varphi^\varepsilon \to \phi \text{ in } L^2_{\rho^0}(\mathbb{R}^d). 
\end{equation} Choosing $\psi = \varphi^\varepsilon$ in \eqref{eq:phie_def} we then have \begin{equation} \label{eq:limit_Rayleigh_phie} \lim_{\varepsilon \to 0} R^\varepsilon(\varphi^\varepsilon) = \lim_{\varepsilon \to 0} \sigma \frac{\norm{\nabla \varphi^\varepsilon}_{(L^2_{\rho^\varepsilon}(\mathbb{R}^d))^d}^2}{\norm{\varphi^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}^2} = \lim_{\varepsilon \to 0} \frac{(\lambda+\eta) \inprod{\phi}{\varphi^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}}{\norm{\varphi^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)}^2} - \eta = \lambda, \end{equation} where the last equality is justified by an argument similar to \eqref{eq:convergence_inner_product_1} and \eqref{eq:convergence_inner_product_2}. Let now $\xi^\varepsilon$ be defined as \begin{equation} \label{eq:xie_def} \xi^\varepsilon \coloneqq \varphi^\varepsilon - \sum_{n=0}^{m} \inprod{\varphi^\varepsilon}{\phi^\varepsilon_n}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} \phi^\varepsilon_n, \end{equation} which has the same limit as $\varphi^\varepsilon$, i.e., \begin{equation} \xi^\varepsilon \rightharpoonup \phi \text{ in } H^1_{\rho^0}(\mathbb{R}^d) \qquad \text{and} \qquad \xi^\varepsilon \to \phi \text{ in } L^2_{\rho^0}(\mathbb{R}^d), \end{equation} since a similar computation to \eqref{eq:convergence_inner_product_1} and \eqref{eq:convergence_inner_product_2} yields \begin{equation} \label{eq:limit_inner_product_0} \lim_{\varepsilon \to 0} \inprod{\varphi^\varepsilon}{\phi_n^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} = \inprod{\phi}{\widetilde \phi_n}_{L^2_{\rho^0}(\mathbb{R}^d)} = 0. 
\end{equation} Moreover, due to \eqref{eq:limit_inner_product_0} also its Rayleigh quotient has the same limit as \eqref{eq:limit_Rayleigh_phie}, i.e., \begin{equation} \lim_{\varepsilon \to 0} R^\varepsilon(\xi^\varepsilon) = \lambda, \end{equation} and by definition \eqref{eq:xie_def} it follows for all $n = 0, \dots, m$ \begin{equation} \inprod{\xi^\varepsilon}{\phi_n^\varepsilon}_{L^2_{\rho^\varepsilon}(\mathbb{R}^d)} = 0. \end{equation} Therefore, by the ``minimax principle'' \eqref{eq:minimax_principle}, $\lambda_{m+1}^\varepsilon \le R^\varepsilon(\xi^\varepsilon)$ and passing to the limit as $\varepsilon \to 0$ we deduce that $\widetilde \lambda_{m+1} \le \lambda$ which contradicts the fact that $m$ is such that $\lambda < \widetilde \lambda_{m+1}$. \\ \textbf{Step 6}: \emph{Convergence to the homogenized spectrum.} \\ From Steps 3, 4 and 5 and by \cref{lem:spectrum_hom} it follows that the sequence of limits $\{ \widetilde \lambda_n \}_{n \in \mathbb{N}}$ is the same as the sequence of eigenvalues $\{ \lambda_n^0 \}_{n \in \mathbb{N}}$ of the homogenized generator $\mathcal{L}^0$, hence we have $\widetilde \lambda_n = \lambda_n^0$ for all $n \in \mathbb{N}$. Moreover, since the eigenfunctions are normalized, the limit $\widetilde \phi_n$ can be either $\phi_n^0$ or $-\phi_n^0$. The hypothesis that the sign of $\phi_n^\varepsilon$ is chosen such that $\inprod{\phi_n^\varepsilon}{\phi_n^0}_{L^2_{\rho^0}(\mathbb{R}^d)} > 0$ implies that the positive sign is the right one, i.e., $\widetilde \phi_n = \phi_n^0$ for all $n \in \mathbb{N}$. \\ \textbf{Step 7}: \emph{Convergence of the whole sequence.} \\ For all $n \in \mathbb{N}$ the fact that the only admissible limit for the subsequence $\{ \lambda_n^\varepsilon \}$ is $\lambda_n^0$ implies that the whole sequence converges to $\lambda_n^0$. 
Indeed, assuming by contradiction that $\{ \lambda_n^\varepsilon \}$ does not converge to $\lambda_n^0$ gives the existence of a subsequence $\{ \lambda_n^{\varepsilon'} \}$ and $\delta > 0$ such that \begin{equation} \label{eq:to_contradict} \abs{\lambda_n^{\varepsilon'} - \lambda_n^0} > \delta. \end{equation} However, repeating all the previous steps we can extract a subsequence $\{ \lambda_n^{\varepsilon''} \}$ from $\{ \lambda_n^{\varepsilon'} \}$ such that \begin{equation} \lim_{\varepsilon \to 0} \lambda_n^{\varepsilon''} = \lambda_n^0, \end{equation} which contradicts \eqref{eq:to_contradict}. Finally, a similar argument shows the convergence of the whole sequence of eigenfunctions $\{ \phi_n^\varepsilon \}$ to $\phi_n^0$ and concludes the proof. \end{proof} \section{Numerical illustration} \label{sec:experiments} In this section we present an example complementing our theoretical results. We consider the one-dimensional ($d = 1$) multiscale Ornstein--Uhlenbeck process with slow-scale potential $V(x) = x^2/2$, fast-scale potential $p(y) = \cos(y)$ and diffusion coefficient $\sigma = 1$. The numerical results are obtained by setting the discretization size $h = \varepsilon^2$ and replacing the real line $\mathbb{R}$ with a truncated domain $D = [-R,R]$ with $R = 5$. The error introduced by this approximation is negligible since the invariant measures $\rho^0$ and $\rho^\varepsilon$, which appear as weight functions in the integrals, decay exponentially fast for $\abs{x} \to \infty$. \subsection{Poisson problem with reaction term} \begin{figure} \caption{Multiscale and homogenized solution of the Poisson problem with reaction term.} \label{fig:plotPoisson} \end{figure} \begin{figure} \caption{Poisson problem with reaction term varying $\varepsilon$. Left: distance between the multiscale and homogenized solution. 
Right: distance between the multiscale solution and its first order expansion.} \label{fig:ratePoisson} \end{figure} We consider the Poisson problems \eqref{eq:PDE_ms} and \eqref{eq:PDE_hom} with reaction coefficient $\eta = 1$ and right-hand side $f(x) = x$. In this particular case the homogenized equation \eqref{eq:PDE_hom} admits the analytical solution \begin{equation} u^0(x) = \frac{x}{K + \eta}. \end{equation} In \cref{fig:plotPoisson} we plot the numerical solutions $u^\varepsilon$ and $u^0$ setting $\varepsilon = 0.1$, and we observe that the multiscale solution oscillates around the homogenized one. We then solve equation \eqref{eq:PDE_ms} for different values of the multiscale parameter $\varepsilon = 0.025, 0.05, 0.1, 0.2, 0.4$, and we compute the distance between $u^\varepsilon$ and $u^0$ both in $L^2_{\rho^0}(\mathbb{R})$ and $H^1_{\rho^0}(\mathbb{R})$. On the left of \cref{fig:ratePoisson} we observe that the theoretical results given by \cref{thm:homogenization_pde} are confirmed in practice. In particular, $\norm{u^\varepsilon - u^0}_{L^2_{\rho^0}(\mathbb{R})}$ decreases as $\varepsilon$ vanishes, while $\norm{u^\varepsilon - u^0}_{H^1_{\rho^0}(\mathbb{R})}$ remains constant. Indeed, the solution $u^\varepsilon$ converges to $u^0$ strongly in $L^2_{\rho^0}(\mathbb{R})$ but only weakly in $H^1_{\rho^0}(\mathbb{R})$. We now consider a better approximation of the multiscale solution $u^\varepsilon$, which is given by the first order expansion \begin{equation} \widetilde u^\varepsilon(x) = u^0(x) + \varepsilon u_1 \left( x, \frac{x}{\varepsilon} \right), \end{equation} where \begin{equation} u_1(x,y) = (u^0)'(x) \Phi(y). 
\end{equation} The analytical solution $\Phi$ of equation \eqref{eq:Phi_equation}, which is periodic in $Y = [0,L]$ and has zero-mean with respect to $\mu$, is \begin{equation} \Phi(y) = C_\Phi - y + \frac{L}{\widehat C_\mu} \int_0^y e^{\frac1\sigma p(z)} \,\mathrm{d} z, \end{equation} where \begin{equation} \widehat C_\mu = \int_0^L e^{\frac1\sigma p(y)} \,\mathrm{d} y, \end{equation} and \begin{equation} C_\Phi = \frac{1}{C_\mu} \int_0^L y e^{-\frac1\sigma p(y)} \,\mathrm{d} y - \frac{L}{C_\mu \widehat C_\mu} \int_0^L \int_0^y e^{\frac1\sigma(p(z) - p(y))} \,\mathrm{d} z \,\mathrm{d} y. \end{equation} On the right of \cref{fig:ratePoisson} we plot the distance between $u^\varepsilon$ and its first order approximation $\widetilde u^\varepsilon$ both in $L^2_{\rho^0}(\mathbb{R})$ and $H^1_{\rho^0}(\mathbb{R})$, and we observe that we now also have strong convergence in $H^1_{\rho^0}(\mathbb{R})$ as shown by \cref{thm:corrector}. \subsection{Eigenvalue problem} \begin{figure} \caption{First four eigenvalues and eigenfunctions of the multiscale and homogenized generator.} \label{fig:plotEigen} \end{figure} \begin{figure} \caption{Distance between the first four eigenvalues and eigenfunctions of the multiscale and homogenized generator varying $\varepsilon$.} \label{fig:rateEigen} \end{figure} We now consider the homogenization of the eigenvalue problem for the multiscale generator. First, in \cref{fig:plotEigen} we set $\varepsilon = 0.1$ and plot the first four eigenvalues and eigenfunctions of both $\mathcal{L}^\varepsilon$ and $\mathcal{L}^0$. We observe that the eigenvalues $\lambda_n^\varepsilon$ are close to the eigenvalues $\lambda_n^0$ and that the mismatch increases for $n$ bigger, i.e., for eigenvalues with greater magnitude. Moreover, the eigenfunctions behave similarly to the solution of the Poisson problem, in the sense that $\phi_n^\varepsilon$ oscillates around $\phi_n^0$. 
We remark that in the particular case of the Ornstein--Uhlenbeck process the eigenvalue problem for the homogenized generator $\mathcal{L}^0$ can be solved analytically and the eigenfunctions are given by the normalized Hermite polynomials \cite[Section 4.4]{Pav14}. In particular, we have for all $n \in \mathbb{N}$ that $\lambda_n^0 = Kn$ and \begin{equation} \phi_n^0(x) = \frac{1}{\sqrt{n!}} H_n \left( \sqrt{\frac{K}{\Sigma}} x \right), \end{equation} where \begin{equation} H_n(z) = (-1)^n e^{\frac{z^2}{2}} \frac{\mathrm{d}^n}{\,\mathrm{d} z^n} \left( e^{-\frac{z^2}{2}} \right). \end{equation} We then solve the eigenvalue problem for different values of the multiscale parameter $\varepsilon = 0.025, 0.05, 0.1, 0.2, 0.4$, and we compute the distance between the multiscale and homogenized eigenvalues and eigenfunctions. \Cref{fig:rateEigen} demonstrates numerically what we proved theoretically in \cref{thm:homogenization_eigen}, i.e., that we have convergence of the eigenvalues and strong convergence in $L^2_{\rho^0}(\mathbb{R})$, but only weak in $H^1_{\rho^0}(\mathbb{R})$, of the eigenfunctions. \section{Conclusion} \label{sec:conclusion} We presented the homogenization of two problems involving the infinitesimal generator of the multiscale overdamped Langevin SDE. We first considered the Poisson problem with reaction term and, after introducing appropriate weighted Sobolev spaces and extending the theory of two-scale convergence, we proved in \cref{thm:homogenization_pde} the strong convergence in $L^2$ sense and the weak convergence in $H^1$ sense of the multiscale solution to the solution of the same problem where the multiscale generator is replaced by its homogenized surrogate. In \cref{thm:corrector} we also provided a corrector result which justifies the first two terms in the usual asymptotic expansion in homogenization theory. 
We then analyzed the eigenvalue problem and in \cref{thm:homogenization_eigen} we showed homogenization results for the eigenvalues and the eigenfunctions of the multiscale generator. In particular, we demonstrated the convergence of the eigenvalues and the strong convergence in $L^2$ sense and the weak convergence in $H^1$ sense of the eigenvectors towards the corresponding eigenvalues and eigenfunctions of the generator of the coarse-grained dynamics. Finally, we verified numerically our theoretical results by simulating the multiscale one-dimensional Ornstein--Uhlenbeck process. Our work provides rigorous convergence results in the setting of the Langevin dynamics, but we believe that similar theorems can be proved for more general classes of diffusion processes and we will return to this problem in future work. \end{document}
\begin{document} \title{ Fate of multiparticle entanglement when one particle becomes classical } \author{Zhen-Peng Xu} \email{[email protected]} \affiliation{School of Physics and Optoelectronics Engineering, Anhui University, 230601 Hefei, People’s Republic of China} \affiliation{Naturwissenschaftlich-Technische Fakult\"at, Universit\"at Siegen, Walter-Flex-Stra{\ss}e 3, 57068 Siegen, Germany} \author{Satoya Imai} \email{[email protected]} \affiliation{Naturwissenschaftlich-Technische Fakult\"at, Universit\"at Siegen, Walter-Flex-Stra{\ss}e 3, 57068 Siegen, Germany} \author{Otfried Gühne} \email{[email protected]} \affiliation{Naturwissenschaftlich-Technische Fakult\"at, Universit\"at Siegen, Walter-Flex-Stra{\ss}e 3, 57068 Siegen, Germany} \date{\today} \begin{abstract} We study the change of multiparticle entanglement if one particle becomes classical, in the sense that this particle is destructed by a measurement, but the gained information is encoded into a new register. We present an estimation of this change for different entanglement measures and ways of encoding. We first simplify the numerical calculation to analyze the change of entanglement under classicalization in special cases. Second, we provide general upper and lower bounds on the entanglement change. Third, we show that the entanglement change caused by classicalization of one qubit only can still be arbitrarily large. Finally, we discuss cases where no entanglement is left under classicalization for any possible measurement. Our results shed light on the storage of quantum resources and help to develop a novel direction in the field of quantum resource theories. 
\end{abstract} \pacs{03.65.Ta, 03.65.Ud} \maketitle \section{Introduction} Different types of quantum resources~\cite{chitambar2019quantum} are essential for quantum information tasks, like quantum computation \cite{divincenzo1995quantum}, quantum key distribution \cite{scarani2009security}, and quantum metrology \cite{giovannetti2006quantum}, where they can provide a decisive advantage over the classical regime. One main problem for many quantum resources is their sensitivity to the disturbance from the environment. Their protection with tools like quantum error correction~\cite{lidar2013quantum} is usually expensive, especially if larger systems are considered. In practice, some fraction of the particles of a larger quantum system can inevitably become classical, e.g., caused by a measurement or decoherence process. In fact, the particles may even be completely lost. It is a natural question to ask how multiparticle entanglement \cite{horodecki2009quantum, guhne2009entanglement} is affected by such processes. Many works have considered the influence of decoherence on multiparticle entanglement~\cite{simon2002robustness,dur2004stability,carvalho2004decoherence,hein2005entanglement,guhne2008multiparticle,aolita2008scaling}. Other works considered the robustness of multiparticle entanglement under particle loss~\cite{briegel2001persistent,brunner2012persistency,neven2018entanglement,luo2021robust}. Moreover, the sharp change of bipartite entanglement caused by the complete loss of one particle in one party has been studied as the concept of lockable entanglement~\cite{horodecki2005locking,christandl2005uncertainty,leung2009survey, yang2009squashed}. There can, however, still be information left in the environment after loss of {particles}. For example, in the case of the Stern-Gerlach experiment, the left information is given by the location of the spots on the screen. 
As another example one can consider the decay of particles due to decoherence, where it may be reasonable to gather some information from the particles before their complete decay. The usefulness of this classical information has been extensively explored in the form of the entanglement of assistance~\cite{divincenzo1998entanglement}, where a third party (Charly) optimizes the measurement and the resulting information to assist the two original parties (Alice and Bob) to reveal as much quantum entanglement as possible. Most research on the entanglement of assistance has focused on the case where the global state is pure~\cite{li2010evolution,smolin2005entanglement,gour2005deterministic}. As it turns out~\cite{divincenzo1998entanglement}, the entanglement of assistance depends only on the reduced state for Alice and Bob, and the exact three-partite initial state is not important. \begin{figure} \caption{The change of multiparticle entanglement if the particle $C$ becomes classical. In this process of classicalization the particle $C$ is first {destroyed} \label{fig:scenario} \end{figure} In this paper we consider a {different scenario}: One or more particles in a multiparticle system is destructed by a measurement. The gained classical information is then encoded in a quantum state. Our question is how much the multiparticle entanglement is affected in this process of classicalization, see also Fig.~\ref{fig:scenario}. This scenario is practically relevant, as one may not have the perfect `assistance' when the size and performance of the register system are limited. Consequently, our approach can provide guidance for the storage of quantum entanglement robust to particle loss and for finding the optimal strategy of entanglement recovery with the gained classical information and a small register system. 
In comparison with the concept of quantum assistance, we consider mixed quantum states where the entanglement is stored and it is not the aim of the measured party to increase the bipartite entanglement between the remaining ones. Most importantly, the initial quantum state plays a major role in the change of the entanglement due to classicalization. We stress that there are further related concepts. The so-called hidden entanglement~\cite{d2014hidden} has been introduced as the difference between the entanglement without the decomposition information of a mixed state and the one with the decomposition information. Besides, the role of one particle in the change of entanglement has also been considered in distributed entanglement~\cite{chuan2012quantum,streltsov2012quantum}, where the particle is transferred from one party to another one rather than being destroyed. \section{Notations and definitions}\label{sec:notations} We focus on tripartite {systems} in this paper; other multipartite {systems} can be analyzed similarly. We denote the {initial state} as $\rho_{ABC}$. First, suppose that one party of this state {is} measured in a process that completely destroys the measured party, such as the detection of the photon polarization. Without loss of generality, we here assume that the destructive measurement ${M}=\{m_i\}$ acts on the party $C$. After the measurement, the particles belonging to party $C$ {vanish}, but the post-measurement information from the associated outcome is available. That is, {each classical outcome $i$} can be encoded into a new register system $E$ as associated post-measurement states $\tau_i$. 
{ We can write the above process as the operation \begin{align} \label{eq:operation} \Phi_C(\rho_{ABC})= \sum_i p_i \sigma_i \otimes \tau_i, \end{align} where $p_i = \tr(\rho_{ABC} m_i)$, $\sigma_i = \tr_C(\rho_{ABC} m_i)/p_i$ and $\tau_i$ is the register state related to the outcome $i$. We say that this encoding is {perfect}, if $\tau_i = \ket{i}\!\bra{i}$ for an orthogonal basis $\{\ket{i}\}$. In practice, of course, the encoding may not be perfect due to the interaction with the environment or the limited memory of the register. } We denote by $\mathcal{N}_C$ the set of {all possible operations in the form in Eq.~\eqref{eq:operation} on the party $C$.} We stress that the set $\mathcal{N}_C$ is equivalent to the set of entanglement breaking channels~\cite{horodecki2003entanglement} acting on the party $C$. So far, we have not imposed any assumption on the destructive measurements and the encoding, but in practice, there can be extra limitations on them. Our central question is how much the global entanglement in $\rho_{ABC}$ is changed by the operation $\Phi_C$. The maximal change happens usually when there is no classical information left or it has not been employed, that is, the $\tau_i$ are the same for all outcomes $i$; a similar question has been explored already under the concept of lockable entanglement~\cite{horodecki2005locking}, see more details in Sec.~\ref{sec:lockability}. Here we are particularly interested in the minimal amount of entanglement change {with remaining classical information}, {which corresponds to the optimal operation $\Phi_C$ to keep as much entanglement as possible.} For this purpose, we define the quantity $\Delta_{\mathcal{E}}(\rho_{ABC})$ as \begin{align} \Delta_{\mathcal{E}}(\rho_{ABC}) &= \min_{\Phi_C\in \mathcal{N}_C} \left\{\mathcal{E}[\rho_{ABC}] - \mathcal{E}[\Phi_C(\rho_{ABC})] \right\}, \end{align} where $\mathcal{E}$ is a tripartite entanglement measure. 
The practical choice of $\mathcal{E}$ may depend on the quantum information task under consideration. {For the choice} of entanglement measures, it is necessary to require that $\mathcal{E}$ does not increase under local operations and classical communication (LOCC)~\cite{chitambar2014everything}{, called monotonicity under LOCC}. In this case, $\Delta_{\mathcal{E}}(\rho_{ABC})$ is always non-negative. Two further remarks are in order. First, if $\mathcal{E}$ is a measure of genuine multipartite entanglement, then $\Delta_{\mathcal{E}}(\rho_{ABC}) = \mathcal{E}[\rho_{ABC}]$, since $\Phi_C(\rho_{ABC})$ is always separable with respect to the bipartition $AB|C$ for any $\Phi_C$ and $\rho_{ABC}$. Second, {if we restrict the set $\mathcal{N}_C$ with limitations on measurements and register states, the amount of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can be affected. One example is to consider the operations which keep the dimension of the system.} \section{Simplification}\label{sec:simplification} In general it is difficult to calculate $\Delta_{\mathcal{E}}(\rho_{ABC})$, due to the complexity of characterizing the set $\mathcal{N}_C$. Here we provide a method to simplify the calculation. By default, we assume the entanglement measure $\mathcal{E}$ is monotonic under LOCC. Then we have: \begin{observation}\label{ob:encdingextreme} If the entanglement measure $\mathcal{E}$ is convex, we only need to consider $M = \{m_i\}$ as an extremal point in the considered measurement set $\mathcal{M}$. More precisely: \begin{align}\label{eq:simplify} \hspace{-0.5em}\Delta_{\mathcal{E}}(\rho_{ABC})\! =\! \min_{M\in \partial \mathcal{M}} \left\{\mathcal{E}[\rho_{ABC}] \!-\! \sum_i p_i \mathcal{E}[\sigma_i\!\otimes\! |0\rangle\langle 0|]\right\}, \end{align} where $\partial \mathcal{M}$ is the set of extremal points in $\mathcal{M}$, $p_i = \tr(\rho_{ABC} m_i)$ and $\sigma_i = \tr_C(\rho_{ABC} m_i)/p_i$. 
\end{observation} {The proof of Observation~\ref{ob:encdingextreme} is given in Sec.~A in the Supplemental Material~\cite{supplementalmaterial}.} The Observation shows that the actual calculation of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can be reduced to the set of extremal points in $\mathcal{M}$, which { has been well characterized in Ref.}~\cite{d2005classical}. In the following, we will address this problem for two special cases. The first case is that the party $C$ is a qubit and the measurement information from the outcomes is also registered in a qubit system $E$~\cite{ruskai2003qubit}. For convenience, we denote by $\mathcal{N}_1$ the set of those operations, which is equivalent to the set of all entanglement breaking channels mapping qubit to qubit. The second case is that the measurement ${M}$ is a dichotomic POVM~\cite{d2005classical}, where $C$ is not necessarily a qubit. We denote this set as $\mathcal{N}_2$. Now we can present the following observation: \begin{observation}\label{ob:dichotomic} For a convex entanglement measure $\mathcal{E}$, {if we replace $\mathcal{N}_C$ by $\mathcal{N}_1$ or $\mathcal{N}_2$ in the definition of $\Delta_{\mathcal{E}}$}, then the value of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can be achieved with projective measurements. \end{observation} {The proof of Observation~\ref{ob:dichotomic} is given in Sec.~B in the Supplemental Material~\cite{supplementalmaterial}.} Observation~\ref{ob:encdingextreme} and Observation~\ref{ob:dichotomic} make the numerical calculation possible with only a few parameters as in the following examples. 
\subsection{Example: three-qubit systems} Here we look at three-qubit systems and analyze $\Delta_{\mathcal{E}}(\rho_{ABC})$ {with $\mathcal{N}_1$ and $\mathcal{N}_2$.} Important examples of multipartite entanglement measures that satisfy {convexity} and monotonicity under LOCC are the multipartite negativity~\cite{sabin2008classification} and multipartite squashed entanglement~\cite{yang2009squashed,christandl2004squashed}: \begin{align} N_{ABC}(\rho_{ABC})&= N_{AB|C} +N_{BC|A} +N_{AC|B},\\ E_{sq}(\rho_{ABC})&=\min_{\gamma_{ABCX}} \frac{1}{2} I(A:B:C|X). \end{align} Here, $N_{X|Y}= \left|\sum_{\lambda_i<0}\lambda_i\right|$ is the negativity for a bipartition $X|Y$ with eigenvalues $\lambda_i$ of the partially transposed state $\rho^{T_Y}$ with respect to the subsystem $Y$, where $Y=A,B,C$. Also, $I(A:B:C|X)=S(AX)+S(BX)+S(CX)-S(ABCX)-2S(X)$ is the quantum conditional mutual information, where $\gamma_{ABCX}$ is any extension of $\rho_{ABC}$, i.e., $\rho_{ABC} = \tr_X[\gamma_{ABCX}]$, and $S(M)$ is the von Neumann entropy of system $M$. For a pure state $\rho_{ABC}$, the quantum conditional mutual information can be simplified as $I(A:B:C|X)=S(A)+S(B)+S(C)$, which is independent of system $X$. As the first example, we consider the superposition of Greenberger--Horne--Zeilinger (GHZ) states and W states: \begin{align} \label{eq:pureghzestate} \ket{\psi(p)}=\sqrt{p} \ket{\rm GHZ} + \sqrt{1-p} \ket{\rm W}, \end{align} where $0\leq p \leq 1$, $\ket{\rm GHZ} = (\ket{000}+\ket{111})/\sqrt{2}$, and $\ket{\rm W} = (\ket{001}+\ket{010}+\ket{100})/\sqrt{3}$. The numerical relation between $\Delta_\mathcal{E}$ and $p$ is presented in Fig.~\ref{fig:pureghzWW} for $\mathcal{E} = N_{ ABC}, \, E_{sq}$; details about the optimization method are given in Sec.~C in the Supplemental Material~\cite{supplementalmaterial}. 
Interestingly, we find that the maximal value of $\Delta_{\mathcal{E}}(\ket{\psi})$ is given by the W state, while the minimal value is not achieved by the GHZ state but by the state at $p = 0.4$. {We remark that both $N_{ABC}(\ket{\psi(p)})$ and $E_{sq}(\ket{\psi(p)})$ are minimized when $p=0.4$. However, it is an open problem to understand why this state should also have minimal entanglement change.} { Moreover, let us consider a three-qutrit case and compute the tuple of $\Delta_\mathcal{E}$ for $\mathcal{E} = (N_{ABC}, \, E_{sq})$. The GHZ state $\sum_{i=0}^2\ket{iii} /\sqrt{3}$ has $(1.667,\, 0.792489)$, while the state $( \ket{012}+\ket{120}+\ket{201} +\ket{021}+\ket{210}+\ket{102} )/\sqrt{6}$ has $(1.86747,\, 0.971332)$. More details are in Sec.~C in the Supplemental Material~\cite{supplementalmaterial}. } \begin{figure} \caption{$\Delta_{\mathcal{E} \label{fig:pureghzWW} \end{figure} \section{General bounds}\label{sec:general} {In general, it may be hard to obtain the exact value of $\Delta_{\mathcal{E}}(\rho_{ABC})$ for some entanglement measure $\mathcal{E}$. To address this situation, we now derive upper and lower bounds that can be useful for the estimation.} First, we present a general lower bound. \begin{observation} \label{ob:lower} For a convex entanglement measure $\mathcal{E}$, {and for the set $\mathcal{N}_C$}, we have \begin{equation}\label{ob:lowerbound} \Delta_{\mathcal{E}}(\rho_{ABC}) \ge \min_{|x\rangle} \left\{ \mathcal{E}[\rho_{ABC}]- \mathcal{E}[\sigma_{|x\rangle} \otimes |0\rangle\langle 0|] \right\}, \end{equation} where $|x\rangle$ is a measurement direction on the party $C$ and $\sigma_{|x\rangle} = \langle x|\rho_{ABC}|x\rangle / \tr[\langle x|\rho_{ABC}|x\rangle]$ is a normalized state. \end{observation} The proof of Observation~\ref{ob:lower} is given in Sec.~D in the Supplemental Material~\cite{supplementalmaterial}. 
This lower bound can be used to characterize the complete entanglement loss, as we will see later in Sec.~\ref{sec:complete}. Furthermore, suppose that we remove all the classical information of the measurement outcomes, that is, we encode all the measurement outcomes into the same state $|0\rangle$. Then we find an upper bound: \begin{align} \Delta_{\mathcal{E}}(\rho_{ABC}) \le \tilde{\Delta}_{\mathcal{E}}(\rho_{ABC}), \end{align} for any convex entanglement measure $\mathcal{E}$, where \begin{equation}\label{eq:uppberbound} \tilde{\Delta}_{\mathcal{E}}(\rho_{ABC}) = \mathcal{E}[\rho_{ABC}] - \mathcal{E}[\rho_{AB}\otimes |0\rangle\langle 0|], \end{equation} with $\rho_{AB} = \tr_C(\rho_{ABC})$. {We remark that $\tilde{\Delta}_{\mathcal{E}}(\rho_{ABC})$ is the maximal entanglement change, since we can always map any encoding into the state $|0\rangle\langle 0|$ with a local operation on the system $C$.} {Let us compare $\Delta_{\mathcal{E}}$ with its lower and upper bounds using the tripartite negativity $N_{ABC}$. Figs.~\ref{fig:lowerboundpure} and \ref{fig:lowerboundmixed} illustrate the cases of the pure three-qubit state $\ket{\psi(p)}$ in Eq.~(\ref{eq:pureghzestate}) and the mixed three-qubit state $\rho(q) = q \rho_{\rm GHZ} + (1-q)\rho_{\rm W}$, where $\rho_{\rm GHZ}=\ket{\rm GHZ}\!\bra{\rm GHZ}$ and $\rho_{\rm W}=\ket{\rm W}\!\bra{\rm W}$.} We find that the lower bound is {relatively close to $\Delta_{\mathcal{E}}$}, especially {if the state approximates the GHZ state.} The gap between $\Delta_{\mathcal{E}}$ and $\tilde{\Delta}_{\mathcal{E}}$ shows that the post-measurement information is more relevant for the GHZ state than for the W state. \begin{figure} \caption{Comparison between $\Delta_{\mathcal{E} \label{fig:lowerboundpure} \end{figure} \begin{figure} \caption{Comparison between $\Delta_{\mathcal{E} \label{fig:lowerboundmixed} \end{figure} Next, let us connect {entanglement change} to quantum discord. 
For that, we consider the multipartite relative entropy of entanglement, which is the sum of the relative entropies of entanglement~\cite{linden1999reversibility} for all bipartitions, i.e., \begin{align} R_{ABC}(\rho_{ABC}) = R_{AB|C} + R_{BC|A} + R_{AC|B}, \end{align} where $R_{X|Y} =\min_{\sigma\in {\rm SEP}} S(\rho_{XY}||\sigma)$ is the relative entropy of entanglement for a bipartition $X|Y$, $S(\rho||\sigma)=\tr[\rho \, (\log{\rho}-\log{\sigma})]$ is the von Neumann relative entropy and ${\rm SEP}$ is the set of {bipartite} separable states. Similarly, the amount of quantum discord~\cite{modi2012classical} can also be measured by the relative entropy: $D_{XY}(\rho_{XY}) = \min_{\rho'\in \Lambda}S(\rho_{XY}||\rho')$, where $\Lambda$ is the set of quantum-classical states $\rho'=\sum_{i} p_i \sigma_i \otimes \ket{i}\!\bra{i}$ with orthonormal basis $\{\ket{i}\}$. Now we can formulate the following two Observations: \begin{observation}\label{ob:bounds} {For the entanglement measure $\mathcal{E}$ being } the tripartite relative entropy of entanglement $R_{ABC}$, we have \begin{align} R_{AB|C}(\rho_{ABC}) \leq \Delta_{\mathcal{E}}(\rho_{ABC}) \leq 3 D_{AB|C}(\rho_{ABC}). \end{align} \end{observation} \begin{observation}\label{ob:relative} More generally, if $D_{AB|C}(\rho_{ABC}) = 0$, then we have $\Delta_{\mathcal{E}}(\rho_{ABC}) = 0$ for any entanglement measure $\mathcal{E}$. \end{observation} {The proofs of Observation~\ref{ob:bounds} and Observation~\ref{ob:relative} are given in Sec.~E and Sec.~F in the Supplemental Material~\cite{supplementalmaterial}.} From Observation~\ref{ob:relative}, the condition $D_{AB|C}(\rho_{ABC}) = 0$ is a sufficient condition for $\Delta_{\mathcal{E}}(\rho_{ABC}) = 0$ for any measure $\mathcal{E}$. On the other hand, this is not a necessary condition. For instance, if the initial state $\rho_{ABC}$ is fully separable, clearly $\Delta_{\mathcal{E}}(\rho_{ABC}) = 0$, but this does not mean $D_{AB|C}(\rho_{ABC})=0$. 
From the conceptual perspective, quantum discord is the difference of quantum correlation before and after a projective measurement, whereas $\Delta_{\mathcal{E}}(\rho_{ABC})$ {quantifies} the difference of entanglement, which is only one sort of quantum correlations. \section{Lockability}\label{sec:lockability} Previous works~\cite{horodecki2005locking,christandl2005uncertainty,leung2009survey} have studied a similar issue under the name of lockability of entanglement measures. There, one asks for the quantitative change of entanglement by the loss of one particle (e.g., one qubit) {\it within} one party. For example, in the bipartite scenario, one considers the situation where Alice and Bob both have five qubits and then one asks how the entanglement changes if Alice loses one of her qubits. If the entanglement change can be arbitrarily large, the entanglement measure is {called} lockable. For instance, all convex entanglement measures are known to be lockable, while the relative entropy of entanglement is not~\cite{horodecki2005locking}. {The lockable entanglement is related to our consideration in the following sense. For a given tripartite state $\rho_{ABC}$, if we choose the convex entanglement measure $\mathcal{E}$ to only measure the entanglement between the bipartition $A|BC$ (or $AC|B$), then $\tilde{\Delta}_{\mathcal{E}}$ defined in Eq.~\eqref{eq:uppberbound} is the quantity considered in lockable entanglement. 
More precisely, for any convex entanglement measure $\mathcal{E}$ for the bipartition $A|BC$, we have \begin{equation} \tilde{\Delta}_{\mathcal{E}}(\rho_{ABC}) = \mathcal{E}[\rho_{ABC}] - \mathcal{E}[\rho_{AB}], \end{equation} where we used that $\mathcal{E}[\rho_{AB}\otimes \ket{0}\!\bra{0}]=\mathcal{E}[\rho_{AB}]$, see Theorem 2 in Ref.~\cite{horodecki2005simplifying}.} In order to understand the difference between the behaviour of entanglement under classicalization and the lockability problem, one has to analyze the role of the information coming from the measurement results. We know already from Fig.~\ref{fig:lowerboundpure} and \ref{fig:lowerboundmixed} that this information makes some difference for the entanglement change. In the following, we will show that this difference can be arbitrarily large. \subsection{Example: Flower state} {First, let us consider} the so-called flower state on $d \otimes d\otimes 2$-dimensional systems~\cite{christandl2005uncertainty}: \begin{align} \omega_{ABC} = &\frac{2}{d(d+1)} P^{(+)}_{AB}\otimes \frac{d+1}{2d} |0\rangle\langle 0|_C\nonumber\\ \quad &+ \frac{2}{d(d-1)} P^{(-)}_{AB}\otimes \frac{d-1}{2d} |1\rangle\langle 1|_C, \end{align} where $P^{(\pm)}_{AB}$ are the projections onto the symmetric and anti-symmetric subspaces, that is $P^{(\pm)}_{AB} = (\mathds{1}_{AB}\pm V_{AB})/2$ with the SWAP operator $V_{AB}$, acting as $V_{AB}\ket{v_A}\otimes\ket{v_B}=\ket{v_B}\otimes\ket{v_A}$. Notice that, {the quantum discord of $\omega_{ABC}$ for the bipartition $AB|C$ is $0$,} i.e., $D_{AB|C}(\omega_{ABC})=0$. From Observation~\ref{ob:relative}, we conclude that $\Delta_{\mathcal{E}}(\omega_{ABC}) = 0$ for any entanglement measure $\mathcal{E}$. However, we have $\tilde{\Delta}_{\mathcal{E}}(\omega_{ABC}) = \mathcal{E}(\omega_{ABC}) > 0$ {, because $\tr_C(\omega_{ABC}) \otimes |0\rangle\langle 0|$ is fully separable. 
} In fact, if the entanglement measure $\mathcal{E}$ is taken as the squashed entanglement, then $\mathcal{E}(\omega_{ABC})$ can be arbitrarily large~\cite{christandl2005uncertainty}. This directly implies that the difference $\tilde{\Delta}_{\mathcal{E}} - {\Delta}_{\mathcal{E}}$ can be arbitrarily large {by choosing $d$ properly}. Hence, although the information from the measurement {at the flower state} is only one bit, a large amount of entanglement can be saved by collecting it. \subsection{Example: $n$-pairs of Bell states} {On the other hand, we will see that the entanglement change $\Delta_{\mathcal{E}}$ can also be arbitrarily large even if only one qubit has become classical.} As an example, let us consider a pure state made of $n$ pairs of the Bell state $\ket{\Psi^+}=(\ket{00}+\ket{11})/\sqrt{2}$. We label the $i$-th pair of particles with $a_i, b_i$. Suppose that the party $A$ owns the particles $\{a_i\}_{i=1}^n$, the party $B$ owns the particles $\{b_i\}_{i=1}^{n-1}$, and the party $C$ owns the particle $b_n$. We denote this state as $\beta_{ABC}=|{\Psi^+}\rangle\langle {\Psi^+}|^{\otimes n}$. Now we can present the following {observation which is proven in Sec.~G in the Supplemental Material~\cite{supplementalmaterial}.} \begin{observation} \label{ob:lockneg} For {the entanglement measure $\mathcal{E}$ to be} the tripartite negativity $N_{ABC}$, we have \begin{align} \Delta_{\mathcal{E}}(\beta_{ABC}) = 2^{n-2} + 1/2. \end{align} Thus, $\Delta_{\mathcal{E}}(\beta_{ABC})$ can be arbitrarily large {by choosing $n$ properly}. \end{observation} { Inspired by those two examples, an interesting question arises whether there exist entanglement measures $\mathcal{E}$ and states $\rho_{ABC}$ such that both $\Delta_{\mathcal{E}}(\rho_{ABC})$ and $\tilde{\Delta}_{\mathcal{E}}(\rho_{ABC}) - {\Delta}_{\mathcal{E}}(\rho_{ABC})$ can be arbitrarily large {in the sense that they are not limited by the size of $C$}, even if $C$ is only a qubit. 
We leave this question for further research.} \section{Complete entanglement loss under classicalization}\label{sec:complete} By definition, $\Delta_{\mathcal{E}}(\rho_{ABC}) \le \mathcal{E}[\rho_{ABC}]$ {always holds.} We are now concerned about the case where this inequality is saturated, i.e., $\Delta_{\mathcal{E}}(\rho_{ABC}) =\mathcal{E}[\rho_{ABC}]$, or equivalently, $\max_{\Phi_C\in \mathcal{N}_C}\mathcal{E}[\Phi_C(\rho_{ABC})]=0$. First of all, Observation~\ref{ob:lower} implies a sufficient condition for complete entanglement loss under classicalization, {which can be formulated as follows.} \begin{condition}\label{cond1} If, after a projective measurement in any direction $\ket{x}$ on $C$, the post-measurement state $\sigma_{\ket{x}} \propto \langle x|\rho_{ABC}|x\rangle$ is always separable, then the entanglement is completely lost under classicalization. \end{condition} Clearly, Condition \ref{cond1} is stronger than the condition that the reduced state $\rho_{AB}$ is separable. For instance, let us consider the GHZ state. Its reduced state $\tr_C[\rho_{\text{GHZ}}]$ is separable, but its post-measurement state $\sigma_{\ket{x}}$ can be entangled if the measurement basis is $\{\ket{+}, \ket{-}\}$. The existence of genuine multipartite entangled states which satisfy Condition~\ref{cond1}, however, has already been reported in Ref.~\cite{miklin2016multiparticle}. {We will propose observations using Condition \ref{cond1} and provide more examples in Sec.~H and Sec.~I in the Supplemental Material~\cite{supplementalmaterial}.} \section{Conclusion and discussion}\label{sec:conclusion} Multiparticle quantum entanglement is an important quantum resource and the preservation of entanglement is a practical {issue}. We have studied the change of multiparticle entanglement under classicalization of one particle. Clearly, the results usually depend on the choice of the entanglement quantifier, and the change of entanglement is difficult to compute. 
We provided simplifications for important special scenarios and upper and lower bounds for the general case. One crucial question is whether one small part {like one qubit} can significantly change quantum resources like quantum entanglement or not. Our results show that the entanglement change can still be arbitrarily large even with complete measurement information left. Besides, the measurement information can also make an arbitrarily large difference. Finally, we provide conditions under which quantum entanglement is always completely lost under classicalization. While we focused on the difference of the original quantum resource and the remaining resource if one party becomes classical, the behaviour of quantum resources {\it during} the quantum to classical transition is also interesting, and it may have a richer theoretical structure. We believe that our work paves the way to the design of concepts for quantum resource storage and may help to develop a novel direction in the field of quantum resource theories. \begin{acknowledgments} We thank H. Chau Nguyen, Martin Pl\'{a}vala, Benjamin Yadin, and Xiao-Dong Yu for discussions. This work was supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation, project numbers 447948357 and 440958198), the Sino-German Center for Research Promotion (Project M-0294), the ERC (Consolidator Grant No. 683107/TempoQ), the German Ministry of Education and Research (Project QuKuK, BMBF Grant No. 16KIS1618K), the Humboldt foundation, and the DAAD. \end{acknowledgments} \onecolumngrid \renewcommand\thesection{\Alph{section}} \addtocounter{theorem}{-7} \addtocounter{section}{-7} \section{Proof of Observation~\ref{ob:encdingextreme2}}\label{ap:a} \begin{observation}\label{ob:encdingextreme2} If the entanglement measure $\mathcal{E}$ is convex, we only need to consider $M = \{m_i\}$ as an extremal point in the considered measurement set $\mathcal{M}$. 
More precisely: \begin{align} \hspace{-0.5em}\Delta_{\mathcal{E}}(\rho_{ABC})\! =\! \min_{M\in \partial \mathcal{M}} \left\{\mathcal{E}[\rho_{ABC}] \!-\! \sum_i p_i \mathcal{E}[\sigma_i\!\otimes\! |0\rangle\langle 0|]\right\}, \end{align} where $\partial \mathcal{M}$ is the set of extremal points in $\mathcal{M}$, $p_i = \tr(\rho_{ABC} m_i)$ and $\sigma_i = \tr_C(\rho_{ABC} m_i)/p_i$. \end{observation} \begin{proof} For any entanglement-breaking channel $\Phi_C$, we have the decomposition: \begin{align}\label{eq:operation-appendix} \Phi_C(\rho_{ABC})= \sum_i p_i \sigma_i \otimes \tau_i, \end{align} where $M = \{m_i\}$ is a measurement acting on $C$, $p_i = \tr(\rho_{ABC} m_i)$, $\sigma_i = \tr_C(\rho_{ABC} m_i)/p_i$, and $\tau_i$ is the state encoding the measurement outcome $i$. Since the set $\mathcal{M}$ of all POVMs acting on $C$ is convex, any POVM $M = \{m_i\}$ can be decomposed into the convex combinations of extreme points of $\mathcal{M}$. That is, we have \begin{align} m_i = \sum_k c_k m_i^{(k)}, \forall i, \end{align} where $M^{(k)} = \{m_i^{(k)}\}$ is an extreme point in the set $\mathcal{M}$ and $0<c_k \leq 1$ with $\sum_k c_k = 1$. Consequently, the operation $\Phi_C$ can be rewritten as \begin{align} \Phi_C(\rho_{ABC}) = \sum_k c_k \Phi_C^{(k)}(\rho_{ABC}), \end{align} where \begin{equation} \Phi_C^{(k)}(\rho_{ABC}) = \sum_i \tr_C\left(\rho_{ABC} m_i^{(k)}\right) \otimes \tau_i. \end{equation} In the case that the entanglement measure $\mathcal{E}$ is convex, we have \begin{align} \mathcal{E}[\Phi_C(\rho_{ABC})] \le \sum_k c_k \mathcal{E}\left[\Phi_C^{(k)}(\rho_{ABC})\right] \le \max_k \mathcal{E}\left[\Phi_C^{(k)}(\rho_{ABC})\right]. \end{align} This implies that the maximal value of $\mathcal{E}[\Phi_C(\rho_{ABC})]$, or equivalently, the value of $\Delta_{\mathcal{E}}(\rho_{ABC})$, can always be achieved by extreme POVMs. 
That is, \begin{equation}\label{eq:step1} \max_{\Phi_C\in \mathcal{N}_C} \mathcal{E}\left[\Phi_C(\rho_{ABC})\right] = \max_{M\in \partial\mathcal{M}, {\{\tau_i\}}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes \tau_i\right), \end{equation} where $\partial\mathcal{M}$ is the set of all extreme POVMs. Note that, any imperfect encoding can be generated from the perfect one by local operations. Since the entanglement measure $\mathcal{E}$ is LOCC monotonic, { we have $\mathcal{E}\left(\sum_i p_i \sigma_i \otimes \tau_i\right) \le \mathcal{E}\left(\sum_i p_i \sigma_i \otimes |i\rangle\langle i|_C\right)$. } This implies that, \begin{equation}\label{eq:step21} \max_{M\in \partial\mathcal{M}, {\{\tau_i\}}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes \tau_i\right) \le \max_{M\in \partial\mathcal{M}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes |i\rangle\langle i|_C\right). \end{equation} Since $\{\tau_i = |i\rangle\langle i|\}$ is just a special encoding, we have \begin{equation}\label{eq:step22} \max_{M\in \partial\mathcal{M}, {\{\tau_i\}}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes \tau_i\right) \ge \max_{M\in \partial\mathcal{M}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes |i\rangle\langle i|_C\right). \end{equation} In total, we know that \begin{equation}\label{eq:step2} \max_{M\in \partial\mathcal{M}, {\{\tau_i\}}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes \tau_i\right) = \max_{M\in \partial\mathcal{M}} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes |i\rangle\langle i|_C\right). 
\end{equation} Besides, we have \begin{align}\label{eq:step3} \mathcal{E}\left(\sum_i p_i \sigma_i \otimes \ket{i}\!\bra{i}_C\right) &= \mathcal{E}\left(\sum_i p_i \sigma_i \otimes (\ket{0}\!\bra{0} \otimes \ket{i}\!\bra{i})_C\right)\nonumber\\ &= \sum_i p_i \mathcal{E}[\sigma_i \otimes \ket{0}\!\bra{0}_C], \end{align} {where the equalities in the first line holds since $\{|i\rangle\langle i|\}$ and $\{|0\rangle\langle 0|\otimes|i\rangle\langle i|\}$ can be converted to each other by LOCC, the equality in the second line is from the flag condition satisfied by any entanglement measure which is monotonic under LOCC,} see Theorem~2 in Ref.~\cite{horodecki2005simplifying}. By putting Eq.~\eqref{eq:step1}, Eq.~\eqref{eq:step2} and Eq.~\eqref{eq:step3} together, we complete the proof. \end{proof} {We recommend the reader to refer to Ref.~\cite{d2005classical} for more characterization of extreme POVMs, like necessary conditions and sufficient conditions.} \section{Proof of Observation~\ref{ob:dichotomic2}}\label{ap:b} \begin{observation}\label{ob:dichotomic2} For a convex entanglement measure $\mathcal{E}$, {if we replace $\mathcal{N}_C$ by $\mathcal{N}_1$ or $\mathcal{N}_2$ in the definition of $\Delta_{\mathcal{E}}$}, then the value of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can be achieved with projective measurements. \end{observation} \begin{proof} From Observation~\ref{ob:encdingextreme2}, we know that for a convex entanglement measure $\mathcal{E}$ that satisfies the monotonicity condition, the optimal value of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can always be obtained by the extreme points of destructive measurements in the sets $\mathcal{N}_1$ and $\mathcal{N}_2$. Then it is sufficient to show that these extreme points are given by projective measurements. 
{ First, we consider the case of $\mathcal{N}_1$.} As proven in Ref.~\cite{ruskai2003qubit}, any entanglement breaking channel from qubit to qubit, i.e., any channel in $\mathcal{N}_1$, can be decomposed as a convex combination of classical-quantum channels. Here recall that a channel $\Phi_C$ is called a classical-quantum channel if \begin{equation} \Phi_C(\rho) = \sum_{i} \langle x_i|\rho|x_i\rangle \otimes \tau_i, \end{equation} where $\{|x_i\rangle\}$ is an orthonormal basis. By definition, the classical-quantum channel is written in the composition of projective measurements and local state preparation. That is, the extreme point in $\mathcal{N}_1$ is obtained by projective measurements. {Next, we consider the case of $\mathcal{N}_2$.} It is known that a POVM $\{m_1,\ldots,m_k\}$ is extreme if $m_i, m_j$ have disjoint supports for any $i\neq j$~\cite{d2005classical}. In the dichotomic case, $m_1 = \mathds{1} - m_2$, thus, $m_1, m_2$ can be diagonalized simultaneously. Then, there is no overlap between the supports of $m_1, m_2$ if and only if they are orthogonal projectors. Hence, the extremal points in $\mathcal{N}_2$ are also obtained by projective measurements. \end{proof} \section{Details of computation in figures}\label{ap:c} Since we consider the set of entanglement breaking channels from qubit to qubit in the examples, we only need to focus on dichotomic projective measurements $M = \{m_0, m_1\}$ and perfect encoding of the outcomes according to Observation~\ref{ob:encdingextreme2} and Observation~\ref{ob:dichotomic2}. In this case we have, \begin{align}\label{eq:simplified} \Delta_{\mathcal{E}}(\rho_{ABC}) = \mathcal{E}[\rho_{ABC}] - \max_{M\in \mathcal{P}}\sum_{i=0,1} p_i \mathcal{E}[\sigma_i\!\otimes\! |0\rangle\langle 0|], \end{align} where $\mathcal{P}$ is the set of all dichotomic projective measurements on qubit $C$, $p_i = \tr(\rho_{ABC} m_i)$, and $\sigma_i = \tr_C(\rho_{ABC} m_i)/p_i$. 
Here, the entanglement measure $\mathcal{E}$ is taken to be either the multipartite negativity $N_{ABC}$ or the multipartite squashed entanglement $E_{sq}$. First, let us consider the case of the multipartite negativity $N_{ABC}$. Then we have \begin{align} N_{ABC}(\sigma_i\!\otimes\! |0\rangle\langle 0|)&= N_{AB|C}(\sigma_i\!\otimes\! |0\rangle\langle 0|) +N_{BC|A}(\sigma_i\!\otimes\! |0\rangle\langle 0|) +N_{AC|B}(\sigma_i\!\otimes\! |0\rangle\langle 0|)\nonumber\\ &= N_{B|A}(\sigma_i) +N_{A|B}(\sigma_i)\nonumber\\ &= 2N_{A|B}(\sigma_i), \end{align} where the second equality is from the fact that $\sigma_{i}^{T_A}\otimes |0\rangle\langle 0|$ has same non-zero eigenvalues as $\sigma_{i}^{T_A}$ as well as for the case $B$. Second, let us consider the case of the multipartite squashed entanglement $E_{sq}$. Note that, for any $4$-partite state $\eta_{ABCX}$ such that $\tr_X(\eta_{ABCX}) = \sigma_i\!\otimes\! |0\rangle\langle 0|$, it can only be in the form $\gamma_{ABX}\!\otimes\! |0\rangle\langle 0|$, where $\tr_X(\gamma_{ABX}) = \sigma_i$. Thus, \begin{align} E_{sq}(\sigma_i\!\otimes\! |0\rangle\langle 0|)&=\min_{\gamma_{ABX}\!\otimes\! |0\rangle\langle 0|} \frac{1}{2} I(A:B:C|X)\nonumber\\ &=\min_{\gamma_{ABX}\!\otimes\! |0\rangle\langle 0|} \frac{1}{2} [S(AX)+S(BX)+S(CX)-S(ABCX)-2S(X)]\nonumber\\ &=\min_{\gamma_{ABX}\!\otimes\! |0\rangle\langle 0|} \frac{1}{2} [S(AX)+S(BX)+S(X)-S(ABX)-2S(X)]\nonumber\\ &=\min_{\gamma_{ABX}} \frac{1}{2} [S(AX)+S(BX)-S(ABX)-S(X)]\nonumber\\ &=E_{sq}^{(2)}(\sigma_i), \end{align} where in the third line we employ the additivity of the von Neumann entropy, and we denote $E_{sq}^{(2)}$ the bipartite squashed entanglement~\cite{christandl2004squashed}. In the case that $\rho_{ABC}$ is a pure state, each $\sigma_i$ is also a pure state. From the result of Ref.~\cite{christandl2004squashed}, we have \begin{align} E_{sq}^{(2)}(\sigma_i) &= S(A)+S(B). 
\end{align} Therefore, once we have parameterized the $2$-dimensional projective measurement $M$, the numerical calculation of $\Delta_{\mathcal{E}}(\rho_{ABC})$ can be easily performed by brute force optimization in each example. {To be more explicit, each $2$-dimensional rank-$1$ projective measurement $M$ corresponds to a vector which can be parameterized as $\langle v| = (\cos x, e^{it} \sin x)$ such that $M = \{|v\rangle\langle v|, \mathds{1} - |v\rangle\langle v|\}$. In the calculation, we have taken $x$ in the discrete set $\{\pi k/300\}_{k=0}^{300}$ and $t$ in the set $\{\pi j/50\}_{j=0}^{50}$. For each measurement direction defined by the pair $(x,t)$, the post-selected bipartite states and their entanglement can be computed directly by the definition of the entanglement measure. By choosing the maximal entanglement of the post-measurement state over all pairs $(x,t)$, we obtain the numerical approximation of $\Delta_{\mathcal{E}}(\rho_{ABC})$ for $\mathcal{E}$ either to be $N_{ABC}$ or $E_{sq}$. We remark that the three-dimensional non-trivial projective measurements can also be parameterized by $M = \{|v\rangle\langle v|, \mathds{1} - |v\rangle\langle v|\}$, where $|v\rangle$ is a three-dimensional complex vector $(\cos x_1, e^{it_1} \sin x_1\cos x_2, e^{it_2} \sin x_1\sin x_2)$. Note that for the sake of simplicity we considered the case of only real parameters to obtain the result of three-qutrit states in the main text. 
} \section{Proof of Observation~\ref{ob:lower2}}\label{ap:d} \begin{observation} \label{ob:lower2} For a convex entanglement measure $\mathcal{E}$, {and for the set $\mathcal{N}_C$}, we have \begin{equation} \Delta_{\mathcal{E}}(\rho_{ABC}) \ge \min_{|x\rangle} \left\{ \mathcal{E}[\rho_{ABC}]- \mathcal{E}[\sigma_{|x\rangle} \otimes |0\rangle\langle 0|] \right\}, \end{equation} where $|x\rangle$ is a measurement direction on the party $C$ and $\sigma_{|x\rangle} = \langle x|\rho_{ABC}|x\rangle / \tr[\langle x|\rho_{ABC}|x\rangle]$ is a normalized state. \end{observation} \begin{proof} For a given entanglement breaking channel $\Phi_C$, it can be equivalently characterized~\cite{horodecki2003entanglement} by a POVM with $M=\{q_i |x_i\rangle\langle x_i|\}$ and a preparation $\{|\psi_i\rangle\langle \psi_i|\}$. That is, \begin{align} \Phi_C(\rho_{ABC}) &= \sum_i q_i \langle x_i|\rho_{ABC} |x_i\rangle \otimes |\psi_i\rangle\langle \psi_i| \nonumber\\ &= \sum_i q_i p_i \sigma_{|x_i\rangle} \otimes |\psi_i\rangle\langle \psi_i|, \end{align} where $p_i = \tr(\langle x_i|\rho_{ABC} |x_i\rangle)$, $\sigma_{|x_i\rangle}$ is the normalized state of $\langle x_i|\rho_{ABC} |x_i\rangle$, and $\sum_{i}q_i p_i =1$. For any convex entanglement measure $\mathcal{E}$, we then have \begin{align} \mathcal{E}[\Phi_C(\rho_{ABC})] &\le \sum_i q_i p_i \mathcal{E}[\sigma_{|x_i\rangle} \otimes |\psi_i\rangle\langle \psi_i|] \nonumber\\ &\le \max_i \mathcal{E}[\sigma_{|x_i\rangle} \otimes |\psi_i\rangle\langle \psi_i|]\nonumber\\ &\le \max_{|x\rangle} \mathcal{E}[\sigma_{|x\rangle}\otimes |0\rangle\langle 0|], \end{align} where in the last line we apply local unitary operations on the party $C$ to rotate the states to $\ket{0}$ and maximize over a more general range of measurement directions. \end{proof} {In principle, the optimization can be done similarly as in Appendix C. 
As for the application of Observation~\ref{ob:lower2} in Condition~7, we only need to show that $\sigma_{|x\rangle}$ is separable for each $|x\rangle$, which can be checked by the PPT condition in the case that $A$ and $B$ are two-dimensional subsystems with symbolic calculations. For this purpose, we do not need to specify the values of parameters in $|x\rangle$.} \section{Proof of Observation~\ref{ob:bounds2}}\label{ap:e} \begin{observation}\label{ob:bounds2} {For the entanglement measure $\mathcal{E}$ being } the tripartite relative entropy of entanglement $R_{ABC}$, we have \begin{align} R_{AB|C}(\rho_{ABC}) \leq \Delta_{\mathcal{E}}(\rho_{ABC}) \leq 3 D_{AB|C}(\rho_{ABC}). \end{align} \end{observation} \begin{proof} We begin by noting that Lemma 1 in Ref.~\cite{chuan2012quantum}: for a given tripartite state $\rho_{ABC}$, it holds that \begin{equation} \hspace{-0.5em}R_{BC|A}(\rho_{ABC}) \!\leq\! {D}_{AB|C}(\rho_{ABC})\! +\! R_{BC|A}[\Phi_C(\rho_{ABC})], \end{equation} where $\Phi_C(\rho_{ABC})= \sum_i p_i \sigma_i^{AB} \otimes \ket{i}\!\bra{i}^C$ where $\tau_i = \ket{i}\!\bra{i}^C$. Exchanging $A$ and $B$, we similarly have \begin{align} \hspace{-0.8em}R_{AC|B}(\rho_{ABC}) \!\leq\! {D}_{AB|C}(\rho_{ABC}) \!+\! R_{AC|B}[\Phi_C(\rho_{ABC})]. \end{align} Summarizing both inequalities leads to \begin{align} R_{BC|A}(\rho_{ABC}) + R_{AC|B}(\rho_{ABC}) \leq 2{D}_{AB|C}(\rho_{ABC}) + R_{ABC}[\Phi_C(\rho_{ABC})], \end{align} where we use the fact that $R_{AB|C}[\Phi_C(\rho_{ABC})] = 0$ since $\Phi_C(\rho_{ABC})$ is separable with respect to $AB|C$. Rewriting this left hand side as $R_{ABC}(\rho_{ABC})-R_{AB|C}(\rho_{ABC})$, we have \begin{align} R_{ABC}(\rho_{ABC}) - R_{ABC}[\Phi_C(\rho_{ABC})] \leq 2{D}_{AB|C}(\rho_{ABC}) + R_{AB|C}(\rho_{ABC}). \end{align} By definition, $\Delta_{\mathcal{E}}(\rho_{ABC})$ is always no more than this left hand side, {since $\Phi_C$ is just a special entanglement-breaking channel}. 
Then we obtain \begin{align} \Delta_{\mathcal{E}}(\rho_{ABC}) \leq 2{D}_{AB|C}(\rho_{ABC}) + R_{AB|C}(\rho_{ABC}). \end{align} Finally, since $R_{AB|C}(\rho_{ABC}) \leq {D}_{AB|C}(\rho_{ABC})$, we find the upper bound. Concerning the lower bound, we have \begin{align} \Delta_{\mathcal{E}}(\rho_{ABC}) &= \min_{\Phi_C \in \mathcal{N}_C} \left\{R_{ABC}(\rho_{ABC})- R_{ABC}[\Phi_C(\rho_{ABC})]\right\}\nonumber\\ &\geq R_{AB|C}(\rho_{ABC}) +\min_{\Phi_C \in \mathcal{N}_C} \left\{R_{BC|A}(\rho_{ABC})- R_{BC|A}[\Phi_C(\rho_{ABC})]\right\}\nonumber\\ &+\min_{\Phi_C \in \mathcal{N}_C} \left\{R_{AC|B}(\rho_{ABC})- R_{AC|B}[\Phi_C(\rho_{ABC})]\right\}, \end{align} where we again use that $R_{AB|C}[\Phi_C(\rho_{ABC})] = 0$. Since the relative entropy of entanglement satisfies the monotonicity condition, we have that $R_{BC|A}(\rho_{ABC})- R_{BC|A}[\Phi_C(\rho_{ABC})] \geq 0$ and $R_{AC|B}(\rho_{ABC})- R_{AC|B}[\Phi_C(\rho_{ABC})] \geq 0$. Then we arrive at the lower bound. \end{proof} \section{Proof of Observation~\ref{ob:relative2}}\label{ap:f} \begin{observation}\label{ob:relative2} More generally, if $D_{AB|C}(\rho_{ABC}) = 0$, then we have $\Delta_{\mathcal{E}}(\rho_{ABC}) = 0$ for any entanglement measure $\mathcal{E}$. \end{observation} \begin{proof} We note that $D_{AB|C}(\rho_{ABC})=0$ if and only if there exists an entanglement-breaking channel $\Phi_C$ such that $\Phi_C(\rho_{ABC}) = \rho_{ABC}$ (see Proposition 21 in Ref.~\cite{seshadreesan2015fidelity} for more details). By definition, { \begin{align} \Delta_{\mathcal{E}}(\rho_{ABC}) &= \min_{\Phi'_C\in \mathcal{N}_C} \left\{\mathcal{E}[\rho_{ABC}] - \mathcal{E}[\Phi'_C(\rho_{ABC})] \right\},\nonumber\\ &\le \mathcal{E}[\rho_{ABC}] - \mathcal{E}[\Phi_C(\rho_{ABC})] \nonumber\\ &= \mathcal{E}[\rho_{ABC}] - \mathcal{E}[\rho_{ABC}]\nonumber\\ &=0. 
\end{align} Since $\Delta_{\mathcal{E}}(\rho_{ABC})$ is nonnegative for any entanglement measure $\mathcal{E}$ which is monotonic under LOCC, } this eventually implies that $\Delta_{\mathcal{E}}(\rho_{ABC}) = 0$ for any entanglement measure $\mathcal{E}$ { which is assumed to be monotonic under LOCC}. \end{proof} \section{Proof of Observation~\ref{ob:lockneg2}}\label{ap:g} \begin{observation} \label{ob:lockneg2} For {the entanglement measure $\mathcal{E}$ to be} the tripartite negativity $N_{ABC}$, we have \begin{align} \Delta_{\mathcal{E}}(\beta_{ABC}) = 2^{n-2} + 1/2. \end{align} Thus, $\Delta_{\mathcal{E}}(\beta_{ABC})$ can be arbitrarily large. \end{observation} \begin{proof} To prove this, we first show that for a $d\times d$-dimensional bipartite state, its negativity is no more than $(d-1)/2$. Since the negativity is a convex function, we only need to prove it for {pure states}. Let us write a pure state $|\psi\rangle$ as \begin{equation} \ket{\psi} = \sum_{i=1}^d \lambda_i \ket{a_ib_i}, \,\, \sum_i \lambda_i^2 = 1, \,\, \lambda_i \ge 0. \end{equation} Then direct calculation yields that \begin{equation} \label{eq:uppernegativity} N(\ket{\psi}) = \sum_{1\le i<j\le d} \lambda_i\lambda_j \le \frac{d-1}{2} \sum_{i=1}^d \lambda_i^2 = \frac{d-1}{2}. \end{equation} Here the maximal value $(d-1)/2$ can be saturated by the maximally entangled state $\ket{\Psi^+_d} = \frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}\ket{ii}$. Next, let us recall the $n$-copy Bell state $\beta_{ABC}=|{\Psi^+}\rangle\langle {\Psi^+}|^{\otimes n}$. We remark that this $n$-copy state can be represented by the maximally entangled state in $(2^n\times2^n)$-dimensional systems $\ket{\Psi^+_{2^n}}$. This leads to \begin{align}\label{eq:g4} N_{BC|A}(\beta_{ABC}) = (2^n - 1) /2. \end{align} Suppose that an entanglement breaking channel $\Phi_C$ acts on the $n$-th particle of the last party $b_n$, equivalently, on the party $C$. 
Since all entanglement breaking channels can be decomposed into measure and prepare operations, we again write the measure process for $\Phi_C$ as the form of the POVM with $M=\{q_i|x_i\rangle\langle x_i|\}$ and the preparation process as $\{|\psi_i\rangle\langle \psi_i|\}$, i.e., \begin{align} \Phi_C(\beta_{ABC}) = \sum_i q_i p_i\sigma_{\ket{x_i}} \otimes |\psi_i\rangle\langle \psi_i|, \end{align} where $p_i = \tr(\langle x_i|\beta_{ABC}|x_i\rangle)$, $\sigma_{\ket{x_i}}$ is the normalized pure state of $\langle x_i|\beta_{ABC}|x_i\rangle$, and $\sum_{i} q_i p_i =1$. Then we have \begin{align} N_{BC|A}[\Phi_C(\beta_{ABC})] &=N_{BC|A}\left(\sum_i q_i p_i \sigma_{\ket{x_i}} \otimes |\psi_i\rangle\langle \psi_i|\right)\nonumber\\ &\le\sum_i q_i p_i N_{BC|A}\left( \sigma_{\ket{x_i}} \otimes |\psi_i\rangle\langle \psi_i|\right)\nonumber\\ &=\sum_i q_i p_i N_{BC|A}\left( \sigma_{\ket{x_i}} \otimes |0\rangle\langle 0|\right)\nonumber\\ &= \sum_i q_i p_i N_{AB}(\sigma_{\ket{x_i}})\nonumber\\ &\le \sum_i q_i p_i (2^{n-1} -1) /2\nonumber\\ &= (2^{n-1} - 1) /2, \end{align} where in the second line we employ the convexity of negativity. In the third line we apply local unitary operations on the party $C$ to rotate the states $|\psi_i\rangle$'s to $\ket{0}$. In the fourth line, we use the fact that negativity is invariant under local unitaries and adding local ancillas, see \cite{horodecki2005simplifying}. {In the fifth line, we apply the upper bound given in Eq.~(\ref{eq:uppernegativity}).} On the other hand, we obtain \begin{align} N_{BC|A}[\Phi_C(\beta_{ABC})] &\ge N_{BC|A}[\tr_C(\Phi_C(\beta_{ABC}))\otimes |0\rangle\langle 0|_C]\nonumber\\ &= N_{B|A}[\tr_C(\beta_{ABC})]\nonumber\\ &= N_{B|A}[(|\Psi^+\rangle\langle \Psi^+|_{AB})^{\otimes (n-1)}\otimes \tr_C(|\Psi^+\rangle\langle \Psi^+|_{AC})]\nonumber\\ &= N_{B|A}[(|\Psi^+\rangle\langle \Psi^+|_{AB})^{\otimes (n-1)}]\nonumber\\ &= (2^{n-1} - 1) /2. 
\end{align} In the first line we use the LOCC monotonicity, and in the second line we make use of the fact that $\tr_C \circ \, \Phi_C = \tr_C$. In the fourth line, we use the fact that negativity is invariant under adding local ancillas, see \cite{horodecki2005simplifying}. Thus, independently of the entanglement breaking channel $\Phi_C$, we show \begin{align} N_{BC|A}[\Phi_C(\beta_{ABC})] = (2^{n-1} - 1) /2. \end{align} This result directly leads to \begin{equation}\label{nega:bc|a} {N}_{BC|A}(\beta_{ABC}) - {N}_{BC|A}\left[\Phi_C(\beta_{ABC})\right] = 2^{n-2}. \end{equation} Also, since negativity is invariant under adding local ancillas, we have \begin{align}\label{eq:g10} {N}_{B|CA}(\beta_{ABC}) = {N}_{B|CA}\left[\Phi_C(\beta_{ABC})\right] = {N}_{B|A}\left[(|\Psi^+\rangle\langle \Psi^+|_{AB})^{\otimes (n-1)}\right] = (2^{n-1}-1)/2, \end{align} which implies \begin{align}\label{nega:b|ca} {N}_{B|CA}(\beta_{ABC}) - {N}_{B|CA}\left[\Phi_C(\beta_{ABC})\right] = 0. \end{align} Similarly, we have \begin{align}\label{eq:g12} {N}_{AB|C}(\beta_{ABC}) = {N}_{A|C}\left[|\Psi^+\rangle\langle \Psi^+|_{AC}\right] = 1/2. \end{align} The fact that $\Phi_C$ is an entanglement-breaking channel implies that \begin{equation} {N}_{AB|C}\left[\Phi_C(\beta_{ABC})\right] = 0. \end{equation} Consequently, we have \begin{align}\label{nega:ab|c} {N}_{AB|C}(\beta_{ABC}) - {N}_{AB|C}\left[\Phi_C(\beta_{ABC})\right] = 1/2. \end{align} By definition of $\Delta_{\mathcal{E}}(\beta_{ABC})$ with $N_{ABC}$ using Eqs.~(\ref{nega:bc|a}, \ref{nega:b|ca}, \ref{nega:ab|c}), we complete the proof: \begin{equation} \Delta_{\mathcal{E}}(\beta_{ABC}) = 2^{n-2} + 1/2. \end{equation} { We have one remark. From Eq.~\eqref{eq:g4}, Eq.~\eqref{eq:g10} and Eq.~\eqref{eq:g12}, we know that the original tripartite negativity is \begin{equation} N_{ABC}(\beta_{ABC}) = 2^{n-1} + 2^{n-2} - 1/2, \end{equation} which is strictly larger than $\Delta_{\mathcal{E}}(\beta_{ABC})$ whenever $n\ge 2$. 
Furthermore, $N_{ABC}(\beta_{ABC})/\Delta_{\mathcal{E}}(\beta_{ABC})$ goes to $2$ as $n$ goes to infinity. } \end{proof} \section{Observations on complete entanglement loss under classicalization}\label{ap:h} In this Appendix, we propose two observations for the entangled states satisfying Condition~7. A similar observation has been made for pure states in Ref.~\cite{neven2018entanglement}. \addtocounter{theorem}{1} \begin{observation} \label{ob:subrank2} Suppose that a tripartite state $\rho_{ABC}$ satisfies Condition 7. If $\rho_{ABC}$ is entangled {for the bipartitions $A|BC$ and $B|AC$}, then the reduced state $\rho_{AB} = \tr_C(\rho_{ABC})$ should have rank more than $2$. \end{observation} We remark that the generalization of Observation~\ref{ob:subrank2} to the $n$-partite case is given in Appendix~\ref{ap:j} for $n>3$. \begin{proof} First we denote that $p_x = \tr[\langle x|\rho_{ABC} |x\rangle]$ and {$\sigma_{\ket{x}} = \langle x|\rho_{ABC} |x\rangle/p_x$}. Let us begin by recalling that any tripartite quantum state can be written as \begin{align} \rho_{ABC} = \sum_{i,j} M_{ij} \otimes \ket{i}\!\bra{j}, \end{align} where $M_{ij}=\tr_C[\rho_{ABC} ( \mathds{1}_{AB} \otimes \ket{j}\!\bra{i})]$. For $i=j$, we have that $M_{ii} = p_i \sigma_{\ket{i}}$. For $i\neq j$, $M_{ij}$ can be written as linear combinations of $p_x \sigma_{\ket{x}}$ for some $\ket{x}$, since any $\ket{j}\!\bra{i}$ can be decomposed using some projectors $\ket{x}\!\bra{x}$. The more explicit form will be given below. In the following, we will show the {contraposition} of the observation, that is, if $\rho_{ABC}$ satisfies Condition 7 and $\rho_{AB}$ has rank no more than $2$, then $\rho_{ABC}$ is {either separable for the bipartition $A|BC$ or separable for the bipartition $B|AC$}. 
{ Since $\rho_{AB} = \sum_i p_i \sigma_{\ket{i}}$ where $\{\ket{i}\}$ is the computational orthonormal basis, and $\sigma_{\ket{i}}$ is separable for any $\ket{i}$ according to Condition 7, then $\rho_{AB}$ is also separable. } If $\rho_{AB}$ {has} rank $1$, it is easy to see that $\rho_{ABC}$ is a pure product state. { Further, let us consider the case that the separable state $\rho_{AB}$ has exactly rank $2$. Up to local unitary, we can assume the following decomposition: \begin{align} \rho_{AB} = \alpha(\lambda \ket{00}\!\bra{00} +(1-\lambda)\ket{ab}\!\bra{ab}) + (1-\alpha) \sum_{i} \lambda_i |a_ib_i\rangle\langle a_ib_i|, \end{align} where $\ket{ab} \neq \ket{00}$, $\alpha, \lambda, \lambda_i \in [0,1]$. Denote $|\psi_1\rangle, |\psi_2\rangle$ the eigenstates of $\rho_{AB}$ with non-zero eigenvalues. Then $\ket{00}, \ket{ab}, \ket{a_ib_i}$ should be superpositions of $|\psi_1\rangle, |\psi_2\rangle$. Since $\ket{ab} \neq \ket{00}$, $|\psi_1\rangle, |\psi_2\rangle$ can also be written as superpositions of $\ket{00}, \ket{ab}$. Consequently, any $\ket{a_ib_i}$ can be written as superpositions of $\ket{00}, \ket{ab}$. In the case that $|a\rangle = |0\rangle$, we have $|a_i\rangle = |0\rangle$, which implies that $\rho_A = \tr_{BC}(\rho_{ABC}) = \tr_B(\rho_{AB}) = |0\rangle\langle 0|$. Hence, $\rho_{ABC} = |0\rangle\langle 0|\otimes \rho_{BC}$, which contradicts the assumption that $\rho_{ABC}$ is entangled for the bipartition $A|BC$. Thus, $|a\rangle \neq |0\rangle$ should hold. Similarly, we have $|b\rangle \neq |0\rangle$. Since $|a\rangle \neq |0\rangle$, $|b\rangle \neq |0\rangle$, then any non-trivial superposition of them is entangled. This leads to that $\ket{a_ib_i}$ should either be $|00\rangle$ or $|ab\rangle$ up to a phase. 
} Since the range of $\sigma_{\ket{x}}$ belongs to the range of $\rho_{AB}$ {and $\sigma_{\ket{x}}$ is separable}, we have \begin{align} \sigma_{\ket{x}} = \lambda_x \ket{00}\!\bra{00} +(1-\lambda_x)\ket{ab}\!\bra{ab}, \end{align} where $\sum_x p_x \lambda_x =\lambda$. {Since $M_{ij}$ is a combination of $\sigma_{|x\rangle}$, $M_{ij}$ can be written as} \begin{align} M_{ij} = X_{ij} \ket{00}\!\bra{00} +Y_{ij}\ket{ab}\!\bra{ab}, \end{align} where the coefficients $X_{ij}$ and $Y_{ij}$ are given by combinations of $p_x \lambda_x$ for some $x$. Accordingly, we can write \begin{align} \rho_{ABC} = \ket{00}\!\bra{00} \otimes \tau_x + \ket{ab}\!\bra{ab} \otimes \tau_y, \end{align} where $\tau_x=\sum_{i,j} X_{ij} \ket{i}\!\bra{j}$ and $\tau_y=\sum_{i,j} Y_{ij} \ket{i}\!\bra{j}$. To show that $\rho_{ABC}$ is fully separable, it is sufficient to prove that the matrices $\tau_x$ and $\tau_y$ are positive semidefinite. For that, we note that since $\ket{ab} \neq \ket{00}$, there exists a bipartite pure state $\ket{\alpha \beta}$ such that $\braket{ab|\alpha \beta} = 0$ and $\braket{00|\alpha \beta} \neq 0$. Then it holds that \begin{equation} \braket{\alpha \beta \gamma|\rho_{ABC}|\alpha \beta \gamma} = |\braket{\alpha \beta|00}|^2 \braket{\gamma|\tau_x|\gamma} \geq 0, \end{equation} for any $\ket{\gamma}$. This implies that $\braket{\gamma|\tau_x|\gamma} \geq 0$, that is, $\tau_x$ is positive semidefinite. Similarly, we can show that $\tau_y$ is positive semidefinite. Hence, we conclude that $\rho_{ABC}$ is fully separable{, which contradicts the assumption}. \end{proof} In the case that the party $C$ is not entangled with $A$ and $B$, we have a similar requirement of the global state as in the following observation. \begin{observation} \label{ob:wholerank2} Suppose that a tripartite state $\rho_{ABC}$ satisfies Condition 7. If $\rho_{ABC}$ is {entangled {for the bipartitions $A|BC$ and $B|AC$}} separable for $AB|C$, then it should have rank more than $2$. 
\end{observation} \begin{proof} Here we prove {the statement} by contradiction. Let us assume $\rho_{ABC}$ satisfies Condition 7 and has rank no more than $2$. Since $\rho_{ABC}$ is separable for the bipartition $AB|C$, we have the decomposition \begin{equation} \rho_{ABC} = \sum_{i} p_i \ket{\psi_i\phi_i}\bra{\psi_i\phi_i}, \end{equation} where $\ket{\psi_i}, \ket{\phi_i}$ are states for parties $A, B$ and party $C$, respectively. By assumption, the dimension of the space spanned by $\{\ket{\psi_i\phi_i}\}$ is no more than $2$, this leads to that the dimension of the space spanned by $\{\ket{\psi_i}\}$ is no more than $2$. Thus, $\rho_{AB} = \tr_C(\rho_{ABC}) = \sum_i p_i \ket{\psi_i}\!\bra{\psi_i}$ has rank no more than $2$. By applying Observation~\ref{ob:subrank2}, we finish the proof. \end{proof} { We have two remarks. First, one can indeed find tripartite entangled states satisfying Condition 7 and separable for the bipartition $AB|C$. {Especially, there exist tripartite entangled states which are separable for any bipartition~\cite{bennett1999unextendible,acin2001classification}, which satisfy Condition 7 automatically.} We collect more such examples in Appendix~\ref{ap:i}. Second, Observations~\ref{ob:subrank2}, \ref{ob:wholerank2} may provide insight into a type of quantum marginal problem: whether a global state can be separable or entangled if its marginal systems are subjected to separability conditions and rank constraints. } \section{Examples for three-qubit states}\label{ap:i} Here, we discuss three-qubit entangled states that satisfy Condition 7 for the complete entanglement change. In this Appendix, we will first propose a nontrivial three-qubit state that is entangled $A|BC$ and $AC|B$ but separable for $AB|C$. Next, we will connect the complete entanglement change with bound entanglement. 
\subsection*{I.1: Complete entanglement change with separability for $AB|C$} To find a nontrivial three-qubit entangled state that satisfy Condition 7, we employ the method of entanglement witnesses: For an Hermitian operator $W$, it is called an entanglement witness if $\mathrm{tr} (W \rho_{s}) \geq 0$ for all separable states $\rho_{s}$, and $\mathrm{tr} (W \rho_{e}) < 0$ for some entangled states $\rho_{e}$. The latter allows us to detect entanglement. In particular, we adopt the entanglement witness that can have the negative eigenvalues of its partial transpose (NPT) state. This witness is described as follows: Suppose that a state $\rho_{e}$ is NPT. Then one can find a negative eigenvalue $\lambda <0$ of $\rho_{e}^{T_A}$ and the corresponding eigenvector $\ket{\phi_C}$. Hence the operator $\ket{\phi_C}\!\bra{\phi_C}^{T_A}$ can be an witness to detect the entangled state $\rho_{e}$. In practice, entanglement witnesses can be implemented by semi-definite programming (SDP). For our purpose, we use the following conditions that are compatible with the SDP method. First, to impose the separability condition for the bipartition $AB|C$, we apply the fact that if a $2 \otimes N$ state $\rho_{XY}$ obeys $\rho_{XY} = \rho_{XY}^{T_X}$, then it is separable, see Theorem $2$ in Ref.~\cite{kraus2000separability}. That is, we require that $\rho_{ABC}=\rho_{ABC}^{T_C}$. Second, for the separability condition of the two-qubit post-measurement state $\sigma_{\ket{x}}$, we employ the positive partial transpose (PPT) criterion, which is necessary and sufficient for two-qubit separability. Third, for the sake of simplicity, we suppose that the state $\rho_{ABC}$ is invariant under exchange between $A$ and $B$ using SWAP operator $\mathrm{SWAP} \ket{a}\ket{b} = \ket{b}\ket{a}$. Since the set of NPT states is not convex, we use the see-saw method with entanglement witnesses. 
This is a numerical iteration technique for non-convex optimization, which allows us to find states with the (local) minimal value as a solution. From the numerical solution, we can find an analytical form of the state and verify that it satisfies Condition $1$ for any measurement direction. Our finding is the following entangled state: \begin{align} \Tilde{\rho} &=\frac{1}{8} \begin{bmatrix} 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 2 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 2 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \end{bmatrix}. \end{align} This state has the following properties. First, the matrix rank of $\Tilde{\rho}$ is $4$. Second, one can show that the state $\sigma_{\ket{x}}$ with $\ket{x} = (\cos t, e^{i a} \sin t)$ is PPT and therefore separable for any $t,a$. Third, the minimum eigenvalue of $\Tilde{\rho}^{T_A}$ is equal to $-1/8$. Fourth, the party $C$ is not entangled with the other two parties. Nevertheless, the discord $D_{AB|C}(\tilde{\rho})>0$, which is necessary for complete entanglement change according to Observation~\ref{ob:relative2}. \subsection*{I.2: Complete entanglement change and bound entanglement} We have found the existence of state $\Tilde{\rho}$ that is entangled states for $A|BC$ and $AC|B$ but separable for $AB|C$ that can achieve the complete entanglement change. Now we are also interested in the case where the separability for $AB|C$ is replaced by bound entanglement. 
Such a state is already known as the $4\otimes 2$ bound entangled state~\cite{horodecki1997separability}, denoted by \begin{equation} \rho_{\rm HDK}=\frac{1}{h} \begin{bmatrix} 2 t & 0 & 0 & 0 & 0 & 0 & 2 t & 0 \\ 0 & 2 t & 0 & 0 & 0 & 0 & 0 & 2 t \\ 0 & 0 & t+1 & 0 & 0 & 0 & 0 & t' \\ 0 & 0 & 0 & 2 t & 2 t & 0 & 0 & 0 \\ 0 & 0 & 0 & 2 t & 2 t & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 2 t & 0 & 0 \\ 2 t & 0 & 0 & 0 & 0 & 0 & 2 t & 0 \\ 0 & 2 t & t' & 0 & 0 & 0 & 0 & t+1 \end{bmatrix}, \end{equation} where $t'=\sqrt{1-t^2}$, $h={2(1+7t)}$ and $0<t<1$. Here the parties $AB$ are in $4$-dimensional systems and the party $C$ is a $2$-dimensional system. We remark that this state satisfies Condition $1$. Since this state is NPT entangled for $A|BC$ and $AC|B$ but PPT entangled for $AB|C$, we cannot apply Observation~\ref{ob:wholerank2}. On the other hand, its reduced state $\rho_{AB}$ has rank $4$, and therefore, it complies with Observation~\ref{ob:subrank2}. To proceed further, we now present the following: \begin{observation} If a tripartite state $\rho_{ABC}$ is separable either for the bipartition $A|BC$ or the bipartition $B|AC$, then $\rho_{ABC}$ satisfies Condition $1$. \end{observation} \begin{proof} If $\rho_{ABC}$ is separable either for $A|BC$ or $B|AC$, then the normalized state of $\bra{x}\rho_{ABC}\ket{x}$ is separable for any measurement direction $\ket{x}$ on $C$. Thus, Observation~\ref{ob:lower2} implies that the entanglement change must be complete. 
\end{proof} In the following, we collect entangled states for complete entanglement change which are even separable for any bipartition: \begin{align} \rho_{\rm UPB} &=\frac{1}{32} \begin{bmatrix} 7 & 1 & 1 & \bar{1} & 1 & \bar{1} & \bar{1} & 1 \\ 1 & 3 & \bar{1} & 1 & \bar{1} & \bar{3} & 1 & \bar{1} \\ 1 & \bar{1} & 3 & \bar{3} & \bar{1} & 1 & 1 & \bar{1} \\ \bar{1} & 1 & \bar{3} & 3 & 1 & \bar{1} & \bar{1} & 1 \\ 1 & \bar{1} & \bar{1} & 1 & 3 & 1 & \bar{3} & \bar{1} \\ \bar{1} & \bar{3} & 1 & \bar{1} & 1 & 3 & \bar{1} & 1 \\ \bar{1} & 1 & 1 & \bar{1} & \bar{3} & \bar{1} & 3 & 1 \\ 1 & \bar{1} & \bar{1} & 1 & \bar{1} & 1 & 1 & 7 \\ \end{bmatrix}, \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \rho_{\rm ADMA} =\frac{1}{n} \begin{bmatrix} 1 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ 0 & a & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & b & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & c & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & \frac{1}{c} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \frac{1}{b} & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & \frac{1}{a} & 0 \\ 1 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ \end{bmatrix},\\ \rho_{AK} &=\frac{1}{8(1+y)} \begin{bmatrix} x & 0 & 0 & 0 & 0 & 0 & 0 & 2 \\ 0 & y & 0 & 0 & 0 & 0 & 2 & 0 \\ 0 & 0 & y & 0 & 0 & \bar{2} & 0 & 0 \\ 0 & 0 & 0 & y & 2 & 0 & 0 & 0 \\ 0 & 0 & 0 & 2 & y & 0 & 0 & 0 \\ 0 & 0 & \bar{2} & 0 & 0 & y & 0 & 0 \\ 0 & 2 & 0 & 0 & 0 & 0 & y & 0 \\ 2 & 0 & 0 & 0 & 0 & 0 & 0 & x \\ \end{bmatrix}, \,\,\,\,\,\,\,\, \rho_{PH} =\frac{1}{m} \begin{bmatrix} 2 z & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 \\ 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & \frac{1}{z} & 0 & 0 & 0 & 0 \\ 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \frac{1}{z} & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & \frac{1}{z} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \end{bmatrix}, \end{align} where $\bar{1} = -1, \bar{2} = -2, \bar{3} = -3$, $a,b,c,x,y,z > 0$, $abc\neq 1$, $x=y+4$, $n=2 + 1/a + a + 1/b + b + 1/c + c$, and $m=3 + 3/z + 2 z$. 
These states have been already known: $\rho_{\rm UPB}$ in Ref.~\cite{bennett1999unextendible}, $\rho_{\rm ADMA}$ in Ref.~\cite{acin2001classification}, $\rho_{AK}$ in Ref.~\cite{kay2011optimal}, and {the Hyllus state} $\rho_{PH}$ in Eq.~(2.105) in Ref.~\cite{hyllus2005witnessing}. Note that $\rho_{AK}$ is entangled for $2\leq y\leq 2.828$ but separable for $y \geq 2\sqrt{2}$. Also $\rho_{\rm UPB}$ is permutationally symmetric. Let us summarize the property of these states. The first common property of them is that they are separable for any bipartition, but not fully separable. In that sense, they are not multipartite distillable and then bound entangled~\cite{guhne2011entanglement}. Here we remark that GHZ diagonal states that are PPT for any bipartition are separable for any bipartition \cite{nagata2009necessary}. Second, their matrix ranks are, respectively, given by $\text{Rank}(\rho_{\rm UPB})=4, \, \text{Rank}(\rho_{\rm ADMA})=7, \, \text{Rank}(\rho_{\rm AK})=8, \, \text{Rank}(\rho_{\rm PH})=5.$ This follows the results of Observation~\ref{ob:wholerank2}. Finally, these bound entangled states can be detected with the help of the previously presented entanglement criteria in Refs.~\cite{horodecki2006separability, guhne2010separability, guhne2011entanglement}. The last example is the three-qubit thermal state with Heisenberg chain model: \begin{align} &\rho_H = \exp{(-H_H/T)}/Z,\\ &H_{H}=\sum_{i=1,2,3} \sigma_X^i \sigma_X^{i+1} +\sigma_Y^i \sigma_Y^{i+1} +\sigma_Z^i \sigma_Z^{i+1}, \end{align} with temperature $T$ and $Z=\tr[\exp{(-H_H/T)}]$. This thermal state has been shown to be bound entangled in the temperature range $T \in [4.33, 5.46]$, in the sense that they are separable for any bipartition but not fully separable in Refs.~\cite{eggeling2001separability, toth2007optimal} and Table II in \cite{toth2009spin}, where the bound entanglement can be detected by the optimal spin squeezing inequality. 
\section{Generalization of Observation~\ref{ob:subrank2}}\label{ap:j} \begin{observation}\label{ob:multisubrank} Let $\rho_{A_1\ldots A_{n-1} A_{n}}$ be a $n$-partite quantum state and let $P_{n}^{\ket{x}}=\ket{x}\bra{x}$ be a projector on the subsystem $A_{n}$ with $\sum_x P_{n}^{\ket{x}}=I$. Suppose that the normalized state $\sigma^{\ket{x}} = \tr_{n}(P_{n}^{\ket{x}} \rho_{A_1\ldots A_{n-1} A_{n}})/p_n$ with $p_n=\tr(P_{n}^{\ket{x}} \rho_{A_1\ldots A_{n-1} A_{n}})$ is fully separable for any $\ket{x}$, and the reduced state $\rho_{A_1\ldots A_{n-1}}=\tr_{n} (\rho_{A_1\ldots A_{n-1} A_{n}})$ can be written as \begin{equation} \rho_{A_1\ldots A_{n-1}} = \sum_{i=1}^k p_i |\psi_i\rangle\langle \psi_i|, \end{equation} where $\{|\psi_i\rangle\}_{i=1}^k$ are linearly independent fully product states, i.e., $\ket{\psi_i}=\bigotimes_{j=1}^n \ket{\psi_j^i}$, and any superposition of $\{|\psi_i\rangle\}_{i=1}^k$ given by $\sum_i c_i \ket{\psi_i}$ is not a fully product state. In this case, $\rho_{A_1\ldots A_{n-1} A_{n}}$ should be fully separable. \end{observation} \begin{proof} We begin by recalling that any $n$-particle state can be written as \begin{align} \rho_{A_1\ldots A_{n-1} A_{n}} = \sum_{i,j} M_{ij}\otimes \ket{i}\!\bra{j}, \end{align} where $M_{ij}$ is a matrix on the $A_1\ldots A_{n-1}$ system and $\ket{i}\!\bra{j}$ is on the $A_n$ system. Then, from the assumption, we notice \begin{align} \rho_{A_1\ldots A_{n-1}}=\sum_i M_{ii}=\sum_{i=1}^k p_i |\psi_i\rangle\langle \psi_i|. \end{align} Note that $M_{ii}=\sigma^{\ket{i}}$, which implies that the range of $\sigma^{\ket{x}}$ is in the subspace spanned by $\{\ket{\psi_i}\}$. From the assumption that $\sigma^{\ket{x}}$ is separable and any superposition of $\{\ket{\psi_i}\} $ is entangled, we have \begin{align} \sigma^{\ket{x}} = \sum_j q_j^x \ket{\psi_j}\!\bra{\psi_j}. \end{align} Also $M_{ij}$ can be written in the linear combination of $\sigma^{\ket{x}}$. 
Accordingly, we have \begin{align} \rho_{A_1\ldots A_{n-1} A_{n}} = \sum_{i,j,k} c_{ijk} \ket{\psi_k}\!\bra{\psi_k} \otimes \ket{i}\!\bra{j} = \sum_{k} \ket{\psi_k}\!\bra{\psi_k} \otimes \tau_k, \end{align} where $\tau_k =\sum_{ij} c_{ijk} \ket{i}\!\bra{j}$, and $c_{ijk}$ is the coefficient of $\ket{\psi_k}\!\bra{\psi_k}$ when we expand $M_{ij}$. Below we show that $\tau_k$ is positive semidefinite. From the assumption that $\{|\psi_i\rangle\}_{i=1}^k$ are linearly independent, we know that there are states $\{|\phi_i\rangle\}_{i=1}^k$ such that \begin{align} &\braket{\psi_i|\phi_j} = 0,\text{ if } i\neq j,\ \braket{\psi_i|\phi_i} > 0. \end{align} Then, for any $\ket{v}$, \begin{equation} \bra{\phi_i v} \rho_{A_1\ldots A_{n-1} A_{n}} \ket{\phi_i v} = \braket{\phi_i|\psi_i}^2 \bra{v}\tau_i\ket{v} \ge 0, \end{equation} that is, $\bra{v}\tau_i\ket{v} \ge 0$. This implies that $\tau_i$ is positive semidefinite. Hence, $\rho_{A_1\ldots A_{n-1} A_{n}}$ is a fully separable state. \end{proof} \end{document}
\begin{document} \large \title[A Singular limit problem of Rosenau-KdV type]{A singular limit problem for conservation laws \\ related to the Rosenau-Korteweg-de Vries equation} \author[G. M. Coclite and L. di Ruvo]{Giuseppe Maria Coclite and Lorenzo di Ruvo} \address[Giuseppe Maria Coclite and Lorenzo di Ruvo] {\newline Department of Mathematics, University of Bari, via E. Orabona 4, 70125 Bari, Italy} \email[]{[email protected], [email protected]} \urladdr{http://www.dm.uniba.it/Members/coclitegm/} \keywords{Singular limit, compensated compactness, Rosenau-KdV-equation, entropy condition.} \subjclass[2000]{35G25, 35L65, 35L05} \thanks{The authors are members of the Gruppo Nazionale per l'Analisi Matematica, la Probabilit\`a e le loro Applicazioni (GNAMPA) of the Istituto Nazionale di Alta Matematica (INdAM)} \begin{abstract} We consider the Rosenau-Korteweg-de Vries equation, which contains nonlinear dispersive effects. We prove that as the diffusion parameter tends to zero, the solutions of the dispersive equation converge to discontinuous weak solutions of the Burgers equation. The proof relies on deriving suitable a priori estimates together with an application of the compensated compactness method in the $L^p$ setting. \end{abstract} \maketitle \section{Introduction}\label{sec:intro} Dynamics of shallow water waves that is observed along lake shores and beaches has been a research area for the past few decades in oceanography (see \cite{AB,ZZZC}). There are several models proposed in this context: Boussinesq equation, Peregrine equation, regularized long wave (RLW) equation, Kawahara equation, Benjamin-Bona-Mahoney equation, Bona-Chen equation etc. These models are derived from first principles under various different hypothesis and approximations. They are all well studied and very well understood. 
In this context, there is also the Korteweg-de Vries equation \begin{equation} \label{eq:ZIU7} \partial_t u +\partial_x u^2 +\beta\partial_{xxx} u=0. \end{equation} Observe that, if we send $\beta\to0$ in \eqref{eq:ZIU7}, we pass from \eqref{eq:ZIU7} to the Burgers equation \begin{equation} \label{eq:BU} \partial_t u +\partial_x u^2 =0. \end{equation} In \cite{LN,SC}, the convergence of the solution of \eqref{eq:ZIU7} to the unique entropy solution of \eqref{eq:BU} is proven, under the assumption \begin{equation} \label{eq:assu-1} u_{0}\in L^2(\mathbb{R})\cap L^4(\mathbb{R}), \quad \beta=o\left(\varepsilon^2\right). \end{equation} \cite[Appendixes $A$ and $B$]{Cd6} show that it is possible to obtain the same result of convergence, under the following assumptions \begin{equation} \label{eq:assu-2} \begin{split} & u_{0}\in L^2(\mathbb{R}),\quad -\infty<\int_{\mathbb{R}}u_{0}(x) dx<\infty, \quad \beta=o\left(\varepsilon^3\right),\\ & u_{0}\in L^2(\mathbb{R}),\quad \beta=o\left(\varepsilon^4\right). \end{split} \end{equation} One generalization of \eqref{eq:ZIU7} is the Ostrovsky equation (see \cite{O}): \begin{equation} \label{eq:OHbeta} \partial_x (\partial_t u+\partial_x u^2-\beta \partial_{xxx} u)=\gamma u, \quad \beta,\gamma\in\mathbb{R}. \end{equation} \eqref{eq:OHbeta} describes small-amplitude long waves in a rotating fluid of a finite depth by the additional term induced by the Coriolis force. 
If we send $\beta\to 0$ in \eqref{eq:OHbeta}, we pass from \eqref{eq:OHbeta} to the Ostrovsky-Hunter equation (see \cite{B}). \begin{equation} \label{eq:OH} \partial_x (\partial_t u+\partial_x u^2)=\gamma u,\qquad t>0, \quad x\in\mathbb{R}. \end{equation} In \cite{Cd1,CdK,dR}, the wellposedness of the entropy solutions of \eqref{eq:OH} is proven, in the sense of the following definition: \begin{definition} \label{def:sol} We say that $u\in L^{\infty}((0,T)\times\mathbb{R}),\,T>0,$ is an entropy solution of the initial value problem \eqref{eq:OH} if \begin{itemize} \item[$i$)] $u$ is a distributional solution of \eqref{eq:OH}; \item[$ii$)] for every convex function $\eta\in C^2(\mathbb{R})$ the entropy inequality \begin{equation} \label{eq:OHentropy} \partial_t \eta(u)+ \partial_x q(u)-\gamma\eta'(u) P\le 0, \qquad q(u)=\int^u f'(\xi) \eta'(\xi)\, d\xi, \end{equation} holds in the sense of distributions in $(0,\infty)\times\mathbb{R}$. \end{itemize} \end{definition} Under the assumption \eqref{eq:assu-1}, in \cite{Cd2}, the convergence of the solutions of \eqref{eq:OHbeta} to the unique entropy solution of \eqref{eq:OH} is proven. The dynamics of dispersive shallow water waves, on the other hand, is captured with slightly different models, like the Rosenau-Kawahara equation and the Rosenau-KdV-RLW equation \cite{BTL,EMTYB,HXH,LB,RAB}. 
The Rosenau-Korteweg-de Vries-RLW equation is the following one: \begin{equation} \label{eq:RKV-1} \partial_t u +a\partial_x u +k\partial_x u^{n}+b_1\partial_{xxx} u +b_2\partial_{txx} u + c\partial_{txxxx} u=0,\quad a,\,k,\,b_1,\,b_2,\,c\in\mathbb{R}. \end{equation} Here $u(t,x)$ is the nonlinear wave profile. The first term is the linear evolution one, while $a$ is the advection or drifting coefficient. The two dispersion coefficients are $b_1$ and $b_2$. The higher order dispersion coefficient is $c$, while the coefficient of nonlinearity is $k$ where $n$ is nonlinearity parameter. These are all known and given parameters. In \cite{RAB}, the authors analyzed \eqref{eq:RKV-1}. They got solitary waves, shock waves and singular solitons along with conservation laws. Considering $n=2,\, a=0,\, k=1,\, b_1=1,\, b_2=-1,\, c=1$: \begin{equation} \label{eq:RKV-23} \partial_t u +\partial_x u^2 +\partial_{xxx} u -\partial_{txx} u +\partial_{txxxx} u=0. \end{equation} If $n=2, \, a=0,\, k=1,\, b_1=0,\, b_2=-1,\, c=1$, \eqref{eq:RKV-1} reads \begin{equation} \label{eq:RKV-30} \partial_t u +\partial_x u^2 -\partial_{txx} u +\partial_{txxxx} u=0, \end{equation} which is known as Rosenau-RLW equation. Arguing in \cite{CdREM}, we re-scale the equations as follows \begin{align} \label{eq:T1} \partial_t u +\partial_x u^2 +\beta\partial_{xxx} u -\beta\partial_{txx} u +\beta^2\partial_{txxxx} u&=0,\\ \label{eq:T2} \partial_t u +\partial_x u^2 -\beta\partial_{txx} u +\beta^2\partial_{txxxx} u&=0, \end{align} where $\beta$ is the diffusion parameter. 
In \cite{Cd5}, the authors proved that the solutions of \eqref{eq:T1} and \eqref{eq:T2} converge to the unique entropy solution of \eqref{eq:BU}, under the assumptions \begin{equation} \label{eq:uo-l4} u_{0}\in L^2(\mathbb{R})\cap L^4(\mathbb{R}), \quad \beta=\mathcal{O}\left(\varepsilon^4\right). \end{equation} \eqref{eq:ZIU7} has also been used in very wide applications and undergone research which can be used to describe wave propagation and spread interaction (see \cite{Ba,CM,OK,ZUZO}). In the study of the dynamics of dense discrete systems, the case of wave-wave and wave-wall interactions cannot be described using \eqref{eq:ZIU7}. To overcome this shortcoming of \eqref{eq:ZIU7}, Rosenau proposed the following equation (see \cite{Ro1,Ro2}): \begin{equation} \label{eq:ROUS1} \partial_t u + \partial_x u^2 + \partial_{txxxx} u =0, \end{equation} which is also obtained by \eqref{eq:RKV-1}, taking $n=2,\, a=0,\, k=1,\, b_1=0,\, b_2=0,\, c=1$. The existence and the uniqueness of the solution for \eqref{eq:ROUS1} is proved in \cite{P}, but it is difficult to find the analytical solution for \eqref{eq:ROUS1}. Therefore, much work has been done on the numerical methods for \eqref{eq:ROUS1} (see \cite{CH1,CHH,CHP,KL,MPC,OAAK}). On the other hand, for the further consideration of the nonlinear wave, the viscous term $\partial_{xxx} u$ needs to be included (see \cite{Z}). In this case, \eqref{eq:ROUS1} reads \begin{equation} \label{eq:ROUS2} \partial_t u + \partial_x u^2 +\partial_{xxx} u + \partial_{txxxx} u =0, \end{equation} which is known as the Rosenau-Korteweg-de Vries (KdV) equation, and is also obtained by \eqref{eq:RKV-1}, taking $n=2,\, a=0,\, k=1,\, b_1=1,\, b_2=0,\, c=1$. In \cite{Z}, the author discussed the solitary wave solutions and \eqref{eq:ROUS2}. 
In \cite{HXH}, a conservative linear finite difference scheme for the numerical solution of an initial-boundary value problem for the Rosenau--KdV equation is considered. In \cite{E,RTB}, the authors discussed the solitary solutions for \eqref{eq:ROUS2} with the usual solitary ansatz method. The authors also gave the two invariants for \eqref{eq:ROUS2}. In particular, in \cite{RTB}, the authors studied two types of soliton solutions: solitary wave solutions and singular solitons. In \cite{ZZ}, the authors proposed an average linear finite difference scheme for the numerical solution of the initial-boundary value problem for \eqref{eq:ROUS2}.
Consider \eqref{eq:ROUS1}. Arguing as in \cite{CdREM}, we re-scale the equation as follows:
\begin{equation}
\label{eq:P90}
\partial_t u +\partial_x u^2 +\beta^2\partial_{txxxx} u=0.
\end{equation}
In \cite{Cd6}, the authors proved that the solutions of \eqref{eq:P90} converge to the unique entropy solution of \eqref{eq:BU}, choosing the initial datum in two different ways. The first one is:
\begin{equation}
\label{eq:uo-l2}
u_{0}\in L^2(\mathbb{R}), \quad \beta=o\left(\varepsilon^4\right).
\end{equation}
The second choice is given by \eqref{eq:uo-l4}.
In this paper, we analyze \eqref{eq:ROUS2}. Arguing as in \cite{CdREM}, we re-scale the equation as follows:
\begin{equation}
\label{eq:RKV33}
\partial_t u+ \partial_x u^2+\beta\partial_{xxx} u+\beta^2\partial_{txxxx} u=0.
\end{equation}
We are interested in the no high frequency limit, that is, we send $\beta\to 0$ in \eqref{eq:RKV33}. In this way we pass from \eqref{eq:RKV33} to \eqref{eq:BU}. We prove that, as $\beta\to0$, the solutions of \eqref{eq:RKV33} converge to the unique entropy solution of \eqref{eq:BU}.
In order to do this, we can choose the initial datum and $\beta$ in two different ways. Following \cite[Theorem $7.1$]{CRS}, the first choice is given by \eqref{eq:uo-l2} (see Theorem \ref{th:main-1}). Since $\norm{\cdot}_{L^4}$ is a conserved quantity for \eqref{eq:RKV33}, the second choice is given by \eqref{eq:uo-l4} (see Theorem \ref{th:main-13}). It is interesting to observe that, while the summability of the initial datum in \eqref{eq:uo-l4} is greater than the one of \eqref{eq:uo-l2}, the assumption on $\beta$ in \eqref{eq:uo-l4} is weaker than the one in \eqref{eq:uo-l2}. From the mathematical point of view, the two assumptions require two different arguments for the $L^{\infty}$-estimate (see Lemmas \ref{lm:50} and \ref{lm:562}). Indeed, the proof of Lemma \ref{lm:50}, under the assumption \eqref{eq:uo-l2}, is more technical than the one of Lemma \ref{lm:562}. Moreover, due to the presence of the third order term, Lemmas \ref{lm:50} and \ref{lm:t3} are finer than \cite[Lemmas $2.2$ and $3.2$]{Cd6}. Indeed, with respect to \cite[Lemma $2.2$]{Cd6}, in Lemma \ref{lm:50} we need to prove the existence of two positive constants, while, with respect to \cite[Lemma $3.2$]{Cd6}, in Lemma \ref{lm:t3} we need to prove the existence of four positive constants.
The paper is organized in four sections. In Section \ref{sec:Ro1}, we prove the convergence of \eqref{eq:RKV33} to \eqref{eq:BU} in the $L^{p}$ setting, with $1\le p< 2$. In Section \ref{sec:D1}, we prove the convergence of \eqref{eq:RKV33} to \eqref{eq:BU} in the $L^{p}$ setting, with $1\le p< 4$. Section \ref{appen1} is an appendix, where we prove that the solutions of the Benjamin--Bona--Mahony equation converge to discontinuous weak solutions of \eqref{eq:BU} in the $L^{p}$ setting, with $1\le p< 2$.
\section{The Rosenau--KdV equation: $u_0\in L^2(\mathbb{R})$.}\label{sec:Ro1}
In this section, we consider \eqref{eq:RKV33}, and assume \eqref{eq:uo-l2} on the initial datum. We study the dispersion-diffusion limit for \eqref{eq:RKV33}.
Therefore, we fix two small numbers $0 < \varepsilon,\,\beta < 1$ and consider the following fifth order approximation:
\begin{equation}
\label{eq:Ro-eps-beta}
\begin{cases}
\partial_t u_{\varepsilon,\beta}+ \partial_x u_{\varepsilon,\beta}^2 +\beta\partial_{xxx} u_{\varepsilon,\beta} +\beta^2\partial_{txxxx} u_{\varepsilon,\beta}=\varepsilon\partial_{xx} u_{\varepsilon,\beta}, &\qquad t>0, \ x\in\mathbb{R} ,\\
u_{\varepsilon,\beta}(0,x)=u_{\varepsilon,\beta,0}(x), &\qquad x\in\mathbb{R},
\end{cases}
\end{equation}
where $u_{\varepsilon,\beta,0}$ is a $C^\infty$ approximation of $u_{0}$ such that
\begin{equation}
\begin{split}
\label{eq:u0eps-1}
&u_{\varepsilon,\,\beta,\,0} \to u_{0} \quad \textrm{in $L^{p}_{loc}(\mathbb{R})$, $1\le p < 2$, as $\varepsilon,\,\beta \to 0$,}\\
&\norm{u_{\varepsilon,\beta, 0}}^2_{L^2(\mathbb{R})}+(\beta^{\frac{1}{2}}+ \varepsilon^2) \norm{\partial_x u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,\\
&\left(\beta^2 +\beta\varepsilon^2\right) \norm{\partial_{xx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})} +\beta^{\frac{5}{2}}\norm{\partial_{xxx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,
\end{split}
\end{equation}
and $C_0$ is a constant independent of $\varepsilon$ and $\beta$.
The main result of this section is the following theorem.
\begin{theorem}
\label{th:main-1}
Assume that \eqref{eq:uo-l2} and \eqref{eq:u0eps-1} hold.
Fix $T>0$. If
\begin{equation}
\label{eq:beta-eps-2}
\beta=\mathcal{O}\left(\varepsilon^4\right),
\end{equation}
then, there exist two sequences $\{\varepsilon_{n}\}_{n\in\mathbb{N}}$, $\{\beta_{n}\}_{n\in\mathbb{N}}$, with $\varepsilon_n, \beta_n \to 0$, and a limit function
\begin{equation*}
u\in L^{\infty}((0,T); L^2(\mathbb{R})),
\end{equation*}
such that
\begin{itemize}
\item[$i)$] $u_{\varepsilon_n, \beta_n}\to u$ strongly in $L^{p}_{loc}(\mathbb{R}^{+}\times\mathbb{R})$, for each $1\le p <2$,
\item[$ii)$] $u$ is a distributional solution of \eqref{eq:BU}.
\end{itemize}
Moreover, if
\begin{equation}
\label{eq:beta-eps-4}
\beta=o\left(\varepsilon^{4}\right),
\end{equation}
then
\begin{itemize}
\item[$iii)$] $u$ is the unique entropy solution of \eqref{eq:BU}.
\end{itemize}
\end{theorem}
Let us prove some a priori estimates on $u_{\varepsilon,\beta}$, denoting by $C_0$ the constants which depend only on the initial data.
\begin{lemma}\label{lm:38}
For each $t>0$,
\begin{equation}
\label{eq:l-2-u1}
\norm{u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\beta^2\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + 2\varepsilon\int_{0}^{t}\norm{\partial_x u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le C_0.
\end{equation}
\end{lemma}
\begin{proof}
We begin by observing that
\begin{equation*}
\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_{xxx} u_{\varepsilon,\beta}\, dx =0.
\end{equation*}
Therefore, arguing as in \cite[Lemma $2.1$]{Cd6}, we have \eqref{eq:l-2-u1}.
\end{proof}
\begin{lemma}\label{lm:50}
Fix $T>0$. Assume that \eqref{eq:beta-eps-2} holds. There exists $C_0>0$, independent of $\varepsilon,\,\beta$, such that
\begin{equation}
\label{eq:u-infty-3}
\norm{u_{\varepsilon,\beta}}_{L^{\infty}((0,T)\times\mathbb{R})}\le C_0\beta^{-\frac{1}{4}}.
\end{equation}
Moreover,
\begin{itemize}
\item[$i)$] the families $\{\beta^{\frac{1}{2}}\partial_x u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\,\{\beta^{\frac{1}{4}}\varepsilon\partial_x u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\,\{\beta^{\frac{3}{4}}\varepsilon\partial_{xx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta}, \{\beta^{\frac{3}{2}}\partial_{xxx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},$\\
are bounded in $L^{\infty}((0,T);L^{2}(\mathbb{R}))$;
\item[$ii)$] the families $\{\beta^{\frac{3}{4}}\varepsilon^{\frac{1}{2}}\partial_{tx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},$ $\{\beta^{\frac{7}{4}}\varepsilon^{\frac{1}{2}}\partial_{txxx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},$ $\{\beta^{\frac{1}{4}}\varepsilon\partial_t u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},$ \\
$\{\beta^{\frac{5}{4}}\varepsilon^{\frac{1}{2}}\partial_{txx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta}$, $\{\beta^{\frac{1}{2}}\varepsilon^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta}$ are bounded in $L^2((0,T)\times\mathbb{R})$.
\end{itemize}
\end{lemma}
\begin{proof}
Let $0<t<T$. Let $A,\,B$ be some positive constants which will be specified later.
Multiplying \eqref{eq:Ro-eps-beta} by $-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}$, we have
\begin{equation}
\label{eq:p456}
\begin{split}
&\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_t u_{\varepsilon,\beta}\\
&\qquad\quad +2\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta}+B\varepsilon \partial_t u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\\
&\qquad\quad +\beta \left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta}+ B\varepsilon\partial_t u_{\varepsilon,\beta}\right)\partial_{xxx} u_{\varepsilon,\beta}\\
&\qquad\quad +\beta^2\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} - A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_{txxxx} u_{\varepsilon,\beta}\\
&\qquad=\varepsilon\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_{xx} u_{\varepsilon,\beta}.
\end{split}
\end{equation}
We observe that
\begin{equation}
\label{eq:L1}
\begin{split}
&\int_{\mathbb{R}}\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_t u_{\varepsilon,\beta}\, dx\\
&\qquad= \frac{\beta^{\frac{1}{2}}}{2}\frac{d}{dt}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \beta\varepsilon A\norm{\partial_{tx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +B\varepsilon\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{split}
\end{equation}
Since
\begin{align*}
2\int_{\mathbb{R}}&\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx\\
=& -2\beta^{\frac{1}{2}}\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{xx} u_{\varepsilon,\beta}\, dx -2A\beta\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{txx} u_{\varepsilon,\beta}\, dx \\
&+2B\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_t u_{\varepsilon,\beta}\, dx,\\
\beta \int_{\mathbb{R}}&\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta}+ B\varepsilon\partial_t u_{\varepsilon,\beta}\right)\partial_{xxx} u_{\varepsilon,\beta}\, dx \\
=& A\beta^2\varepsilon\int_{\mathbb{R}}\partial_{xx} u_{\varepsilon,\beta}\partial_{txxx} u_{\varepsilon,\beta}\, dx + B\beta\varepsilon \int_{\mathbb{R}} \partial_x u_{\varepsilon,\beta}\partial_{txx} u_{\varepsilon,\beta}\, dx,\\
\beta^2\int_{\mathbb{R}}&\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} - A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_{txxxx} u_{\varepsilon,\beta}\, dx\\
=& \frac{\beta^{\frac{5}{2}}}{2}\frac{d}{dt}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + A\beta^3\varepsilon\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&+B\beta^2\varepsilon\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
\varepsilon\int_{\mathbb{R}}&\left(-\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta} -A\beta\varepsilon \partial_{txx} u_{\varepsilon,\beta} +B\varepsilon \partial_t u_{\varepsilon,\beta}\right)\partial_{xx} u_{\varepsilon,\beta}\, dx\\
=& -\beta^{\frac{1}{2}}\varepsilon\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} -\frac{A\beta\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad -\frac{B\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},
\end{align*}
an integration on $\mathbb{R}$ of \eqref{eq:p456} gives
\begin{equation}
\label{eq:L6}
\begin{split}
&\frac{d}{dt}\left(\frac{\beta^{\frac{1}{2}} + B\varepsilon^2}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{A\beta\varepsilon^2}{2} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad + \frac{\beta^{\frac{5}{2}}}{2}\frac{d}{dt}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \beta\varepsilon A\norm{\partial_{tx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +B\varepsilon\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + A\beta^3\varepsilon\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +B\beta^2\varepsilon\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \beta^{\frac{1}{2}}\varepsilon\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad= 2\beta^{\frac{1}{2}}\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{xx} u_{\varepsilon,\beta}\, dx +2A\beta\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{txx} u_{\varepsilon,\beta}\, dx \\
&\qquad\quad -2B\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_t u_{\varepsilon,\beta}\, dx -
A\beta^2\varepsilon\int_{\mathbb{R}}\partial_{xx} u_{\varepsilon,\beta}\partial_{txxx} u_{\varepsilon,\beta}\, dx\\
&\qquad\quad - B\beta\varepsilon \int_{\mathbb{R}} \partial_x u_{\varepsilon,\beta}\partial_{txx} u_{\varepsilon,\beta}\, dx.
\end{split}
\end{equation}
Using \eqref{eq:u0eps-1}, $0<\beta<1$, and the Young inequality,
\begin{align*}
2\beta^{\frac{1}{2}}&\int_{\mathbb{R}}\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx} u_{\varepsilon,\beta}\vert\, dx= \beta^{\frac{1}{2}}\int_{\mathbb{R}} \left\vert\frac{2u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}}{\varepsilon^{\frac{1}{2}}}\right\vert\left\vert \varepsilon^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)\right\vert\, dx \\
\le& \frac{2\beta^{\frac{1}{2}}}{\varepsilon}\int_{\mathbb{R}}u_{\varepsilon,\beta}^2(\partial_x u_{\varepsilon,\beta})^2\, dx +\frac{\beta^{\frac{1}{2}}\varepsilon}{2}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
\le& C_{0}\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^{\frac{1}{2}}\varepsilon}{2}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
2A\beta\varepsilon\int_{\mathbb{R}}&\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{txx} u_{\varepsilon,\beta}\vert\, dx= \varepsilon \int_{\mathbb{R}}\left\vert \frac{2Au_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}}{\sqrt{B}}\right\vert\left\vert\sqrt{B}\beta\partial_{txx} u_{\varepsilon,\beta}\right\vert\, dx\\
\le&
\frac{2A^2\varepsilon}{B}\int_{\mathbb{R}}u_{\varepsilon,\beta}^2(\partial_x u_{\varepsilon,\beta})^2\, dx + \frac{B\beta^2\varepsilon}{2}\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
\le&\frac{2A^2\varepsilon}{B}\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{B\beta^2\varepsilon}{2}\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
2B\varepsilon\int_{\mathbb{R}}&\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_t u_{\varepsilon,\beta}\vert\, dx = B\varepsilon\int_{\mathbb{R}} \left\vert 2u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\right\vert \left\vert \partial_t u_{\varepsilon,\beta}\right\vert\, dx \\
\le& 2B\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}^2(\partial_x u_{\varepsilon,\beta})^2\, dx +\frac{B\varepsilon}{2}\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
\le& 2B\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{B\varepsilon}{2}\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
A\beta^2\varepsilon\int_{\mathbb{R}}&\vert\partial_{xx} u_{\varepsilon,\beta}\vert\vert\partial_{txxx} u_{\varepsilon,\beta}\vert\, dx = A\varepsilon\int_{\mathbb{R}}\left\vert\beta^{\frac{1}{2}}\partial_{xx} u_{\varepsilon,\beta}\right\vert\left\vert\beta^{\frac{3}{2}}\partial_{txxx} u_{\varepsilon,\beta} \right\vert\, dx \\
\le& \frac{A\beta\varepsilon}{2}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +
\frac{A\beta^3\varepsilon}{2}\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
\le& \frac{A\beta^{\frac{1}{2}}\varepsilon}{2}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{A\beta^3\varepsilon}{2}\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
B\beta\varepsilon \int_{\mathbb{R}}& \partial_x u_{\varepsilon,\beta}\partial_{txx} u_{\varepsilon,\beta}\, dx=\varepsilon\int_{\mathbb{R}}\left\vert\partial_x u_{\varepsilon,\beta}\right\vert\left\vert B\beta\partial_{txx} u_{\varepsilon,\beta}\right\vert\, dx\\
\le& \frac{\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{B^2\beta^2\varepsilon}{2}\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{align*}
Therefore, \eqref{eq:L6} gives
\begin{equation}
\label{eq:L9}
\begin{split}
&\frac{d}{dt}\left(\frac{\beta^{\frac{1}{2}} + B\varepsilon^2}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{A\beta\varepsilon^2}{2} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad + \frac{\beta^{\frac{5}{2}}}{2}\frac{d}{dt}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \beta\varepsilon A\norm{\partial_{tx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{B\varepsilon}{2}\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{A\beta^3\varepsilon}{2}\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{B}{2}\beta^2\varepsilon\left(1-B\right)\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta^{\frac{1}{2}}\varepsilon}{2}\left(1-A\right)\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad \le
C_{0}\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad+\frac{2A^2\varepsilon}{B}\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad + 2B\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{split}
\end{equation}
Choosing $\displaystyle A=\frac{1}{2},\,B=\frac{1}{2}$, from \eqref{eq:L9}, we have
\begin{align*}
&\frac{d}{dt}\left(\frac{2\beta^{\frac{1}{2}} + \varepsilon^2}{4}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta\varepsilon^2}{4} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad + \frac{\beta^{\frac{5}{2}}}{2}\frac{d}{dt}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta\varepsilon}{2} \norm{\partial_{tx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{\varepsilon}{4}\norm{\partial_t u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta^3\varepsilon}{4}\norm{\partial_{txxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{\beta^2\varepsilon}{8}\norm{\partial_{txx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta^{\frac{1}{2}}\varepsilon}{4}\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad \le C_{0}\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{align*}
\eqref{eq:u0eps-1}, \eqref{eq:l-2-u1}, and an integration on $(0,t)$ give
\begin{equation}
\label{eq:L10}
\begin{split}
&\frac{2\beta^{\frac{1}{2}} + \varepsilon^2}{4}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta\varepsilon^2}{4} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{\beta^{\frac{5}{2}}}{2}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta\varepsilon}{2} \int_{0}^{t}\norm{\partial_{tx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad +\frac{\varepsilon}{4}\int_{0}^{t}\norm{\partial_t u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds + \frac{\beta^3\varepsilon}{4}\int_{0}^{t}\norm{\partial_{txxx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad+\frac{\beta^2\varepsilon}{8}\int_{0}^{t}\norm{\partial_{txx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds+ \frac{\beta^{\frac{1}{2}}\varepsilon}{4}\int_{0}^{t} \norm{\partial_{xx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad \le C_{0} + C_{0}\varepsilon\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\int_{0}^{t}\norm{\partial_x u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad+\frac{\varepsilon}{2}\int_{0}^{t}\norm{\partial_x u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds \le C_{0}\left(1+\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\right).
\end{split}
\end{equation}
We prove \eqref{eq:u-infty-3}.
Due to \eqref{eq:l-2-u1}, \eqref{eq:L10}, and the H\"older inequality,
\begin{align*}
u_{\varepsilon,\beta}^2(t,x) =& 2\int_{-\infty}^{x}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx \le 2\int_{\mathbb{R}}\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\, dx \\
\le & 2\norm{u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\\
\le& \frac{C_{0}}{\beta^{\frac{1}{4}}}\sqrt{\left(1+\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\right)},
\end{align*}
that is,
\begin{equation*}
\norm{u_{\varepsilon,\beta}}^4_{L^2((0,T)\times\mathbb{R})}\le\frac{C_0}{\beta^{\frac{1}{2}}}\left(1+\norm{u_{\varepsilon,\beta}}^2_{L^2((0,T)\times\mathbb{R})}\right).
\end{equation*}
Arguing as in \cite[Lemma $2.2$]{Cd6}, we have \eqref{eq:u-infty-3}.
It follows from \eqref{eq:u-infty-3} and \eqref{eq:L10} that
\begin{align*}
&\frac{2\beta^{\frac{1}{2}} + \varepsilon^2}{4}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta\varepsilon^2}{4} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{\beta^{\frac{5}{2}}}{2}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta\varepsilon}{2} \int_{0}^{t}\norm{\partial_{tx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad +\frac{\varepsilon}{4}\int_{0}^{t}\norm{\partial_t u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds + \frac{\beta^3\varepsilon}{4}\int_{0}^{t}\norm{\partial_{txxx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad+\frac{\beta^2\varepsilon}{8}\int_{0}^{t}\norm{\partial_{txx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds+ \frac{\beta^{\frac{1}{2}}\varepsilon}{4}\int_{0}^{t}\norm{\partial_{xx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds \le C_{0}\beta^{-\frac{1}{2}},
\end{align*}
that is,
\begin{align*}
&\frac{2\beta + \beta^{\frac{1}{2}}\varepsilon^2}{4}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{\beta^{\frac{3}{2}}\varepsilon^2}{4} \norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\frac{\beta^3}{2}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta^{\frac{3}{2}}\varepsilon}{2} \int_{0}^{t}\norm{\partial_{tx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad +\frac{\beta^{\frac{1}{2}}\varepsilon}{4}\int_{0}^{t}\norm{\partial_t u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds + \frac{\beta^{\frac{7}{2}}\varepsilon}{4}\int_{0}^{t}\norm{\partial_{txxx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\
&\qquad\quad+\frac{\beta^{\frac{5}{2}}\varepsilon}{8}\int_{0}^{t}\norm{\partial_{txx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds+ \frac{\beta\varepsilon}{4}\int_{0}^{t}\norm{\partial_{xx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds \le C_{0}.
\end{align*}
Hence,
\begin{align*}
\beta^{\frac{1}{2}}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\le &C_{0},\\
\beta^{\frac{1}{4}}\varepsilon\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\le &C_{0},\\
\beta^{\frac{3}{4}}\varepsilon\norm{\partial_{xx} u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\le &C_{0},\\
\beta^{\frac{3}{2}}\norm{\partial_{xxx} u_{\varepsilon,\beta}(t,\cdot)}_{L^2(\mathbb{R})}\le &C_{0},\\
\beta^{\frac{3}{2}}\varepsilon\int_{0}^{t}\norm{\partial_{tx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le&C_{0},\\
\beta^{\frac{1}{2}}\varepsilon\int_{0}^{t}\norm{\partial_t u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le&C_{0},\\
\beta^{\frac{7}{2}}\varepsilon\int_{0}^{t}\norm{\partial_{txxx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le&C_{0},\\
\beta^{\frac{5}{2}}\varepsilon\int_{0}^{t}\norm{\partial_{txx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le&C_{0},\\
\beta\varepsilon\int_{0}^{t}\norm{\partial_{xx} u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le&C_{0},
\end{align*}
for every $0<t<T$.
\end{proof}
To prove Theorem \ref{th:main-1}, the following technical lemma is needed \cite{Murat:Hneg}.
\begin{lemma}
\label{lm:1}
Let $\Omega$ be a bounded open subset of $\mathbb{R}^2$. Suppose that the sequence $\{\mathcal L_{n}\}_{n\in\mathbb{N}}$ of distributions is bounded in $W^{-1,\infty}(\Omega)$.
Suppose also that
\begin{equation*}
\mathcal L_{n}=\mathcal L_{1,n}+\mathcal L_{2,n},
\end{equation*}
where $\{\mathcal L_{1,n}\}_{n\in\mathbb{N}}$ lies in a compact subset of $H^{-1}_{loc}(\Omega)$ and $\{\mathcal L_{2,n}\}_{n\in\mathbb{N}}$ lies in a bounded subset of $\mathcal{M}_{loc}(\Omega)$. Then $\{\mathcal L_{n}\}_{n\in\mathbb{N}}$ lies in a compact subset of $H^{-1}_{loc}(\Omega)$.
\end{lemma}
Moreover, we consider the following definition.
\begin{definition}
A pair of functions $(\eta, q)$ is called an entropy--entropy flux pair if\\ $\eta\colon\mathbb{R}\to\mathbb{R}$ is a $C^2$ function and $q\colon\mathbb{R}\to\mathbb{R}$ is defined by
\begin{equation*}
q(u)=2\int_{0}^{u} \xi\eta'(\xi)\, d\xi.
\end{equation*}
An entropy--entropy flux pair $(\eta,\, q)$ is called convex/compactly supported if, in addition, $\eta$ is convex/compactly supported.
\end{definition}
We begin by proving the following result.
\begin{lemma}\label{lm:259}
Assume that \eqref{eq:uo-l2}, \eqref{eq:u0eps-1} and \eqref{eq:beta-eps-2} hold.
Then, for any compactly supported entropy--entropy flux pair $(\eta, \,q)$, there exist two sequences $\{\varepsilon_{n}\}_{n\in\mathbb{N}},\,\{\beta_{n}\}_{n\in\mathbb{N}}$, with $\varepsilon_n,\,\beta_n\to0$, and a limit function
\begin{equation*}
u\in L^{\infty}((0,T);L^2(\mathbb{R})),
\end{equation*}
such that
\begin{align}
\label{eq:con-u-1}
&u_{\varepsilon_{n},\,\beta_{n}}\to u \quad \textrm{in $L^p_{loc}((0,T)\times\mathbb{R})$, for each $1\le p<2$},\\
\label{eq:u-dist12}
&u \quad \textrm{is a distributional solution of \eqref{eq:BU}}.
\end{align}
\end{lemma}
\begin{proof}
Let us consider a compactly supported entropy--entropy flux pair $(\eta, q)$.
Multiplying \eqref{eq:Ro-eps-beta} by $\eta'(u_{\varepsilon,\beta})$, we have
\begin{align*}
\partial_t\eta(u_{\varepsilon,\beta}) + \partial_x q(u_{\varepsilon,\beta}) =&\varepsilon \eta'(u_{\varepsilon,\beta}) \partial_{xx} u_{\varepsilon,\beta} -\beta\eta'(u_{\varepsilon,\beta})\partial_{xxx} u_{\varepsilon,\beta} -\beta^2\eta'(u_{\varepsilon,\beta})\partial_{txxxx} u_{\varepsilon,\beta} \\
=& I_{1,\,\varepsilon,\,\beta}+I_{2,\,\varepsilon,\,\beta}+ I_{3,\,\varepsilon,\,\beta} + I_{4,\,\varepsilon,\,\beta}+I_{5,\,\varepsilon,\,\beta} + I_{6,\,\varepsilon,\,\beta},
\end{align*}
where
\begin{equation}
\begin{split}
\label{eq:12000}
I_{1,\,\varepsilon,\,\beta}&=\partial_x (\varepsilon\eta'(u_{\varepsilon,\beta})\partial_x u_{\varepsilon,\beta}),\\
I_{2,\,\varepsilon,\,\beta}&= -\varepsilon\eta''(u_{\varepsilon,\beta})(\partial_x u_{\varepsilon,\beta})^2,\\
I_{3,\,\varepsilon,\,\beta}&= -\partial_x (\beta\eta'(u_{\varepsilon,\beta})\partial_{xx} u_{\varepsilon,\beta}),\\
I_{4,\,\varepsilon,\,\beta}&= \beta\eta''(u_{\varepsilon,\beta})\partial_x u_{\varepsilon,\beta}\partial_{xx} u_{\varepsilon,\beta},\\
I_{5,\,\varepsilon,\,\beta}&= -\partial_x
(\beta^2{\bf e}ta'(u_{\bf e}b){\bf p}artial_txxxu_{\bf e}b),\\ I_{6,\,\varepsilon,\,\beta}&= \beta^2{\bf e}ta''(u_{\bf e}b){\bf p}artial_x u_{\bf e}b{\bf p}artial_txxxu_{\bf e}b. {\bf e}nd{split} {\bf e}nd{equation} Fix $T>0$. Arguing in \cite[Lemma $3.2$]{Cd2}, we have that $I_{1,\,\varepsilon,\,\beta}\to0$ in $H^{-1}((0,T) \times\mathbb{R})$, and $\{I_{2,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta >0}$ is bounded in $L^1((0,T)\times\mathbb{R})$. Arguing in \cite[Theorem $B.1$]{Cd6}, $I_{3,\,\varepsilon,\,\beta}\to0$ in $H^{-1}((0,T) \times\mathbb{R})$, and $I_{4,\,\varepsilon,\,\beta}\to0$ in $L^{1}((0,T) \times\mathbb{R})$, while arguing in \cite[Lemma $2.4$]{Cd6}, $I_{5,\,\varepsilon,\,\beta}\to0$ in $H^{-1}((0,T) \times\mathbb{R})$, and $\{I_{6,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta >0}$ is bounded in $L^1((0,T)\times\mathbb{R})$. Therefore, {\bf e}qref{eq:con-u-1} follows from Lemmas \ref{lm:38}, \ref{lm:1} and the $L^p$ compensated compactness of \cite{SC}.\\ Arguing in \cite[Theorem $2.1$]{Cd5}, we have {\bf e}qref{eq:u-dist12}. {\bf e}nd{proof} Following \cite{LN}, we prove the following result \begin{lemma}\big\langlebel{lm:452} Assume that {\bf e}qref{eq:uo-l2}, {\bf e}qref{eq:u0eps-1} and {\bf e}qref{eq:beta-eps-4} hold. Then for any compactly supported entropy--entropy flux pair $({\bf e}ta, \,q)$, there exist two sequences $\{\varepsilon_{n}\}_{n\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin\mathbb{N}},\,\{\beta_{n}\}_{n\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin\mathbb{N}}$, with $\varepsilon_n,\,\beta_n\to0$, and a limit function \begin{equation*} u\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}((0,T);L^2(\mathbb{R})), {\bf e}nd{equation*} such that {\bf e}qref{eq:con-u-1} holds and \begin{equation} \big\langlebel{eq:u-entro-sol-12} u {\bf q}uad \theta_{\bf e}psxtrm{is the unique entropy solution of {\bf e}qref{eq:BU}}. 
\end{equation}
\end{lemma}
\begin{proof}
Let us consider a compactly supported entropy--entropy flux pair $(\eta,\,q)$.
Multiplying \eqref{eq:Ro-eps-beta} by $\eta'(u_{\varepsilon,\beta})$, we have
\begin{align*}
\partial_t\eta(u_{\varepsilon,\beta}) + \partial_x q(u_{\varepsilon,\beta}) =&\,\varepsilon \eta'(u_{\varepsilon,\beta}) \partial_{xx}u_{\varepsilon,\beta}-\beta\eta'(u_{\varepsilon,\beta})\partial_{xxx}u_{\varepsilon,\beta} -\beta^2\eta'(u_{\varepsilon,\beta})\partial_{txxxx}u_{\varepsilon,\beta} \\
=&\, I_{1,\,\varepsilon,\,\beta}+I_{2,\,\varepsilon,\,\beta}+ I_{3,\,\varepsilon,\,\beta} + I_{4,\,\varepsilon,\,\beta}+ I_{5,\,\varepsilon,\,\beta} + I_{6,\,\varepsilon,\,\beta},
\end{align*}
where $I_{1,\,\varepsilon,\,\beta},\,I_{2,\,\varepsilon,\,\beta},\, I_{3,\,\varepsilon,\,\beta},\, I_{4,\,\varepsilon,\,\beta},\,I_{5,\,\varepsilon,\,\beta},\,I_{6,\,\varepsilon,\,\beta}$ are defined in \eqref{eq:12000}.
As in Lemma \ref{lm:259}, we obtain that $I_{1,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, $\{I_{2,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta>0}$ is bounded in $L^1((0,T)\times\mathbb{R})$, $I_{3,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, $I_{4,\,\varepsilon,\,\beta}\to 0$ in $L^1((0,T)\times\mathbb{R})$, $I_{5,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, while arguing as in \cite[Lemma $2.4$]{Cd6}, $I_{6,\,\varepsilon,\,\beta}\to 0$ in $L^1((0,T)\times\mathbb{R})$.
Arguing as in \cite[Theorem $2.1$]{Cd5}, we have \eqref{eq:u-entro-sol-12}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:main-1}]
Theorem \ref{th:main-1} follows from Lemmas \ref{lm:259} and \ref{lm:452}.
\end{proof}
\section{The Rosenau--KdV equation: $u_0\in L^2(\mathbb{R})\cap L^4(\mathbb{R})$}\label{sec:D1}
In this section, we consider \eqref{eq:RKV33}, and assume \eqref{eq:uo-l4} on the initial datum.
We consider the approximation \eqref{eq:Ro-eps-beta}, where $u_{\varepsilon,\beta,0}$ is a $C^\infty$ approximation of $u_{0}$ such that
\begin{equation}
\begin{split}
\label{eq:u0eps-14}
&u_{\varepsilon,\,\beta,\,0} \to u_{0} \quad \textrm{in $L^{p}_{loc}(\mathbb{R})$, $1\le p < 2$, as $\varepsilon,\,\beta \to 0$,}\\
&\norm{u_{\varepsilon,\beta, 0}}^4_{L^4(\mathbb{R})}+\norm{u_{\varepsilon,\beta, 0}}^2_{L^2(\mathbb{R})}+\left(\beta^{\frac{1}{2}}+ \varepsilon^2\right) \norm{\partial_x u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,\\
&\left(\beta^2+\beta\varepsilon^2\right) \norm{\partial_{xx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})} +\left(\beta^{\frac{5}{2}}+\beta^2\varepsilon^2\right)\norm{\partial_{xxx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,\\
&\beta^4\norm{\partial_{xxxx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,
\end{split}
\end{equation}
and $C_0$ is a constant independent of $\varepsilon$ and $\beta$.
The main result of this section is the following theorem.
\begin{theorem}
\label{th:main-13}
Assume that \eqref{eq:uo-l4} and \eqref{eq:u0eps-14} hold.
Fix $T>0$. If \eqref{eq:beta-eps-2} holds, there exist two sequences $\{\varepsilon_{n}\}_{n\in\mathbb{N}}$, $\{\beta_{n}\}_{n\in\mathbb{N}}$, with $\varepsilon_n, \beta_n \to 0$, and a limit function
\begin{equation*}
u\in L^{\infty}((0,T); L^2(\mathbb{R})\cap L^4(\mathbb{R})),
\end{equation*}
such that
\begin{itemize}
\item[$i)$] $u_{\varepsilon_n, \beta_n}\to u$ strongly in $L^{p}_{loc}(\mathbb{R}^{+}\times\mathbb{R})$, for each $1\le p <4$,
\item[$ii)$] $u$ is the unique entropy solution of \eqref{eq:BU}.
\end{itemize}
\end{theorem}
Let us prove some a priori estimates on $u_{\varepsilon,\beta}$, denoting by $C_0$ the constants which depend only on the initial data.
\begin{lemma}\label{lm:562}
Fix $T>0$. Assume that \eqref{eq:beta-eps-2} holds.
There exists $C_0>0$, independent of $\varepsilon,\,\beta$, such that \eqref{eq:u-infty-3} holds.
In particular, we have
\begin{equation}
\label{eq:Z45}
\begin{split}
\beta\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}&+ \beta^3\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&+\frac{3\beta\varepsilon}{2}\int_{0}^{t}\norm{\partial_{xx}u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}\, ds\le C_0,
\end{split}
\end{equation}
for every $0<t<T$. Moreover,
\begin{equation}
\label{eq:Z46}
\norm{\partial_x u_{\varepsilon,\beta}}_{L^{\infty}((0,T)\times\mathbb{R})}\le C_0\beta^{-\frac{3}{4}}.
\end{equation}
\end{lemma}
\begin{remark}
Observe that the proof of Lemma \ref{lm:562} is simpler than that of Lemma \ref{lm:50}.
Indeed, we only need to prove \eqref{eq:u-infty-3}.
\end{remark}
\begin{proof}[Proof of Lemma \ref{lm:562}]
Let $0<t<T$. Multiplying \eqref{eq:Ro-eps-beta} by $-\beta^{\frac{1}{2}}\partial_{xx}u_{\varepsilon,\beta}$, we have
\begin{equation}
\label{eq:K23}
\begin{split}
-\beta^{\frac{1}{2}}\partial_{xx}u_{\varepsilon,\beta}\partial_tu_{\varepsilon,\beta} &- 2\beta^{\frac{1}{2}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\\
&+\beta^{\frac{3}{2}}\partial_{xx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}-\beta^{\frac{5}{2}}\partial_{txxxx}u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}= -\beta^{\frac{1}{2}}\varepsilon(\partial_{xx}u_{\varepsilon,\beta})^2.
\end{split}
\end{equation}
We note that
\begin{equation*}
\beta^{\frac{3}{2}}\int_{\mathbb{R}}\partial_{xx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}\, dx =0.
\end{equation*}
Therefore, arguing as in \cite[Lemma $3.1$]{Cd6}, we have \eqref{eq:u-infty-3}, \eqref{eq:Z45} and \eqref{eq:Z46}.
\end{proof}
Following \cite[Lemma $2.2$]{Cd}, or \cite[Lemma $4.2$]{CK}, we prove the following result.
\begin{lemma}\label{lm:t3}
Fix $T>0$. Assume that \eqref{eq:beta-eps-2} holds.
Then:
\begin{itemize}
\item[$i)$] the family $\{u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta}$ is bounded in $L^{\infty}((0,T);L^{4}(\mathbb{R}))$;
\item[$ii)$] the families $\{\varepsilon\partial_x u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\,\{\beta^{\frac{1}{2}}\varepsilon\partial_{xx}u_{\varepsilon,\beta}\}_{\varepsilon,\beta},\, \{\beta\partial_{xx}u_{\varepsilon,\beta}\}_{\varepsilon,\beta},$ $\{\beta\varepsilon\partial_{xxx}u_{\varepsilon,\beta}\}_{\varepsilon,\beta},\, \{\beta\partial_{xxxx}u_{\varepsilon,\beta}\}_{\varepsilon,\beta}$ are bounded in $L^{\infty}((0,T);L^{2}(\mathbb{R}))$;
\item[$iii)$] the families $\{\beta^{\frac{1}{2}}\varepsilon^{\frac{1}{2}}\partial_{tx}u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\,\{\varepsilon^{\frac{1}{2}}\partial_tu_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\, \{\beta^{\frac{3}{2}}\varepsilon^{\frac{1}{2}}\partial_{txxx}u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},$ $\{\beta\varepsilon^{\frac{1}{2}}\partial_{txx}u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\,\{\varepsilon^{\frac{1}{2}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\, \{\varepsilon^{\frac{3}{2}}\partial_{xx}u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta},\, \{\beta\varepsilon^{\frac{1}{2}}\partial_{xxx}u_{\varepsilon,\beta}\}_{\varepsilon,\,\beta}$ are bounded in $L^{2}((0,T)\times\mathbb{R})$.
\end{itemize}
\end{lemma}
\begin{proof}
Let $0<t<T$. Let $A,\,B,\,C,\,E$ be some positive constants which will be specified later.
Multiplying \eqref{eq:Ro-eps-beta} by
\begin{equation*}
u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta} +C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta},
\end{equation*}
we have
\begin{equation}
\label{eq:P1}
\begin{split}
&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_tu_{\varepsilon,\beta}\\
&\qquad\quad +\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_tu_{\varepsilon,\beta}\\
&\qquad\quad+2\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\\
&\qquad\quad +2\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\\
&\qquad\quad +\beta\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{xxx}u_{\varepsilon,\beta}\\
&\qquad\quad +\beta\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{xxx}u_{\varepsilon,\beta}\\
&\qquad\quad +\beta^2 \left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{txxxx}u_{\varepsilon,\beta}\\
&\qquad\quad +\beta^2\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{txxxx}u_{\varepsilon,\beta}\\
&\quad= \varepsilon\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{xx}u_{\varepsilon,\beta}\\
&\qquad\quad +\varepsilon \left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{xx}u_{\varepsilon,\beta}.
\end{split}
\end{equation}
Since
\begin{align*}
\int_{\mathbb{R}}&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_tu_{\varepsilon,\beta}\, dx\\
=&\frac{1}{4}\frac{d}{dt}\norm{u_{\varepsilon,\beta}(t,\cdot)}^4_{L^4(\mathbb{R})} + \frac{A\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \\
& +B\beta\varepsilon\norm{\partial_{tx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
\int_{\mathbb{R}}&\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_tu_{\varepsilon,\beta}\, dx\\
=& C\varepsilon\norm{\partial_tu_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{E\beta^2}{2}\frac{d}{dt}\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
2\int_{\mathbb{R}}&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx \\
=& -2A\varepsilon^2\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\, dx -2B\beta\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{txx}u_{\varepsilon,\beta}\, dx,\\
2\int_{\mathbb{R}}&\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx\\
=& 2C\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_tu_{\varepsilon,\beta}\, dx -2E\beta^2\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\partial_{xxx}u_{\varepsilon,\beta}\, dx\\
&-2E\beta^2\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}\, dx,\\
-2E\beta^2\int_{\mathbb{R}}&(\partial_x u_{\varepsilon,\beta})^2\partial_{xxx}u_{\varepsilon,\beta}\, dx-2E\beta^2\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}\, dx\\
=&5E\beta^2\int_{\mathbb{R}}(\partial_{xx}u_{\varepsilon,\beta})^2\partial_x u_{\varepsilon,\beta}\, dx=-\frac{5E\beta^2}{2}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\partial_{xxx}u_{\varepsilon,\beta}\, dx,\\
2\int_{\mathbb{R}}&\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx\\
=& 2C\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_tu_{\varepsilon,\beta}\, dx-\frac{5E\beta^2}{2}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\partial_{xxx}u_{\varepsilon,\beta}\, dx,\\
\beta\int_{\mathbb{R}}&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{xxx}u_{\varepsilon,\beta}\, dx\\
=& -3\beta\int_{\mathbb{R}}u_{\varepsilon,\beta}^2\partial_x u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\, dx - B\beta^2\varepsilon\int_{\mathbb{R}}\partial_{txx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}\, dx,\\
\beta\int_{\mathbb{R}}&\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{xxx}u_{\varepsilon,\beta}\, dx=C\beta\varepsilon\int_{\mathbb{R}}\partial_{txx}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx,\\
\beta^2\int_{\mathbb{R}}&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{txxxx}u_{\varepsilon,\beta}\, dx\\
=& -3\beta^2 \int_{\mathbb{R}}u_{\varepsilon,\beta}^2\partial_x u_{\varepsilon,\beta} \partial_{txxx}u_{\varepsilon,\beta}\, dx +\frac{A\beta^2\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&+B\beta^3\varepsilon\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}, \\
\beta^2\int_{\mathbb{R}}&\left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{txxxx}u_{\varepsilon,\beta}\, dx \\
=& C\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{E\beta^4}{2}\frac{d}{dt}\norm{\partial_{xxxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
\varepsilon\int_{\mathbb{R}}&\left(u_{\varepsilon,\beta}^3 -A\varepsilon^2\partial_{xx}u_{\varepsilon,\beta} -B\beta\varepsilon\partial_{txx}u_{\varepsilon,\beta}\right)\partial_{xx}u_{\varepsilon,\beta}\, dx\\
=& -3\varepsilon\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} -A \varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
& -\frac{B\beta\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
\varepsilon\int_{\mathbb{R}}& \left(C\varepsilon\partial_tu_{\varepsilon,\beta} +E\beta^2\partial_{xxxx}u_{\varepsilon,\beta}\right)\partial_{xx}u_{\varepsilon,\beta}\, dx\\
=& -\frac{C\varepsilon^2}{2}\frac{d}{dt}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} -E\beta^2\varepsilon\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},
\end{align*}
an integration over $\mathbb{R}$ of \eqref{eq:P1} gives
\begin{equation}
\label{eq:P9}
\begin{split}
&\frac{d}{dt}\left( \frac{1}{4}\norm{u_{\varepsilon,\beta}(t,\cdot)}^4_{L^4(\mathbb{R})}+ \frac{\left(A+C\right)\varepsilon^2}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad+\frac{d}{dt}\left(\frac{A\beta^2\varepsilon^2}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{E\beta^4}{2}\norm{\partial_{xxxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\
&\qquad\quad +\frac{B\beta\varepsilon^2+E\beta^2}{2}\frac{d}{dt}\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +B\beta\varepsilon\norm{\partial_{tx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +C\varepsilon\norm{\partial_tu_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+B\beta^3\varepsilon\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad+C\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +3\varepsilon\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +A \varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+E\beta^2\varepsilon\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad= 2A\varepsilon^2\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\, dx +2B\beta\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_{txx}u_{\varepsilon,\beta}\, dx\\
&\qquad\quad +2C\varepsilon\int_{\mathbb{R}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\partial_tu_{\varepsilon,\beta}\, dx+\frac{5E\beta^2}{2}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\partial_{xxx}u_{\varepsilon,\beta}\, dx\\
&\qquad\quad +3\beta\int_{\mathbb{R}}u_{\varepsilon,\beta}^2\partial_x u_{\varepsilon,\beta}\partial_{xx}u_{\varepsilon,\beta}\, dx + B\beta^2\varepsilon\int_{\mathbb{R}}\partial_{txx}u_{\varepsilon,\beta}\partial_{xxx}u_{\varepsilon,\beta}\, dx\\
&\qquad\quad -C\beta\varepsilon\int_{\mathbb{R}}\partial_{txx}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\, dx +3\beta^2 \int_{\mathbb{R}}u_{\varepsilon,\beta}^2\partial_x u_{\varepsilon,\beta} \partial_{txxx}u_{\varepsilon,\beta}\, dx.
\end{split}
\end{equation}
Due to the Young inequality,
\begin{align*}
&2A\varepsilon^2\int_{\mathbb{R}}\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx}u_{\varepsilon,\beta}\vert\, dx = \int_{\mathbb{R}}\left\vert \varepsilon^{\frac{1}{2}}u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\right\vert\left\vert 2A \varepsilon^{\frac{3}{2}}\partial_{xx}u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad\le \frac{\varepsilon}{2}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + 2A^2\varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&2B\beta\varepsilon\int_{\mathbb{R}}\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{txx}u_{\varepsilon,\beta}\vert\, dx = \varepsilon\int_{\mathbb{R}}\left\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\right\vert\left\vert 2B\beta\partial_{txx}u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad\le \frac{\varepsilon}{2}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +
4B^2\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&2C\varepsilon\int_{\mathbb{R}}\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\vert\vert\partial_tu_{\varepsilon,\beta}\vert\, dx= \varepsilon\int_{\mathbb{R}}\left\vert u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta}\right\vert\left\vert 2C\partial_tu_{\varepsilon,\beta}\right\vert\, dx\\
&\quad \le \frac{\varepsilon}{2}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +2C^2\varepsilon\norm{\partial_tu_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&B\beta^2\varepsilon\int_{\mathbb{R}}\vert\partial_{txx}u_{\varepsilon,\beta}\vert\vert\partial_{xxx}u_{\varepsilon,\beta}\vert\, dx= \beta^2\varepsilon\int_{\mathbb{R}}\left\vert 2B\partial_{txx}u_{\varepsilon,\beta} \right\vert\left\vert\frac{\partial_{xxx}u_{\varepsilon,\beta}}{2}\right\vert\, dx\\
&\quad \le 4B^2\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&C\beta\varepsilon\int_{\mathbb{R}}\vert\partial_{txx}u_{\varepsilon,\beta}\vert\vert\partial_x u_{\varepsilon,\beta}\vert\, dx = C\varepsilon\int_{\mathbb{R}}\left\vert\beta\partial_{txx}u_{\varepsilon,\beta}\right\vert\left\vert\partial_x u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad\le \frac{C\beta^2\varepsilon}{2}\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{C\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{align*}
Therefore, from \eqref{eq:P9}, we have
\begin{equation}
\label{eq:P11}
\begin{split}
&\frac{d}{dt}\left( \frac{1}{4}\norm{u_{\varepsilon,\beta}(t,\cdot)}^4_{L^4(\mathbb{R})}+ \frac{\left(A+C\right)\varepsilon^2}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad+\frac{d}{dt}\left(\frac{A\beta^2\varepsilon^2}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{E\beta^4}{2}\norm{\partial_{xxxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\
&\qquad\quad +\frac{B\beta\varepsilon^2+E\beta^2}{2}\frac{d}{dt}\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +B\beta\varepsilon\norm{\partial_{tx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(1-2C\right)C\varepsilon\norm{\partial_tu_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +B\beta^3\varepsilon\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(\frac{C}{2} -8B^2\right)\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{3\varepsilon}{2}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(A -2A^2\right)\varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\left(E-\frac{1}{2}\right)\beta^2\varepsilon\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad \le \frac{5E\beta^2}{2}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\vert\partial_{xxx}u_{\varepsilon,\beta}\vert\, dx + 3\beta\int_{\mathbb{R}}u_{\varepsilon,\beta}^2\vert\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx}u_{\varepsilon,\beta}\vert\, dx\\
&\qquad\quad + 3\beta^2
\int_{\mathbb{R}}u_{\varepsilon,\beta}^2\vert\partial_x u_{\varepsilon,\beta}\vert \vert\partial_{txxx}u_{\varepsilon,\beta}\vert\, dx + \frac{C\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{split}
\end{equation}
From \eqref{eq:beta-eps-2}, we get
\begin{equation}
\label{eq:P12}
\beta\le D^2\varepsilon^4,
\end{equation}
where $D$ is a positive constant which will be specified later.
It follows from \eqref{eq:Z46}, \eqref{eq:P12} and the Young inequality that
\begin{align*}
&\frac{5E\beta^2}{2}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^2\vert\partial_{xxx}u_{\varepsilon,\beta}\vert\, dx=E\beta^2\int_{\mathbb{R}}\frac{5}{2\varepsilon^{\frac{1}{2}}}(\partial_x u_{\varepsilon,\beta})^2\left\vert\varepsilon^{\frac{1}{2}}\partial_{xxx}u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad\le \frac{25E\beta^2}{8\varepsilon}\int_{\mathbb{R}}(\partial_x u_{\varepsilon,\beta})^4\, dx + \frac{E\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad\le \frac{25E\beta^2}{8\varepsilon}\norm{\partial_x u_{\varepsilon,\beta}}^2_{L^{\infty}((0,T)\times\mathbb{R})}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad + \frac{E\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad\le \frac{C_{0}\beta^{\frac{1}{2}}}{\varepsilon}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{E\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad\le C_0D\varepsilon\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+
\frac{E\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&3\beta\int_{\mathbb{R}}u_{\varepsilon,\beta}^2\vert\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx}u_{\varepsilon,\beta}\vert\, dx \le 3\beta\norm{u_{\varepsilon,\beta}}^2_{L^{\infty}((0,T)\times\mathbb{R})}\int_{\mathbb{R}}\vert\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx}u_{\varepsilon,\beta}\vert\, dx\\
&\quad \le 3C_{0}D\varepsilon^2\int_{\mathbb{R}}\vert\partial_x u_{\varepsilon,\beta}\vert\vert\partial_{xx}u_{\varepsilon,\beta}\vert\, dx = 3\int_{\mathbb{R}}\left\vert\varepsilon^{\frac{1}{2}}\partial_x u_{\varepsilon,\beta}\right\vert \left\vert C_{0}D\varepsilon^{\frac{3}{2}}\partial_{xx}u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad \le \frac{3\varepsilon}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + C^2_{0}D^2\varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})},\\
&3\beta^2 \int_{\mathbb{R}}u_{\varepsilon,\beta}^2\vert\partial_x u_{\varepsilon,\beta}\vert \vert\partial_{txxx}u_{\varepsilon,\beta}\vert\, dx= \int_{\mathbb{R}}\left\vert\frac{3\beta^{\frac{1}{2}}u_{\varepsilon,\beta}^2\partial_x u_{\varepsilon,\beta}}{\sqrt{B}\varepsilon^{\frac{1}{2}}}\right\vert\left\vert\sqrt{B}\beta^{\frac{3}{2}}\varepsilon^{\frac{1}{2}}\partial_{txxx}u_{\varepsilon,\beta}\right\vert\, dx\\
&\quad\le \frac{3\beta}{2B\varepsilon}\int_{\mathbb{R}}u_{\varepsilon,\beta}^4(\partial_x u_{\varepsilon,\beta})^2\, dx +
\frac{B\beta^3\varepsilon}{2}\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad\le \frac{3\beta}{2B\varepsilon}\norm{u_{\varepsilon,\beta}}^2_{L^{\infty}((0,T)\times\mathbb{R})}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad + \frac{B\beta^3\varepsilon}{2}\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\quad\le \frac{C_{0}D\varepsilon}{B}\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{B\beta^3\varepsilon}{2}\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{align*}
Then, it follows from \eqref{eq:P11} that
\begin{equation}
\label{eq:P15}
\begin{split}
&\frac{d}{dt}\left( \frac{1}{4}\norm{u_{\varepsilon,\beta}(t,\cdot)}^4_{L^4(\mathbb{R})}+ \frac{\left(A+C\right)\varepsilon^2}{2}\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\
&\qquad\quad+\frac{d}{dt}\left(\frac{A\beta^2\varepsilon^2}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+ \frac{E\beta^4}{2}\norm{\partial_{xxxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\
&\qquad\quad +\frac{B\beta\varepsilon^2+E\beta^2}{2}\frac{d}{dt}\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +B\beta\varepsilon\norm{\partial_{tx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(1-2C\right)C\varepsilon\norm{\partial_tu_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{B\beta^3\varepsilon}{2}\norm{\partial_{txxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(\frac{C}{2} -8B^2\right)\beta^2\varepsilon\norm{\partial_{txx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}+\left(E-1\right)\frac{\beta^2\varepsilon}{2}\norm{\partial_{xxx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad +\left(A -2A^2-C^2_0D^2\right)\varepsilon^3\norm{\partial_{xx}u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}\\
&\qquad\quad+\left(\frac{3}{2}-\frac{C_0 D}{B}\right)\varepsilon\norm{u_{\varepsilon,\beta}(t,\cdot)\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} \le C_{0}\varepsilon\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})}.
\end{split}
\end{equation}
We search for $A,\,B,\,C,\,E$ such that
\begin{equation*}
\begin{cases}
\displaystyle 1-2C >0,\\
\displaystyle \frac{C}{2} -8B^2 >0,\\
\displaystyle E-1>0,\\
\displaystyle A -2A^2-C^2_0D^2>0,\\
\displaystyle \frac{3}{2}-\frac{C_0 D}{B}>0,
\end{cases}
\end{equation*}
that is,
\begin{equation}
\label{eq:P16}
\begin{cases}
\displaystyle C <\frac{1}{2},\\
\displaystyle B^2 < \frac{C}{16},\\
\displaystyle E>1,\\
\displaystyle 2A^2 -A +C^2_0D^2<0,\\
\displaystyle D<\frac{3B}{2C_{0}}.
\end{cases}
\end{equation}
We choose
\begin{equation}
\label{eq:P17}
C=\frac{1}{4},\quad E=2.
\end{equation}
It follows from the second inequality of \eqref{eq:P16} and \eqref{eq:P17} that
\begin{equation*}
B<\frac{1}{8}.
\end{equation*}
Hence, we can choose
\begin{equation}
\label{eq:P18}
B=\frac{1}{9}.
\end{equation}
Substituting \eqref{eq:P18} in the fifth inequality of \eqref{eq:P16}, we have
\begin{equation}
\label{eq:P19}
D<\frac{1}{6C_0}.
\end{equation}
The fourth inequality admits a solution when
\begin{equation}
\label{eq:P20}
D<\frac{2\sqrt{2}}{8C_0}.
\end{equation}
It follows from \eqref{eq:P19} and \eqref{eq:P20} that
\begin{equation}
\label{eq:P21}
D< \min\left\{\frac{1}{6C_0}, \frac{2\sqrt{2}}{8C_0}\right\}=\frac{1}{6C_0}.
\end{equation}
Therefore, from \eqref{eq:P16} and \eqref{eq:P21}, there exist $0<A_1<A_2$ such that
\begin{equation}
\label{eq:w1}
0<A_1<A<A_2.
{\bf e}nd{equation} Substituting {\bf e}qref{eq:P17}, {\bf e}qref{eq:P18}, and {\bf e}qref{eq:P21} in {\bf e}qref{eq:P15}, from {\bf e}qref{eq:w1}, we get \begin{align*} &\frac{d}{dt}\left( \frac{1}{4}\noindentorm{u_{\bf e}b(t,\cdot)}^4_{L^4(\mathbb{R})}+ \frac{\left(4A+1\right)\varepsilon^2}{8}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} \right)\\ &{\bf q}quad{\bf q}uad+\frac{d}{dt}\left(\frac{A\beta^2\varepsilon^2}{2}\noindentorm{{\bf p}artial_x xxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+ \beta^4\noindentorm{{\bf p}artial_x xxxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\ &{\bf q}quad{\bf q}uad +\frac{\beta\varepsilon^2+18\beta^2}{18}\frac{d}{dt}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta\varepsilon}{9}\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad +\frac{\varepsilon}{8}\noindentorm{{\bf p}artial_tu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^3\varepsilon}{18}\noindentorm{{\bf p}artial_txxxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad +\frac{73\beta^2\varepsilon}{648}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{\beta^2\varepsilon}{2}\noindentorm{{\bf p}artial_x xxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad +K_2\varepsilon^3\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+K_2\varepsilon\noindentorm{u_{\bf e}b(t,\cdot){\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad\le C_{0}\varepsilon\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}, {\bf e}nd{align*} for some $K_1,\,K_2>0$. 
An integration on $(0,t)$, {\bf e}qref{eq:l-2-u1}, and {\bf e}qref{eq:u0eps-14} give \begin{align*} &\frac{1}{4}\noindentorm{u_{\bf e}b(t,\cdot)}^4_{L^4(\mathbb{R})}+ \frac{\left(4A+1\right)\varepsilon^2}{8}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad+\frac{A\beta^2\varepsilon^2}{2}\noindentorm{{\bf p}artial_x xxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+ \beta^4\noindentorm{{\bf p}artial_x xxxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad +\frac{\beta\varepsilon^2+18\beta^2}{18}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta\varepsilon}{9}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad{\bf q}uad +\frac{\varepsilon}{8}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_tu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds +\frac{\beta^3\varepsilon}{18}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad{\bf q}uad +\frac{73\beta^2\varepsilon}{648}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds+\frac{\beta^2\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad{\bf q}uad +K_2\varepsilon^3\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}ds+K_2\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{u_{\bf e}b(s,\cdot){\bf p}artial_x u_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad\le C_{0} +C_{0}\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x u_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds \le C_0. 
{\bf e}nd{align*} Hence, \begin{align*} \noindentorm{u_{\bf e}b(t,\cdot)}_{L^4(\mathbb{R})}\le & C_0,\\ \varepsilon\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}_{L^2(\mathbb{R})}\le &C_0,\\ \beta\varepsilon\noindentorm{{\bf p}artial_x xxu_{\bf e}b(t,\cdot)}_{L^2(\mathbb{R})}\le &C_0,\\ \beta^2\noindentorm{{\bf p}artial_x xxxu_{\bf e}b(t,\cdot)}_{L^2(\mathbb{R})}\le &C_0,\\ \beta^{\frac{1}{2}}\varepsilon\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}_{L^2(\mathbb{R})}\le &C_0,\\ \beta\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}_{L^2(\mathbb{R})}\le &C_0,\\ \beta\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_tu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \beta^3\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \beta^2\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \beta^2\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \varepsilon^3\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0,\\ \varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{u_{\bf e}b(s,\cdot){\bf p}artial_x u_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\le &C_0, {\bf e}nd{align*} for every $0<t<T$. {\bf e}nd{proof} We are ready for the proof of Theorem \ref{th:main-13}. \begin{proof}[Proof of Theorem \ref{th:main-13}.] Let us consider a compactly supported entropy--entropy flux pair $({\bf e}ta,\,q)$. 
Multiplying \eqref{eq:Ro-eps-beta} by $\eta'(u_{\varepsilon,\beta})$, we have
\begin{align*}
\partial_t\eta(u_{\varepsilon,\beta}) + \partial_x q(u_{\varepsilon,\beta}) =&\,\varepsilon \eta'(u_{\varepsilon,\beta}) \partial_{xx}u_{\varepsilon,\beta}-\beta\eta'(u_{\varepsilon,\beta})\partial_{xxx}u_{\varepsilon,\beta} -\beta^2\eta'(u_{\varepsilon,\beta})\partial_{txxxx}u_{\varepsilon,\beta} \\
=&\, I_{1,\,\varepsilon,\,\beta}+I_{2,\,\varepsilon,\,\beta}+ I_{3,\,\varepsilon,\,\beta} + I_{4,\,\varepsilon,\,\beta}+ I_{5,\,\varepsilon,\,\beta} + I_{6,\,\varepsilon,\,\beta},
\end{align*}
where $I_{1,\,\varepsilon,\,\beta},\,I_{2,\,\varepsilon,\,\beta},\, I_{3,\,\varepsilon,\,\beta},\, I_{4,\,\varepsilon,\,\beta},\,I_{5,\,\varepsilon,\,\beta},\,I_{6,\,\varepsilon,\,\beta}$ are defined in \eqref{eq:12000}.
As in \cite[Theorem~$3.1$]{Cd6}, we obtain that $I_{1,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, $\{I_{2,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta>0}$ is bounded in $L^1((0,T)\times\mathbb{R})$, $I_{4,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, and $I_{5,\,\varepsilon,\,\beta}\to 0$ in $L^1((0,T)\times\mathbb{R})$, while, as in \cite[Theorem~$2.1$]{Cd5}, $I_{3,\,\varepsilon,\,\beta}\to 0$ in $H^{-1}((0,T)\times\mathbb{R})$ and $I_{6,\,\varepsilon,\,\beta}\to 0$ in $L^1((0,T)\times\mathbb{R})$.
% NOTE(review): the source repeated "$I_{4,\varepsilon,\beta}\to0$ in $L^1$" here; since all six terms
% $I_1,\dots,I_6$ must be accounted for and $I_4$ was already handled, the last one is read as $I_6$ — confirm.
Arguing as in \cite[Theorem~$2.1$]{Cd5}, we have \eqref{eq:u-entro-sol-12}.
\end{proof}
\appendix
\section{The Benjamin--Bona--Mahony equation}\label{appen1}
In this appendix, we consider the Benjamin--Bona--Mahony equation
\begin{equation}
\label{eq:BBM}
\partial_t u +u\partial_x u -\beta\partial_{txx} u =0.
\end{equation}
We augment \eqref{eq:BBM} with the initial condition
\begin{equation}
u(0,x)=u_{0}(x),
\end{equation}
on which we assume \eqref{eq:uo-l2}.
We study the dispersion--diffusion limit for \eqref{eq:BBM}.
Therefore, we fix two small numbers $\varepsilon,\,\beta$ and consider the following third order problem
\begin{equation}
\label{eq:AB2}
\begin{cases}
\partial_t u_{\varepsilon,\beta}+ u_{\varepsilon,\beta}\partial_x u_{\varepsilon,\beta} -\beta\partial_{txx}u_{\varepsilon,\beta} =\varepsilon\partial_{xx}u_{\varepsilon,\beta}, &\qquad t>0, \ x\in\mathbb{R},\\
u_{\varepsilon,\beta}(0,x)=u_{\varepsilon,\beta,0}(x), &\qquad x\in\mathbb{R},
\end{cases}
\end{equation}
where $u_{\varepsilon,\beta,0}$ is a $C^\infty$ approximation of $u_{0}$ such that
\begin{equation}
\begin{split}
\label{eq:AB3}
&u_{\varepsilon,\,\beta,\,0} \to u_{0} \quad \textrm{in $L^{p}_{loc}(\mathbb{R})$, $1\le p < 2$, as $\varepsilon,\,\beta \to 0$,}\\
&\norm{u_{\varepsilon,\beta, 0}}^2_{L^2(\mathbb{R})}+\left(\beta+\beta^{\frac{1}{2}}\right)\norm{\partial_x u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0, \\
&\left(\beta^{\frac{3}{2}}+\beta\varepsilon^2\right)\norm{\partial_{xx} u_{\varepsilon,\beta,0}}^2_{L^2(\mathbb{R})}\le C_0,\quad \varepsilon,\beta >0,
\end{split}
\end{equation}
and $C_0$ is a constant independent of $\varepsilon$ and $\beta$.
The main result of this section is the following theorem.
\begin{theorem}
\label{th:main-A2}
Assume that \eqref{eq:uo-l2} and \eqref{eq:AB3} hold.
If \eqref{eq:beta-eps-2} holds, then there exist two sequences $\{\varepsilon_{n}\}_{n\in\mathbb{N}}$, $\{\beta_{n}\}_{n\in\mathbb{N}}$, with $\varepsilon_n, \beta_n \to 0$, and a limit function
\begin{equation*}
u\in L^{\infty}(\mathbb{R}^{+}; L^2(\mathbb{R})),
\end{equation*}
such that
\begin{itemize}
\item[$i)$] $u_{\varepsilon_n, \beta_n}\to u$ strongly in $L^{p}_{loc}(\mathbb{R}^{+}\times\mathbb{R})$, for each $1\le p <2$,
\item[$ii)$] $u$ is a distributional solution of \eqref{eq:BU}.
\end{itemize}
Moreover, if \eqref{eq:beta-eps-4} holds,
\begin{itemize}
\item[$iii)$] $u$ is the unique entropy solution of \eqref{eq:BU}.
\end{itemize}
\end{theorem}
Let us prove some a priori estimates on $u_{\varepsilon,\beta}$, denoting by $C_0$ the constants which depend only on the initial data.
Arguing as in \cite{SC}, we have the following result.
\begin{lemma}
\label{lm:N23}
For each $t>0$,
\begin{equation}
\label{eq:AB31}
\norm{u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + \beta\norm{\partial_x u_{\varepsilon,\beta}(t,\cdot)}^2_{L^2(\mathbb{R})} + 2\varepsilon\int_{0}^{t}\norm{\partial_x u_{\varepsilon,\beta}(s,\cdot)}^2_{L^2(\mathbb{R})}\,ds \le C_0.
\end{equation}
Moreover,
\begin{equation}
\label{eq:AB2*}
\norm{u_{\varepsilon,\beta}(t,\cdot)}_{L^{\infty}(\mathbb{R})}\le C_{0}\beta^{-\frac{1}{4}}.
\end{equation}
\end{lemma}
\begin{lemma}
Assume \eqref{eq:beta-eps-2}.
For each $t>0$, \begin{equation} \big\langlebel{eq:BC3} \begin{split} &\beta\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{2\beta^2 + \beta^{\frac{3}{2}}\varepsilon^2}{2} \noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad + \frac{3\beta\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^{\frac{5}{2}}\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad{\bf q}uad +\beta^{\frac{3}{2}}\varepsilon\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\le C_0. {\bf e}nd{split} {\bf e}nd{equation} {\bf e}nd{lemma} \begin{proof} Let $t>0$. Multiplying {\bf e}qref{eq:AB2} by $-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b$, we have \begin{equation} \big\langlebel{eq:AB3*} \begin{split} \left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_tu_{\bf e}b&+ \left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right)u_{\bf e}b{\bf p}artial_x u_{\bf e}b\\ &-\beta\left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_txxu_{\bf e}b\\ =&\varepsilon\left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_x xu_{\bf e}b. 
{\bf e}nd{split} {\bf e}nd{equation} Since \begin{align*} &\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}} \left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_tu_{\bf e}b dx\\ &{\bf q}quad =\beta^{\frac{1}{2}}\frac{d}{dt}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\beta\varepsilon\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})},\\ &-\beta\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}\left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_txxu_{\bf e}b dx \\ &{\bf q}quad=\beta^{\frac{3}{2}}\frac{d}{dt}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\beta^2\varepsilon\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})},\\ &\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}\left(-2\beta^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b-\beta\varepsilon{\bf p}artial_txxu_{\bf e}b\right){\bf p}artial_x xu_{\bf e}b dx\\ &{\bf q}quad = -2\beta^{\frac{1}{2}}\varepsilon\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}-\frac{\beta\varepsilon^2}{2}\frac{d}{dt}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}, {\bf e}nd{align*} integrating {\bf e}qref{eq:AB3*} on $\mathbb{R}$, we get \begin{equation} \big\langlebel{eq:AB4} \begin{split} &\frac{d}{dt}\left(\beta^{\frac{1}{2}}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{2\beta^{\frac{3}{2}} + \beta\varepsilon^2}{2} \noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\ &{\bf q}quad +2\beta^{\frac{1}{2}}\varepsilon\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\beta^2\varepsilon\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad +\beta\varepsilon\noindentorm{{\bf p}artial_txu_{\bf 
e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad= 2\beta^{\frac{1}{2}}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}u_{\bf e}b{\bf p}artial_x u_{\bf e}b{\bf p}artial_x xu_{\bf e}b dx -\beta\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}u_{\bf e}b{\bf p}artial_x u_{\bf e}b{\bf p}artial_txxu_{\bf e}b dx. {\bf e}nd{split} {\bf e}nd{equation} Due to {\bf e}qref{eq:beta-eps-2}, {\bf e}qref{eq:AB2*}, and the Young inequality, \begin{equation} \big\langlebel{eq:AB5} \begin{split} &2\beta^{\frac{1}{2}}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}v_\varepsilonrtu_{\bf e}b{\bf p}artial_x u_{\bf e}bv_\varepsilonrtv_\varepsilonrt{\bf p}artial_x xu_{\bf e}bv_\varepsilonrt dx =\beta^{\frac{1}{2}}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}\leftv_\varepsilonrt\frac{2u_{\bf e}b{\bf p}artial_x u_{\bf e}b}{\varepsilon^{\frac{1}{2}}}\rightv_\varepsilonrt\leftv_\varepsilonrt \varepsilon^{\frac{1}{2}}{\bf p}artial_x xu_{\bf e}b\rightv_\varepsilonrt dx \\ &{\bf q}quad \le \frac{2\beta^{\frac{1}{2}}}{\varepsilon}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}u_{\bf e}b^2({\bf p}artial_x u_{\bf e}b)^2 dx +\frac{\beta^{\frac{1}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad \le C_{0}\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}u_{\bf e}b^2({\bf p}artial_x u_{\bf e}b)^2 dx + \frac{\beta^{\frac{1}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad \le C_{0}\varepsilon\noindentorm{u_{\bf e}b(t,\cdot)}^2_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta^{\frac{1}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad\le\frac{C_{0}\varepsilon}{\beta^{\frac{1}{2}}}\noindentorm{{\bf p}artial_x u_{\bf 
e}b(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta^{\frac{1}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}. {\bf e}nd{split} {\bf e}nd{equation} Thanks to {\bf e}qref{eq:AB2*}, and the Young inequality, \begin{equation} \big\langlebel{eq:AB6} \begin{split} &\beta\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}v_\varepsilonrtu_{\bf e}b{\bf p}artial_x u_{\bf e}bv_\varepsilonrtv_\varepsilonrt{\bf p}artial_txxu_{\bf e}bv_\varepsilonrt dx= \varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}\leftv_\varepsilonrtu_{\bf e}b{\bf p}artial_x u_{\bf e}b\rightv_\varepsilonrt\leftv_\varepsilonrt\beta{\bf p}artial_txxu_{\bf e}b\rightv_\varepsilonrt dx\\ &{\bf q}quad\le\frac{\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}u_{\bf e}b^2({\bf p}artial_x u_{\bf e}b)^2 dx + \frac{\beta\varepsilon}{2}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad\le\frac{\varepsilon}{2}\noindentorm{u_{\bf e}b(t,\cdot)}^2_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta\varepsilon}{2}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad\le \frac{\varepsilon}{2\beta^{\frac{1}{2}}}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} + \frac{\beta\varepsilon}{2}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}. 
{\bf e}nd{split} {\bf e}nd{equation} It follows from {\bf e}qref{eq:AB4}, {\bf e}qref{eq:AB5}, and {\bf e}qref{eq:AB6} that \begin{align*} &\frac{d}{dt}\left(\beta^{\frac{1}{2}}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{2\beta^{\frac{3}{2}} + \beta\varepsilon^2}{2} \noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\ &{\bf q}quad +\frac{3\beta^{\frac{1}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^2\varepsilon}{2}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad+\beta\varepsilon\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\le \frac{C_{0}\varepsilon}{\beta^{\frac{1}{2}}}\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}. {\bf e}nd{align*} Hence, \begin{align*} &\frac{d}{dt}\left(\beta\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{2\beta^2 + \beta^{\frac{3}{2}}\varepsilon^2}{2} \noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\right)\\ &{\bf q}quad +\frac{3\beta\varepsilon}{2}\noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^{\frac{5}{2}}\varepsilon}{2}\noindentorm{{\bf p}artial_txxu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad +\beta^{\frac{3}{2}}\varepsilon\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\le C_0\varepsilon\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}. 
{\bf e}nd{align*} An integration on $(0,t)$ and {\bf e}qref{eq:AB31} give \begin{align*} &\beta\noindentorm{{\bf p}artial_x u_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}+\frac{2\beta^2 + \beta^{\frac{3}{2}}\varepsilon^2}{2} \noindentorm{{\bf p}artial_x xu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\\ &{\bf q}quad{\bf q}uad + \frac{3\beta\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x xu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})} +\frac{\beta^{\frac{5}{2}}\varepsilon}{2}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_txxu_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds\\ &{\bf q}quad+\beta^{\frac{3}{2}}\varepsilon\noindentorm{{\bf p}artial_txu_{\bf e}b(t,\cdot)}^2_{L^2(\mathbb{R})}\le C_0+ C_0\varepsilon\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{t}\noindentorm{{\bf p}artial_x u_{\bf e}b(s,\cdot)}^2_{L^2(\mathbb{R})}ds \le C_0, {\bf e}nd{align*} that is {\bf e}qref{eq:BC3}. {\bf e}nd{proof} We continue by proving the following result \begin{lemma}\big\langlebel{lm:9000} Assume that {\bf e}qref{eq:uo-l2}, {\bf e}qref{eq:beta-eps-2}, and {\bf e}qref{eq:AB3} hold. Then, for any compactly supported entropy--entropy flux pair $({\bf e}ta, \,q)$, there exist two sequences $\{\varepsilon_{n}\}_{n\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin\mathbb{N}},\,\{\beta_{n}\}_{n\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin\mathbb{N}}$, with $\varepsilon_n,\,\beta_n\to0$, and a limit function \begin{equation*} u\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fin L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R}^{+};L^2(\mathbb{R})), {\bf e}nd{equation*} such that {\bf e}qref{eq:con-u-1} holds and \begin{equation} \big\langlebel{eq:A40} \theta_{\bf e}psxtrm{$u$ is a distributional solution of {\bf e}qref{eq:BU}}. {\bf e}nd{equation} {\bf e}nd{lemma} \begin{proof} Let us consider a compactly supported entropy--entropy flux pair $({\bf e}ta, q)$. 
Multiplying {\bf e}qref{eq:AB2} by ${\bf e}ta'(u_{\bf e}b)$, we have \begin{align*} {\bf p}artial_t{\bf e}ta(u_{\bf e}b) + {\bf p}artial_x q(u_{\bf e}b) =&\varepsilon {\bf e}ta'(u_{\bf e}b) {\bf p}artial_x xu_{\bf e}b +\beta{\bf e}ta'(u_{\bf e}b){\bf p}artial_txxu_{\bf e}b \\ =& I_{1,\,\varepsilon,\,\beta}+I_{2,\,\varepsilon,\,\beta}+ I_{3,\,\varepsilon,\,\beta} + I_{4,\,\varepsilon,\,\beta}, {\bf e}nd{align*} where \begin{equation} \begin{split} \big\langlebel{eq:1200013} I_{1,\,\varepsilon,\,\beta}&={\bf p}artial_x (\varepsilon{\bf e}ta'(u_{\bf e}b){\bf p}artial_x u_{\bf e}b),\\ I_{2,\,\varepsilon,\,\beta}&= -\varepsilon{\bf e}ta''(u_{\bf e}b)({\bf p}artial_x u_{\bf e}b)^2,\\ I_{3,\,\varepsilon,\,\beta}&= {\bf p}artial_x (\beta{\bf e}ta'(u_{\bf e}b){\bf p}artial_txu_{\bf e}b),\\ I_{4,\,\varepsilon,\,\beta}&= -\beta{\bf e}ta''(u_{\bf e}b){\bf p}artial_x u_{\bf e}b{\bf p}artial_txu_{\bf e}b. {\bf e}nd{split} {\bf e}nd{equation} Fix $T>0$. Arguing in \cite[Lemma $3.2$]{Cd2}, we have that $I_{1,\,\varepsilon,\,\beta}\to0$ in $H^{-1}((0,T) \times\mathbb{R})$, and $\{I_{2,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta >0}$ is bounded in $L^1((0,T)\times\mathbb{R})$.\\ We claim that \begin{equation*} I_{3,\,\varepsilon,\,\beta}\to0 {\bf q}uad \theta_{\bf e}psxt{in $H^{-1}((0,T) \times\mathbb{R}),\,T>0,$ as $\varepsilon\to 0$.} {\bf e}nd{equation*} By {\bf e}qref{eq:beta-eps-2} and {\bf e}qref{eq:BC3}, \begin{align*} &\noindentorm{ \beta{\bf e}ta'(u_{\bf e}b){\bf p}artial_txu_{\bf e}b}^2_{L^2((0,T)\times\mathbb{R})}\\ &{\bf q}quad\le \beta^2 \noindentorm{{\bf e}ta'}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\noindentorm{{\bf p}artial_txu_{\bf e}b}^2_{L^2((0,T)\times\mathbb{R})}\\ &{\bf q}quad= \noindentorm{{\bf e}ta'}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\frac{\beta^2\varepsilon}{\varepsilon}\noindentorm{{\bf p}artial_txu_{\bf e}b}^2_{L^2((0,T)\times\mathbb{R})}\\ &{\bf q}quad=\noindentorm{{\bf 
e}ta'}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\frac{\beta^{\frac{1}{2}}\beta^{\frac{3}{2}}\varepsilon}{\varepsilon}\noindentorm{{\bf p}artial_txu_{\bf e}b}^2_{L^2((0,T)\times\mathbb{R})} \le C_{0}\noindentorm{{\bf e}ta'}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\varepsilon\to 0. {\bf e}nd{align*} Let us show that \begin{equation*} I_{4,\,\varepsilon,\,\beta}{\bf q}uad \theta_{\bf e}psxt{is bounded in $L^1((0,T) \times\mathbb{R}),\,T>0,$.} {\bf e}nd{equation*} Thanks to {\bf e}qref{eq:beta-eps-2}, {\bf e}qref{eq:AB31}, {\bf e}qref{eq:BC3}, and the H\"older inequality, \begin{align*} &\noindentorm{\beta{\bf e}ta''(u_{\bf e}b){\bf p}artial_x u_{\bf e}b{\bf p}artial_txu_{\bf e}b}_{L^1((0,T)\times\mathbb{R})}\\ &{\bf q}quad\le\beta\noindentorm{{\bf e}ta''}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{0}^{T}\!\!\!\ifmmode\mathit{\mathchar"7010 }\else\char"10 \fint_{\mathbb{R}}v_\varepsilonrt{\bf p}artial_x u_{\bf e}b{\bf p}artial_txu_{\bf e}bv_\varepsilonrt dsdx\\ &{\bf q}quad=\noindentorm{{\bf e}ta''}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\frac{\beta^{\frac{1}{4}}\beta^{\frac{3}{4}}\varepsilon}{\varepsilon}\noindentorm{{\bf p}artial_x u_{\bf e}b}_{L^2((0,T)\times\mathbb{R})}\noindentorm{{\bf p}artial_txu_{\bf e}b}_{L^2((0,T)\times\mathbb{R})}\\ &{\bf q}quad\le C_{0}\noindentorm{{\bf e}ta''}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}\frac{\beta^{\frac{1}{4}}}{\varepsilon}\le C_{0}\noindentorm{{\bf e}ta''}_{L^{\ifmmode\mathit{\mathchar"7010 }\else\char"10 \finfty}(\mathbb{R})}. {\bf e}nd{align*} Arguing as in \cite{SC}, we have {\bf e}qref{eq:A40}. {\bf e}nd{proof} \begin{lemma}\big\langlebel{eq:10034} Assume {\bf e}qref{eq:uo-l2}, {\bf e}qref{eq:beta-eps-4}, and {\bf e}qref{eq:AB3} hold. 
Then, for any compactly supported entropy--entropy flux pair $(\eta, \,q)$, there exist two sequences $\{\varepsilon_{n}\}_{n\in\mathbb{N}},\,\{\beta_{n}\}_{n\in\mathbb{N}}$, with $\varepsilon_n,\,\beta_n\to0$, and a limit function
\begin{equation*}
u\in L^{\infty}(\mathbb{R}^{+};L^2(\mathbb{R})),
\end{equation*}
such that \eqref{eq:con-u-1} and \eqref{eq:u-entro-sol-12} hold.
\end{lemma}
\begin{proof}
Let us consider a compactly supported entropy--entropy flux pair $(\eta, q)$.
Multiplying \eqref{eq:AB2} by $\eta'(u_{\varepsilon,\beta})$, we have
\begin{align*}
\partial_t\eta(u_{\varepsilon,\beta}) + \partial_x q(u_{\varepsilon,\beta}) =&\,\varepsilon \eta'(u_{\varepsilon,\beta}) \partial_{xx}u_{\varepsilon,\beta} +\beta\eta'(u_{\varepsilon,\beta})\partial_{txx}u_{\varepsilon,\beta} \\
=&\, I_{1,\,\varepsilon,\,\beta}+I_{2,\,\varepsilon,\,\beta}+ I_{3,\,\varepsilon,\,\beta} + I_{4,\,\varepsilon,\,\beta},
\end{align*}
where $I_{1,\,\varepsilon,\,\beta},\,I_{2,\,\varepsilon,\,\beta},\, I_{3,\,\varepsilon,\,\beta},\, I_{4,\,\varepsilon,\,\beta}$ are defined in \eqref{eq:1200013}.
As in Lemma~\ref{lm:259}, we have that $I_{1,\,\varepsilon,\,\beta},\,I_{3,\,\varepsilon,\,\beta} \to 0$ in $H^{-1}((0,T)\times\mathbb{R})$, $\{ I_{2,\,\varepsilon,\,\beta}\}_{\varepsilon,\beta>0}$ is bounded in $L^1((0,T)\times\mathbb{R})$, while $I_{4,\,\varepsilon,\,\beta}\to0$ in $L^1((0,T)\times\mathbb{R})$.
Arguing as in \cite{LN}, we have \eqref{eq:u-entro-sol-12}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:main-A2}]
Theorem \ref{th:main-A2} follows from Lemmas \ref{lm:9000} and \ref{eq:10034}.
\end{proof}
\begin{thebibliography}{40}
\bibitem{AB} {\sc M. Antonova and A. Biswas.}
\newblock Adiabatic parameter dynamics of perturbed solitary waves.
\newblock{\em Communications in Nonlinear Science and Numerical Simulation}, 14:734--748, 2009.
\bibitem{Ba} {\sc A. R. Bahadır.}
\newblock Exponential finite--difference method applied to Korteweg--de Vries equation for small times.
\newblock {\em Applied Mathematics and Computation}, 160(3):675--682, 2005.
\bibitem{BTL} {\sc A. Biswas, H. Triki and M. Labidi.}
\newblock Bright and dark solitons of the Rosenau-Kawahara equation with power law nonlinearity.
\newblock{\em Physics of Wave Phenomena}, 19:24--29, 2011.
\bibitem{B} {\sc J. Boyd.}
\newblock Ostrovsky and Hunter's generic wave equation for weakly dispersive waves: matched asymptotic and pseudospectral study of the paraboloidal travelling waves (corner and near-corner waves).
\newblock {\em Euro. Jnl. of Appl. Math.}, 16(1):65--81, 2005.
\bibitem{CH1} {\sc S. K. Chung.}
\newblock Finite difference approximate solutions for the Rosenau equation.
\newblock{\em Applicable Analysis}, vol. 69, no. 1--2, pp. 149--156, 1998.
\bibitem{CHH} {\sc S. K. Chung and S. N. Ha.}
\newblock Finite element Galerkin solutions for the Rosenau equation.
\newblock {\em Applicable Analysis}, vol. 54, no. 1--2, pp. 39--56, 1994.
\bibitem{CHP} {\sc S. K. Chung and A. K. Pani.}
\newblock Numerical methods for the Rosenau equation.
\newblock {\em Applicable Analysis}, vol. 77, no. 3--4, pp. 351--369, 2001.
\bibitem{Cd5} {\sc G. M. Coclite and L. di Ruvo.}
\newblock A singular limit problem for the Rosenau-Korteweg-de Vries regularized long wave and Rosenau-Korteweg-de Vries equation.
\newblock Submitted.
\bibitem{Cd6} {\sc G. M. Coclite and L. di Ruvo.}
\newblock A singular limit problem for the Rosenau equation.
\newblock Submitted.
\bibitem{Cd} {\sc G. M. Coclite and L.
di Ruvo.}
\newblock A singular limit problem for conservation laws related to the Kudryashov-Sinelshchikov equation.
\newblock Submitted.
\bibitem{Cd1} {\sc G.~M. Coclite and L. di Ruvo.}
\newblock Oleinik type estimate for the Ostrovsky-Hunter equation.
\newblock {\em J. Math. Anal. Appl.}, 423:162--190, 2015.
\bibitem{Cd2} {\sc G.~M. Coclite and L. di Ruvo.}
\newblock Convergence of the Ostrovsky Equation to the Ostrovsky-Hunter One.
\newblock {\em J. Differential Equations}, 256:3245--3277, 2014.
\bibitem{CdK} {\sc G. M. Coclite, L. di Ruvo, and K. H. Karlsen}.
\newblock Some wellposedness results for the Ostrovsky-Hunter Equation.
\newblock {\em Hyperbolic conservation laws and related analysis with applications}, 143--159, Springer Proc. Math. Stat., 49, Springer, Heidelberg, 2014.
\bibitem{CdREM} {\sc G. M. Coclite, L. di Ruvo, J. Ernest, and S. Mishra.}
\newblock Convergence of vanishing capillarity approximations for scalar conservation laws with discontinuous fluxes.
\newblock {\em Netw. Heterog. Media}, 8(4):969--984, 2013.
\bibitem{CK} {\sc G.~M. Coclite and K.~H. Karlsen.}
\newblock A singular limit problem for conservation laws related to the Camassa-Holm shallow water equation.
\newblock {\em Comm. Partial Differential Equations}, 31:1253--1272, 2006.
\bibitem{CRS} {\sc A. Corli, C. Rohde, and V. Schleper.}
\newblock Parabolic approximations of diffusive-dispersive equations.
\newblock {\em J. Math. Anal. Appl.}, 414:773--798, 2014.
\bibitem{CM} {\sc Y. Cui and D. K. Mao.}
\newblock Numerical method satisfying the first two conservation laws for the Korteweg-de Vries equation.
\newblock{\em J. of Computational Physics}, 227(1):376--399, 2007.
\bibitem{dR} {\sc L. di Ruvo.}
\newblock Discontinuous solutions for the Ostrovsky--Hunter equation and two phase flows.
\noindentewblock {{\bf e}m Phd Thesis, University of Bari}, 2013. \noindentewblock{www.dm.uniba.it/home/dottorato/dottorato/tesi/}. \bibitem{E} {\sc A. Esfahani.} \noindentewblock Solitary wave solutions for generalized Rosenau-KdV equation. \noindentewblock {{\bf e}m Communications in Theoretical Physics}, 55(3):396--398, 2011. \bibitem{EMTYB} {\sc G. Ebadi, A. Mojaver, H. Triki, A. Yildirim, and A. Biswas.} \noindentewblock Topological solitons and other solutions of the Rosenau-KdV equation with power law nonlinearity. \noindentewblock {{\bf e}m Romanian J. of Physics}, 58:3--14, 2013. \bibitem{HXH} {\sc J. Hu, Y. Xu, and B. Hu.} \noindentewblock Conservative Linear Difference Scheme for Rosenau-KdV Equation. \noindentewblock {{\bf e}m Adv. Math. Phys.}, 423718, 2013. \bibitem{KL} {\sc Y. D. Kim and H. Y. Lee.} \noindentewblock The convergence of finite element Galerkin solution for the Roseneau equation. \noindentewblock {{\bf e}m The Korean Journal of Computational \& Applied Mathematics}, 5(1):171--180, 1998. \bibitem{LB} {\sc M. Labidi and A. Biswas.} \noindentewblock Application of He’s principles to Rosenau-Kawahara equation. \noindentewblock{{\bf e}m Mathematics in Engineering, Science and Aerospace}, 2:183--197, 2011. \bibitem{LN} {\sc P. G. LeFloch and R. Natalini.} \noindentewblock Conservation laws with vanishing nonlinear diffusion and dispersion. \noindentewblock {{\bf e}m Nonlinear Anal. 36, no. 2, Ser. A: Theory Methods}, 212--230, 1992 \bibitem{MPC} {\sc S. A.V.Manickam, A. K. Pani, and S. K.Chung.} \noindentewblock A second-order splitting combined with orthogonal cubic spline collocation method for the Rosenau equation. \noindentewblock {{\bf e}m Numerical Methods for Part. Diff. Equations}, 14(6):695--716, 1998. \bibitem{Murat:Hneg} {\sc F.~Murat.} \noindentewblock L'injection du c\^one positif de ${H}\sp{-1}$\ dans ${W}\sp{-1,\,q}$\ est compacte pour tout $q<2$. \noindentewblock {{\bf e}m J. Math. Pures Appl. (9)}, 60(3):309--322, 1981. 
\bibitem{OAAK} {\sc K. Omrani, F. Abidi, T. Achouri, and N. Khiari.} \newblock A new conservative finite difference scheme for the Rosenau equation. \newblock \emph{Applied Mathematics and Computation}, vol. 201, no. 1--2, pp. 35--43, 2008. \bibitem{OK} {\sc S. \"Ozer and S. Kutluay.} \newblock An analytical-numerical method for solving the Korteweg-de Vries equation. \newblock \emph{Applied Mathematics and Computation}, 164(3):789--797, 2005. \bibitem{O} {\sc L. A. Ostrovsky.} \newblock Nonlinear internal waves in a rotating ocean. \newblock \emph{Okeanologia}, 18:181--191, 1978. \bibitem{P} {\sc M. A. Park.} \newblock On the Rosenau equation. \newblock \emph{Matem\'atica Aplicada e Computacional}, 9(2):145--152, 1990. \bibitem{RAB} {\sc P. Razborova, B. Ahmed, and A. Biswas.} \newblock Solitons, shock waves and conservation laws of Rosenau-KdV-RLW equation with power law nonlinearity. \newblock \emph{Appl. Math. Inform. Sci.}, 8:485--491, 2014. \bibitem{RTB} {\sc P. Razborova, H. Triki, and A. Biswas.} \newblock Perturbation of dispersive shallow water waves. \newblock \emph{Ocean Engineering}, 63:1--7, 2013. \bibitem{Ro1} {\sc P. Rosenau.} \newblock A quasi-continuous description of a nonlinear transmission line. \newblock \emph{Physica Scripta}, 34:827--829, 1986. \bibitem{Ro2} {\sc P. Rosenau.} \newblock Dynamics of dense discrete systems. \newblock \emph{Progress of Theoretical Physics}, 79:1028--1042, 1988. \bibitem{SC} {\sc M. E. Schonbek.} \newblock Convergence of solutions to nonlinear dispersive equations. \newblock \emph{Comm. Partial Differential Equations}, 7(8):959--1000, 1982. \bibitem{ZZ} {\sc M. Zheng and J. Zhou.} \newblock An average linear difference scheme for the generalized Rosenau--KdV Equation. \newblock \emph{J. of Appl. Math.}, vol. 2014, 9 pages, 2014.
\bibitem{ZUZO} {\sc S. Zhu and J. Zhao.} \newblock The alternating segment explicit--implicit scheme for the dispersive equation. \newblock \emph{Applied Mathematics Letters}, 14(6):657--662, 2001. \bibitem{Z} {\sc J. M. Zuo.} \newblock Solitons and periodic solutions for the Rosenau-KdV and Rosenau-Kawahara equations. \newblock \emph{Applied Mathematics and Computation}, 215(2):835--840, 2009. \bibitem{ZZZC} {\sc J. M. Zuo, Y. M. Zhang, T. D. Zhang, and F. Chang.} \newblock A new conservative difference scheme for the generalized Rosenau-RLW equation. \newblock \emph{Boundary Value Problems}, 516260, 2010. \end{thebibliography} \end{document}
\begin{document} \title{ A Novel SO(3) Picture for Quantum Searching} \author{ Gui Lu Long${}^{1,2,3}$, Chang Cun Tu${}^{1}$, Yan Song Li${}^{1}$, Wei Lin Zhang${}^{1}$ and Hai Yang Yan${}^{1}$ } \address{ ${}^{1}$Department of Physics, Tsinghua University, Beijing 100084, China\\ ${}^{2}$Institute of Theoretical Physics, Chinese Academy of Sciences, Beijing 100080, China \\ ${}^{3}$Center of Nuclear Theory, National Laboratory of Heavy Ion Accelerator, Chinese Academy of Sciences, Lanzhou, 730000, China} \maketitle \begin{abstract} An $SO(3)$ picture of the generalized Grover's quantum searching algorithm, with arbitrary unitary transformation and with arbitrary phase rotations, is constructed. In this picture, any quantum search operation is a rotation in a 3-dimensional space. Exact formulas for the rotation angle and rotational axis are given. The probability of finding the marked state is just $(z+1)/2$, where $z$ is the $z$-component of the state vector. An exact formula for this probability is easily obtained. The phase matching requirement and the failure of the algorithm when the phase mismatches are clearly explained. \end{abstract} \pacs{03.67-a, 03.67.Lx,Quantum searching, Phase matching, $SO(3)$ group} Grover's quantum search algorithm \cite{r1,r2} is one of the most celebrated quantum computing algorithms. It has been shown that the algorithm is optimal \cite{r3}. The algorithm can be generalized to arbitrary initial amplitude distributions \cite{r4}. It has many important applications, for instance, in the Simon problem \cite{r5} and quantum counting \cite{r6}. In the case where multiple marked states are involved, it can even search the data by just one query \cite{r6'}. Recently, it has been generalized to arbitrarily entangled initial states \cite{r7}. Since Grover's algorithm involves only simple gate operations, it has been realized in 2-qubit \cite{r8,r9,r10} and 3-qubit NMR systems \cite{r11'}.
Grover's original algorithm has a simple geometric interpretation \cite{r2,r12,r14}. When the Hadamard transformation is substituted by any arbitrary unitary transformation, it has been shown there is an $SU(2)$ group structure in the generalized algorithm \cite{r2,r14}. However, when generalizing the algorithm to arbitrary phase rotations, phase matching is vital \cite{r15,r16}. In \cite{r16} we have given an approximate formula for the amplitude of the marked state. But it is difficult to understand the phase matching requirement, as it is contrary to what one expects from a continuity argument. In this Letter, we give a novel $SO(3)$ picture of the generalized quantum search algorithm by exploiting the relation between $SO(3)$ and $SU(2)$. In this $SO(3)$ picture the process of quantum search is crystal clear. The behavior of the algorithm with phase matching or mismatching is clearly understood. This helps us to understand the various aspects of the algorithm, and to further develop the algorithm. The operator for quantum search~\cite{r2} can be written as $Q=-I_\gamma U^{-1} I_\tau U$, where $|\tau\rangle$ is the marked state, $|\gamma\rangle$ is the prepared state, usually $|\gamma\rangle$$=|0\rangle$. For arbitrary phase rotations, $I_\gamma =I-(-e^{i\theta}+1)$ $|\gamma \rangle\langle\gamma|$, $I_\tau =I-(-e^{i\phi}+1)$ $|\tau\rangle \langle\tau|$.
In the basis where $ |1\rangle=U^{-1}|\tau\rangle$, $|2\rangle=-(|\gamma\rangle-U_{\tau\gamma} U^{-1}|\tau\rangle) /\sqrt{1-| U_{\tau\gamma} |^{2}}$, $Q$ can be written as \begin{equation} Q=\left( \begin{array}{cc} -e^{-i{\phi \over 2}}(\cos{\theta \over 2}+i\cos2\beta\sin{\theta \over 2}) & -ie^{-i {\phi\over 2}}\sin2\beta\sin{\theta \over 2} \\ -ie^{i {\phi\over 2}}\sin2\beta\sin{\theta \over 2} & -e^{i{\phi \over 2}}(\cos{\theta \over 2}-i\cos2\beta\sin {\theta \over 2}) \end{array} \right), \end{equation} where we have written $U_{\tau\gamma}=e^{i \xi}\sin\beta$ (in Grover's original algorithm, $U_{\tau\gamma}={1\over\sqrt{N}}$, $\xi=0$, $\sin \beta={1\over\sqrt{N}}$), and an overall phase factor has been neglected. It is easy to check that $\det(Q)=1$, and $Q$ is an element of the $SU(2)$ group. As is well known, each unitary matrix $u$ in the $SU(2)$ group corresponds to a rotation $R_u$ in the $SO(3)$ group \cite{r14'}. Here the operator $Q$ corresponds to the rotation \begin{equation} \left( \begin{array}{ccc} R_{11} & R_{12} & R_{13} \\ R_{21} & R_{22} & R_{23} \\ R_{31} & R_{32} & R_{33} \end{array}\right) \end{equation} where $ R_{11}=\cos\phi(\cos^2 2\beta\cos\theta+\sin^2 2\beta) +\cos2\beta\sin\theta\sin\phi$, $ R_{12}=\cos2\beta\cos\phi\sin\theta -\cos\theta \sin\phi $, $ R_{13}=-\cos\phi \sin4\beta \sin^2 {\theta \over 2} +\sin2\beta \sin\theta \sin\phi $, $ R_{21}=-\cos(2\beta)\cos\phi\sin\theta +(\cos^2 {\theta \over 2} -\cos4\beta\sin^2 {\theta \over 2})\sin\phi$, $ R_{22}=\cos\theta \cos\phi +\cos2\beta \sin\theta \sin\phi $, $ R_{23}=-\cos\phi \sin2\beta \sin\theta -\sin4\beta \sin^2 {\theta \over 2} \sin\phi $, $ R_{31}=-\sin4\beta\sin^2 {\theta \over 2} $, $ R_{32}=\sin2\beta\sin\theta$, $ R_{33}=\cos^2 2\beta+\cos\theta\sin^2 2\beta$.
A spinor in $SU(2)$ which describes the state of the quantum computer, $\Psi=\left(\begin{array}{c}a+bi\\c+di\end{array}\right)$ corresponds to a vector in $R^3$ \begin{equation} {\bf r}=\Psi^{\dag} {\bf \sigma} \Psi =\left(\begin{array}{c} x \\ y \\ z \\ \end{array}\right) =\left(\begin{array}{c} 2(ac+bd) \\ 2(-bc+ad) \\ a^2 +b^2 -c^2 -d^2\\ \end{array}\right). \end{equation} The probability of finding the marked state is $P=a^2 +b^2 =(z+1)/2$. The $z$ component of the polarization vector is a measure of the probability. For instance, the evenly distributed state $\Psi_o=(\frac{1}{\sqrt{N}}$, $\frac{\sqrt{N-1}}{\sqrt{N}} )^{\dag} $, corresponds to the vector ${\bf r}_o =(2\sqrt{1- {1\over N}} \sqrt{1\over N}, 0,-1+ {2 \over N})^{T} $, which is nearly parallel to the $-z$ axis when $N$ is large. The marked state $\psi_a =(1,0)^{\dag}$, corresponds to ${\bf r}_a =(0,0,1)^T$, which is on the $+z$ axis. Thus the process of quantum searching in the $SO(3)$ picture is to rotate the state vector from a position nearly parallel to the $-z$ axis to the $+z$ axis. The rotational axis of the rotation (2) can be found by solving the eigenvalue problem $ R_u {\bf l}={\bf l}$. This gives $ {\bf l}=\left(\begin{array}{ccc} \cot{\phi \over 2} & 1 & -\cot 2\beta \cot {\phi \over 2}+\cot{\theta \over 2}\csc 2\beta \end{array}\right)^{T}$. Each iteration of $Q$ rotates the state vector about this axis by an angle \begin{equation} \alpha =\arccos[{1\over 4}(\cos4\beta+3)\cos\theta\cos\phi +\sin^2 2\beta({1\over 2}\cos\phi-\sin^2 {\theta\over 2}) +\cos2\beta \sin\theta \sin\phi]. \end{equation} In Grover's original algorithm, $\theta = \phi =\pi$, the rotation axis is exactly the $y$-axis, and the rotational angle is equal to the maximum value of $4\beta$ (remember the relation between $SU(2)$ and $SO(3)$; this corresponds to an angle of $2\beta$ in $SU(2)$).
The state vector ${\bf r}$ is being rotated within the $x-z$ plane from approximately $-z$ to $+z$ axis, where the marked state achieves maximum probability amplitude. The number of steps required to reach the $+z$ axis is ${{\pi-2\beta} \over \alpha}$$\approx 0.785\sqrt{N}-0.5$$\approx 0.785\sqrt{N}$. The trace of the tip of the state vector is shown in Fig.~1. In the most general case with arbitrary $\theta$ and $\phi$, the trace of the tip of state vector ${\bf r}$ is a circle. The state vector spans a cone with the top at the origin. During the rotation, the vector ${\bf r}-{\bf r}_o$ is orthogonal to the rotational axis ${\bf l}$ at any time: $({\bf r}-{\bf r}_o)\cdot{\bf l}=0$. If the state vector passes through the $+z$ axis, that is, ${\bf r}_a=(0,0,1)^T$ is in the trace, then by solving the equation $({\bf r}_a -{\bf r}_o) \cdot {\bf l}=0$, we have $ \cot{\phi \over 2} =\cot{\theta \over 2}$, or $\phi=\theta$, the phase matching requirement which has been found in an approximate manner. However, the rotational axis is now $ {\bf l}=\left(\begin{array}{ccc} \cos{\phi \over 2} & \sin{\phi \over 2} & \cos{\phi \over 2}\tan\beta \end{array}\right)^T$, which is no longer the $y$ axis. The rotation angle is \begin{equation} \alpha=\arccos\{2[(\cos2\beta-1)\sin^2{\phi\over 2}+1]^2-1\}. \end{equation} If $N$ is very large, $ {\bf l}\approx\left(\begin{array}{ccc} \cos{\phi \over 2} & \sin{\phi \over 2} & 0 \end{array}\right)$, which is in the $x-y$ plane, and the initial state vector is nearly along the $-z$ axis. The trace of the tip of the state vector is a circle in the $x-z$ plane. Each iteration rotates the state vector by an angle $\alpha$ given by Eq.~(5). To first order in $\beta$, $\alpha\approx 4\beta \sin\frac{\phi}{2}$, which corresponds to a rotation of $2\beta \sin\frac{\phi}{2}$ in $SU(2)$. The number of steps required to search the marked state is larger than that in the original version, as given in \cite{r16}. However, in this case the centre of the circle is no longer the origin.
The state vector can pass through the $+z$ axis, that is, it can reach the marked state with near certainty, but not the $-z$ axis, where the amplitude of the marked state is zero. This has clearly been demonstrated in the numerical calculation in Ref.~\cite{r16}. When $\theta\neq\phi$, the trace of the tip of the state vector is still a circle. But it is very tilted. In Figure~2, it is drawn for the case of $\theta={\pi\over 2}$, $\phi={\pi\over 10}$. Here we see that the rotating axis is nearly the $z$ axis, and the circle spanned by the state vector tip is nearly parallel to the $x-y$ plane. Therefore the amplitude of the marked state can neither reach $1$ nor reach zero. This explains naturally the intriguing, narrowly bounded behavior of the algorithm we have found in Ref.~\cite{r15}. To summarize, we have given a novel $SO(3)$ interpretation of the quantum search algorithm. In this picture, the effect of quantum search is clearly displayed. In particular, the effects of phase mismatching are clearly understood. This throws new light on the algorithm, and we hope it will be helpful for further development of the algorithm. \begin{figure} \caption{The trace of the state vector tip when phase matching is satisfied.} \end{figure} \begin{figure} \caption{3D plot of the trace of the state vector tip when phase mismatches.} \end{figure} \end{document}
\begin{document} \title{Hyperbolic polynomials and canonical sign patterns} \author{Vladimir Petrov Kostov} \address{Universit\'e C\^ote d’Azur, CNRS, LJAD, France} \email{[email protected]} \begin{abstract} A real univariate polynomial is hyperbolic if all its roots are real. By Descartes' rule of signs a hyperbolic polynomial (HP) with all coefficients nonvanishing has exactly $c$ positive and exactly $p$ negative roots counted with multiplicity, where $c$ and $p$ are the numbers of sign changes and sign preservations in the sequence of its coefficients. We discuss the question: If the moduli of all $c+p$ roots are distinct and ordered on the positive half-axis, then at which positions can the $p$ moduli of negative roots be depending on the positions of the positive and negative signs of the coefficients of the polynomial? We are especially interested in the choices of these signs for which exactly one order of the moduli of the roots is possible.\\ {\bf Key words:} real polynomial in one variable; hyperbolic polynomial; sign pattern; Descartes' rule of signs\\ {\bf AMS classification:} 26C10; 30C15 \end{abstract} \maketitle \section{Introduction} We consider real univariate polynomials with nonvanishing coefficients. Such a polynomial is {\em hyperbolic} if all its roots are real. Various problems concerning hyperbolic polynomials (HPs) are exposed in~\cite{Ko}. In this paper we discuss the following question: Suppose that the moduli of all roots of a HP are distinct and ordered on the positive half-axis. Then at which positions can the moduli of the negative roots be depending on the signs of the coefficients of the HP? In this sense we say that we are interested in the possible orders on the positive half-axis of the moduli of roots of HPs with given signs of their coefficients. Without loss of generality we consider only monic polynomials. A {\em sign pattern (SP)} is a finite sequence of $(+)$- and/or $(-)$-signs. 
The SP defined by the polynomial $P:=\sum _{j=0}^da_jx^j$, $a_j\in \mathbb{R}^*$, $a_d=1$, is the vector $$\sigma (P)~:=~(~+~,~{\rm sgn}(a_{d-1})~,~{\rm sgn}(a_{d-2})~,~\ldots ~,~ {\rm sgn}(a_0)~)~.$$ \begin{nota}\label{nota1} {\rm When we write $\sigma (P)=\Sigma _{m_1,m_2,\ldots ,m_s}$, $m_i\in \mathbb{N}^*$, $m_1+\cdots +m_s=d+1$, this means that the SP $\sigma (P)$ begins with a sequence of $m_1$ signs $+$ followed by a sequence of $m_2$ signs $-$ followed by a sequence of $m_3$ signs $+$ etc. The number $s-1$ is the number of sign changes and the number $d-s+1$ is the number of sign preservations of the SP $\sigma (P)$. } \end{nota} The classical Descartes' rule of signs says that the polynomial $P$ has not more than $s-1$ positive roots. When applied to the polynomial $P(-x)$, this rule implies that $P$ has not more than $d-s+1$ negative roots. Hence if $P$ is hyperbolic, then it has exactly $s-1$ positive and exactly $d-s+1$ negative roots (all roots are counted with multiplicity). \begin{rem} {\rm Fourier has made Descartes' rule of signs about real (but not necessarily hyperbolic) polynomials more precise by showing that the number of positive roots differs from $s-1$ by an even integer, see~\cite{Four}. For such polynomials, Descartes' rule of signs proposes only necessary conditions. Attempts to clarify the question how far from sufficient they are have been carried out in \cite{AlFu}, \cite{AJS}, \cite{FoKoSh}, \cite{FoNoSh}, \cite{Gr}, \cite{KoCzMJ} and~\cite{KoMB}.} \end{rem} \begin{defi}\label{defiCPP} {\rm Given a SP (of length $d+1$ and beginning with $+$) we construct its corresponding {\em change-preservation pattern (CPP)} (of length~$d$) as follows. For $j\geq 2$, there is a $p$ (resp. a $c$) in position $j-1$ of the CPP if in positions $j-1$ and $j$ of the SP there are two equal (resp. two different) signs. It is clear that the correspondence between SPs beginning with $+$ and CPPs is bijective. 
Example: for $d=6$, to the SP $\sigma _0:=(+,+,-,-,+,+,+)$ there corresponds the CPP $(p,c,p,c,p,p)$.} \end{defi} \begin{defi}\label{defiCO} {\rm (1) Suppose that a degree $d$ HP $P$ is given which defines the SP $\sigma$ of length $d+1$, suppose that the moduli of its roots are ordered on the real positive half-line, and suppose that all moduli of roots are distinct. We define formally the {\em canonical order} of the moduli of roots like this: the CPP corresponding to the given SP $\sigma$ is read from the back, each $p$ is replaced by an $N$ and each $c$ by a $P$. For the SP $\sigma _0$ from Definition~\ref{defiCPP} this gives $(N,N,P,N,P,N)$ which means that the moduli of the roots are $0<\gamma _1<\cdots <\gamma _6$, where the polynomial has positive roots $\gamma _3$ and $\gamma _5$, and negative roots $-\gamma _1$, $-\gamma _2$, $-\gamma _4$ and~$-\gamma _6$. (2) For a HP $P$ and the SP $\sigma (P)$, we say that the SP $\sigma (P)$ is {\em realizable} by~$P$.} \end{defi} \begin{prop}\label{propevery} Every SP $\sigma$ of length $d+1$, $d\geq 1$, is realizable by a degree $d$ HP with canonical order of the moduli of its roots. \end{prop} \begin{proof} We construct the HP in $d$ steps. At the first step we set $P_1:=x+1$ if the first component of the CPP is a $p$ and $P_1:=x-1$ if it is a $c$. Suppose that the degree $k$ HP $P_k$ is constructed which defines the SP $\sigma _k$ obtained from $\sigma$ by deleting its last $d-k$ components. Set $P_{k+1}(x):=P_k(x)(x+\varepsilon )$ if the last two components of $\sigma _{k+1}$ are equal or $P_{k+1}(x):=P_k(x)(x-\varepsilon )$ if they are different, where $\varepsilon >0$. One chooses $\varepsilon$ so small that: 1) the signs of the first $k+1$ coefficients of $P_{k+1}$ are the same as the ones of $P_k$; 2) the number $\varepsilon$ is smaller than all the moduli of roots of $P_k$. 
It is clear that for $k=d$, the HP $P_d$ thus obtained defines the SP $\sigma$ and that the order of the moduli of its roots is the canonical one. \end{proof} \begin{rems}\label{rems1} {\rm (1) The proposition can be generalized for real, but not necessarily hyperbolic polynomials, see Lemmas~14 and 17 in~\cite{FoKoSh}. The way of constructing new polynomials by adding new roots of modulus much smaller than the already existing moduli (which preserves the signs of the first $d+1$ coefficients) can be called {\em concatenation of polynomials (or of SPs)}. The construction described in the proof of Proposition~\ref{propevery} extends at each step the SP by adding a $(+)$- or $(-)$-sign at its rear. (2) One can propose a similar concatenation, i.e. construction of HPs, in which each new root has a modulus much larger than the moduli of the already existing roots. Namely, given a degree $d$ HP $P(x)$ with no vanishing coefficients one considers the HP $(1\pm \varepsilon x)P(x)$ which for $\varepsilon >0$ sufficiently small has the same signs of the last $d+1$ coefficients as $P$. Its new root equals $1/(\mp \varepsilon )$. After this one has to multiply the polynomial by $\pm 1/\varepsilon$ to make it monic again. This construction extends the SP by adding a $(+)$- or $(-)$-sign at its front.} \end{rems} \begin{defi} {\rm A SP is called {\em canonical} if it is realizable only by HPs with canonical order of the moduli of their roots.} \end{defi} \begin{ex}\label{ex1} {\rm (1) The following SPs $\Sigma _{m_1,m_2,\ldots ,m_s}$ are canonical:} $$\Sigma _{m_1,1}~,~\Sigma _{1,m_2}~,~\Sigma _{m_1,1,m_3}~~~{\rm and~for~}m_2\geq 3,~~~ \Sigma _{1,m_2,1}~,$$ {\rm see Theorem~1, Corollary~1, Theorem~5 and Theorem~2 in \cite{Koarxiv} respectively. The SP $\Sigma _{1,2,1}$ is not canonical -- by part (1) of Example~2 therein the SP $\Sigma _{1,2,1}$ is realizable by each of the three polynomials $(x+1)(x-1.5)(x-1.6)$, $(x+1)(x-1.5)(x-0.6)$ and $(x+1)(x-0.5)(x-0.6)$. 
(2) For $m_1\geq 2$, $m_2\geq 2$, the SP $\Sigma _{m_1,m_2}$ is not canonical, see Theorem~1 and Corollary~1 in~\cite{Koarxiv}.} \end{ex} In the present paper we give sufficient (see Theorem~\ref{tm1}, Proposition~\ref{prop1n11} and Corollary~\ref{cor1n11}) and necessary conditions (see Theorem~\ref{tm2}) for a SP to be canonical. In Section~\ref{secnoncanon} we consider non-canonical SPs with two sign variations and we give a lower bound on the number of different orders of the moduli of roots for which these SPs are realizable by~HPs. \section{Preliminaries} \begin{nota} {\rm (1) We set $\sigma ^m(P)=\sigma ((-1)^dP(-x))$ and $\sigma ^r(P)=\sigma (x^dP(1/x)/P(0))$. (2) We call {\em first representation} of a SP the one with signs $(+)$ and/or $(-)$. For a SP in its {\em second representation} $\Sigma _{m_1,\ldots ,m_s}$, if each of its maximal sequences of, say, $k$ consecutive units is replaced by the symbol $[k]$, then one obtains the {\em third representation} of the SP. E.g. the SP $$(+,-,-,+,-,+,-,-,-)~=~\Sigma _{1,2,1,1,1,3}$$ can be represented also in the form $\Sigma _{[1],2,[3],3}$. We call the signs $(+)$ and $(-)$ of the first representation and the numbers $m_i$ of the second one {\em components} of the SP. The components larger than $1$ and the maximal sequences of units in the third representation are called {\em elements} of the~SP.} \end{nota} \begin{rems}\label{rems2} {\rm (1) The polynomial $x^dP(1/x)$ is the {\em reverted} of the polynomial $P$ (i.e. read from the back). Its roots are the reciprocals of the roots of $P$. The roots of $P(-x)$ are the opposite of the roots of $P$. (2) The applications $$\iota_m:\sigma (P)\mapsto \sigma ^m(P)~~~\, \, \, {\rm and}~~~\, \, \, \iota_r:\sigma (P)\mapsto \sigma ^r(P)$$ are two commuting involutions. 
We set $$\sigma ^{mr}(P)~:=~ \sigma ^m(\sigma ^r(P))~=~\sigma ^r(\sigma ^m(P))~=:~\sigma ^{rm}(P)~.$$ For $d\geq 1$, it is always true that $\sigma (P)\neq \sigma ^m(P)$ (because their second signs are opposite), but one might have $\sigma (P)=\sigma ^r(P)$ or $\sigma (P)=\sigma ^{mr}(P)$. Thus the set $\{ \sigma (P)$, $\sigma ^m(P)$, $\sigma ^r(P)$, $\sigma ^{mr}(P)\}$ contains either four or two distinct~SPs. (3) The SPs $\sigma (P)$, $\sigma ^m(P)$, $\sigma ^r(P)$ and $\sigma ^{mr}(P)$ are simultaneously canonical or not. With regard to Example~\ref{ex1} one has} $$\begin{array}{ll} \sigma ^r(\Sigma _{1,m_2})=\Sigma _{m_2,1}~,&\\ \sigma ^m(\Sigma _{1,m_2})=\Sigma _{2,[m_2-1]}~,& \sigma ^{mr}(\Sigma _{1,m_2})=\Sigma _{[m_2-1],2}~,\\ \\ \sigma ^r(\Sigma _{m_1,1,m_3})=\Sigma _{m_3,1,m_1}~,&\\ \sigma ^m(\Sigma _{m_1,1,m_3})=\Sigma _{[m_1-1],3,[m_3-1]}~,& \sigma ^{mr}(\Sigma _{m_1,1,m_3})=\Sigma _{[m_3-1],3,[m_1-1]}~,\\ \\ \sigma ^r(\Sigma _{1,m_2,1})=\Sigma _{1,m_2,1}~~~{\rm and}& \sigma ^m(\Sigma _{1,m_2,1})=\sigma ^{mr}(\Sigma _{1,m_2,1})= \Sigma _{2,[m_2-2],2}~. \end{array}$$ \end{rems} \begin{defi}\label{defitype1} {\rm (1) We say that a SP $\sigma$ of length $d+1$ is {\em of type 1} (notation: $\sigma \in \mathcal{T}_{1,d}$) if either all its even or all its odd positions contain the same sign.
(2) We say that a SP $\sigma$ of length $d+1$ is {\em of type 2} (notation: $\sigma \in \mathcal{T}_{2,d}$) if (i) in its second representation the SP $\sigma$ does not have two consecutive components $m_i$ larger than $1$; (ii) for $2\leq i\leq s-1$, one has $m_i\neq 2$ (but $m_1=2$ and/or $m_s=2$ is allowed).} \end{defi} \begin{rem} {\rm SPs of type 1 are used in the formulation of a result concerning another problem connected with Descartes' rule of signs and formulated for real (not necessarily hyperbolic) polynomials, see Proposition~4 in~\cite{FoKoSh}.} \end{rem} \begin{ex}\label{ex2} {\rm (1) For $d=6$ and for the SP $$\sigma ^{\dagger}~:=~(+,-,-,-,+,-,+)~=~\Sigma _{1,3,1,1,1}~=~ \Sigma _{[1],3,[3]}$$ one has $\mathcal{T}_{1,6}\ni \sigma ^{\dagger}\in \mathcal{T}_{2,6}$, because there are $(-)$-signs in all odd positions (namely, $1$, $3$ and $5$) and conditions (i) and (ii) from Definition~\ref{defitype1} hold true. (2) The SP $\sigma _0$ from Example~\ref{defiCPP} is neither a type 1 nor a type 2 SP. (3) One has $\mathcal{T}_{1,7}\not\ni \Sigma_{[1],4,[3]}\in \mathcal{T}_{2,7}$. (4) The following SPs are of type 1: $\Sigma _{A,[B]}$, $\Sigma _{[A],B}$, $\Sigma _{A,[2B+1],C}$, $\Sigma _{[A],2B+1,[C]}$, $A$, $B$, $C\in \mathbb{N}$.} \end{ex} \begin{rem} {\rm The SPs $\sigma (P)$, $\sigma ^m(P)$, $\sigma ^r(P)$ and $\sigma ^{mr}(P)$ are simultaneously of type 1 or not. E.g. of type 1 are the SPs $\sigma _{\bullet}:=\Sigma _{m_1,[u],m_s}$ for $s$ odd (with $u:=d+1-m_1-m_s$),} $$\sigma _{\bullet}^m=\Sigma _{[m_1-1],u+2,[m_s-1]}~,~~~\, \sigma _{\bullet}^r=\Sigma _{m_s,[u],m_1}~~~\, {\rm and}~~~\, \sigma _{\bullet}^{mr}=\Sigma _{[m_s-1],u+2,[m_1-1]}~.$$ \end{rem} \begin{prop} (1) One has $\mathcal{T}_{1,d}\subset \mathcal{T}_{2,d}$. (2) One has $\iota_m( \mathcal{T}_{2,d})=\mathcal{T}_{2,d}$ and $\iota_r( \mathcal{T}_{2,d})=\mathcal{T}_{2,d}$. \end{prop} \begin{proof} Part (1). 
Indeed, if condition (i) of Definition~\ref{defitype1} does not hold true, then the SP is of the form $$(\cdots ,+,+,-,-,\cdots )~~~\, \, \, {\rm or}~~~\, \, \, (\cdots ,-,-,+,+,\cdots )$$ and each of the sequences of signs of even or odd monomials has a sign variation. If condition (ii) does not hold true, then the SP is of the form $$(\cdots ,-,+,+,-,\cdots )~~~\, \, \, {\rm or}~~~\, \, \, (\cdots ,+,-,-,+,\cdots )$$ and again each of these sequences has at least one sign variation. Part (2). The inclusion $\iota_r( \mathcal{T}_{2,d})\subset \mathcal{T}_{2,d}$ follows directly from Definition~\ref{defitype1}. As $\iota_r$ is an involution, this inclusion is an equality. For a SP $\sigma ^{\triangle}\in \mathcal{T}_{2,d}$, its image $\iota _m(\sigma ^{\triangle})$ is defined by the following rules: (a) An element $A>1$ of $\sigma ^{\triangle}$ is replaced by $[A-2]$ if $A$ is not at one of the ends of $\sigma ^{\triangle}$, and by $[A-1]$ if it is. (b) An element $[B]$ of $\sigma ^{\triangle}$ is replaced by $B+2$ if $[B]$ is not at one of the ends of $\sigma ^{\triangle}$, and by $B+1$ if it is. One can deduce from rules (a) and (b) that conditions (i) and (ii) hold true for the SP~$\iota _m(\sigma ^{\triangle})$. Hence $\iota_m( \mathcal{T}_{2,d})\subset \mathcal{T}_{2,d}$ and as $\iota_m$ is an involution, this inclusion is an equality. \end{proof} \section{Results on canonical sign patterns} \begin{tm}\label{tm1} Every type 1 SP is canonical. \end{tm} \begin{proof} We prove the theorem by induction on $d$. For $d=1$, there is nothing to prove. For $d=2$, one has to consider the SPs $\sigma ^{\sharp}:=(+,+,-)$ and $\sigma ^{\flat}:=(+,-,-)$. For a HP $P:=(x-a)(x+b)=x^2+g_1x+g_0$, one has $g_1=b-a$ which is $>0$ if $b>a$ and $<0$ for $b<a$ from which for $d=2$ the theorem follows. Suppose that $d\geq 3$. Consider a HP $P$ with all roots simple defining a SP $\sigma$.
In the one-parameter family of polynomials $P^*_t:=tP+(1-t)P'$, $t\in [0,1]$, every polynomial is hyperbolic with all roots simple and for $t\in (0,1]$, all roots of $P^*_t$ are nonzero. Moreover, for $t\in (0,1]$, the polynomial $P^*_t$ defines the SP $\sigma$. For $t=0$, by inductive assumption, the moduli of the roots of the HP $P'$ define the canonical order. For $t\in (0,1]$, there is no equality between a modulus of a positive and a modulus of a negative root of $P^*_t$. Indeed, if $P^*_t$ has roots $\pm \gamma$, $\gamma >0$, then \begin{equation}\label{eqQpm} Q_{t,\pm}(\gamma )~:=~P^*_t(\gamma )~\pm ~P^*_t(-\gamma )~=~0~.\end{equation} This is impossible, because at least one of the two quantities $Q_{t,\pm}(\gamma )$ is a sum of terms of the same sign. Thus the $d-1$ largest of the moduli of roots of $P^*_t$ define the same order as the roots of $P^*_0$ (which is the canonical order w.r.t. the SP obtained from $\sigma$ by deleting its last component). The root of least modulus (for $t$ close to $0$) is positive if the last two components of $\sigma$ are different and negative if they are equal. Thus for $t\in (0,1]$, the moduli of the roots of $P^*_t$ (hence in particular the ones of $P^*_1$) define the canonical order. \end{proof} \begin{tm}\label{tm2} A canonical SP is a type 2 SP. \end{tm} \begin{rem} {\rm Theorem~\ref{tm2} proposes necessary conditions for a SP to be canonical. It would be interesting to know how far from sufficient these conditions are.} \end{rem} \begin{proof} Suppose that a given SP $\sigma$ has components $m_j=A>1$ and $m_{j+1}=B>1$. The SP $\Sigma _{A,B}$ is not canonical, see part (2) of Example~\ref{ex1}. Hence one can construct two polynomials $P$ and $Q$ defining the SP $\Sigma _{A,B}$ and with different orders of their moduli of roots. 
To construct two polynomials realizing the SP $\sigma$ one starts with $P$ and $Q$ and then uses concatenation of polynomials as described in the proof of Proposition~\ref{propevery} and in Remarks~\ref{rems1}. At each new concatenation the modulus of the new root is either much smaller or much larger than the moduli of the previously existing roots. Hence the orders of the moduli of the roots of the two polynomials constructed in this way after $P$ and $Q$ remain different. If the SP $\sigma$ has a component $m_i=2$, $2\leq i\leq s-1$, then it suffices to consider the case $m_{i-1}=m_{i+1}=1$. In this case one chooses two polynomials $P$ and $Q$ realizing the SP $\Sigma _{1,2,1}$ with different orders of the moduli of their roots; such polynomials exist, see part (1) of Example~\ref{ex1}. After this one again uses the techniques of concatenation to realize the SP $\sigma$ with two different orders of the moduli of the roots, starting with $P$ and $Q$ respectively. \end{proof} \begin{prop}\label{prop1n11} For $d\geq 5$, the SP $\Sigma _{[1],d-2,[2]}$ is canonical. \end{prop} \begin{cor}\label{cor1n11} For $d\geq 5$, the three SPs $\Sigma_{[2],d-2,[1]}=\sigma ^r(\Sigma _{[1],d-2,[2]})$, $\Sigma_{2,[d-4],3}=\sigma ^m(\Sigma _{[1],d-2,[2]})$ and $\Sigma_{3,[d-4],2}=\sigma ^{mr}(\Sigma _{[1],d-2,[2]})$ are canonical. \end{cor} The corollary follows from part (3) of Remarks~\ref{rems2}. \begin{proof}[Proof of Proposition~\ref{prop1n11}] For $d\geq 5$ odd, the SP is of type 1 and one can apply Theorem~\ref{tm1}. For $d=4$, the SP is not canonical, see Theorem~\ref{tm2}. So we assume that $d\geq 6$ (the parity of $d$ is of no importance in the proof). Without loss of generality one can assume that the middle modulus of a positive root of a HP $P:=x^d+\sum _{j=0}^{d-1}a_jx^j$ realizing the SP $\Sigma _{[1],d-2,[2]}$ equals $1$ (this can be achieved by a linear change of the variable $x$). 
So we denote the moduli of positive roots by $0<\varepsilon <1<A$, and by $0<\gamma _1<\gamma _2<\cdots <\gamma _{d-3}$ the moduli of negative roots. Denote by $0<\delta _1<\cdots <\delta _{d-3}$ the moduli of negative and by $0<\varphi <\psi$ the moduli of positive roots of $P'$ (recall that $P'$ defines the SP $\Sigma _{1,d-2,1}$ which is canonical, see part (1) of Example~\ref{ex1}). As $\Sigma _{1,d-2,1}$ is canonical, one has $\varphi <\delta _1$, and by Rolle's theorem, $\gamma _j<\delta _{j+1}<\gamma _{j+1}$, $j=1$, $\ldots$, $d-4$. For $\delta _1$, one has $0<\delta _1<\gamma _1$. Thus $$\varepsilon ~<~\varphi ~<~\delta _1~<~\gamma _1~.$$ Denote by $0<\eta _1<\cdots <\eta _{d-3}$ the moduli of negative and by $0<\lambda <\mu$ the moduli of positive roots of the HP $P^{\dagger}:=xP'-dP=-a_{d-1}x^{d-1}-2a_{d-2}x^{d-2}-\cdots -da_0$. The latter defines the SP $\Sigma _{d-2,[2]}$ which is canonical, see part (1) of Example~\ref{ex1}. The positive roots of $P$ and $P^{\dagger}$ interlace, and so do their negative roots as well; we will see below that this is not true about all the roots of $P$ and $P^{\dagger}$. The leading coefficient of $P^{\dagger}$ is positive, so the limits at $+\infty$ of $P$ and $P^{\dagger}$ equal $+\infty$. Their limits at $-\infty$ are opposite. The leftmost root of $P$ equals $-\gamma _{d-3}$. One has $P^{\dagger}(-\gamma _{d-3})=-\gamma _{d-3}P'(-\gamma _{d-3})$. Hence $$\begin{array}{lll} {\rm either}&\lim _{x\rightarrow -\infty}P(x)~=~-\infty ~,& P'(-\gamma _{d-3})~>~0~,\\ \\ &\lim _{x\rightarrow -\infty}P^{\dagger}(x)~=~+\infty ~,& P^{\dagger}(-\gamma _{d-3})~<~0 \\ \\ {\rm or}& \lim _{x\rightarrow -\infty}P(x)~=~+\infty ~,&P'(-\gamma _{d-3})~<~0~,\\ \\ &\lim _{x\rightarrow -\infty}P^{\dagger}(x)~=~-\infty ~,& P^{\dagger}(-\gamma _{d-3})~>~0~. \end{array}$$ In both cases the leftmost root $-\eta _{d-3}$ of $P^{\dagger}$ is $<-\gamma _{d-3}$. 
By Rolle's theorem and using the fact that the SP $\Sigma _{d-2,[2]}$ is canonical, $$0~<~\lambda ~<~1~<~\mu ~<~\eta _1~<~\gamma _2~<~\eta _2~.$$ One can show that $-\eta _1<-\gamma _1<\varepsilon <\lambda$ which means that the interlacing of the roots of $P$ and $P^{\dagger}$ is interrupted when the variable $x$ passes through~$0$. The condition $a_{d-1}<0$ reads: \begin{equation}\label{eqAeps}A+1+\varepsilon -\sum _{j=1}^{d-3}\gamma _j~>~0~. \end{equation} As $\gamma _1>\varepsilon$ and $\gamma _2>1>\varepsilon$, condition (\ref{eqAeps}) is possible only if $A>\gamma _{d-3}$. Thus $\varepsilon <\gamma _1<\cdots <\gamma _{d-3}<A$ and to prove the proposition there remains to show that $1<\gamma _1$. Set $$\begin{array}{cclccclc}\sigma _1&:=&\sum _{j=3}^{d-3}1/\gamma _j&,& \sigma _2&:=&\sum _{3\leq i<j\leq d-3}1/(\gamma _i\gamma _j)&,\\ \\ B&:=& \frac{1}{A}+1+\frac{1}{\varepsilon}&{\rm and}&C&:=&\frac{1}{A\varepsilon}+ \frac{1}{A}+\frac{1}{\varepsilon}&.\end{array}$$ The conditions $a_0<0$, $a_1>0$ and $a_2<0$ imply \begin{equation}\label{eqtwoineq}\begin{array}{lclclclc} &&B&-&\left( \frac{1}{\gamma _1}+\frac{1}{\gamma _2}+\sigma _1\right) &>&0& {\rm and}\\ \\ \Phi&:=&\Lambda&-&B \left( \frac{1}{\gamma _1}+\frac{1}{\gamma _2}+ \sigma _1\right) &>&0~,&{\rm where}\\ \\ \Lambda&:=&C& +&\frac{1}{\gamma _1\gamma _2}+\left( \frac{1}{\gamma _1}+ \frac{1}{\gamma _2}\right) \sigma _1+\sigma _2&.&& \end{array} \end{equation} Suppose that $\gamma _1\leq 1$. 
Then the following inequalities hold true: \begin{equation}\label{eqineq1} \frac{1}{A\varepsilon}-\frac{1}{\gamma _2\varepsilon}~<~0~, \end{equation} because $A>\gamma _2$, \begin{equation}\label{eqineq2} -\frac{1}{\gamma _1}\left( \frac{1}{A}+\frac{1}{\varepsilon}\right) + \frac{1}{A}+\frac{1}{\varepsilon}~\leq ~0~, \end{equation} \begin{equation}\label{eqineq3} -B\sigma _1+\left( \frac{1}{\gamma _1}+\frac{1}{\gamma _2}\right) \sigma _1+ \sigma _2~<~0, \end{equation} (because $-B\sigma _1<-(1/\gamma _1+1/\gamma _2+\sigma _1)\sigma _1$, see the first of inequalities (\ref{eqtwoineq}), and one has $\sigma _2<(\sigma _1)^2$) and as $\gamma _2>1$, \begin{equation}\label{eqineq4} -\frac{1}{\gamma _1}+\frac{1}{\gamma _1\gamma _2}~<~0. \end{equation} The sum of the left-hand sides of inequalities (\ref{eqineq1}), (\ref{eqineq2}), (\ref{eqineq3}) and (\ref{eqineq4}) equals $$ \Lambda -\left( \frac{1}{\gamma _2\varepsilon}+\frac{1}{\gamma _1}\left( \frac{1}{A}+\frac{1}{\varepsilon}\right) +B\sigma _1+\frac{1}{\gamma _1} \right) =\Lambda -B\left( \frac{1}{\gamma _1}+\frac{1}{\gamma _2}+ \sigma _1\right) +\frac{1}{\gamma _2}+\frac{1}{\gamma _2A}~.$$ Thus $\Lambda -B\left( \frac{1}{\gamma _1}+\frac{1}{\gamma _2}+ \sigma _1\right) +\frac{1}{\gamma _2}+\frac{1}{\gamma _2A}<0$ which contradicts the second of inequalities (\ref{eqtwoineq}). \end{proof} \section{On non-canonical sign patterns\protect\label{secnoncanon}} The present section deals with SPs with two sign changes, i.e. with $s=3$, see Notation~\ref{nota1}. For $m_1\geq 2$, $m_2\geq 2$, $m_3\geq 2$, such a SP is not canonical, see Theorem~\ref{tm2}. \begin{nota} {\rm We set $m:=m_1$, $n:=m_2$, $q:=m_3$ and we denote by $0<\beta <\alpha$ the positive and by $-\gamma _{d-2}<\cdots <-\gamma _1<0$ the negative roots of a degree $d$ HP $P$ realizing the SP $\Sigma _{m,n,q}$. 
By $m^*$, $n^*$, $q^*$ we denote the numbers of negative roots of modulus larger than $\alpha$, between $\beta$ and $\alpha$ and smaller than $\beta$ respectively; hence $m^*+n^*+q^*=d-2$. By $\tau _1\geq 0$, $\tau _2\geq 0$, $\delta>0$, $\ell >0$ and $r\geq 2$, we denote integers, where $d=\delta +\tau _1+\tau _2$.} \end{nota} We remind that the canonical order of the roots corresponds to the case $m^*=m-1$, $n^*=n-1$, $q^*=q-1$, see Definition~\ref{defiCO}. \begin{tm}\label{tm3} (1) For $$\begin{array}{lll}r^2~<~\delta ~<~(r+1)^2~,&\delta -r~\in ~2\mathbb{Z}+1~,& \\ \\ m~\geq ~(\delta -r+1)/2~,&q~\geq ~(\delta -r+1)/2&{\rm and}~~~\, \, \, n~=~r~,\end{array}$$ the SP $\Sigma _{m,n,q}$ is realizable by HPs with all possible values of $m^*$, $n^*$, $q^*$ such that $m^*\geq \tau _1:=m-(\delta -r+1)/2$ and $q^*\geq \tau _2:=q-(\delta -r+1)/2$. (2) For $$\begin{array}{lll}r^2~<~\delta ~<~(r+1)^2~,&\delta -r~\in ~2\mathbb{Z}~,& \\ \\ m~\geq ~(\delta -r)/2~,&q~\geq ~(\delta -r)/2&{\rm and}~~~\, \, \, n~=~r+1~, \end{array}$$ the SP $\Sigma _{m,n,q}$ is realizable by HPs with all possible values of $m^*$, $n^*$, $q^*$ such that $m^*\geq \tau _1:=m-(\delta -r)/2$ and $q^*\geq \tau _2:=q-(\delta -r)/2$. (3) For $\delta =r^2$, the SP $$(~\tau _1+r(r-1)/2+1~,~r~,~\tau _2+r(r-1)/2~)~~~({\rm resp.}~~~ (~\tau _1+r(r-1)/2~,~r~,~\tau _2+r(r-1)/2+1~))$$ is realizable by HPs with all possible values of $m^*$, $n^*$, $q^*$ such that $m^*\geq \tau _1+1$ and $q^*\geq \tau _2$ (resp. $m^*\geq \tau _1$ and $q^*\geq \tau _2+1$). \end{tm} \begin{rems} {\rm (1) Consider the case $\tau _1=\tau _2=0$. Hence $d=\delta$ and all possible orders of the moduli of the $d-2$ negative and $2$ positive roots are realizable. The number of these orders is} $$\sum _{k=0}^{d-2}~\sum _{j=0}^{d-2-k}1~=~\sum _{k=0}^{d-2}(d-1-k)~=~d(d-1)/2$$ {\rm (here $k$ and $j$ are the numbers of moduli of negative roots larger than $\alpha$ and between $\beta$ and $\alpha$ respectively). At the same time $d\sim r^2$, i.e. 
$d\sim n^2$. Thus the theorem guarantees the possibility to realize the SP $\Sigma _{m,n,q}$ by $\sim n^4/2$ HPs with different orders of the moduli of their roots when $m$ and $q$ are (almost) equal. The latter condition is essential -- for $q=1$, the number of different orders is $\sim 2n$, see Theorem~4 in~\cite{Koarxiv}. (2) The theorem gives only sufficient conditions for realizability of certain SPs with two sign changes by HPs with different orders of the moduli of their roots. It would be interesting to obtain necessary conditions as well.} \end{rems} In order to prove the theorem we need a technical lemma. \begin{nota} {\rm We set $P_{\ell}(x):=(x-1)^2(x+1)^{\ell}$, $\ell \geq 2$. This polynomial contains either $0$ or $2$ vanishing coefficients, see Lemma~\ref{lm1}. By $\Sigma (\ell)$ we denote its SP which, in the case when there are $2$ vanishing coefficients, we represent in the form $(v,0,n,0,w)$. This means that $\Sigma (\ell)$ begins with $v=m-1$ signs~$(+)$ followed by a zero followed by $n=n(\Sigma (\ell ))$ signs~$(-)$ followed by a zero followed by $w=q-1$ signs~$(+)$. If there are no vanishing coefficients, then we write $\Sigma (\ell)=(v,n,w)$ in which case $v=m$ and $w=q$.} \end{nota} \begin{lm}\label{lm1} (1) For~~$r^2-2~<~\ell ~<~(r+1)^2-2$~~and~~$\ell -r~\in ~2\mathbb{Z}+1$, $$\Sigma (\ell )~=~(~(\ell -r+3)/2~,~r~,~(\ell -r+3)/2~)~,$$ so~~$n(\Sigma (\ell ))=r$. (2) For~~$r^2-2~<~\ell ~<~(r+1)^2-2$~~and~~$\ell -r~\in ~2\mathbb{Z}$, $$\Sigma (\ell )~=~(~(\ell -r+2)/2~,~r+1~,~(\ell -r+2)/2~)~,$$ so~~$n(\Sigma (\ell ))~=~r+1$. (3) For~~$\ell ~=~r^2-2$, the~~SP~~$\Sigma (\ell )$~~equals $$\Sigma (\ell )~=~(~r(r-1)/2~,~0~,~r-1~,~0~,~r(r-1)/2~)~.$$ Hence~~$n(\Sigma (\ell ))~=~r-1$. \end{lm} \begin{proof} Clearly $P_{\ell}=\sum _{j=0}^{\ell +2}c_jx^j$, where $c_j={\ell \choose j}-2{\ell \choose j-1}+{\ell \choose j-2}$. 
The condition $c_j=0$ is equivalent to $$4j^2-4(\ell +2)j+(\ell +1)(\ell +2)~=~0$$ which yields $$j~=~j_{\pm}(\ell )~:=~(\ell +2\pm \sqrt{\ell +2})/2~.$$ For $\ell =r^2-2$, one gets $j=(r^2\pm r)/2$ from which part (3) follows (both numbers $(r^2\pm r)/2$ are natural). When $\ell$ is not of the form $r^2-2$ the condition $c_j=0$ does not provide a natural solution. Hence no coefficient of $P_{\ell}$ vanishes. The formula expressing $j_{\pm}(\ell )$ implies that $c_j>0$ for $j\leq [(\ell +2-(r+1))/2]$ while $c_{[(\ell +2-(r+1))/2]+1}<0$; here $[.]$ stands for the integer part. If $\ell$ and $r$ are of different parity, then $$[(\ell +2-(r+1))/2]~=~(\ell -r+1)/2$$ which proves part (1). If $\ell$ and $r$ are of one and the same parity, then $$[(\ell +2-(r+1))/2]~=~(\ell -r)/2$$ which proves part (2). \end{proof} \begin{proof}[Proof of Theorem~\ref{tm3}] To prove parts (1) and (2) of Theorem~\ref{tm3} we use parts (1) and (2) of Lemma~\ref{lm1} respectively. We consider first the case $\tau _1=\tau _2=0$. In this case the conditions $$\begin{array}{lllll} m~\geq ~(\delta -r+1)/2~,&q~\geq ~(\delta -r+1)/2&{\rm and}&n~=~r& {\rm from~part~(1)~or}\\ \\ m~\geq ~(\delta -r)/2~,& q~\geq ~(\delta -r)/2&{\rm and}&n~=~r+1&{\rm from~part~(2)}\end{array}$$ are possible only if $$\begin{array}{lllll} m~=~(\delta -r+1)/2~,&q~=~(\delta -r+1)/2&{\rm and}&n~=~r&{\rm or}\\ \\ m~=~(\delta -r)/2~,&q~=~(\delta -r)/2&{\rm and}&n~=~r+1&\end{array}$$ respectively, because $m+n+q=\delta +1$. Set $d=\delta :=\ell +2$. We deform the polynomial $P_{\ell}$ corresponding to part (1) or (2) of Lemma~\ref{lm1} so that the moduli of the roots are all distinct and define any possible order (fixed in advance) on the positive half-axis. The positive roots $\beta <\alpha$ of the deformed polynomial (denoted by $\tilde{P}_{\ell}$) remain close to $1$ and the $\ell$ negative roots remain close to $-1$. 
Hence the signs of the coefficients of $\tilde{P}_{\ell}$ are the same as the signs of the coefficients of $P_{\ell}$ and $$\begin{array}{ll} \sigma (\tilde{P}_{\ell})~=~(~(\delta -r+1)/2~,~r~,~(\delta -r+1)/2~)& {\rm in~the~case~of~part~(1)~or}\\ \\ \sigma (\tilde{P}_{\ell})~=~(~(\delta -r)/2~,~r+1~,~(\delta -r)/2~)& {\rm in~the~case~of~part~(2).} \end{array}$$ This proves the theorem for $\tau _1=\tau _2=0$. In the general case, i.e. for $\tau _1\geq 0$ and $\tau _2\geq 0$, one first constructs the polynomial $\tilde{P}_{\ell}$ as above. Then one performs $\tau _1$ concatenations of $\tilde{P}_{\ell}$ with polynomials of the form $1+\varepsilon _jx$, $j=1$, $\ldots$, $\tau _1$, as explained in part (2) of Remarks~\ref{rems1}, where $$0~<~\varepsilon _{\tau _1}~\ll ~\varepsilon _{\tau _1-1}~\ll ~\cdots ~\ll ~ \varepsilon _1~\ll ~1~.$$ This adds $\tau _1$ negative roots whose moduli are larger than~$\alpha$. After this one performs $\tau _2$ concatenations, see part (1) of Remarks~\ref{rems1}, with polynomials of the form $x+\varepsilon _j$, $j=\tau _1+1$, $\ldots$, $\tau _1+\tau _2$, where $$0~<~\varepsilon _{\tau _1+\tau _2}~\ll ~\varepsilon _{\tau _1+\tau _2-1}~\ll ~ \cdots ~\ll ~\varepsilon _{\tau _1+1}~\ll ~\varepsilon _{\tau _1}~.$$ This adds $\tau _2$ negative roots whose moduli are smaller than $\beta$. Part (3). Consider first the case $\tau _1=\tau _2=0$. We use Lemma~\ref{lm1} with $\ell =r^2-3$. Hence one can apply part (2) of Lemma~\ref{lm1} with $r-1$ substituted for $r$ (because $\ell -(r-1)\in 2\mathbb{Z}$). This implies that the polynomial $P_{r^2-3}$ realizes the SP $(r(r-1)/2,r,r(r-1)/2)$. 
Setting $P_{r^2-3}:=\sum _{j=0}^da_jx^j$ one deduces that $$\begin{array}{llllll} a_{r(r-1)/2}~<~0&,&a_{r(r-1)/2-1}~>~0&,&a_{r(r-1)/2}+a_{r(r-1)/2-1}~=~0&,\\ \\ a_{r(r+1)/2+1}~>~0&,&a_{r(r+1)/2}~<~0&,&a_{r(r+1)/2+1}+a_{r(r+1)/2}~=~0&.\end{array}$$ The two equalities to $0$ result from the polynomial $P_{r^2-2}=(x+1)P_{r^2-3}$ having vanishing coefficients of $x^{r(r-1)/2}$ and $x^{r(r+1)/2}$, see part (3) of Lemma~\ref{lm1}. Hence for the SPs defined by the polynomials $P_{\pm}:=(x+1\pm \varepsilon )P_{r^2-3}$, $\varepsilon >0$, one has $$\sigma (P_+)~=~(r(r-1)/2+1,r,r(r-1)/2)~~~\, \, {\rm and}~~~\, \, \sigma (P_-)~=~(r(r-1)/2,r,r(r-1)/2+1)~.$$ Then one perturbs the roots of $P_{r^2-3}$ (the perturbed negative roots must keep away from the root $-1\mp \varepsilon$ of $P_{\pm}$). In the case of $P_+$ (resp. $P_-$) the largest (resp. the smallest) of the moduli of perturbed roots is the one of the negative root $-1-\varepsilon$ (resp. $-1+\varepsilon$) and the order of the remaining $d-3$ negative and $2$ positive roots can be arbitrary. This proves part (3) for $\tau _1=\tau _2=0$. In the general case, i.e. for $\tau _1\geq 0$ and $\tau _2\geq 0$, the proof is finished in the same way as for parts (1) and~(2). \end{proof} \end{document}
\begin{document} \title{Thresholded Quantum LIDAR \--- Exploiting Photon-Number-Resolving Detection } \author{Lior Cohen} \affiliation{ Hearne Institute for Theoretical Physics, and Department of Physics and Astronomy, Louisiana State University, Baton Rouge, Louisiana 70803, USA. } \affiliation{ [email protected]} \author{Elisha S. Matekole} \affiliation{ Hearne Institute for Theoretical Physics, and Department of Physics and Astronomy, Louisiana State University, Baton Rouge, Louisiana 70803, USA. } \author{Yoni Sher} \affiliation{ School of Computer Science and Engineering, Hebrew University of Jerusalem, Jerusalem 91904, Israel.} \author{Daniel Istrati} \affiliation{ Racah Institute of Physics, Hebrew University of Jerusalem, Jerusalem 91904, Israel.} \author{Hagai S. Eisenberg} \affiliation{ Racah Institute of Physics, Hebrew University of Jerusalem, Jerusalem 91904, Israel.} \author{Jonathan P. Dowling} \affiliation{ Hearne Institute for Theoretical Physics, and Department of Physics and Astronomy, Louisiana State University, Baton Rouge, Louisiana 70803, USA. } \affiliation{ NYU-ECNU Institute of Physics at NYU Shanghai, 3663 Zhongshan Road North, Shanghai, 200062, China. } \affiliation{ LCAS-Alibaba Quantum Computing Laboratory, CAS Center for Excellence in Quantum Information and Quantum Physics, University of Science and Technology of China, Shanghai 201315, China. } \affiliation{ National Institute of Information and Communications Technology, 4-2-1, Nukui-Kitamachi, Koganei, Tokyo 184-8795, Japan } \date{\today} \begin{abstract} We present a technique that improves the signal-to-noise-ratio (SNR) of range-finding, sensing, and other light-detection applications. The technique filters out low photon numbers using photon-number-resolving detectors (PNRDs). This technique has no classical analog and cannot be done with classical detectors. We investigate the properties of our technique and show under what conditions the scheme surpasses the classical SNR. 
Finally, we simulate the operation of a rangefinder, showing improvement with a low number of signal samplings and confirming the theory with a high number of signal samplings. \end{abstract} \flushbottom \maketitle \thispagestyle{empty} \textit{Introduction.}\----Electromagnetic radiation is regularly used for measuring and sensing the physical world. One particular sensing method, namely, laser range-finding and Light Detection and Ranging (LIDAR) is under continuous development. Increasing the range requires sensitive detectors, and more recently, single-photon detectors (SPDs) \cite{warburton2007subcentimeter,howland2013photon,pawlikowska2017single,li2019single}, and photon-number-resolving detectors (PNRDs) \cite{bao2014laser,sher2018low} have been used for this purpose. It is an ongoing question what quantum optics can contribute to applications like LIDAR. It has been proven that loss, such as in rangefinders and LIDARs, eliminates most quantum effects \cite{dorner2009optimal,lee2009optimization}, thus, it is ineffective to use quantum states of light for those applications, rather than classical light such as coherent states \cite{jiang2013super}. However, many proven quantum effects are not a result of using quantum states, but of using quantum detection of these states. For example, Bell-inequality violations are commonly attributed to the use of entangled states \cite{qian2015shifting}. However, all-optical demonstrations have been done with Gaussian states, such as spontaneous parametric down-conversion \cite{giustina2013bell}. It is well known that Bell's inequalities are satisfied when both the state and the detection are Gaussian \cite{bell1987speakable}, thus, in all-optical demonstrations, Bell-inequality violations are caused by the non-Gaussian single-photon detection \cite{giustina2013bell}. 
Having said that, even though rangefinders and LIDARs are operated with coherent states, quantum detection strategies such as parity \cite{jiang2013super}, and photon thresholding (filtering out low photon-numbers) \cite{bao2014laser} might still give a quantum advantage. In this paper, we rigorously derive the SNR improvement of threshold detection over intensity detection. One form of laser range-finding is illustrated in Fig. \ref{fig:system}. By sending short pulses of light, and recording their return time, one can measure the range to a target using the speed of light. The range-finding information can be extended to three-dimensional imaging by adding spatial resolution to the detection. Spatial resolution can be obtained by a gated camera \cite{busck2004gated}, raster scanning \cite{pawlikowska2017single} or blocking masks \cite{howland2013photon,sher2018low}. The last method also provides compressed data acquisition, where the number of required measurements is far less than the number of image pixels. \begin{figure} \caption{Illustration of the rangefinder system. A laser pulse is sent to a remote target and a small portion is reflected back into the device. After spatial and spectral filtering, the light is detected by a PNRD. Then, the photon number is thresholded by thresholding the voltage height. A one-bit comparator stops the timer when a voltage peak, caused by the detection of a bunch of photons, exceeds the voltage threshold. } \label{fig:system} \end{figure} In daylight range-finding, the classical noise from solar radiation dominates the quantum noise, the latter of which is due to the photon-number fluctuations of the coherent source. 
Solar radiation is a blackbody radiation, and thus, single-mode sunlight has thermal photon-statistics: \begin{equation} p_{\rm th}(n) = \frac{\bar{n}_{\rm th}^n}{(\bar{n}_{\rm th}+1)^{n+1}}\,, \end{equation} where $p_{\rm th}(n)$ is the probability to measure \textit{n}-photons within the coherence time, and $\bar{n}_{\rm th} = (e^{\hbar\omega/k_BT}-1)^{-1}$ is the average photon number, $\hbar$ and $k_B$ are the Dirac and Boltzmann constants and $\omega$ is the light frequency. The laser is a coherent light source and thus has a Poisson photon distribution: \begin{equation} p_{\rm p}(n) = e^{-\bar{n}_{\rm p}} \frac{\bar{n}_{\rm p}^n}{n!}\,, \end{equation} where $\bar{n}_{\rm p}$ is the average photon number. Since the solar flux is continuous, identifying the signal is equivalent to distinguishing a mixture of coherent and thermal light from thermal light alone. The mixture has mixed photon-statistics \cite{dovrat2012measurements}, $p(n) = \sum_{m=0}^n p_{\rm p}(m)p_{\rm th}(n-m)$ which can be written as \begin{equation} p(n) = e^{\frac{\bar{n}_{\rm p}}{x}-\bar{n}_{\rm p}} \frac{x^n}{n!} \Gamma\Big(\frac{\bar{n}_{\rm p}}{x},n+1\Big)\,, \end{equation} where $x = {\bar{n}_{\rm th}}/{(\bar{n}_{\rm th}+1)}$, and $\Gamma(y,n+1) = n!e^{-y}\sum_{m=0}^n ({y^m}/{m!}) = \int_y^\infty t^n e^{-t}dt $ is the incomplete gamma function. \textit{Quantum SNR versus classical SNR.}\----Typically, in quantum sensing technologies, it is the shot-noise limit (SNL) that is beaten \cite{aasi2013enhanced,israel2014supersensitive}. While sub-SNL sensitivity can be obtained when the classical noise is negligible, it is a much harder task when the classical noise is dominant \cite{escher2011general,cohen2016demonstration}. Nevertheless we show that even in this regime, the SNR of quantum detection schemes can still surpass the SNR of classical detection schemes. Let us compare the classical intensity and our quantum-thresholding detection. 
Here the signal is regarded as the detection output with the coherent light, and the noise with the thermal light alone. As standard intensity detection is sensitive only to the average number of detected photons, the average photon number of the thermal light alone is the noise and the sum of the average photon-number of the two light sources is the signal. Thus, the classical SNR is \begin{equation} {\rm SNR_c} = \frac{\bar{n}_{\rm p}+\bar{n}_{\rm th}} {\bar{n}_{\rm th}}\,. \end{equation} Threshold detection has a binary outcome; it is zero \---- if the detected photon number is below the threshold photon number, and one \---- if the detected photon number is above the threshold photon number. The signal of threshold detection is proportional to the probability of successfully exceeding the threshold when coherent light also hits the detector. The noise is proportional to the probability of exceeding the threshold when only thermal light hits the detector. These probabilities are calculated by summing all the photon-number statistics above $N$, the threshold photon-number. Thus, the noise is $\nu \sum_{n=N}^\infty p_{\rm th}(n) = \nu x^N$, and the signal $\nu\sum_{n=N}^\infty p(n) = \nu\big[1 - \sum_{n=0}^{N-1} p(n)\big]$, where $\nu$ is the number of experimental repetitions. After substituting $p(n)$, reordering the sums and summing over \textit{n}, we are left with, $\nu\big[1 - \sum_{m=0}^{N-1} \big(1-x^{N-m}\big)p_{\rm p}(m)\big]$. Using the formula of the incomplete gamma function and dividing by the noise, we get that the SNR for threshold detection is: \begin{equation} {\rm SNR_q} = \frac{1-\Big(\frac{\Gamma(\bar{n}_{\rm p},N)}{(N-1)!} - \frac{\Gamma(\frac{\bar{n}_{\rm p}}{x},N)}{(N-1)!} e^{\frac{\bar{n}_{\rm p}}{x}-\bar{n}_{\rm p}} x^N\Big)} {x^N}\label{Eq:quSNR}\,. \end{equation} Notice that the noise exponentially decays with the threshold number. This decay eventually gives the SNR improvement that we will see in the following. 
We wish to get some insights into the expression of Eq. \ref{Eq:quSNR}. First, we differentiate the SNR with respect to $\bar{n}_{\rm p}$, \begin{equation*} \frac{\partial}{\partial\bar{n}_{\rm p}}{\rm SNR_q} = \big(\frac{1}{x}-1\big) \frac{\Gamma(\frac{\bar{n}_{\rm p}}{x},N)}{(N-1)!} e^{\frac{\bar{n}_{\rm p}}{x}-\bar{n}_{\rm p}} x^N>0\, \end{equation*} which means that the SNR is a monotonically increasing function of the coherent mean-photon number regardless of the threshold and averaged thermal photon-number. This dependence is expected since increasing the signal intensity should increase the SNR. Next, we check the threshold dependence on photon number. The difference $[{\rm SNR_q}(N+1)-{\rm SNR_q}(N)]$ can be written as $[\sum_{n=N}^\infty p(n+1) - \sum_{n=N}^\infty p(n)x]/{x^{N+1}}$, where the first summation is transformed as $n \rightarrow n+1$. Now the two summations can be regrouped into one, and its argument is $(1-x)p_{\rm p}(n+1)$. Thus, the SNR obeys \begin{equation} [{\rm SNR_q}(N+1)-{\rm SNR_q}(N)] = \frac{1-x}{x^{N+1}}\sum_{n=N}^\infty p_{\rm p}(n+1)>0\,,\label{Eq:therMonoton} \end{equation} i.e, taking larger photon-number thresholds increases the SNR for any intensity of the coherent and thermal light. \begin{figure} \caption{The ratio of the quantum and classical SNR for fixed thermal average photon-number of one. Thresholds of $N=2,3,4,5$ are plotted where a thicker line corresponds to a higher threshold. The dashed black line at one represents the limit, above which the quantum scheme gets a better SNR.} \label{fig2} \end{figure} In order to demonstrate the advantage of our quantum scheme, Fig. \ref{fig2} shows the ratio of the quantum and classical SNR for a fixed averaged-thermal photon number of one. Different threshold photon numbers are plotted with different line widths. 
\textit{Discussion}.\----For many average signal and threshold photon numbers, the ratio of SNR is above one, which means that the quantum SNR exceeds the classical SNR. This improvement is a result of the difference between the signal and noise photon distribution. The thermal distribution is dominant near the low photon numbers, whereas the Poisson distribution is more dominant near the mean photon number (see Fig. S1 in the supplementary material \cite{WinNT}). By using threshold detection we exclude low photon numbers where the noise is dominant. As shown in Eq. \ref{Eq:therMonoton}, the quantum SNR increases when a larger photon number threshold is used. Thus, the ratio of the two SNRs increases with the threshold, since the classical SNR is independent of the threshold. However, taking threshold much larger than the average photon number will cause substantial decrease in the successful threshold detection. Any practical application should choose the threshold photon number in accordance with this trade-off; higher threshold means higher SNR but lower successful threshold detection, lower threshold means higher successful threshold detection but lower SNR. For a practical rangefinder or LIDAR, threshold detection success should be every couple of trials. Thus, in the regime of a few detected signal photons, the best improvement is around four. \begin{figure} \caption{\textbf{(a)} \label{fig3} \end{figure} In Fig. \ref{fig2}, for every threshold there is an averaged signal photon number where the improvement is maximal. In Fig. \ref{fig:3a} this maximum mean photon number is plotted as a function of the threshold. The improvement is maximal where the threshold is around the mean photon number. This observation can be understood by the fact that the coherent light has a more localized distribution than the thermal light, i.e. the variance of Poisson distribution equals the mean and that of thermal distribution equals the mean square. 
Thus, if the threshold is well-above the mean photon number of the signal, the detection loses most of the signal, and if it is well below the mean photon number, it is contaminated with noise without gaining signal. As seen in Fig. \ref{fig2}, the quantum SNR does not always exceed the classical SNR. Figure \ref{fig:3b} is a parameter-space plot, showing the parameters under which quantum detection is superior. Below the line (the darker area) threshold detection presents better SNR. As expected from Eq. \ref{Eq:therMonoton}, the area, where quantum detection outperforms the classical detection, grows as the threshold number is increased. We note that the curved point of each graph holds $N\approx \bar{n}_{\rm th}$. This fact may help to set the threshold as in most applications the noise intensity is approximately known or can be easily measured. In the same manner, it seems from the right bottom side of Fig. \ref{fig:3b} that threshold detection always gives better results where the noise is high and the signal is low. Thus, in high-noise low-signal regime, threshold detection is definitely preferable. We note that the average photon numbers ($\bar{n}_{\rm p},\,\bar{n}_{\rm th}$) are the measured averages, i.e. it already accounts for the loss of the detector. Other effects of the PNRD were considered, based on our PNRD model \cite{cohen2018absolute}, and those effects changed the results slightly. In particular, nonlinear loss has low effect on the results, because we limited our signal to a few photons where the nonlinear loss is negligible (see Fig. S2 and the discussion in the supplementary \cite{WinNT}). \begin{figure} \caption{The simulation results comparing intensity detection and thresholding detection for 100 \textbf{(a)} \label{fig4} \end{figure} While Eq. \ref{Eq:quSNR} and Fig. \ref{fig2} show the average results for the quantum SNR and SNR ratio (i.e. 
infinite ensemble of measurement samplings), most applications may sample the signal only a few times. We simulate multi-target range-finding to show the improvement with a finite number of samplings. In the simulation, the time is divided into 50 time-bins, where the thermal noise is fixed with $\bar n_{\rm th} = 1$. Each time-bin contains noise photons distributed thermally. Four targets are simulated by adding photons with a Poisson distribution of $0.5\,,1\,,3$ and $10$ mean photon numbers at times of $10,20,30$ and $40$, respectively. The simulation runs 100 and 10,000 times, where the former is equivalent to less-than-a-second operation of a typical rangefinder. The simulation results are shown in Fig. \ref{fig4}. Naturally, the effect of low sampling is larger fluctuations, which can be seen in Fig. \ref{fig:4a}, especially for five-photon thresholding where the detection rate is low. The weak target with $\bar{n}_{\rm p}=0.5$ is detected well with two-photon thresholding but not detected at all with five-photon thresholding. This effect is again due to the detection rate. When the number of simulation repetitions is increased, the ratio of the SNR approaches the values of Fig. \ref{fig2}. For the target with $\bar{n}_{\rm p}=10$, the output of five-photon thresholding is 31.7 and of intensity is 11.1. As the noise is normalized to one, the ratio of the SNR is just $\frac{31.7}{11.1}=2.86$ which is exactly the result of Fig. \ref{fig2}. For the weak target with $\bar{n}_{\rm p}=0.5$, the output of two-photon thresholding is 1.58, of five-photon thresholding is 1.77 and of intensity is 1.51, which gives SNR ratios of 1.04 and 1.17 where 1.05 and 1.10 are deduced from Fig. \ref{fig2}. We propose to implement the threshold detector with a PNRD. There may be other implementation methods, such as N-photon-ionization processes. 
Additionally, other detection protocols using PNRDs may give higher gain of the localized photon distribution, and thus, better SNR improvement. Examples include exact photon-number detection (i.e. projecting on a specific Fock state) \cite{khoury2006nonlinear} and a range of photon-number detection. These protocols require knowledge about the signal intensity and are suited to applications with known signal intensity. Threshold detection does not require knowledge about the signal intensity, and thus is suited to applications like range-finding and LIDAR, where the signal intensity is a priori unknown. \textit{Summary.}\----We have shown that PNRDs can provide better SNR by thresholding the photon number, instead of directly detecting intensity. Additionally, we have theoretically tested our results for imperfect PNRD, including but not limited to nonlinear loss. This leads to a slightly lower SNR. The method seems to always improve the SNR in the high-noise low-signal regime. The method has been implemented in rangefinders and LIDARs, but can also be used for any application with low-signal detection in the presence of thermal noise. \textit{Acknowledgements.}\----LC, ESM, and JPD would like to acknowledge support from the Air Force Office of Scientific Research, the Army Research Office, the Defense Advanced Research Projects Agency, and the National Science Foundation. \nocite{dovrat2012simulations} \nocite{fox2006quantum} \end{document}
\begin{document} \title{Efficient progressive readout of a register of (qu)bits} \author{Antoine Tilloy} \email{[email protected]} \affiliation{Laboratoire de Physique Th\'eorique, Ecole Normale Sup\'erieure de Paris (PSL), France } \date{\today} \pacs{} \begin{abstract} Recently, a series of articles by Combes \textit{et al.} has shown that it was possible to greatly improve the measurement rate of a register of qubits for given detector resources by means of a clever feedback control scheme. However, this speed-up came at an exponential cost in terms of complexity and memory use. In this article, I propose a simple efficient algorithm --exponentially more frugal in memory and less complex to implement-- which is asymptotically as fast. I use extensively the implicit classicality of the situation to provide a slightly more straightforward interpretation of the results. I compute the speed-up rates exactly in the case of the proposed model and in the case of the open-loop scheme of Combes \textit{et al.} and prove that they indeed provide the same asymptotic speed-up. \end{abstract} \maketitle \section{Introduction} Measuring a quantum system usually takes a non-negligible amount of time. In some cases, this time turns out to be much larger than the typical timescales of the system dynamics, making \eg system characterization and measurement-based control difficult. In the future, finite measurement times may also put constraints on the performance of quantum computers by limiting the speed at which large qubit registers can be read out. Procedures that can reduce this measurement time while using the same detector resources are thus interesting both from a theoretical and practical point of view. In a recent series of articles \cite{combes2008,combes2010,combes2011,combes2015}, Combes \textit{et al.} have proposed control schemes which increase the measurement speed of qubit registers. 
The methods they proposed provide an impressive speedup rate proportional to the register size. However, in contrast to the simple no control procedure, they require a prohibitive exponential amount of memory. In addition to their relative complexity, this latter limitation makes these new procedures implementable only for small qubit registers. It may have seemed that the use of an exponential complexity was the price to pay for this linear speed-up: ``\textit{You cannot have your cake and eat it}''. Fortunately, it turns out that this is not the case here. In this article we introduce a \emph{quasi} open-loop scheme that gives a similar gain while requiring much fewer control operations on the system ($\simeq 2$ on average for typical parameter values) and using only a linear amount of memory. This article is structured as follows. We shall first introduce briefly in section \ref{sec:measurement} the quantum trajectory formalism for the continuous measurement of a qubit and show that, at least in the case we consider, it is formally equivalent to a fully classical probabilistic model. We shall then review in section \ref{sec:rapidmeasurement} the previous approaches to rapid measurement before presenting our own model and deriving its properties in section \ref{sec:model}. We will then briefly review additional numerical results in section \ref{sec:numerics} and conclude by discussing possible improvements and extensions. The proofs that the schemes provide the claimed speed-up rates are rather cumbersome and relegated to appendices. \section{Continuous measurement of a register of (qu)bits}\label{sec:measurement} The standard way to describe the progressive measurement of a quantum system is by means of repeated interaction schemes. A quantum system is coupled briefly with an ancilla which is subsequently measured. As the interaction has entangled the two quantum systems, measuring the ancilla gives some information on the system of interest. 
The measured ancilla is then discarded and a new one is sent to interact with the system before being measured again. Iterating this procedure many times then gives a progressive measurement of the system in a basis which is fixed by the system-ancilla unitary interaction. In the limit where this procedure is repeated infinitely frequently with an infinitely small interaction time, one gets a continuous stochastic evolution for the system which is called a quantum trajectory \cite{brun2002,jacobs2006}. In what follows we will first give, without proof, the equations one gets for a continuously monitored qubit. Then we will show that, in the specific case we consider, the same equations can be derived from a much simpler classical model. This classical picture provides valuable insights and the reader unfamiliar with quantum trajectories is encouraged to take it as the starting point. Using standard continuous quantum measurement theory \cite{barchielli1986,caves1987,diosi1988,barchielli1991,wiseman1996,belavkin1992,wiseman2009}, one can show that a qubit of density matrix $\rho\in \mathds{C}^2\otimes\mathds{C}^2$ subjected to the continuous measurement of the operator $\sigma_z$ obeys the Stochastic Master Equation (SME): \begin{equation}\label{eq:qubitsme} \mathrm{d} \rho_t = 2 \gamma \mathcal{D}[\sigma_z](\rho_t) \,\mathrm{d} t + \sqrt{2\gamma} \mathcal{H}[\sigma_z](\rho_t)\,\mathrm{d} W_t \end{equation} where $\gamma$ codes for the measurement strength, $W_t$ is a Wiener process and we have used the standard notation from \cite{wiseman2009}: \begin{equation} \begin{split} \mathcal{D}[A](\rho)&=A\rho A^\dagger -\frac{1}{2}(A^\dagger A \rho + \rho A^\dagger A)\\ \mathcal{H}[A](\rho)&=A\rho + \rho A^\dagger - \mathrm{tr}\left[ (A+A^\dagger)\rho\right]\rho \end{split} \end{equation} The associated measurement signal, which is the continuous and weak equivalent of the (random) result from a Von Neumann measurement, reads: \begin{equation}\label{eq:signal} \mathrm{d} 
Y_t = 2\sqrt{2\gamma} \, \mathrm{tr} (\sigma_z \rho_t)\, \mathrm{d} t + \mathrm{d} W_t \end{equation} In the absence of a proper Hamiltonian for the qubit, it is easy to notice that the phases of the density matrix in the eigenbasis of $\sigma_z$ are exponentially suppressed and have no back-action on the diagonal coefficients. Consequently, we can consider, without loss of generality, that we start from a diagonal density matrix. In that case, eq. \eqref{eq:qubitsme} takes the simple form: \begin{equation}\label{eq:simplifiedsme} \mathrm{d} p_t = 2\sqrt{2\gamma} \,p_t (1-p_t)\, \mathrm{d} W_t \end{equation} where $p_t$ is simply the probability to be in the state $\ket{0}=\ket{+}_z$ at time $t$: $p_t = \langle0|\rho_t|0\rangle$. An interesting feature of this equation is that it is completely classical. In the absence of phases, eq. \eqref{eq:qubitsme} can be understood simply as the fancy quantum rewriting of an inherently classical model where a classical bit has a well defined value which is progressively revealed. Let us make this claim more precise by explicitly constructing the equivalent classical model. Consider a classical bit that can take two values $R=0$ and $R=1$ (or equivalently $+$ and $-$). The bit state is fixed but unknown. An observer progressively extracts information about the bit state by doing a succession of imperfect classical measurements with results $\delta_k=\pm 1$. A measurement gives some, but not all, the information on the system state, more precisely we take: \begin{equation} \begin{split} &\mathds{P}(\delta_k=1 | R=0)=\frac{1+\varepsilon}{2}\\ &\mathds{P}(\delta_k=1 | R=1)=\frac{1-\varepsilon}{2} \end{split} \label{eq:def} \end{equation} which fully specifies how the information is extracted. The parameter $\varepsilon \in \;]0,1[$ codes for the quality of the measurement and we will be interested in the very bad measurement limit $\varepsilon \rightarrow 0$. 
We write $\mathcal{F}_k=\sigma\left(\left\{ \delta_i\right\} , i\leq k\right)$ the natural filtration associated to the stochastic process of the measurement results. In other words, $\mathcal{F}_k$ corresponds to the intuitive notion of the information contained in the measurement results up to step $k$. The quantity of interest is the probability $p_k=\mathds{P}\left(R=0|\mathcal{F}_k\right)$, \ie the probability for the bit state to be zero knowing the first $k$ measurement results. A simple application of Bayes' rule gives the following update rule for $p_k$: \begin{equation}\label{eq:discrete} p_{k+1}=\frac{(1+\varepsilon\,\delta_{k+1})\,p_k}{1+2\varepsilon\,\delta_{k+1}\,(p_k-1/2)}. \end{equation} This discrete update rule becomes a set of stochastic differential equations in the appropriate weak measurement limit $t=k \mathrm{d} t$, $\varepsilon =\sqrt{\gamma\mathrm{d} t}$ and $Y_t=\sqrt{\mathrm{d} t} \sum_{i=1}^k \delta_i$ (see \textit{e.g.} \cite{spikes}): \begin{equation} \left\lbrace \begin{split} \mathrm{d} p_t &= 2\sqrt{2\gamma} \,p_t (1-p_t)\, \mathrm{d} W_t\\ \mathrm{d} Y_t &= 2\sqrt{2\gamma}\left(2p_t-1\right) \, \mathrm{d} t + \mathrm{d} W_t \end{split}\right. \end{equation} This is exactly the same set of equations as in the quantum case. From now on we will thus use the classical picture when it makes the proofs mathematically simpler or just more intuitive. The reader unfamiliar with continuous quantum measurement can also simply keep the previous classical picture in mind and stop being bothered with quantum mechanics, at least regarding the rest of this article. \begin{figure} \caption{Schematics of the continuous measurement of a register of bits with feedback control.} \label{fig:register} \end{figure} Building upon the previous construction, it is easy to describe the continuous measurement of a register of $n$ qubits (which, with the same argument as before, is equivalent to the continuous measurement of a register of classical bits). 
We assume that all the (qu)bits are measured independently by $n$ detectors, the density matrix $\rho\in \left(\mathds{C}^2\otimes\mathds{C}^2\right)^{\otimes n}$ verifies the SME: \begin{equation}\label{eq:arraysme} \mathrm{d} \rho_t = 2 \gamma\sum_{i=1}^n \mathcal{D}[\sigma_z^{(i)}](\rho_t) \,\mathrm{d} t + \sqrt{2\gamma}\sum_{i=1}^n \mathcal{H}[\sigma_z^{(i)}](\rho_t)\,\mathrm{d} W_t^{(i)} \end{equation} where: \begin{equation} \sigma_z^{(i)} = \mathds{1}\otimes \mathds{1} \otimes ... \otimes \sigma_z \otimes ... \otimes \mathds{1} \end{equation} with $\sigma_z$ in $i$-th position. The Wiener processes are uncorrelated, \ie $\mathrm{d} W_t^{(i)}\mathrm{d} W_t^{(j)}=\delta_{ij}\mathrm{d} t$. The signals $Y^{(i)}$ associated to the detectors verify the same equation as before: \begin{equation} \mathrm{d} Y_t^{(i)} = 2\sqrt{2\gamma} \, \mathrm{tr} \left(\sigma_z^{(i)} \rho_t\right)\, \mathrm{d} t + \mathrm{d} W_t^{(i)}. \end{equation} Everything can be rewritten with the help of a classical vocabulary in the same way as before. Assuming one has no prior information on the \emph{true} state $R\equiv \left(R^{(1)},...,R^{(n)}\right)$ of the classical register, all the bits can be considered independently in the sense that the total probability factorizes: \begin{equation} \mathds{P}\left[R|\sigma(\mathcal{F}^{(1)}_t,...,\mathcal{F}^{(n)}_t)\right]=\mathds{P}\left[R^{(1)}|\mathcal{F}^{(1)}_t \right]... \mathds{P}\left[R^{(n)}|\mathcal{F}^{(n)}_t\right]. \end{equation} It is only necessary to compute the evolution of the probabilities of the $n$ single bit states, or say of the $n$ marginals, to know the probability of a register configuration. Storing the $2^n$ probabilities of all the register configurations is not needed in this simple measurement setup. This helpful property will unfortunately be lost for more elaborate measurement schemes. 
To quantify the rate at which information is extracted as a function of time with the continuous measurement scheme, the (now standard) approach is to consider the log-infidelity $\ln \Delta$ where $\Delta=1-\lambda_0$ with $\lambda_0$ the largest eigenvalue of $\rho$ (or equivalently here, the probability of the most probable register configuration). In addition to its simplicity, this measure has conceptual advantages which are detailed in \cite{combes2008}. We shall not elaborate on this fact here and simply assume that the log-infidelity is a relevant measure of information extraction. Writing the density matrix in a basis where the most probable state is noted $\bar{0}=\ket{00...0}$ and using It\^o's rule, one gets after a straightforward computation: \begin{equation}\label{eq:infidelity} \mathds{E}\left[\mathrm{d} \ln\Delta_t\right] = -4\gamma \sum_{i=1}^{n} \mathrm{tr}\left[(\sigma_z^{(i)}-\mathds{1}) \rho_t\right]^2 \frac{(1-\Delta_t)^2}{\Delta_t^2}\mathrm{d} t \end{equation} In the simple case I consider here, it can be shown (see \eg \cite{combes2015}) that for large time, \ie $t\gg\gamma^{-1}$ the previous expression simplifies to: \begin{equation} \mathds{E}\left[\mathrm{d} \ln\Delta_t\right] = -16\gamma\,\mathrm{d} t \end{equation} The objective of rapid measurement schemes is to improve this convergence rate while still using the same detector resources. \section{Standard rapid measurement schemes}\label{sec:rapidmeasurement} Before going into the details of the rapid measurement schemes, we should give an intuition of why some asymptotic speed-up is expected. Consider that the previous measurement scheme has been run for a while and look at the two most probable configurations. Being able to discriminate rapidly between these two configurations is what makes a measurement scheme fast, at least in a first approximation. 
However, because the probability of a configuration can be written as a product of single bit probabilities, the two most probable configurations differ only by one bit. Consequently, only one detector actually provides relevant information while the $n-1$ other ones are essentially useless. Intuitively, one can expect that a good measurement scheme will find a way to harness the information extraction ability of the $n$ detectors at the same time. Doing so should naively provide a speed up of order $n$ (and we will see that this is what all the algorithms get). Let us now recall what is allowed for a rapid quantum measurement scheme. In contrast with rapid purification schemes \cite{jacobs2003,combes2006,wiseman2006,combes2010pur}, the control applied on the system should commute with the measured observables, \ie the control should not amount to a change of the measurement basis. The only operations that are consequently allowed are permutations of vectors in the measurement basis\cite{combes2008}. Incidentally, this means that the evolution of the density matrix with the control is still equivalent to that of a classical system because the control itself is a purely classical operation; rapid quantum measurement is inherently a classical problem. This classicality is very helpful to understand what the measurement result means after such a procedure. If we stick to the quantum, we have to say that the measurement result allows us to retrodict what the system state would have been in the absence of control. Classically, the system state is fixed, it is then subjected to a measurement procedure with operations that can easily be reversed at the end, once the result is known. Using the mathematical equivalence of the situation, we can thus say that everything happens \emph{as if} the system state were fixed but unknown at the beginning and that the optimal measurement procedure simply revealed it. 
Let us now review briefly the first proposal of Combes \textit{et al.} \cite{combes2008} for a locally optimal measurement scheme. A brief look at eq. \eqref{eq:infidelity} shows that the locally optimal case is obtained for a permutation of the initial basis that maximises the quantity: \begin{equation} \sum_{i=1}^{n}\mathrm{tr}\left[(\sigma_z^{(i)}-\mathds{1}) \rho_t^{LO}\right]^2 \end{equation} where $\rho_t^{LO}$ is the optimally permuted density matrix. The asymptotic speed-up is then defined as: \begin{equation} S_{LO}=\lim_{t\rightarrow+\infty}\mathds{E}\left[\frac{(1-\Delta_t)^2}{4\Delta_t^2}\sum_{i=1}^{n} \mathrm{tr}\left[(\sigma_z^{(i)}-\mathds{1}) \rho_t^{LO}\right]^2\right]. \end{equation} which is the asymptotic ratio of the convergence rates for the locally optimal case and the no feedback case. Following \cite{combes2008}, the key concept is the Hamming distance \cite{hamming1950}. The Hamming distance between two states counts the number of bit differences between them. The idea is to do a permutation of the pointer basis which puts the next-to-most probable states as far as possible (with respect to the Hamming distance) from the most probable one. This will maximise $\sum_{i=1}^{n}\mathrm{tr}\left[(\sigma_z^{(i)}-\mathds{1}) \rho_t^{LO}\right]^2$. More precisely, one first needs to order all the states but the most probable one by decreasing order of probability in a first list, then to order the states in decreasing order of Hamming distance with respect to the most probable one in a second list. The control then consists in mapping the states of the first list to the states of the second list while keeping the most probable state unchanged. Intuitively, one expects this scheme to provide a speed up of order $n$ because the probable states can be discriminated from the most probable one with approximately $n$ detectors at the same time. 
And indeed, in \cite{combes2008} the authors manage to prove that for large $n$: \begin{equation}\label{eq:LO} \frac{n}{4} \leq S_{LO} \leq n \end{equation} The previous locally optimal scheme requires a real-time feedback loop which may be difficult to implement in practice. It would be more convenient to have a predefined strategy implementable in open-loop. In \cite{combes2015} the authors provide such a scheme. The idea is simply to do rapid random permutations of the pointer basis. One expects that, on average, the states will be at a Hamming distance of order $n/2$ from each other yielding the same kind of speed-up as before but for a different pre-factor. And indeed, in \cite{combes2015} the authors prove that the speed-up $S_{RP}$ for the open-loop random permutation scheme verifies for large $n$: \begin{equation} \frac{n}{4} \leq S_{RP} \leq \frac{n}{2} \end{equation} Actually, it is possible to prove that the upper bound is reached exactly, \ie that: \begin{equation} S_{RP} = \frac{n}{2}\,\frac{2^n}{2^n-1} \underset{n\rightarrow+\infty}{\sim} \frac{n}{2}, \end{equation} see appendix \ref{appendix:RP}. This result will allow us to compare this scheme with our new measurement procedure more precisely. These two schemes are certainly appealing but they suffer from an important limitation which makes them essentially impossible to implement on future large registers. Setting aside the astonishingly high number of permutations $N_P=\left(2^n\right)!$ that are needed in the open-loop case (because a smaller number, say only exponential, might give similar speed-ups), the main obstacle is that the two schemes require an exponential amount of memory to store the $2^n$ diagonal coefficients of the full density matrix, \ie the probabilities of all the register configurations. 
Indeed because of the successive permutations of the pointer basis, it is no longer true that the probability of a configuration can be reconstructed from a product of the $n$ marginals, a lot of information is stored in the bit correlations. Additionally, the two schemes require that the operator do a huge number of permutations on the system, something which may be difficult to implement in practice. \section{\emph{``Guess and Check''} Algorithm}\label{sec:model} \subsection{Description} Naively, a good way to build a procedure more frugal in memory would be to reduce the number of different bases used in the open-loop scheme hoping that the states still stay far away from one another on average with respect to the Hamming distance. It turns out that requiring that every state is far away from every other one on average is a very demanding requirement which is only needed for a \emph{truly} open-loop control scheme. Slightly relaxing the open-loop condition, it is possible to construct a quasi open-loop, or as we will call it ``\emph{guess and check}'' (GC) algorithm which is fast and memory efficient. The idea goes as follows. Imagine one knows a good candidate for the register state after running the standard measurement scheme for a while. Then two ordered pointer bases are enough to keep the candidate at an average Hamming distance of $n/2$ from every other state. The solution is simply to take an ordered basis $\mathscr{B}$ and then exchange the candidate and its bitwise opposite to get a new basis $\tilde{\mathscr{B}}$. If a given state is close to the candidate $c$ in $\mathscr{B}$ then it will be far from $c$ in $\tilde{\mathscr{B}}$ and \textit{vice versa}. Measuring successively in $\mathscr{B}$ and $\tilde{\mathscr{B}}$ should yield a convergence speed-up of $n/2$ provided the candidate initially chosen was correct. 
If this is not the case and the most probable state changes during the measurement process, then a crude yet practical solution is simply to start the whole process again and discard all the information acquired before. The key thing to notice is that the time spent in the guessing process and in eliminating wrong guesses is finite on average and has accordingly no impact on the asymptotic speed-up. This will be discussed in more detail later, but let us start by presenting the algorithm precisely this time: \vskip0.5cm \textbf{\textit{Guess and Check} measurement protocol} \begin{enumerate} \item Run the standard measurement protocol until a register configuration, later called the \emph{candidate}, reaches a probability superior to a predefined threshold $p_0$ (\eg $p_0=1/2$). \item Implement the permutation mapping the initial ordered pointer basis $\mathscr{B}$ to the new ordered pointer basis $\tilde{\mathscr{B}}$ where the candidate and its bitwise opposite are exchanged. \item Measure in the two bases by applying the permutation and its inverse successively until the target infidelity is reached or until the probability of the candidate becomes negligibly small (say inferior to $p_0^{'} \ll p_0$). \item In the latter case, start the whole protocol again from the first step and discard all the information previously acquired. \end{enumerate} \begin{figure*} \caption{Guess and Check algorithm.} \label{fig} \end{figure*} At this point, a few comments are in order. The value of the thresholds, though important for the short term behavior of the protocol, has no impact on the asymptotic behavior of the infidelity. The frequency at which one should switch measurement basis is voluntarily left open for the simple reason that it does not matter\footnote{Actually the same answer can be given to the program outlined by Combes \textit{et al.} in the conclusion of \cite{combes2015}: ``\textit{Finally and perhaps most importantly [...] 
future work should include imperfections [...] such as a finite number of permutations in a fixed time interval.}''. As long as all the permutations are sampled in the end, the frequency simply does not matter!}! Indeed, as it is clear from the classical picture, the measurements can be done in any order without changing the statistics (equivalently, one can assume that all the measurements have already been done in the two bases, that the system state is fixed, and that one only progressively reveals the measurement results). The only thing that is needed is that the same amount of time is spent in every basis and that the most probable state is computed from time to time to check if it has changed. Step 4 of the protocol is obviously highly suboptimal and could be improved greatly in the future. However, as it is written now, it has the great advantage of making the whole algorithm very easy to analyse rigorously. The protocol is \emph{quasi} open loop in the sense that only a small number of actions (finite on average) has to be done by the controller which makes it much easier to implement than a real-time feedback loop. Moreover, the feedback part consists in a simple unitary operation on a 2-level system consisting of the candidate and its bitwise opposite. The protocol is thus simple and robust, the only thing that remains is to show that it indeed provides a speedup of order $n$ and that it only requires a memory of size $\mathcal{O}(n)$. In what follows and for notational simplicity, we will assume that the candidate is labelled in the same way in $\mathscr{B}$ and $\tilde{\mathscr{B}}$ \ie we will use a \emph{notation} where all the bits are flipped in $\tilde{\mathscr{B}}$. Alternatively, one can consider that $\tilde{\mathscr{B}}$ is obtained from $\mathscr{B}$ via a full bit flip of all the states but the candidate and its bitwise opposite which is strictly equivalent. 
\subsection{Speedup of order $n$} Provided the candidate turns out to stay the most probable state during the whole process, it can be shown (see appendix \ref{appendix:GC}) that the infidelity decreases at a rate $n/2$ times larger than with the standard measurement scheme. More precisely, it can be shown that for large time: \begin{equation} \mathds{E}[-\ln \Delta_t]\underset{t\rightarrow+\infty}{\sim} \frac{n}{2} \times 16 \gamma t. \end{equation} However, this does not straightforwardly give the speedup rate as the candidate might just turn out not to stay the most probable state forever if the initial guess was wrong. Two additional contributions need to be taken into account. First, some time $\tau_0$ (finite on average) is needed to find a first candidate with probability superior to the predefined threshold. Second, some time $\tilde{\tau}_1$ may be needed to realize that this was not the good candidate. In such a situation, which happens with probability $(1-p_0)/(1-p_0^{'})\simeq (1-p_0) $, we then have to start over and wait for a time $\tau_1$ before finding a new good candidate, which may turn out to be wrong after a time $\tilde{\tau}_2$, and so on. As a result the average time $T$ spent out of the fast converging phase of the algorithm is: \begin{equation} T\simeq\mathds{E}[\tau_0] + \sum_{i=1}^{+\infty} (1-p_0)^i \left(\mathds{E}[\tilde{\tau}_i+\tau_i]\right) \end{equation} Because we start over every time the candidate turns out to be incorrect, $\tau_i$ and $\tilde{\tau}_i$ are random variables with a law independent of $i$. Consequently, the latter equation reduces to: \begin{equation} T\simeq\mathds{E}[\tau] +\frac{1-p_0}{p_0}\left(\mathds{E}[\tau] + \mathds{E}[\tilde{\tau}]\right) \end{equation} which is finite. As a result, the amount of time wasted trying to find the candidate and eliminating incorrect ones does not matter for the asymptotic properties of the log-infidelity. 
Finally we have, for the \textit{guess and check} algorithm and $n\geq 2$: \begin{equation} S_{GC} = \frac{n}{2} \end{equation} This means that the \textit{guess and check} procedure offers an asymptotic speedup equal to that of the true open-loop scheme for large $n$. \subsection{Computation with $\mathcal{O}(n)$ memory} To prove that we do not need to store the full density matrix, it is easier to use the mathematically equivalent classical picture. In what follows, we will decompose the information coming from the $2n$ measurement records ($n$ measurement apparatus in two distinct basis). For that matter, it is convenient to introduce the notations: \begin{equation} \begin{split} \mathcal{F}_t^k&=\sigma\left(\left\{ Y^{(k)}_u\right\} , u\leq t\right)\\ \tilde{\mathcal{F}}_t^k&=\sigma\left(\left\{ \tilde{Y}^{(k)}_u\right\} , u\leq t\right)\\ \mathcal{G}_t&=\sigma\left(\left\{Y^{(k)}_u, \tilde{Y}^{(k)}_u\right\} , u\leq t,k=1..n\right) \end{split} \end{equation} where $Y^{(k)}$ is the signal from detector $k$ in $\mathscr{B}$, where $\tilde{Y}^{(k)}$ is the signal from detector $k$ in $\tilde{\mathscr{B}}$ and where $\mathcal{G}_t$ thus contains all the information available up to time $t$. Marginals with respect to the filtrations $\mathcal{F}_t^k$ and $\tilde{\mathcal{F}}_t^k$ can be computed in real time using only one signal and independently of the rest via eq. \eqref{eq:simplifiedsme}. The objective is now to express probabilities with respect to the full filtration as a function of these easily computable marginals. Elementary applications of Bayes' rule give: \begin{equation} \mathds{P}[R=s|\mathcal{G}_t]=\frac{1}{Z_t}\prod_{k=1}^{n}\frac{\mathds{P}[R=s|\mathcal{F}^{(k)}_t]\mathds{P}[R=s|\tilde{\mathcal{F}}^{(k)}_t]}{\mathds{P}[R|\mathcal{G}_0]^2} \end{equation} where $Z_t$ is the normalization. 
Assuming equal probability at initial time for simplicity we get: \begin{equation} \mathds{P}[s|\mathcal{G}_t]\!=\!\frac{1}{\mathcal{Z}_t}\prod_{k=1}^{n}\mathds{P}\!\left[R^{(k)}=s^{(k)}|\mathcal{F}^{(k)}_t\right]\mathds{P}\!\left[\tilde{R}^{(k)}=\tilde{s}^{(k)}|\tilde{\mathcal{F}}^{(k)}_t\right] \end{equation} where, again, $R^{(k)}$ (resp. $\tilde{R}^{(k)}$) is the value of the $k$-th bit of $R$ in $\mathscr{B}$ (resp. in $\tilde{\mathscr{B}}$). The difficulty is now that the normalisation factor $\mathcal{Z}_t$ contains an exponential number of terms so that it would seem that we still need an exponential number of operations. However, because of the simple permutation between the two basis, the normalization factor $\mathcal{Z}_t$ can be computed exactly: \begin{equation}\label{eq:partitionfunction} \begin{split} \mathcal{Z}_t=&\sum_{s\in\mathscr{S}} \prod_{k=1}^{n}\mathds{P}\left[R^{(k)}=s^{(k)}|\mathcal{F}^{(k)}_t\right]\mathds{P}\left[\tilde{R}^{(k)}=\tilde{s}^{(k)}|\tilde{\mathcal{F}}^{(k)}_t\right]\\ =&\sum_{s\in\mathscr{S}} \prod_{k=1}^{n}\mathds{P}\!\!\left[R^{(k)}=s^{(k)}|\mathcal{F}^{(k)}_t\right]\!\mathds{P}\!\!\left[\tilde{R}^{(k)}=1-s^{(k)}|\tilde{\mathcal{F}}^{(k)}_t\right]\\ &+\prod_{k=1}^{n} \mathds{P}\left[R^{(k)}=0|\mathcal{F}^{(k)}_t\right]\mathds{P}\left[\tilde{R}^{(k)}=0|\tilde{\mathcal{F}}^{(k)}_t\right]\\ &+\prod_{k=1}^{n} \mathds{P}\left[R^{(k)}=1|\mathcal{F}^{(k)}_t\right]\mathds{P}\left[\tilde{R}^{(k)}=1|\tilde{\mathcal{F}}^{(k)}_t\right]\\ &-\prod_{k=1}^{n} \mathds{P}\left[R^{(k)}=0|\mathcal{F}^{(k)}_t\right]\mathds{P}\left[\tilde{R}^{(k)}=1|\tilde{\mathcal{F}}^{(k)}_t\right]\\ &-\prod_{k=1}^{n} \mathds{P}\left[R^{(k)}=1|\mathcal{F}^{(k)}_t\right]\mathds{P}\left[\tilde{R}^{(k)}=0|\tilde{\mathcal{F}}^{(k)}_t\right], \end{split} \end{equation} where we have used the fact that for all the states $s$ but two (the candidate $\bar{0}$ and its bitwise opposite $\bar{1}=\ket{11...1}$), $\tilde{s}^{(k)}=1-s^{(k)}$. 
To simplify the expressions we introduce the compact notations for the marginals in the two bases knowing only the information from one series of measurements: $p^{(k)}_t=\mathds{P}[R^{(k)}=0|\mathcal{F}_t^{(k)}]$ and $\tilde{p}^{(k)}_t=\mathds{P}[\tilde{R}^{(k)}=0|\tilde{\mathcal{F}}_t^{(k)}]$ (which, it should be emphasized, are not the \emph{true} marginals, \ie the marginals conditioned on all the available information). These marginals can be computed independently and easily in real time from the measurement records $Y_t^{(k)}$ and $\tilde{Y}_t^{(k)}$ using eq. \eqref{eq:signal} and \eqref{eq:simplifiedsme} (or in the discrete case eq. \eqref{eq:discrete}). The sum in eq. \eqref{eq:partitionfunction} can be done separately on each term of the product which gives: \begin{equation} \begin{split} \mathcal{Z}_t=&\prod_{k=1}^{n} \left[p_t^{(k)} (1-\tilde{p}_t^{(k)}) +(1- p_t^{(k)}) \tilde{p}_t^{(k)}\right]\\ &+ \prod_{k=1}^{n} (2p_t^{(k)}-1)(2\tilde{p}_t^{(k)}-1) \end{split} \end{equation} where the first term comes from the sum over all states and the second comes from the $4$ correction terms of eq. \eqref{eq:partitionfunction}. This means that the normalization factor can be computed knowing only the $2n$ marginals in the two bases and doing \emph{only} a number $\mathcal{O}(n)$ of elementary operations (additions and multiplications) on them. Eventually, the probability of any state can be computed from a linear number of operations on the marginals. For example, we can compute the probability of the candidate $\lambda_0$: \begin{equation} \lambda_0=\frac{\prod_{k=1}^{n} p_t^{(k)} \tilde{p}_t^{(k)} }{\mathcal{Z}_t\left(\{p_t,\tilde{p}_t\}\right)} \end{equation} which allows for an on-demand exact and rapid computation of the log-infidelity knowing only the $2n$ independently computable bit probabilities. \begin{figure*} \caption{On the left, speed-up provided by the GC algorithm for finite infidelity target $\Delta$ for various values of the number of qubits $n$. 
On the right, percentage of the asymptotic speed-up reached for finite infidelity target. For large values of $n$, the asymptotic speed-up is only approached for extremely small values of $\Delta$. The computations are done with thresholds $p_0=0.5$ and $p'_0=0.001$.} \label{fig:numerics} \end{figure*} \section{Numerics}\label{sec:numerics} Computing the speed-up rate numerically is useful for two reasons. First, the results previously derived are asymptotic and the speed-up could very well be much smaller for a reasonable non-zero infidelity target. Second and most importantly, computing the speed-up rate numerically for large values of $n$ is the best way to make sure that the protocol does not require an exponential amount of memory and is indeed easily implementable. The numerical computations can be easily carried out with the help of the discrete equation \eqref{eq:discrete} for $\varepsilon \ll 1$. Starting from a fully unknown register state, the time to reach a given infidelity target is computed for the standard no control scheme and for the GC procedure from which the non-asymptotic speed-up rate is computed. The results for various register sizes are shown in Fig. \ref{fig:numerics}. Unsurprisingly, for large registers, the asymptotic speed-up is only reached for absurdly small infidelity targets. This is because most of the time is spent in the ``guess'' phase trying to find a candidate. Optimizing over the thresholds $p_0$ and $p_0^{'}$ would probably slightly tame this noxious waste of time. Further, the suboptimal step 4 of the procedure does lead to a substantial slow down for reasonable infidelity targets and a less naive procedure might be able to make the non-asymptotic part of the algorithm less costly. Alternatively, one could imagine a multi-stage algorithm where a global candidate is found by applying the GC procedure on a series of subregisters, \ie where the ``guess'' phase itself is sped up using the GC algorithm. 
In any case, these numerical simulations show that the asymptotic speed-up should not be the only metric used to assess the efficiency of rapid measurement schemes in the future as the asymptotic regime may be irrelevant in realistic setups. Note that the previous schemes of Combes \textit{et al.} \cite{combes2008,combes2015}, which could only be probed numerically for small registers, also showed lower performance for finite infidelity targets. All these reservations aside, the GC algorithm does provide a large speed-up in absolute value for all register sizes and reasonably small infidelity targets. As a result, and even without the previously mentioned potential improvements, the GC algorithm can be applied profitably to the rapid measurement of qubit registers. \section{Discussion} \label{sec:discussion} \begin{figure} \caption{Exact asymptotic speed-up for the Random Permutation scheme (RP) and the Guess and Check (GC) scheme computed in this article. The two lines ``sup LO'' and ``inf LO'' show the analytic upper and lower bounds known for the Locally Optimal measurement scheme of \cite{combes2015}.} \label{fig:comparison} \end{figure} We have proposed a new and simple protocol (GC) aimed at increasing the measurement rate of qubit registers and derived the exact asymptotic speed-up it provides. Its asymptotic speed-up rate, compared to that of the earlier schemes of \cite{combes2008} and \cite{combes2015}, is displayed in Fig. \ref{fig:comparison}. The main comparative advantage of the procedure does not reside in its performance increase but in its practical and computational simplicity. Indeed, in terms of control, the GC algorithm only requires a finite number (on average $2$ for thresholds $p_0=1/2$ and $p_0^{'}\ll 1$) of simple permutations on a subspace of dimension $2$.
Even if the scheme is not, strictly speaking, open-loop, the fact that the control operations can be done with a delay without performance loss makes it much less demanding than a true real-time feedback control scheme. Finally and most importantly, the GC algorithm allows one to encode the probabilities of all the register configurations in $2n$ real numbers in contrast with the prohibitive exponential number required by other protocols. This last feature makes the GC algorithm, or other algorithms built upon similar ideas, particularly suitable for the rapid measurement of future quantum computer registers where an exponential memory scaling will simply be prohibitive. Although this was not strictly necessary for our derivation, we have used a classical probabilistic picture throughout the paper. As the rapid measurement problem is essentially a classical problem, recasting everything in an equivalent classical language provides a simpler and hopefully more pedagogical introduction to the subject. Additionally, it helps give intuitive and straightforward answers to otherwise nontrivial questions, such as the sensitivity to control imperfections or the meaning of the result obtained at the end of the protocol. Since the core of the procedure is classical, one may wonder if quantum mechanics nevertheless plays a useful role in its implementation. The answer is positive: the GC scheme requires a permutation of two configurations, something which is highly non-intuitive and with a problematic implementation in classical mechanics but which may be carried out with a simple Hamiltonian in quantum mechanics. Consequently, even if the intuition backing the GC protocol is mostly classical, it is likely that it is only implementable on a genuinely quantum system in practice. In the future, ideas similar to the one developed in this article could be applied to the rapid measurement of more general quantum systems.
Even in the restricted context of qubit registers, the numerical simulations have shown that improving the short time behavior of the algorithm could greatly improve its performance in practice. For that matter, analytic studies of the finite time behavior of the log infidelity could certainly be illuminating. \begin{acknowledgments} I thank Denis Bernard and Michel Bauer for useful discussions. This work was supported in part by the ANR contract ANR-14-CE25-0003-01. \end{acknowledgments} \appendix \section{Exact speed-up for open-loop control}\label{appendix:RP} The exact speed-up can actually be computed using an exact solution of the stochastic differential equation and a bit of combinatorics. The method is very similar to the one used by Combes \textit{et al.} in \cite{combes2015} to compute the convergence rate of the infidelity in the no feedback case. We start by assuming that the random permutations are carried out very quickly so that the whole permutation group is sampled in any infinitesimal time interval. Notice again that this is just needed for simplicity; the order in which the measurements are done does not matter, so we can reorganise them a posteriori to fulfill the previous condition. In this setting the register density matrix verifies: \begin{equation}\label{eq:permutations} \mathrm{d} \rho_t =\left[\frac{2\gamma}{(2^n)!}\right]^{1/2} \sum_{\tau\in\mathfrak{S}(2^n)}\sum_{k=1}^{2^n} \mathcal{H}[\sigma^{k,\tau}_z](\rho_t) \mathrm{d} W_t^{(k,\tau)} \end{equation} where $\mathfrak{S}(2^n)$ is the permutation group on the set $\mathscr{S}$ of the $2^n$ configurations, $\sigma_z^{k,\tau}=U_\tau^{-1}\sigma^{k}_z U_\tau$ where $U_\tau$ is the unitary operator implementing the permutation $\tau$ and the $W_t^{(k,\tau)}$ are independent Wiener processes \ie $\mathrm{d} W_t^{(k,\tau)}\mathrm{d} W_t^{(k',\tau')}=\delta_{k,k'}\delta_{\tau,\tau'}\mathrm{d} t$.
Equation \eqref{eq:permutations} is invariant under the change $\sigma^{k,\tau}_z\rightarrow\sigma^{k,\tau}_z+\mathds{1}$ which allows us to work with projectors on spaces of dimension $2^{n-1}$. There are \emph{only} $\binom{2^n}{\,2^{n-1}\,}$ such projectors, which allows the following factorisation: \begin{equation} \mathrm{d} \rho_t = 2 \sqrt{2\gamma\,\mathcal{N}} \sum_{\mathcal{P}\subset\mathscr{S}, |\mathcal{P}|=2^{n-1}} \mathcal{H}[P^\mathcal{P}](\rho_t) \mathrm{d} W_t^{\mathcal{P}} \end{equation} with \begin{equation} \mathcal{N}=\frac{n}{\binom{2^n}{\,2^{n-1}\,}} \end{equation} and $P^{\mathcal{P}}$ denotes the projector on the subset $\mathcal{P}$ of the set of possible register configurations $\mathscr{S}$. The new Wiener processes are obtained as a normalized sum of the previous independent Wiener processes and are thus also independent Wiener processes. For pure mathematical convenience we can associate a corresponding set of signals which will allow us to work with what is often called linear quantum trajectories \cite{wiseman2009} (the knowledge of which is not needed here): \begin{equation}\label{eq:signalbis} \mathrm{d} Y^{\mathcal{P}}_t=4\sqrt{2\gamma\,\mathcal{N}}\,\mathrm{tr}\left[P^\mathcal{P} \rho_t\right] \mathrm{d} t + \mathrm{d} W^\mathcal{P}_t. \end{equation} We introduce an auxiliary density matrix $\tilde{\rho}$ verifying the linear SDE: \begin{equation} \mathrm{d}\tilde{\rho}_t = 4\sqrt{2\gamma\,\mathcal{N}} \sum_\mathcal{P} P^{\mathcal{P}}\tilde{\rho}_t\, \mathrm{d} Y^\mathcal{P}_t. \end{equation} One can verify, using the It\^o formula, that the \emph{true} density matrix $\rho$ can be recovered from $\tilde{\rho}$ through a simple normalisation.
The previous equation can easily be expanded and gives in components: \begin{equation} \mathrm{d} \tilde{\lambda}_s = 4\sqrt{2\gamma\,\mathcal{N}} \,\tilde{\lambda}_s \sum_{\mathcal{P}, s\in \mathcal{P}} \mathrm{d} Y^\mathcal{P}_t, \end{equation} where $\tilde{\lambda}_s$ is the eigenvalue of $\tilde{\rho}$ associated to the state (or configuration) $s$ and which, once normalized, will give its probability. This equation can be solved exactly as a function of the $Y^\mathcal{P}_t$'s: \begin{equation} \tilde{\lambda}_s=\exp\left(4\sqrt{2\gamma\,\mathcal{N}} \,\sum_{\mathcal{P}, s\in \mathcal{P}} Y^\mathcal{P}_t - 8\gamma\,\mathcal{N}\binom{2^n}{\,2^{n-1}\,}\,t\right). \end{equation} Finally, we can express the normalized probability for the state $s$ of maximum probability: \begin{equation} \lambda_0=\frac{\exp\left(4\sqrt{2\gamma\,\mathcal{N}} \,\sum_{\mathcal{P}, 0\in \mathcal{P}} Y^\mathcal{P}_t\right)}{\sum_s \exp\left(4\sqrt{2\gamma\,\mathcal{N}} \,\sum_{\mathcal{P}, s\in \mathcal{P}} Y^\mathcal{P}_t\right)} \end{equation} Up to now, everything is exact and some approximations are now needed to work out the large time limit. In this limit $\lambda_0$ is close to one and all the other probabilities are much smaller and decrease exponentially (on average) as a function of time. Using eq.
\eqref{eq:signalbis} we thus get: \begin{equation} \begin{split} Y_t^\mathcal{P}&\underset{t\to+\infty}{\sim} 4\sqrt{2\gamma\,\mathcal{N}}\, t \; \text{if} \;0 \in \mathcal{P}\\ Y_t^\mathcal{Q}&=\underset{t\to+\infty}{o}(t) \; \text{if} \;0 \notin \mathcal{Q} \end{split} \end{equation} Now, one only needs to notice that for the sum over subsets containing $0$ there are $\binom{2^n-1}{2^{n-1}-1}=\binom{2^n}{2^{n-1}}/2$ non-negligible terms whereas for the sum over subsets containing $s\neq 0$, there are only $\binom{2^n-2}{2^{n-1}-2}=\frac{1}{4}\frac{2^n-2}{2^n-1}\binom{2^n}{\,2^{n-1}\,}$ non-negligible terms: \begin{equation} \begin{split} \sum_{\mathcal{P}, 0\in \mathcal{P}} Y^\mathcal{P}_t& \underset{t\to+\infty}{\sim}\frac{1}{2}\binom{2^n}{\,2^{n-1}\,} \times 4\sqrt{2\gamma\,\mathcal{N}}\, t \\ \sum_{\mathcal{P}, s\neq0 \in \mathcal{P}} Y^\mathcal{P}_t& \underset{t\to+\infty}{\sim}\frac{1}{4}\frac{2^n-2}{2^n-1}\binom{2^n}{\,2^{n-1}\,} \times 4\sqrt{2\gamma\,\mathcal{N}}\, t \end{split} \end{equation} which gives: \begin{equation} \begin{split} \ln (1-\lambda_0)&\underset{t\to+\infty}{\sim}-\frac{1}{4}\frac{2^n}{2^n-1} \binom{2^n}{\,2^{n-1}\,} \left(4\sqrt{2\gamma\,\mathcal{N}}\right)^2 t\\ &\underset{t\to+\infty}{\sim} \frac{n}{2}\frac{2^n}{2^n-1} \times - 16\, \gamma\,t \end{split} \end{equation} so that the exact speed-up rate reads: \begin{equation} S_{RP} = \frac{n}{2}\,\frac{2^n}{2^n-1} \underset{n\to+\infty}{\sim} \frac{n}{2} \end{equation} which is what we had claimed. This exact result coincides with the upper bound proposed in \cite{combes2015} and seems consistent with its numerical results. \section{Speed-up rate for \emph{Guess and Check} control}\label{appendix:GC} In this section, we compute the speed-up rate for the ``Check'' part of the algorithm, assuming the candidate picked at the beginning turns out to be correct, \ie that it stays the most probable state during the whole process.
Without loss of generality, we then write the candidate as $\bar{0}$ (a notation valid in the two bases). We also assume for convenience that the measurements are made in the two bases simultaneously, which, as we argued before, does not change anything in the statistics as long as the same amount of time is spent in each basis. The system density matrix verifies the following SDE: \begin{equation}\label{eq:gc} \begin{split} \mathrm{d} \rho_t =\;&\sqrt{\gamma} \sum_{k=1}^n \mathcal{H}[\sigma_z^{(k)}]\left(\rho_t\right)\,\mathrm{d} W_t^{(k)}\\ +&\sqrt{\gamma}\sum_{k'=1}^n \mathcal{H}[\sigma_z^{(k')}]\left(\tilde{\rho}_t\right)\,\mathrm{d} \tilde{W}_t^{(k')} \end{split} \end{equation} where $\tilde{\rho}_t$ is the matrix of $\rho_t$ in $\tilde{\mathscr{B}}$ and $W_t^{(k)}$, $\tilde{W}_t^{(k)}$ are independent Wiener processes. As in the previous case, we make the transformation $\sigma_z^{(k)}\rightarrow\sigma_z^{(k)}+\mathds{1}\equiv 2P^{(k)}$ which leaves the previous SDE invariant. The associated signals $Y^{(i)}$ and $\tilde{Y}^{(i)}$ verify: \begin{equation}\label{eq:signals} \begin{split} \mathrm{d} Y^{(i)}_t &= 4\sqrt{\gamma} \, \mathrm{tr} \left[P^{(i)} \rho_t\right]\, \mathrm{d} t + \mathrm{d} W^{(i)}_t\\ \mathrm{d} \tilde{Y}^{(i)}_t &= 4\sqrt{\gamma} \, \mathrm{tr} \left[P^{(i)} \tilde{\rho}_t\right]\, \mathrm{d} t + \mathrm{d} \tilde{W}^{(i)}_t \end{split} \end{equation} As in the previous appendix, we can solve eq.
\eqref{eq:gc} explicitly (as a function of the signal) using a linearised version of the SDE: \begin{equation} \mathrm{d} \bar{\rho}_t=4\sqrt{\gamma} \sum_{k=1}^n\left[ P^{(k)}\bar{\rho}_t \mathrm{d} Y^{(k)}_t + \tilde{P}^{(k)}\bar{\rho}_t \mathrm{d} \tilde{Y}^{(k)}_t\right] \end{equation} which is solved in components: \begin{equation}\label{eq:nonnormalised} \begin{split} \bar{\lambda}_s=&\exp \left(4\sqrt{\gamma} \sum_{k=1}^n \left[(1-s^{(k)})Y^{(k)} + (1-\tilde{s}^{(k)}) \tilde{Y}^{(k)}\right]\right)\\ &\times \exp\left(-8\gamma \sum_{k=1}^n \left[(1-s^{(k)}) + (1-\tilde{s}^{(k)})\right]\, t\right). \end{split} \end{equation} At large time, when most of the probability is concentrated on the state $s=\bar{0}$, eq. \eqref{eq:signals} gives: \begin{equation} \begin{split} Y^{(k)}_t&\underset{t\to+\infty}{\sim} 4\sqrt{\gamma} t\\ \tilde{Y}^{(k)}_t&\underset{t\to+\infty}{\sim} 4\sqrt{\gamma} t \end{split} \end{equation} From eq. \eqref{eq:nonnormalised} it is then easy to see that the non-normalised eigenvalues have three possible behaviors: \begin{equation} \begin{split} \bar{\lambda}_{\bar{0}}&=\exp\left[16n\gamma t + o(t)\right]\\ \bar{\lambda}_{\bar{1}}&=\exp\left[o(t)\right]\\ \bar{\lambda}_s&=\exp\left[8n\gamma t + o(t)\right] \;\;\;\text{for}\;\;s\neq\bar{0}, \bar{1} \end{split} \end{equation} Recalling that \begin{equation} \lambda_{\bar{0}}=\frac{\bar{\lambda}_{\bar{0}}}{\sum_{s\in\mathscr{S}} \bar{\lambda}_s}, \end{equation} we finally get, for $n\neq1$: \begin{equation} \ln(\Delta_t)=\ln (1-\lambda_{\bar{0}})\underset{t\to+\infty}{\sim} - 16\, \gamma\,t \times \frac{n}{2} \end{equation} \end{document}